"""
A Geosoft View (:class:`geosoft.gxpy.view.View` or :class:`geosoft.gxpy.view.View_3d`) contains graphical elements as
`Group` instances. Groups are named and are available to a user in a Geosoft viewer, which allows groups to
be turned on or off, have their transparency modified, or be deleted.
2D views can only accept 2D groups, while a 3D view can accept both 2D and 3D groups. When a 2D group is placed
in a 3D view, the group is placed on the active plane inside the 3D view.
:Classes:
:`Group`: base class for named rendering groups in 2D and 3D views.
:`Draw`: 2D drawing group, handles 2D drawing to a view or plane in a 3D view
:`Draw_3d`: 3D drawing group for 3D objects placed in a 3D view
:`Color_symbols_group`: group for 2D symbols rendered based on data values
:`Aggregate_group`: group that contains a :class:`geosoft.gxpy.agg.Aggregate_image` instance
:`Color`: colour definition
:`Color_map`: maps values to colors
:`Pen`: pen definition, includes line colour, thickness and pattern, and fill.
:`Text_def`: defined text characteristics
:`VoxDisplayGroup`: a `geosoft.gxpy.vox_display.VoxDisplay` in a `geosoft.gxpy.view.View_3d`
:Constants:
:GROUP_NAME_SIZE: `geosoft.gxpy.view.VIEW_NAME_SIZE`
:NEW: `geosoft.gxapi.MVIEW_GROUP_NEW`
:APPEND: `geosoft.gxapi.MVIEW_GROUP_APPEND`
:READ_ONLY: max(NEW, APPEND) + 1
:REPLACE: READ_ONLY + 1
:SMOOTH_NONE: `geosoft.gxapi.MVIEW_SMOOTH_NEAREST`
:SMOOTH_CUBIC: `geosoft.gxapi.MVIEW_SMOOTH_CUBIC`
:SMOOTH_AKIMA: `geosoft.gxapi.MVIEW_SMOOTH_AKIMA`
:TILE_RECTANGULAR: `geosoft.gxapi.MVIEW_TILE_RECTANGULAR`
:TILE_DIAGONAL: `geosoft.gxapi.MVIEW_TILE_DIAGONAL`
:TILE_TRIANGULAR: `geosoft.gxapi.MVIEW_TILE_TRIANGULAR`
:TILE_RANDOM: `geosoft.gxapi.MVIEW_TILE_RANDOM`
:UNIT_VIEW: 0
:UNIT_MAP: 2
:UNIT_VIEW_UNWARPED: 3
:GRATICULE_DOT: 0
:GRATICULE_LINE: 1
:GRATICULE_CROSS: 2
:LINE_STYLE_SOLID: 1
:LINE_STYLE_LONG: 2
:LINE_STYLE_DOTTED: 3
:LINE_STYLE_SHORT: 4
:LINE_STYLE_LONG_SHORT_LONG: 5
:LINE_STYLE_LONG_DOT_LONG: 6
:SYMBOL_NONE: 0
:SYMBOL_DOT: 1
:SYMBOL_PLUS: 2
:SYMBOL_X: 3
:SYMBOL_BOX: 4
:SYMBOL_TRIANGLE: 5
:SYMBOL_INVERTED_TRIANGLE: 6
:SYMBOL_HEXAGON: 7
:SYMBOL_SMALL_BOX: 8
:SYMBOL_SMALL_DIAMOND: 9
:SYMBOL_CIRCLE: 20
:SYMBOL_3D_SPHERE: 0
:SYMBOL_3D_CUBE: 1
:SYMBOL_3D_CYLINDER: 2
:SYMBOL_3D_CONE: 3
:FONT_WEIGHT_ULTRALIGHT: 1
:FONT_WEIGHT_LIGHT: 2
:FONT_WEIGHT_MEDIUM: 3
:FONT_WEIGHT_BOLD: 4
:FONT_WEIGHT_XBOLD: 5
:FONT_WEIGHT_XXBOLD: 6
:CMODEL_RGB: 0
:CMODEL_CMY: 1
:CMODEL_HSV: 2
:C_BLACK: 67108863
:C_RED: 33554687
:C_GREEN: 33619712
:C_BLUE: 50266112
:C_CYAN: 50331903
:C_MAGENTA: 50396928
:C_YELLOW: 67043328
:C_GREY: 41975936
:C_LT_RED: 54542336
:C_LT_GREEN: 54526016
:C_LT_BLUE: 50348096
:C_LT_CYAN: 50331712
:C_LT_MAGENTA: 50348032
:C_LT_YELLOW: 54525952
:C_LT_GREY: 54542400
:C_GREY10: 51910680
:C_GREY25: 54542400
:C_GREY50: 41975936
:C_WHITE: 50331648
:C_TRANSPARENT: 0
:REF_BOTTOM_LEFT: 0
:REF_BOTTOM_CENTER: 1
:REF_BOTTOM_RIGHT: 2
:REF_CENTER_LEFT: 3
:REF_CENTER: 4
:REF_CENTER_RIGHT: 5
:REF_TOP_LEFT: 6
:REF_TOP_CENTER: 7
:REF_TOP_RIGHT: 8
:GROUP_ALL: 0
:GROUP_MARKED: 1
:GROUP_VISIBLE: 2
:GROUP_AGG: 3
:GROUP_CSYMB: 4
:GROUP_VOXD: 5
:LOCATE_FIT: `geosoft.gxapi.MVIEW_RELOCATE_FIT`
:LOCATE_FIT_KEEP_ASPECT: `geosoft.gxapi.MVIEW_RELOCATE_ASPECT`
:LOCATE_CENTER: `geosoft.gxapi.MVIEW_RELOCATE_ASPECT_CENTER`
:COLOR_BAR_RIGHT: 0
:COLOR_BAR_LEFT: 1
:COLOR_BAR_BOTTOM: 2
:COLOR_BAR_TOP: 3
:COLOR_BAR_ANNOTATE_RIGHT: 1
:COLOR_BAR_ANNOTATE_LEFT: -1
:COLOR_BAR_ANNOTATE_TOP: 1
:COLOR_BAR_ANNOTATE_BOTTOM: -1
:CYLINDER_OPEN: 0
:CYLINDER_CLOSE_START: 1
:CYLINDER_CLOSE_END: 2
:CYLINDER_CLOSE_ALL: 3
:POINT_STYLE_DOT: 0
:POINT_STYLE_SPHERE: 1
:LINE3D_STYLE_LINE: 0
:LINE3D_STYLE_TUBE: 1
:LINE3D_STYLE_TUBE_JOINED: 2
:SURFACE_FLAT: `geosoft.gxapi.MVIEW_DRAWOBJ3D_MODE_FLAT`
:SURFACE_SMOOTH: `geosoft.gxapi.MVIEW_DRAWOBJ3D_MODE_SMOOTH`
.. note::
Regression tests provide usage examples:
`group drawing tests <https://github.com/GeosoftInc/gxpy/blob/master/geosoft/gxpy/tests/test_group.py>`_
.. seealso:: :mod:`geosoft.gxpy.view`, :mod:`geosoft.gxpy.map`
:class:`geosoft.gxapi.GXMVIEW`, :class:`geosoft.gxapi.GXMVU`
"""
from functools import wraps
import threading
import os
import numpy as np
import geosoft
import geosoft.gxapi as gxapi
from . import gx
from . import vv as gxvv
from . import geometry as gxgm
from . import coordinate_system as gxcs
from . import utility as gxu
from . import view as gxv
from . import agg as gxagg
from . import metadata as gxmeta
from . import vox_display as gxvoxd
from . import spatialdata as gxspd
__version__ = geosoft.__version__
def _t(s):
return geosoft.gxpy.system.translate(s)
MAX_TRANSPARENT = 4
class GroupException(geosoft.GXRuntimeError):
"""
Exceptions from :mod:`geosoft.gxpy.group`.
.. versionadded:: 9.2
"""
pass
GROUP_NAME_SIZE = gxv.VIEW_NAME_SIZE
NEW = gxapi.MVIEW_GROUP_NEW
APPEND = gxapi.MVIEW_GROUP_APPEND
READ_ONLY = max(NEW, APPEND) + 1
REPLACE = READ_ONLY + 1
SMOOTH_NONE = gxapi.MVIEW_SMOOTH_NEAREST
SMOOTH_CUBIC = gxapi.MVIEW_SMOOTH_CUBIC
SMOOTH_AKIMA = gxapi.MVIEW_SMOOTH_AKIMA
TILE_RECTANGULAR = gxapi.MVIEW_TILE_RECTANGULAR
TILE_DIAGONAL = gxapi.MVIEW_TILE_DIAGONAL
TILE_TRIANGULAR = gxapi.MVIEW_TILE_TRIANGULAR
TILE_RANDOM = gxapi.MVIEW_TILE_RANDOM
UNIT_VIEW = 0
UNIT_MAP = 2
UNIT_VIEW_UNWARPED = 3
GRATICULE_DOT = 0
GRATICULE_LINE = 1
GRATICULE_CROSS = 2
LINE_STYLE_SOLID = 1
LINE_STYLE_LONG = 2
LINE_STYLE_DOTTED = 3
LINE_STYLE_SHORT = 4
LINE_STYLE_LONG_SHORT_LONG = 5
LINE_STYLE_LONG_DOT_LONG = 6
SYMBOL_NONE = 0
SYMBOL_DOT = 1
SYMBOL_PLUS = 2
SYMBOL_X = 3
SYMBOL_BOX = 4
SYMBOL_TRIANGLE = 5
SYMBOL_INVERTED_TRIANGLE = 6
SYMBOL_HEXAGON = 7
SYMBOL_SMALL_BOX = 8
SYMBOL_SMALL_DIAMOND = 9
SYMBOL_CIRCLE = 20
SYMBOL_3D_SPHERE = 0
SYMBOL_3D_CUBE = 1
SYMBOL_3D_CYLINDER = 2
SYMBOL_3D_CONE = 3
_weight_factor = (1.0 / 48.0, 1.0 / 24.0, 1.0 / 16.0, 1.0 / 12.0, 0.145, 1.0 / 4.0)
FONT_WEIGHT_ULTRALIGHT = 1
FONT_WEIGHT_LIGHT = 2
FONT_WEIGHT_MEDIUM = 3
FONT_WEIGHT_BOLD = 4
FONT_WEIGHT_XBOLD = 5
FONT_WEIGHT_XXBOLD = 6
CMODEL_RGB = 0
CMODEL_CMY = 1
CMODEL_HSV = 2
C_BLACK = 67108863
C_RED = 33554687
C_GREEN = 33619712
C_BLUE = 50266112
C_CYAN = 50331903
C_MAGENTA = 50396928
C_YELLOW = 67043328
C_GREY = 41975936
C_LT_RED = 54542336
C_LT_GREEN = 54526016
C_LT_BLUE = 50348096
C_LT_CYAN = 50331712
C_LT_MAGENTA = 50348032
C_LT_YELLOW = 54525952
C_LT_GREY = 54542400
C_GREY10 = 51910680
C_GREY25 = 54542400
C_GREY50 = 41975936
C_WHITE = 50331648
C_TRANSPARENT = 0
REF_BOTTOM_LEFT = 0
REF_BOTTOM_CENTER = 1
REF_BOTTOM_RIGHT = 2
REF_CENTER_LEFT = 3
REF_CENTER = 4
REF_CENTER_RIGHT = 5
REF_TOP_LEFT = 6
REF_TOP_CENTER = 7
REF_TOP_RIGHT = 8
GROUP_ALL = 0
GROUP_MARKED = 1
GROUP_VISIBLE = 2
GROUP_AGG = 3
GROUP_CSYMB = 4
GROUP_VOXD = 5
LOCATE_FIT = gxapi.MVIEW_RELOCATE_FIT
LOCATE_FIT_KEEP_ASPECT = gxapi.MVIEW_RELOCATE_ASPECT
LOCATE_CENTER = gxapi.MVIEW_RELOCATE_ASPECT_CENTER
COLOR_BAR_RIGHT = 0
COLOR_BAR_LEFT = 1
COLOR_BAR_BOTTOM = 2
COLOR_BAR_TOP = 3
COLOR_BAR_ANNOTATE_RIGHT = 1
COLOR_BAR_ANNOTATE_LEFT = -1
COLOR_BAR_ANNOTATE_TOP = 1
COLOR_BAR_ANNOTATE_BOTTOM = -1
CYLINDER_OPEN = 0
CYLINDER_CLOSE_START = 1
CYLINDER_CLOSE_END = 2
CYLINDER_CLOSE_ALL = 3
POINT_STYLE_DOT = 0
POINT_STYLE_SPHERE = 1
LINE3D_STYLE_LINE = 0
LINE3D_STYLE_TUBE = 1
LINE3D_STYLE_TUBE_JOINED = 2
SURFACE_FLAT = gxapi.MVIEW_DRAWOBJ3D_MODE_FLAT
SURFACE_SMOOTH = gxapi.MVIEW_DRAWOBJ3D_MODE_SMOOTH
_uom_attr = '/geosoft/data/unit_of_measure'
def face_normals_np(faces, verticies):
"""
Return the normal of each triangular face, assuming right-hand winding of the vertices
for each face.
:param faces: faces as array of triangle indexes into verticies, shaped (-1, 3)
:param verticies: verticies as array of (x, y, z) shaped (-1, 3)
:return: face normals shaped (-1, 3)
The normal of a zero area face will be np.nan
.. versionadded:: 9.3.1
"""
tris = verticies[faces]
n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 1])
return gxu.vector_normalize(n)
def vertex_normals_np(faces, verticies, normal_area=True):
"""
Return normals of the vertices based on the triangular faces, assuming right-hand
winding of the vertices for each face.
:param faces: faces as array of triangle indexes into verticies, shaped (-1, 3)
:param verticies: verticies as array of (x, y, z) shaped (-1, 3)
:param normal_area: True to weight normals by the area of the connected faces.
:return: vertex normals shaped (-1, 3)
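**Example** (a minimal sketch using a single triangle in the z = 0 plane):

.. code::

    import numpy as np

    faces = np.array([[0, 1, 2]])
    verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
    normals = vertex_normals_np(faces, verts)   # each vertex normal is (0., 0., 1.)
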
.. versionadded:: 9.3.1
"""
n = face_normals_np(faces, verticies)
if not normal_area:
n = gxu.vector_normalize(n)
normals = np.zeros(verticies.shape, dtype=np.float64)
normals[faces[:, 0]] += n
normals[faces[:, 1]] += n
normals[faces[:, 2]] += n
return gxu.vector_normalize(normals)
def vertex_normals_vv(faces, verticies, normal_area=True):
"""
Return normals of the vertices based on the triangular faces, assuming right-hand
winding of the vertices for each face.
:param faces: (i1, i2, i3) `geosoft.gxpy.vv.GXvv` faces as array of triangle indexes into verticies
:param verticies: (vx, vy, vz) `geosoft.gxpy.vv.GXvv` verticies
:param normal_area: True to weight normals by the area of the connected faces.
:return: (nx, ny, nz) `geosoft.gxpy.vv.GXvv` normals
.. versionadded:: 9.3.1
"""
faces = gxvv.np_from_vvset(faces)
verticies = gxvv.np_from_vvset(verticies)
n = vertex_normals_np(faces, verticies, normal_area=normal_area)
return gxvv.GXvv(n[:, 0]), gxvv.GXvv(n[:, 1]), gxvv.GXvv(n[:, 2])
def color_from_string(cstr):
"""
Return a Geosoft color number from a color string.
:param cstr: color string (see below)
:returns: color
Colour strings may be "R", "G", "B", "C", "M", "Y",
"H", "S", "V", or "K" or a combination of these
characters, each followed by up to three digits
specifying a number between 0 and 255.
An empty string will produce C_ANY_NONE.
You must stay in the same color model, RGB, CMY,
HSV or K.
For example "R", "R127G22", "H255S127V32"
Characters are not case sensitive.
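**Example** (a usage sketch; the color values are illustrative):

.. code::

    import geosoft.gxpy.group as gxg

    c_int = gxg.color_from_string('R127G22B50')   # Geosoft 32-bit color integer
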
.. versionadded:: 9.3
"""
return gxapi.GXMVIEW.color(str(cstr))
def edge_reference(area, reference):
"""
Location of a reference point of an area.
:param area: :class:`Point2` instance, or (x0, y0, x1, y1)
:param reference: the reference location within the area. The reference points are:
::
6 7 8 top left, center, right
3 4 5 middle left, center, right
0 1 2 bottom left, center, right
:returns: the reference location as a :class:`geosoft.gxpy.geometry.Point`
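**Example** (sketch; coordinate values are illustrative only):

.. code::

    import geosoft.gxpy.geometry as gxgm

    area = gxgm.Point2(((0, 0), (100, 50)))
    top_centre = edge_reference(area, REF_TOP_CENTER)   # Point at (50, 50)
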
.. versionadded:: 9.2
"""
if not isinstance(area, gxgm.Point2):
area = gxgm.Point2(area)
centroid = area.centroid
half_dim = gxgm.Point(area.dimension) * 0.5
xoff = yoff = 0.0
if reference in (0, 1, 2):
yoff = -half_dim.y
elif reference in (6, 7, 8):
yoff = half_dim.y
if reference in (0, 3, 6):
xoff = -half_dim.x
elif reference in (2, 5, 8):
xoff = half_dim.x
return centroid + gxgm.Point((xoff, yoff))
class Group:
"""
Geosoft group class.
:parameters:
:view: gxpy.View
:name: group name, default is "_".
:plane: plane number, or plane name if drawing to a 3D view. Default is plane number 0.
:view_lock: True to lock the view for a single-stream drawing group. Default is False.
:unit_of_measure: unit of measurement for data in this group, default is ''
:group_3d: True for a 3D drawing group, default assumes a 2D drawing group to a plane.
:mode: `APPEND` (default), `NEW` or `READ_ONLY`
:Properties:
:view: the :class:`geosoft.gxpy.view.View` instance that contains this group
:name: the name of the group
:unit_of_measure: the unit of measurement (uom) for this data in this group
:name_uom: uom decorated group name as it appears in a view
:extent: extent of the group in view units
:extent_map_cm: extent of the group in map cm
:drawing_coordinate_system: the coordinate system of drawing coordinates. Setting to None will reset drawing
coordinates to the view cs. If `drawing_coordinate_system` is set to some other cs the
drawing coordinates will be transformed into the view cs.
.. versionadded:: 9.2
.. versionchanged:: 9.3 added support for `unit_of_measure`
.. versionchanged:: 9.3.1 added mode=REPLACE and changed mode=NEW to always create a new unique group.
"""
def __enter__(self):
return self
def __exit__(self, xtype, xvalue, xtraceback):
self.__del__()
def __del__(self):
if hasattr(self, '_close'):
self._close()
def _close(self):
if hasattr(self, '_open'):
if self._open:
try:
self._drawing_coordinate_system = None
self._pen = None
self._text_def = None
# write metadata
if self._new_meta:
bf = gxapi.GXBF.create("", gxapi.BF_READWRITE_NEW)
try:
self._meta.gxmeta.serial(bf)
bf.seek(0, gxapi.BF_SEEK_START)
self.view.gxview.write_group_storage(self.number, "Geosoft_META", bf)
finally:
del bf
finally:
self._view.lock = False
self._view = None
self._open = False
self._meta = None
self._new_meta = False
def __repr__(self):
return "{}({})".format(self.__class__, self.__dict__)
def __str__(self):
if self.view.is_3d and not self.group_3d:
return '{}/{}/{}'.format(self.name, self.view.current_3d_drawing_plane, self.view.name)
return '{}/{}'.format(self.name, self.view.name)
def __init__(self,
view,
name='_',
plane=None,
view_lock=False,
mode=APPEND,
unit_of_measure='',
group_3d=False):
if (len(name) == 0) or (name == view.name):
name = name + '_'
_lock = threading.Lock()
_lock.acquire()
try:
if view.lock:
raise GroupException(_t('This view is locked by group {}.'.format(view.lock)))
if view_lock:
view.lock = name
finally:
_lock.release()
self.group_3d = False
if view.is_3d:
self.group_3d = group_3d
if not group_3d:
# setup a 2D drawing plane for this 2D group
if plane is None:
if view.current_3d_drawing_plane:
plane = view.current_3d_drawing_plane
else:
plane = 'Plane'
view.current_3d_drawing_plane = plane
self._view = view
self._name = name
self._mode = mode
self._new_meta = False
self._meta = None
if mode == REPLACE:
if self.view.gxview.exist_group(name):
self.view.delete_group(name)
elif mode == NEW:
# if the group exists, find a new unique group name
if self.view.gxview.exist_group(name):
self._name = gxu.unique_name(name, self.view.gxview.exist_group, separator='_')
elif self.view.gxview.exist_group(self.name):
group_number = self.view.gxview.find_group(self.name)
if self.view.gxview.group_storage_exists(group_number, "Geosoft_META"):
bf = self.view.gxview.read_group_storage(group_number, "Geosoft_META")
if bf.size():
try:
self._meta = gxmeta.Metadata(gxapi.GXMETA.create_s(bf))
finally:
del bf
if unit_of_measure:
self.unit_of_measure = unit_of_measure
self._view.gxview.start_group(self.name, mode)
self._open = True
def close(self):
""" Close the group, unlocks the view"""
self._close()
@property
def guid(self):
"""
The group GUID.
.. versionadded:: 9.3
"""
sr = gxapi.str_ref()
self.view.gxview.get_group_guid(self.number, sr)
return sr.value
@property
def view(self):
"""view that contains this group."""
return self._view
@property
def name(self):
"""group name"""
return self._name
@property
def drawing_plane(self):
""" drawing plane of this group, None for a group in a 2D view."""
if self.view.is_3d:
return self.view.current_3d_drawing_plane
else:
return None
@property
def unit_of_measure(self):
"""
Unit of measure for scalar data contained in this group. This is only relevant
for groups that contain scalar data, such as a `Color_symbols_group`. For
the spatial unit of measure use :attr:`drawing_coordinate_system.unit_of_measure`.
Can be set.
.. versionadded:: 9.3
"""
gxm = self.gx_metadata
if gxm.has_attribute(_uom_attr):
return gxm.get_attribute(_uom_attr)
return ''
@unit_of_measure.setter
def unit_of_measure(self, uom):
gxm = self.gx_metadata
gxm.set_attribute(_uom_attr, str(uom))
self.gx_metadata = gxm
@property
def number(self):
"""group number in the view"""
return self.view.gxview.find_group(self.name)
def _extent(self, unit=UNIT_VIEW):
xmin = gxapi.float_ref()
ymin = gxapi.float_ref()
xmax = gxapi.float_ref()
ymax = gxapi.float_ref()
self.view.gxview.get_group_extent(self.name, xmin, ymin, xmax, ymax, unit)
return xmin.value, ymin.value, xmax.value, ymax.value
@property
def extent(self):
"""group extent as (xmin, ymin, xmax, ymax) in view units"""
return self._extent(UNIT_VIEW)
@property
def visible(self):
"""True if group is visible, can be set."""
return self.name in self.view.group_list_visible
@visible.setter
def visible(self, visibility):
if self.visible != visibility:
marked = self.view.group_list_marked
self.view.gxview.mark_all_groups(0)
self.view.gxview.mark_group(self.name, 1)
if visibility is True:
self.view.gxview.hide_marked_groups(0)
else:
self.view.gxview.hide_marked_groups(1)
self.view.gxview.mark_all_groups(0)
for g in marked:
self.view.gxview.mark_group(g, 1)
def extent_map_cm(self, extent=None):
"""
Return an extent in map cm.
:param extent: an extent in view units as a tuple (xmin, ymin, xmax, ymax). Default is the group extent.
.. versionadded:: 9.2
"""
if extent is None:
extent = self.extent
xmin, ymin = self.view.view_to_map_cm(extent[0], extent[1])
xmax, ymax = self.view.view_to_map_cm(extent[2], extent[3])
return xmin, ymin, xmax, ymax
def locate(self, location, reference=REF_CENTER):
"""
Locate the group relative to a point.
:param location: location (x, y) or a `geosoft.gxpy.geometry.Point`
:param reference: the reference point of the group extent that will be placed at `location`. The reference points are:
::
6 7 8 top left, center, right
3 4 5 center left, center, right
0 1 2 bottom left, center, right
.. versionadded:: 9.2
"""
area = gxgm.Point2(self.extent)
area -= area.centroid
area -= edge_reference(area, reference)
area += location
self.view.gxview.relocate_group(self.name,
area.p0.x, area.p0.y, area.p1.x, area.p1.y,
gxapi.MVIEW_RELOCATE_ASPECT_CENTER)
@property
def gx_metadata(self):
"""
The group metadata as a Geosoft `geosoft.gxpy.metadata.Metadata` instance. This metadata
may contain standard Geosoft metadata, such as unit_of_measure for data contained in the group,
and you can add your own metadata specific to your application. See `geosoft.gxpy.metadata.Metadata`
for information about working with metadata.
Can be set, in which case the metadata is replaced by the new metadata. Normally you will get the current
metadata, add to or modify, then set it back.
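**Example** (sketch; the attribute node and value are assumptions for illustration):

.. code::

    meta = group.gx_metadata
    meta.set_attribute('/myapp/source', 'survey 2017')
    group.gx_metadata = meta    # write the modified metadata back to the group
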
.. versionadded:: 9.3
"""
if self._meta:
return self._meta
else:
return gxmeta.Metadata()
@gx_metadata.setter
def gx_metadata(self, meta):
self._new_meta = True
self._meta = meta
def _draw(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._mode == READ_ONLY:
raise GroupException(_t('This view is read-only.'))
if not self._pen:
self._init_pen()
if 'pen' in kwargs:
cur_pen = self.pen
try:
self.pen = kwargs.pop('pen')
func(self, *args, **kwargs)
finally:
self.pen = cur_pen
else:
func(self, *args, **kwargs)
return wrapper
def _make_point(p):
if isinstance(p, gxgm.Point):
return p
else:
return gxgm.Point(p)
def _make_point2(p2):
if isinstance(p2, gxgm.Point2):
return p2
else:
return gxgm.Point2(p2)
def _make_ppoint(p):
if isinstance(p, gxgm.PPoint):
return p
else:
return gxgm.PPoint(p)
class Draw(Group):
"""
Create (start) a drawing group for 2D drawing elements.
On a 3D view, 2D drawing elements are placed on the default drawing plane.
Drawing groups will lock the view such that only one drawing group can be instantiated at a time.
Use `with Draw() as group:` to ensure correct unlocking when complete.
Inherits from the `Group` base class. See `Group` arguments.
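**Example** (sketch; draws a rectangle into a new 3D view, on the default drawing plane):

.. code::

    import geosoft.gxpy.view as gxv
    import geosoft.gxpy.group as gxg

    with gxv.View_3d.new('example_draw') as v:
        with gxg.Draw(v, 'outline') as g:
            g.rectangle((0, 0, 100, 50))
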
"""
def __init__(self, *args, **kwargs):
kwargs['view_lock'] = True
super().__init__(*args, **kwargs)
self._pen = None
self._text_def = None
self._drawing_coordinate_system = None
if self._mode != READ_ONLY:
self._init_pen()
self._text_def = Text_def(factor=self.view.units_per_map_cm)
def _set_dot_symbol(self):
# this is a hack because we cannot draw a true dot or a zero-length line, so
# instead we draw a small filled box symbol
self.view.gxview.symb_number(4)
self.view.gxview.symb_color(0)
self.view.gxview.symb_fill_color(self.pen.line_color.int_value)
self.view.gxview.symb_size(self.pen.line_thick)
@property
def group_opacity(self):
"""
Group opacity, a value between 0.0 (transparent) and 1.0 (opaque). Can be set.
.. versionadded:: 9.3.1
"""
fref = gxapi.float_ref()
self.view.gxview.get_group_transparency(self.name, fref)
return fref.value
@group_opacity.setter
def group_opacity(self, op):
self.view.gxview.set_group_transparency(self.name, min(max(float(op), 0.), 1.))
@property
def drawing_coordinate_system(self):
"""
The coordinate system of incoming spatial data, which is converted to the coordinate system of the
view. This is normally the same as the view coordinate system, but it can be set to a different
coordinate system to have automatic reprojection occur during drawing.
"""
if self._drawing_coordinate_system is None:
return self.view.coordinate_system
return self._drawing_coordinate_system
@drawing_coordinate_system.setter
def drawing_coordinate_system(self, cs):
if cs is None:
self.view.gxview.set_user_ipj(self.view.coordinate_system.gxipj)
self._drawing_coordinate_system = None
else:
self._drawing_coordinate_system = gxcs.Coordinate_system(cs)
self.view.gxview.set_user_ipj(self._drawing_coordinate_system.gxipj)
@property
def pen(self):
"""the current drawing pen as a :class:`Pen` instance"""
return self._pen
@pen.setter
def pen(self, pen):
if self._mode == READ_ONLY:
raise GroupException(_t('This view is read-only.'))
if type(pen) is str:
pen = Pen.from_mapplot_string(pen)
if self._pen.line_color != pen.line_color:
self.view.gxview.line_color(pen.line_color.int_value)
if self._pen.line_thick != pen.line_thick:
self.view.gxview.line_thick(pen.line_thick)
if self._pen.line_smooth != pen.line_smooth:
self.view.gxview.line_smooth(pen.line_smooth)
if (self._pen.line_style != pen.line_style) or (self._pen.line_pitch != pen.line_pitch):
self.view.gxview.line_style(pen.line_style, pen.line_pitch)
if self._pen.fill_color != pen.fill_color:
self.view.gxview.fill_color(pen.fill_color.int_value)
if self._pen.pat_number != pen.pat_number:
self.view.gxview.pat_number(pen.pat_number)
if self._pen.pat_angle != pen.pat_angle:
self.view.gxview.pat_angle(pen.pat_angle)
if self._pen.pat_density != pen.pat_density:
self.view.gxview.pat_density(pen.pat_density)
if self._pen.pat_size != pen.pat_size:
self.view.gxview.pat_size(pen.pat_size)
if self._pen.pat_style != pen.pat_style:
self.view.gxview.pat_style(pen.pat_style)
if self._pen.pat_thick != pen.pat_thick:
self.view.gxview.pat_thick(pen.pat_thick)
self._pen = pen
def _init_pen(self):
scm = self.view.units_per_map_cm
pen = Pen(line_thick=0.02 * scm,
line_pitch=0.5 * scm,
pat_size=0.25 * scm,
pat_thick=0.02 * scm)
self.view.gxview.line_color(pen.line_color.int_value)
self.view.gxview.line_thick(pen.line_thick)
self.view.gxview.line_smooth(pen.line_smooth)
self.view.gxview.line_style(pen.line_style, pen.line_pitch)
self.view.gxview.fill_color(pen.fill_color.int_value)
self.view.gxview.pat_number(pen.pat_number)
self.view.gxview.pat_angle(pen.pat_angle)
self.view.gxview.pat_density(pen.pat_density)
self.view.gxview.pat_size(pen.pat_size)
self.view.gxview.pat_style(pen.pat_style)
self.view.gxview.pat_thick(pen.pat_thick)
self._pen = pen
def new_pen(self, **kwargs):
"""
Returns a pen that inherits defaults from the current view pen. Arguments are the same
as the `Pen` constructor. Using this ensures that the default sizing of view unit-based
dimensions (such as `line_thick`) is not lost when new pens are created.
:param kwargs: see :class:`Pen`
:returns: :class:`Pen` instance
.. versionadded:: 9.2
"""
return Pen(default=self.pen, **kwargs)
@property
def text_def(self):
"""the current text definition as a :class:`Text_def` instance, can be set."""
return self._text_def
@text_def.setter
def text_def(self, text_def):
if self._mode == READ_ONLY:
raise GroupException(_t('This view is read-only.'))
if self._text_def != text_def:
self._text_def = text_def
self.view.gxview.text_font(text_def.font,
text_def.gfn,
text_def.weight,
text_def.italics)
self.view.gxview.text_size(text_def.height)
self.view.gxview.text_color(text_def.color.int_value)
def text_extent(self, str, text_def=None):
"""
Return the extent of a text string in view units based on the current
`text_def` setting, or on the specified `text_def`.
:param str: text string
:param text_def: `Text_def` instance, None for the current setting
:return: `geosoft.gxpy.geometry.Point2` instance
.. versionadded:: 9.4
"""
x0 = gxapi.float_ref()
y0 = gxapi.float_ref()
x1 = gxapi.float_ref()
y1 = gxapi.float_ref()
if text_def:
cur_text = self._text_def
self.text_def = text_def
else:
cur_text = None
self.view.gxview.measure_text(str, x0, y0, x1, y1)
if cur_text:
self.text_def = cur_text
return gxgm.Point2(((x0.value, y0.value), (x1.value, y1.value)),
coordinate_system=self.view.coordinate_system)
@_draw
def point(self, p):
"""
Draw a point.
:param p: point location as `geosoft.gxpy.geometry.Point`
.. versionadded:: 9.3
"""
# just draw a box. TODO: MVIEW needs a way to draw a dot, and/or address issue #44
self._set_dot_symbol()
self.view.gxview.symbol(p.x, p.y)
@_draw
def polypoint(self, pp):
"""
Draw many points.
:param pp: point location as `geosoft.gxpy.geometry.PPoint`, or a pair of VVs (vvx, vvy), or
something that `geosoft.gxpy.geometry.PPoint` can construct into a PPoint.
.. versionadded:: 9.3
"""
self._set_dot_symbol()
if not((len(pp) == 2) and isinstance(pp[0], gxvv.GXvv)):
pp = _make_ppoint(pp)
pp = (gxvv.GXvv(pp.x), gxvv.GXvv(pp.y))
self.view.gxview.symbols(pp[0].gxvv, pp[1].gxvv)
@_draw
def line(self, p2):
"""
Draw a line on the current plane
:param p2: :class:`geometry.Point2`, or (p1, p2)
.. versionadded:: 9.2
"""
p2 = _make_point2(p2)
self.view.gxview.line(p2.p0.x, p2.p0.y, p2.p1.x, p2.p1.y)
@_draw
def polyline(self, pp, close=False):
"""
Draw a polyline on the current plane
:param pp: `geosoft.gxpy.geometry.PPoint` instance or something that can be constructed, or a
pair of `geosoft.gxpy.vv.GXvv` (xvv, yvv)
:param close: if True, draw a polygon, default is a polyline
.. note::
Smooth-line polygons must have at least 6 points for the closure to
appear continuous.
.. versionadded:: 9.2
"""
if not((len(pp) == 2) and isinstance(pp[0], gxvv.GXvv)):
pp = _make_ppoint(pp)
pp = (gxvv.GXvv(pp.x), gxvv.GXvv(pp.y))
if close:
self.view.gxview.poly_line(gxapi.MVIEW_DRAW_POLYGON, pp[0].gxvv, pp[1].gxvv)
else:
self.view.gxview.poly_line(gxapi.MVIEW_DRAW_POLYLINE, pp[0].gxvv, pp[1].gxvv)
@_draw
def polygon(self, pp):
"""
Draw a polygon on the current plane.
:param pp: :class:`geosoft.gxpy.geometry.PPoint`
.. note::
Smooth-line polygons must have at least 6 points for the closure to
appear continuous.
.. versionadded:: 9.2
"""
self.polyline(pp, True)
@_draw
def rectangle(self, p2):
"""
Draw a 2D rectangle on the current plane
:param p2: geometry.Point2, or (p1, p2), or (x0, y0, x1, y1)
.. versionadded:: 9.2
"""
p2 = _make_point2(p2)
self.view.gxview.rectangle(p2.p0.x, p2.p0.y, p2.p1.x, p2.p1.y)
@_draw
def graticule(self, dx=None, dy=None, ddx=None, ddy=None, style=GRATICULE_LINE):
"""
Draw a graticule reference on a view.
:param style: `GRATICULE_LINE`, `GRATICULE_CROSS` or `GRATICULE_DOT`
:param dx: vertical line separation
:param dy: horizontal line separation
:param ddx: horizontal cross size for `GRATICULE_CROSS`
:param ddy: vertical cross size for `GRATICULE_CROSS`
.. versionadded:: 9.2
"""
ext = self.extent
if dx is None:
dx = (ext[2] - ext[0]) * 0.2
ddx = dx * 0.25
if dy is None:
dy = (ext[3] - ext[1]) * 0.2
ddy = dy * 0.25
if ddy is None:
ddy = dy * 0.25
if ddx is None:
ddx = dx * 0.25
self.view.gxview.grid(dx, dy, ddx, ddy, style)
def text(self,
text,
location=(0, 0),
reference=REF_BOTTOM_LEFT,
angle=0.,
text_def=None):
"""
Draw text in the view.
:param text: text string. Use line-feed characters for multi-line text.
:param location: (x, y) or a `geosoft.gxpy.geometry.Point` location
:param reference: text justification point relative to the text outline box.
The points are:
::
6 7 8 top left, center, right
3 4 5 middle left, center, right
0 1 2 bottom left, center, right
:param angle: baseline angle in degrees clockwise
:param text_def: text definition, if not set the current definition is used
.. versionadded:: 9.2
"""
if text_def:
cur_text = self._text_def
self.text_def = text_def
else:
cur_text = None
self.view.gxview.text_ref(reference)
self.view.gxview.text_angle(angle)
if not isinstance(location, gxgm.Point):
location = gxgm.Point(location)
self.view.gxview.text(text, location.x, location.y)
if cur_text:
self.text_def = cur_text
def contour(self, grid_file_name, parameters=None):
"""
Draw contours for a grid file.
:param grid_file_name: Grid file name
:param parameters: contour parameters, None for default contouring.
Parameters can be provided as a list of strings that correspond to the contouring control file lines
(starting at control file line 2) as defined in the Geosoft Desktop help topic 'CONTCON'. The first
'MDF' line, which is used to specify the map scale and drawing location, is not required as the scale
and location are fixed by the view.
Following are the control file parameter names as they would appear in a text control file:
.. code::
border, lowtic, smooth, suppop, nth, gtitle / 'general': {}
pdef, ptick, pxval, pframe, psidel / 'special': {}
hlb, htl, hcr, htk, hxv, hsl / 'text': {}
ominl,cminl,skipa,skipb,skipc,skipd,skipe,skipf / 'line': {}
xval, digxv, minxv, markxv / 'hilo': {}
levopt, conbeg, conend, lbup, lbmult, logopt / 'levels': {}
cint,lintyp,catt,label,dense,digits,conlo,conhi / 'contours': [{}, {}, ...]
...
... up to 32 contour levels
...
Example parameters as text strings:
====================================== =================================================================
`parameter=` **Outcome**
`None` default contour based on the grid data range
`('','','','','','','10')` multiples of 10
`('','','','','','','10','50','250')` multiples of 10, 50 and 250, default attributes
`('','','','','','0','0,,,0')` single contour (levopt=0) at value 0 (cint=0), no label (label=0)
`('','','','','','0','0,,a=rt500,0')` red 500 micron thick contour at value 0, no label
====================================== =================================================================
Parameters may also be defined in a dictionary using explicit parameter names as shown in the text
control file template above. Each line of parameters is keyed by the name to the right of the `/`,
the 'contours' entry is a list, and the values are dictionaries of the parameters to be defined.
Parameters that are not defined will have the documented default behaviour.
Example parameters as a dictionary:
================================================================== ===========================================
`parameter=`                                                       **Outcome**
------------------------------------------------------------------ -------------------------------------------
`{'contours': [{'cint': 10}]}`                                     multiples of 10
`{'contours': [{'cint': 10}, {'cint': 50}]}`                       multiples of 10 and 50, default attributes
`{'levels': {'levopt': 0}, 'contours': [{'cint': 0, 'label': 0}]}` single contour at value 0, no label
================================================================== ===========================================
.. versionadded:: 9.2
.. versionchanged:: 9.4 added parameter controls
"""
def parms(set_str, keys):
pstr = ''
items = keys.split(',')
if len(set_str):
for k in items:
pstr = pstr + str(set_str.get(k.strip(), '')) + ','
pstr = pstr[:-1] + ' /' + keys
return pstr
scale, ufac, x0, y0 = self.view.mdf()[1]
control_file = gx.gx().temp_file('.con')
with open(control_file, 'w+') as f:
f.write('{},{},{},{} /scale, ufac, x0, y0\n'.format(scale, ufac, x0, y0))
if parameters is None:
f.write(',,-1/\n')
elif isinstance(parameters, dict):
f.write('{}\n'.format(parms(parameters.get('general', {}),
'border, lowtic, smooth, suppop, nth, gtitle')))
f.write('{}\n'.format(parms(parameters.get('special', {}),
'pdef, ptick, pxval, pframe, psidel')))
f.write('{}\n'.format(parms(parameters.get('text', {}),
'hlb, htl, hcr, htk, hxv, hsl')))
f.write('{}\n'.format(parms(parameters.get('line', {}),
'ominl, cminl, skipa, skipb, skipc, skipd, skipe, skipf')))
f.write('{}\n'.format(parms(parameters.get('hilo', {}),
'xval, digxv, minxv, markxv')))
f.write('{}\n'.format(parms(parameters.get('levels', {}),
'levopt, conbeg, conend, lbup, lbmult, logopt')))
contours = parameters.get('contours', [])
if len(contours) == 0:
raise GroupException(_t('No contour levels specified.'))
for con in contours:
f.write('{}\n'.format(parms(con,
'cint, lintyp, catt, label, dense, digits, conlo, conhi')))
else:
for pline in parameters:
f.write(pline + '\n')
geosoft.gxapi.GXMVU.contour(self.view.gxview, control_file, grid_file_name)
gxu.delete_file(control_file)
class Draw_3d(Draw):
"""
Create a 3D drawing group within a 3D view.
3D drawing groups accept 3D drawing objects that can be created using methods of this class.
2D objects can also be drawn to a 3D group and will be placed on the default drawing plane
within the 3D view.
:param render_backfaces: True to turn backface rendering on.
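**Example** (sketch; a sphere and a box drawn into a new 3D view):

.. code::

    import geosoft.gxpy.view as gxv
    import geosoft.gxpy.group as gxg

    with gxv.View_3d.new('example_3d') as v3d:
        with gxg.Draw_3d(v3d, 'solids') as g:
            g.sphere((0, 0, 0), radius=10)
            g.box_3d((20, 20, 20, 30, 35, 40))
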
.. versionadded:: 9.2
"""
def __init__(self,
view,
*args,
render_backfaces=False,
**kwargs):
if not isinstance(view, gxv.View_3d):
raise GroupException(_t('View is not 3D'))
kwargs['group_3d'] = True
super().__init__(view, *args, **kwargs)
if render_backfaces:
self.render_backfaces = True
@property
def render_backfaces(self):
"""
True if backface rendering is on, default is off (False).
Backface rendering controls the rendering of parts of solid objects that
would normally be hidden from view. If drawing solid objects that have
an open face, such as cylinders with an open end, backface rendering will be
turned on. Once on it cannot be turned off for a view.
.. versionadded:: 9.2
"""
return bool(self.view.gxview.get_3d_group_flags(self.number) & 0b1)
@render_backfaces.setter
def render_backfaces(self, setting):
if not setting and self.render_backfaces:
raise GroupException(_t('Once backface rendering is on it cannot be turned off.'))
if not self.render_backfaces:
f3d = (self.view.gxview.get_3d_group_flags(self.number) & 0b11111110) | 0b1
self.view.gxview.set_3d_group_flags(self.number, f3d)
@_draw
def sphere(self, p, radius):
"""
Draw a sphere.
:param p: location as geometry.Point, or (x, y, z)
:param radius: sphere radius
.. versionadded:: 9.2
"""
# solids use the fill color as the object color
fci = self.pen.fill_color.int_value
self.view.gxview.fill_color(self.pen.line_color.int_value)
try:
p = _make_point(p)
self.view.gxview.sphere_3d(p.x, p.y, p.z, radius)
finally:
self.view.gxview.fill_color(fci)
self.view.add_extent(gxgm.Point2((p - radius, p + radius)))
@_draw
def box_3d(self, p2, wireframe=False):
"""
Draw a 3D box
:param p2: box corners as geometry.Point2, or (p0, p1), or (x0, y0, z0, x1, y1, z1)
:param wireframe: True to draw edges only
.. versionadded:: 9.2
"""
# solids use the fill color as the object color
fci = self.pen.fill_color.int_value
self.view.gxview.fill_color(self.pen.line_color.int_value)
pp = _make_point2(p2)
try:
if wireframe:
sq = gxgm.PPoint(((pp.p0.x, pp.p0.y, pp.p0.z),
(pp.p0.x, pp.p1.y, pp.p0.z),
(pp.p1.x, pp.p1.y, pp.p0.z),
(pp.p1.x, pp.p0.y, pp.p0.z),
(pp.p0.x, pp.p0.y, pp.p0.z)))
self.polyline_3d(sq, style=LINE3D_STYLE_TUBE_JOINED)
sq += (0, 0, pp.p1.z - pp.p0.z)
self.polyline_3d(sq, style=LINE3D_STYLE_TUBE_JOINED)
self.cylinder_3d(gxgm.Point2(((pp.p0.x, pp.p0.y, pp.p0.z), (pp.p0.x, pp.p0.y, pp.p1.z))),
radius=self.pen.line_thick * 0.5)
self.cylinder_3d(gxgm.Point2(((pp.p0.x, pp.p1.y, pp.p0.z), (pp.p0.x, pp.p1.y, pp.p1.z))),
radius=self.pen.line_thick * 0.5)
self.cylinder_3d(gxgm.Point2(((pp.p1.x, pp.p1.y, pp.p0.z), (pp.p1.x, pp.p1.y, pp.p1.z))),
radius=self.pen.line_thick * 0.5)
self.cylinder_3d(gxgm.Point2(((pp.p1.x, pp.p0.y, pp.p0.z), (pp.p1.x, pp.p0.y, pp.p1.z))),
radius=self.pen.line_thick * 0.5)
else:
self.view.gxview.box_3d(pp.p0.x, pp.p0.y, pp.p0.z,
pp.p1.x, pp.p1.y, pp.p1.z)
finally:
self.view.gxview.fill_color(fci)
self.view.add_extent(pp.extent)
@_draw
def cylinder_3d(self, p2, radius, r2=None, close=CYLINDER_CLOSE_ALL):
"""
Draw a cylinder.
:param p2: end points as geometry.Point2, or (p0, p1), or (x0, y0, z0, x1, y1, z1)
:param radius: cylinder radius.
:param r2: end radius if different from the start
:param close: one of:
::
CYLINDER_OPEN
CYLINDER_CLOSE_START
CYLINDER_CLOSE_END
CYLINDER_CLOSE_ALL
.. versionadded:: 9.2
"""
# solids use the fill color as the object color
fci = self.pen.fill_color.int_value
self.view.gxview.fill_color(self.pen.line_color.int_value)
if close != CYLINDER_CLOSE_ALL:
self.render_backfaces = True
try:
p2 = _make_point2(p2)
if r2 is None:
r2 = radius
self.view.gxview.cylinder_3d(p2.p0.x, p2.p0.y, p2.p0.z,
p2.p1.x, p2.p1.y, p2.p1.z,
radius, r2,
close)
finally:
self.view.gxview.fill_color(fci)
r = max(radius, r2)
ext = p2.extent
self.view.add_extent(gxgm.Point2((ext.p0 - r, ext.p1 + r)))
@_draw
def cone_3d(self, p2, radius):
"""
Draw a cone.
:param p2: end points as geometry.Point2, or (p0, p1), or (x0, y0, z0, x1, y1, z1).
:param radius: cone base radius; the base is at the first point of p2.
.. versionadded:: 9.2
"""
self.cylinder_3d(p2, radius, r2=0.)
def _poly_3d(self, points, ptype, smooth=gxapi.MVIEW_DRAWOBJ3D_MODE_FLAT):
vvx, vvy, vvz = points.make_xyz_vv()
null_vv = gxapi.GXVV.null()
self.view.gxview.draw_object_3d(ptype, smooth,
vvx.length, 0,
vvx.gxvv, vvy.gxvv, vvz.gxvv,
null_vv, null_vv, null_vv,
null_vv, null_vv, null_vv)
@_draw
def polypoint_3d(self, points, style=POINT_STYLE_DOT):
"""
Draw multiple points.
:param points: points to draw, :class:`geosoft.gxpy.geometry.PPoint` instance, or array-like [x,y,z]
:param style: POINT_STYLE_DOT or POINT_STYLE_SPHERE. Dots are fast and intended for point clouds.
The current pen line thickness is used as the sphere diameter.
.. versionadded:: 9.2
"""
points = _make_ppoint(points)
radius = self.pen.line_thick * 0.5
if style == POINT_STYLE_DOT:
self._poly_3d(points, gxapi.MVIEW_DRAWOBJ3D_ENTITY_POINTS)
else:
for i in range(points.length):
self.sphere(points[i], radius=radius)
ext = points.extent
self.view.add_extent(gxgm.Point2((ext.p0 - radius, ext.p1 + radius)))
@_draw
def polyline_3d(self, points, style=LINE3D_STYLE_LINE):
"""
Draw a polyline.
:param points: vertices of the polyline, :class:`geosoft.gxpy.geometry.PPoint` instance, or array-like [x,y,z]
:param style: LINE3D_STYLE_LINE, LINE3D_STYLE_TUBE or LINE3D_STYLE_TUBE_JOINED.
Lines are single-pixel-wide. Tubes have width defined by the pen line thickness.
Joined tubes have joints and rounded ends.
.. versionadded:: 9.2
"""
points = _make_ppoint(points)
if points.length < 2:
raise GroupException(_t('Need at least two points.'))
radius = self.pen.line_thick * 0.5
if style == LINE3D_STYLE_LINE:
vvx, vvy, vvz = points.make_xyz_vv()
self.view.gxview.poly_line_3d(vvx.gxvv, vvy.gxvv, vvz.gxvv)
else:
self.pen = Pen(fill_color=self.pen.line_color, default=self.pen)
for i in range(points.length-1):
self.cylinder_3d(gxgm.Point2((points[i], points[i+1])), radius=radius)
if style == LINE3D_STYLE_TUBE_JOINED:
for i in range(points.length):
self.sphere(points[i], radius=radius)
ext = points.extent
self.view.add_extent(gxgm.Point2((ext.p0 - radius, ext.p1 + radius)))
def polydata_3d(self,
data,
render_info_func=None,
passback=None):
"""
Create 3D objects rendered using data attributes.
:param data: iterable that yields items passed to your `render_info_func` callback
:param render_info_func: a callback that, given `(item, passback)`, returns the rendering `(symbol_type,
geometry, color_integer, attribute)`:
================== ======== =============== =========
Symbol Geometry Color Attribute
================== ======== =============== =========
SYMBOL_3D_SPHERE Point Color.int_value radius
SYMBOL_3D_CUBE Point2 Color.int_value None
SYMBOL_3D_CYLINDER Point2 Color.int_value radius
SYMBOL_3D_CONE Point2 Color.int_value radius
================== ======== =============== =========
:param passback: something passed back to your render_info_func function, default None.
**Example**
.. code::
import geosoft.gxpy.geometry as gxgm
import geosoft.gxpy.view as gxv
import geosoft.gxpy.group as gxg
def render_spheres(xyz, cmap_radius):
color, radius = cmap_radius
return gxg.SYMBOL_3D_SPHERE, xyz, color.int_value, radius
data = gxgm.PPoint(((5, 5, 5), (7, 5, 5), (7, 7, 7)))
with gxv.View_3d.new('example_polydata') as v:
with gxg.Draw_3d(v, 'red_spheres') as g:
g.polydata_3d(data, render_spheres, (gxg.Color('r'), 0.25))
.. versionadded:: 9.2
"""
cint = None
for item in data:
render = render_info_func(item, passback)
if render:
symbol, geometry, color, attribute = render
if color != cint:
self.view.gxview.fill_color(color)
cint = color
if symbol == SYMBOL_3D_SPHERE:
self.view.gxview.sphere_3d(geometry[0], geometry[1], geometry[2], attribute)
if not isinstance(geometry, gxgm.Geometry):
geometry = gxgm.Point(geometry)
elif symbol == SYMBOL_3D_CUBE:
self.view.gxview.box_3d(geometry.p0.x, geometry.p0.y, geometry.p0.z,
geometry.p1.x, geometry.p1.y, geometry.p1.z)
elif symbol == SYMBOL_3D_CYLINDER:
self.view.gxview.cylinder_3d(geometry.p0.x, geometry.p0.y, geometry.p0.z,
geometry.p1.x, geometry.p1.y, geometry.p1.z,
attribute, attribute, CYLINDER_CLOSE_ALL)
elif symbol == SYMBOL_3D_CONE:
self.view.gxview.cylinder_3d(geometry.p0.x, geometry.p0.y, geometry.p0.z,
geometry.p1.x, geometry.p1.y, geometry.p1.z,
attribute, 0, CYLINDER_CLOSE_ALL)
else:
raise GroupException(_t('Symbol type not implemented'))
if attribute:
e = gxgm.Point2(geometry).extent
self.view.add_extent((e.p0 - attribute, e.p1 + attribute))
else:
self.view.add_extent(geometry.extent)
def _surface(self, faces, verticies, coordinate_system=None):
"""
TODO: awaiting resolution of #73
Draw a surface defined by faces and verticies
:param faces: triangle faces as indexes into verticies, numpy array (n_faces, 3)
:param verticies: verticies, numpy array (n_verticies, 3)
:param coordinate_system: `geosoft.gxpy.Coordinate_system` instance if not in the drawing CS.
.. versionadded:: 9.3.1
"""
n_faces = len(faces)
n_verticies = len(verticies)
if np.nanmax(faces) >= n_verticies or np.nanmin(faces) < 0:
raise GroupException(_t('Faces refer to vertex indexes outside the range of the verticies.'))
# TODO validate buffering and rendering performance once #73 is resolved.
n_buff = 1000
n_faces_written = 0
# normals
normals = vertex_normals_np(faces, verticies)
# coordinate_system
if isinstance(coordinate_system, gxcs.Coordinate_system):
gxipj = coordinate_system.gxipj
else:
gxipj = self.drawing_coordinate_system.gxipj
# TODO: implement variable colour once issue #73 is addressed
# color
color = self.pen.fill_color.int_value
if color == 0:
color = C_GREY
self.render_backfaces = True
while n_faces_written < n_faces:
n_write = min(n_buff, n_faces - n_faces_written)
n_last = n_faces_written + n_write
faces_buff = faces[n_faces_written: n_last]
verticies_buff = verticies[faces_buff].reshape(-1, 3)
vx, vy, vz = gxvv.vvset_from_np(verticies_buff)
vf1, vf2, vf3 = gxvv.vvset_from_np(faces_buff)
nx, ny, nz = gxvv.vvset_from_np(normals[faces_buff].reshape(-1, 3))
self.view.gxview.draw_surface_3d_ex(self.name,
vx.gxvv, vy.gxvv, vz.gxvv,
nx.gxvv, ny.gxvv, nz.gxvv,
gxapi.GXVV.null(), color,
vf1.gxvv, vf2.gxvv, vf3.gxvv,
gxipj)
n_faces_written += n_write
def surface_group_from_file(v3d, file_name, group_name=None, overwrite=False):
"""
Create a 3D surface group from a surface dataset file.
:param v3d: `geosoft.gxpy.view.View_3d` instance
:param file_name: surface dataset file name (extension .geosoft_surface).
See `geosoft.gxpy.surface.SurfaceDataset`.
:param group_name: group name, default is the base file name.
:param overwrite: True to overwrite existing group
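**Example** (sketch; `v3d` is an assumed open `View_3d` and the file name is illustrative):

.. code::

    surface_group_from_file(v3d, 'dyke_model.geosoft_surface', overwrite=True)
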
.. versionadded:: 9.3.1
"""
if group_name is None:
group_name = os.path.basename(file_name)
group_name = os.path.splitext(group_name)[0]
if v3d.has_group(group_name) and not overwrite:
raise GroupException(_t('Cannot overwrite existing group "{}"').format(group_name))
v3d.gxview.draw_surface_3d_from_file(group_name, file_name)
ext = gxspd.extent_from_metadata_file(file_name)
v3d.add_extent(ext)
def contour(view, group_name, grid_file_name, parameters=None):
"""
Create a contour group from a grid file. A default contour interval is determined from the grid.
:param view: `geosoft.gxpy.view.View` instance
:param group_name: name for the contour group
:param grid_file_name: Grid file name
:param parameters: contour parameters, see :meth:`Draw.contour`. None for default contouring.
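**Example** (sketch; `v` is an assumed open view and the grid file name is illustrative):

.. code::

    contour(v, 'mag_contours', 'mag.grd')
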
.. versionadded:: 9.3
"""
with Draw(view, group_name) as g:
g.contour(grid_file_name, parameters=parameters)
def legend_color_bar(view,
group_name,
cmap,
cmap2=None,
bar_location=COLOR_BAR_RIGHT,
location=None,
decimals=None,
annotation_height=0.2,
annotation_offset=None,
annotation_side=COLOR_BAR_ANNOTATE_RIGHT,
box_size=None,
bar_width=None,
max_bar_size=None,
minimum_gap=0,
post_end_values=False,
annotate_vertical=False,
division_line=1,
interval_1=None,
interval_2=None,
title=None):
"""
Draw a color bar legend from :class:Color_map coloring definitions.
:param view: :class:`gxpy.view.View` instance in which to place the bar
:param group_name: name for the color_bar group; if a group with this name exists, a unique name is created by appending '_'.
:param cmap: :class:`Color_map` instance
:param cmap2: optional orthogonal blended :class:`Color_map` instance. If making
a shaded-color legend, provide the shaded color map here.
:param bar_location: one of:
::
COLOR_BAR_RIGHT = 0
COLOR_BAR_LEFT = 1
COLOR_BAR_BOTTOM = 2
COLOR_BAR_TOP = 3
:param location: offset, or (x, y) offset, from the `bar_location` reference point, in cm. The default
centers the bar along the specified side of the view.
:param decimals: annotation decimal places
:param annotation_height: annotation number height (cm)
:param annotation_offset: offset of annotations from the bar (cm)
:param annotation_side: side of the bar for annotations
::
COLOR_BAR_ANNOTATE_RIGHT = 1
COLOR_BAR_ANNOTATE_LEFT = -1
COLOR_BAR_ANNOTATE_TOP = 1
COLOR_BAR_ANNOTATE_BOTTOM = -1
:param box_size: box size, height for vertical bars, width for horizontal bars
:param bar_width: width of the color boxes, horizontal for vertical bars, vertical for horizontal bars
:param max_bar_size: maximum bar size, default is the size of the view edge
:param minimum_gap: minimum gap between annotations. Annotations are dropped if necessary.
:param post_end_values: post the maximum and minimum values
:param annotate_vertical: True to orient labels vertically
:param division_line: 0, no division lines, 1 - line, 2 - tick
:param interval_1: Major annotation increment, default annotates everything
:param interval_2: secondary smaller annotations, reduced to 1/10, 1/5, 1/4 or 1/2 of interval_1.
Default chooses something reasonable.
:param title: bar title, use new-lines for sub-titles. Default uses the title and unit_of_measure
from `cmap`.
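**Example** (sketch; `v` is an assumed open view and `cmap` a `Color_map` instance):

.. code::

    legend_color_bar(v, 'data_legend', cmap, bar_location=COLOR_BAR_RIGHT)
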
.. versionadded:: 9.2
"""
# ensure group name is unique in the view
while group_name in view.group_list:
group_name += '_'
# default decimals
if decimals is None:
decimals = 1
minz = maxz = cmap.color_map[0][0]
for c in cmap.color_map:
z = c[0]
if z:
if z < minz:
minz = z
elif z > maxz:
maxz = z
delta = maxz - minz
while delta > 0 and delta < 100:
delta *= 10.
decimals += 1
itr = cmap.gxitr
with Draw(view, group_name) as g:
v_area = gxgm.Point2(view.extent_clip)
v_width = v_area.dimension[0]
v_height = v_area.dimension[1]
if (bar_location == COLOR_BAR_LEFT) or (bar_location == COLOR_BAR_RIGHT):
bar_orient = 0
default_bar_size = v_height * 0.8
if max_bar_size is None:
max_bar_size = v_height
else:
bar_orient = 1
default_bar_size = v_width * 0.8
if max_bar_size is None:
max_bar_size = v_width * 0.8
# bar cell sizing
def_box_size = default_bar_size / itr.get_size()
if box_size is None:
box_size = min(0.4 * view.units_per_map_cm, def_box_size)
else:
box_size *= view.units_per_map_cm
if bar_width is None:
if bar_location in (COLOR_BAR_LEFT, COLOR_BAR_RIGHT):
bar_width = max(0.4 * view.units_per_map_cm, box_size * 2.0)
else:
bar_width = max(0.4 * view.units_per_map_cm, box_size)
else:
bar_width *= view.units_per_map_cm
if max_bar_size is not None:
box_size = min(box_size, max_bar_size / itr.get_size())
annotation_height *= view.units_per_map_cm
if annotation_offset is None:
annotation_offset = annotation_height * 0.5
else:
annotation_offset *= view.units_per_map_cm
annotation_offset *= annotation_side
minimum_gap *= view.units_per_map_cm
cdict = {
"BAR_ORIENTATION": bar_orient,
"DECIMALS": decimals,
'ANNOFF': annotation_offset,
'BOX_SIZE': box_size,
'BAR_WIDTH': bar_width,
'MINIMUM_GAP': minimum_gap,
"X": v_area.centroid.x,
"Y": v_area.centroid.y,
"POST_MAXMIN": 1 if post_end_values else 0,
"LABEL_ORIENTATION": 0 if annotate_vertical else 1,
"DIVISION_STYLE": division_line,
}
if interval_1:
if interval_2 is None:
interval_2 = gxapi.rDUMMY
if interval_2 <= interval_1 / 10.:
interval_2 = interval_1 / 10.
elif interval_2 <= interval_1 / 5.:
interval_2 = interval_1 / 5.
elif interval_2 <= interval_1 / 4.:
interval_2 = interval_1 / 4.
elif interval_2 <= interval_1 / 2.:
interval_2 = interval_1 / 2.
else:
interval_2 = gxapi.rDUMMY
cdict["FIXED_INTERVAL"] = interval_1
cdict["FIXED_MINOR_INTERVAL"] = interval_2
g.text_def = Text_def(height=annotation_height)
if cmap2 is None:
itr2 = gxapi.GXITR.null()
else:
itr2 = cmap2.gxitr
gxapi.GXMVU.color_bar_reg(view.gxview, itr, itr2, gxu.reg_from_dict(cdict, 100, json_encode=False))
if title is None:
if cmap.unit_of_measure:
title = '{}\n({})'.format(cmap.title, cmap.unit_of_measure)
else:
title = cmap.title
if title:
title_height = annotation_height * 1.5
g.text_def = Text_def(height=title_height, weight=FONT_WEIGHT_BOLD)
p = gxgm.Point(edge_reference(gxgm.Point2(g.extent), REF_BOTTOM_CENTER))
p -= (0, title_height * 0.5)
if '\n' in title:
tline = title[:title.index('\n')]
title = title[title.index('\n') + 1:]
else:
tline = title
title = ''
g.text(tline, p, reference=REF_TOP_CENTER)
if title:
g.text_def = Text_def(height=title_height * 0.8, weight=FONT_WEIGHT_LIGHT)
p -= (0, title_height * 1.5)
g.text(title, p, reference=REF_TOP_CENTER)
# locate the bar
default_offset = 1.5 * view.units_per_map_cm
if location and (not hasattr(location, '__iter__')):
default_offset = location * view.units_per_map_cm
location = None
if location is not None:
location = location[0] * view.units_per_map_cm, location[1] * view.units_per_map_cm
area = gxgm.Point2(view.extent_clip)
if bar_location == COLOR_BAR_LEFT:
if location is None:
location = (-default_offset, 0)
xy = edge_reference(area, REF_CENTER_LEFT)
reference = REF_CENTER_RIGHT
elif bar_location == COLOR_BAR_BOTTOM:
if location is None:
location = (0, -default_offset)
xy = edge_reference(area, REF_BOTTOM_CENTER)
reference = REF_TOP_CENTER
elif bar_location == COLOR_BAR_TOP:
if location is None:
location = (0, default_offset)
xy = edge_reference(area, REF_TOP_CENTER)
reference = REF_BOTTOM_CENTER
else: # BAR_RIGHT
if location is None:
location = (default_offset, 0)
xy = edge_reference(area, REF_CENTER_RIGHT)
reference = REF_CENTER_LEFT
location = xy + location
g.locate(location, reference)
class Color:
"""
Colours, which are stored as a 32-bit color integer.
:param color: string descriptor (e.g. 'R255G0B125') using color letters R, G, B, C, M, Y, H, S or V;
a tuple (r, g, b), (c, m, y) or (h, s, v), each item defined in the range 0 to 255;
or a 32-bit color number, which can be an item selected from the following list:
::
C_BLACK
C_RED
C_GREEN
C_BLUE
C_CYAN
C_MAGENTA
C_YELLOW
C_GREY
C_LT_RED
C_LT_GREEN
C_LT_BLUE
C_LT_CYAN
C_LT_MAGENTA
C_LT_YELLOW
C_LT_GREY
C_GREY10
C_GREY25
C_GREY50
C_WHITE
C_TRANSPARENT
:param model: model of the tuple:
::
CMODEL_RGB (default)
CMODEL_CMY
CMODEL_HSV
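**Example** (sketch):

.. code::

    red = Color('R255G0B0')            # from a color string
    grey = Color((128, 128, 128))      # from an (r, g, b) tuple
    dull_red = red.adjust_brightness(-0.25)
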
.. versionadded:: 9.2
"""
def __init__(self, color, model=CMODEL_RGB):
if isinstance(color, Color):
self._color = color.int_value
elif isinstance(color, int):
self.int_value = color
elif isinstance(color, str):
self._color = gxapi.GXMVIEW.color(color)
else:
if model == CMODEL_CMY:
self.cmy = color
elif model == CMODEL_HSV:
hue = max(0, min(255, color[0]))
sat = max(0, min(255, color[1]))
val = max(0, min(255, color[2]))
self._color = gxapi.GXMVIEW.color_hsv(hue, sat, val)
else:
self.rgb = color
def __eq__(self, other):
return self.int_value == other.int_value
def __ne__(self, other):
return not self.__eq__(other)
@property
def int_value(self):
""" color as a 32-bit color integer, can be set"""
return self._color
@int_value.setter
def int_value(self, color):
if color < 0:
raise GroupException(_t('Invalid color integer {}, must be >= 0').format(color))
self._color = int(color)
@property
def rgb(self):
"""color as an (red, green, brue) tuple, can be set"""
if self.int_value == 0:
return None
r = gxapi.int_ref()
g = gxapi.int_ref()
b = gxapi.int_ref()
gxapi.GXMVIEW.color2_rgb(self._color, r, g, b)
return r.value, g.value, b.value
@rgb.setter
def rgb(self, rgb):
r = max(min(255, rgb[0]), 0)
g = max(min(255, rgb[1]), 0)
b = max(min(255, rgb[2]), 0)
self._color = gxapi.GXMVIEW.color_rgb(r, g, b)
@property
def cmy(self):
"""color as an (cyan, magenta, yellow) tuple, can be set"""
if self.int_value == 0:
return None
red, green, blue = self.rgb
return 255 - red, 255 - green, 255 - blue
@cmy.setter
def cmy(self, cmy):
self.rgb = (255 - cmy[0], 255 - cmy[1], 255 - cmy[2])
def adjust_brightness(self, brightness):
"""
Return a :class:`Color` instance adjusted for brightness.
:param brightness: value between -1 (adjust to black) and +1 (adjust to white); 0 returns the same color.
.. versionadded:: 9.2
"""
if brightness == 0.:
return self
c, m, y = self.cmy
if brightness > 0.0:
w = round(brightness * 255)
c = max(c - w, 0)
m = max(m - w, 0)
y = max(y - w, 0)
return Color((c, m, y), model=CMODEL_CMY)
else:
k = round(-brightness * 255)
c = min(c + k, 255)
m = min(m + k, 255)
y = min(y + k, 255)
return Color((c, m, y), model=CMODEL_CMY)
def font_weight_from_line_thickness(line_thick, height):
"""
Returns font weight for a text height and line thickness.
:param line_thick: line thickness in same units as the text height
:param height: text height
:returns: one of:
::
FONT_WEIGHT_ULTRALIGHT
FONT_WEIGHT_LIGHT
FONT_WEIGHT_MEDIUM
FONT_WEIGHT_BOLD
FONT_WEIGHT_XBOLD
FONT_WEIGHT_XXBOLD
.. versionadded:: 9.2
"""
if height <= 0.:
return FONT_WEIGHT_ULTRALIGHT
ratio = line_thick / height
fw = 1
for f in _weight_factor:
if ratio <= f:
return fw
fw += 1
return FONT_WEIGHT_MEDIUM
def thickness_from_font_weight(weight, height):
"""
Returns the line thickness appropriate for a text weight.
:param weight: one of:
::
FONT_WEIGHT_ULTRALIGHT
FONT_WEIGHT_LIGHT
FONT_WEIGHT_MEDIUM
FONT_WEIGHT_BOLD
FONT_WEIGHT_XBOLD
FONT_WEIGHT_XXBOLD
:param height: font height
.. versionadded:: 9.2
"""
return height * _weight_factor[weight - 1]
class Text_def:
"""
Text definition:
:param font: font name. TrueType fonts are assumed unless the name ends with '.gfn',
which is a Geosoft gfn font.
:param weight: one of:
::
FONT_WEIGHT_ULTRALIGHT
FONT_WEIGHT_LIGHT
FONT_WEIGHT_MEDIUM
FONT_WEIGHT_BOLD
FONT_WEIGHT_XBOLD
FONT_WEIGHT_XXBOLD
:param line_thick: line thickness from which to determine a weight, which is calculated from the
ratio of line thickness to height.
:param italics: True for italics fonts
:param height: text height, default 0.25
:param factor: default spatial properties are multiplied by this factor. This is useful
for creating text scaled to the units of a view. The default text properties
are scaled to cm.
:Properties:
:height: font height in view units
:font: font name
:weight: font weight, one of FONT_WEIGHT
:line_thick: font line thickness for gfn stroke fonts
:italics: True for italics
:slant: Slant angle for stroke fonts, 0 if normal, 15 for italics
:mapplot_string: mapplot compatible text definition string
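**Example** (sketch; `view` is an assumed open view and the values are illustrative):

.. code::

    tdef = Text_def(font='Arial',
                    height=0.3,
                    weight=FONT_WEIGHT_BOLD,
                    factor=view.units_per_map_cm)
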
.. versionadded:: 9.2
"""
def __init__(self, **kwargs):
self._color = None
self._font = None
self._height = None
self._gfn = None
self._weight = None
self._italics = None
if 'default' in kwargs:
def_pen = kwargs.pop('default')
self.__dict__ = def_pen.__dict__.copy()
else:
self.color = Color(C_BLACK)
self.height = 0.25
self.font = 'DEFAULT'
self.gfn = True
self.weight = None
self.italics = False
factor = kwargs.pop('factor', 1.)
if factor != 1.0:
self.height *= factor
line_thick = None
for k in kwargs:
if k == 'color':
self.color = kwargs[k]
elif k == 'line_thick':
line_thick = kwargs[k]
elif k == 'font':
self.font = kwargs[k]
elif k in self.__dict__:
self.__dict__[k] = kwargs[k]
else:
raise GroupException(_t('Invalid text definition parameter ({})'.format(k)))
if self.weight is None:
if line_thick is None:
self.weight = FONT_WEIGHT_MEDIUM
else:
self.weight = font_weight_from_line_thickness(line_thick, self.height)
def __eq__(self, other):
if hasattr(other, '__dict__'):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return self.__dict__ != other.__dict__
@property
def color(self):
"""text color as a :class:`Color` instance, can be set"""
return self._color
@color.setter
def color(self, color):
if isinstance(color, Color):
self._color = color
else:
self._color = Color(color)
@property
def font(self):
"""text font name, can be set."""
return self._font
@font.setter
def font(self, font):
if font:
if '.gfn' in font.lower():
self.gfn = True
self._font = font.lower().replace('.gfn', '')
else:
self.gfn = False
self._font = font.replace('(TT)', '')
else:
self._font = 'DEFAULT'
self.gfn = True
@property
def line_thick(self):
"""text line thickness determined from the font weight, can be set."""
return thickness_from_font_weight(self.weight, self.height)
@line_thick.setter
def line_thick(self, line_thick):
self.weight = font_weight_from_line_thickness(line_thick, self.height)
@property
def slant(self):
"""text slant, 15 for italics, 0 for not italics, can be set. If set, any slant
greater than 5 will result in a 15 degree slant to create italics."""
if self.italics:
return 15
else:
return 0
@slant.setter
def slant(self, slant):
if slant > 5:
self.italics = True
else:
self.italics = False
@property
def mapplot_string(self):
"""
Mapplot text definition string, assumes scaling in cm.
"""
if 'default' in self._font.lower():
font = 'DEFAULT'
elif not self.gfn:
font = self._font.strip() + '(TT)'
else:
font = self._font
return '{},,,{},"{}"'.format(self.height, self.slant, font)
class Pen:
"""
Geosoft Pen class.
The default dimensioned properties (`line_thick`, `line_pitch`,
`pat_size` and `pat_thick`) assume the view units are cm, and this is usually
only the case for the base view. For views in other units either
    explicitly define the dimensions in view units, or pass `factor` set to
    the view :attr:`geosoft.gxpy.view.View.units_per_map_cm`.
:param line_color: line :class:`Color` instance, default is black
:param fill_color: fill :class:`Color` instance, default is transparent
:param line_thick: line thickness, default is 0.01
:param line_style: line pattern style
::
LINE_STYLE_SOLID (default)
LINE_STYLE_LONG
LINE_STYLE_DOTTED
LINE_STYLE_SHORT
LINE_STYLE_LONG_SHORT_LONG
LINE_STYLE_LONG_DOT_LONG
:param line_pitch: line style pitch, default is 0.5
:param line_smooth: smooth line:
::
SMOOTH_NONE (default)
SMOOTH_AKIMA
SMOOTH_CUBIC
:param pat_number: pattern number for filled patterns (refer to `etc/default.pat`) default 0, flood fill
:param pat_angle: pattern angle, default 0
:param pat_density: pattern density, default 1
:param pat_size: pattern size, default 1.0
:param pat_style: pattern style:
::
TILE_RECTANGULAR (default)
TILE_DIAGONAL
TILE_TRIANGULAR
TILE_RANDOM
:param pat_thick: pattern line thickness, default 0.01
:param default: default :class:`Pen` instance, if specified defaults are established from this
:param factor: default spatial properties are multiplied by this factor. This is useful
for creating pens scaled to the units of a view. The default pen properties
are scaled to cm. Typically you will pass :attr:`geosoft.gxpy.view.View.units_per_map_cm`.
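    A minimal construction sketch (values are illustrative only):
    .. code::
        pen = Pen(line_color=C_RED, line_thick=0.02, fill_color=C_LT_GREY)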
    .. versionadded:: 9.2
"""
def __init__(self, **kwargs):
self._line_color = None
self._line_thick = None
self._line_style = None
self._line_pitch = None
self._line_smooth = None
self._fill_color = None
self._pat_number = None
self._pat_angle = None
self._pat_density = None
self._pat_size = None
self._pat_style = None
        self._pat_thick = None
if 'default' in kwargs:
def_pen = kwargs.pop('default')
self.__dict__ = def_pen.__dict__.copy()
else:
self.line_color = Color(C_BLACK)
self.line_thick = 0.01
self.line_style = LINE_STYLE_SOLID
self.line_pitch = 0.5
self.line_smooth = SMOOTH_NONE
self.fill_color = Color(C_TRANSPARENT)
self.pat_number = 0
self.pat_angle = 0
self.pat_density = 1
self.pat_size = 1
self.pat_style = TILE_RECTANGULAR
self.pat_thick = self.line_thick
factor = kwargs.pop('factor', 1.)
if factor != 1.0:
self.line_thick *= factor
self.line_pitch *= factor
self.pat_size *= factor
self.pat_thick *= factor
for k in kwargs:
if k == 'line_color':
self.line_color = kwargs[k]
elif k == 'fill_color':
self.fill_color = kwargs[k]
elif k in self.__dict__:
self.__dict__[k] = kwargs[k]
else:
raise GroupException(_t('Invalid pen parameter ({})'.format(k)))
@classmethod
def from_mapplot_string(cls, cstr):
"""
Create a :class:`Pen` instance from a mapplot-style string descriptor using either a
krgbKRGB or kcmyKCMY color model. Lower case letters indicate line color, uppercase
indicates fill color, 'k', 'K' for black. Each letter may be followed by an intensity
between 0 and 255. If an intensity is not specified 255 is assumed.
Line thickness can be defined by 't' followed by a thickness in 1000'th of the view unit,
which for the default 'base' view would be microns.
:param cstr: mapplot-style color definition
Examples:
=========== ==============================================
'r' red line
'R' red fill
'rG64' red line, light-green fill
'c64' light cyan line, equivalent to 'R191G255B255'
'c64K96' light cyan line, light-grey fill
'bt500' blue line, 0.5 units thick
=========== ==============================================
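        A hypothetical call matching the table above:
        .. code::
            pen = Pen.from_mapplot_string('rG64t500')   # red line, light-green fill, 0.5 units thick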
.. versionadded:: 9.2
"""
def color_model(colstr):
s = colstr.lower()
for c in 'cmy':
if c in s:
return 'cmyk'
return 'rgbk'
def get_part(colstr, c, default=255):
if c not in colstr:
return 0
start = colstr.index(c)
end = start + 1
for c in colstr[end:]:
if not (c in '0123456789'):
break
end += 1
if end == start + 1:
return default
return int(colstr[start + 1:end])
def add_k(c, k):
return max(c[0] - k, 0), max(c[1] - k, 0), max(c[2] - k, 0)
def has_color(colstr, cc):
for c in cc:
if c in colstr:
return True
return False
def color(colstr, cc):
if has_color(colstr, cc):
k = get_part(colstr, cc[3])
if has_color(colstr, cc[:3]):
if model[0] == 'c' or model[0] == 'C':
return add_k((255 - get_part(colstr, cc[0]),
255 - get_part(colstr, cc[1]),
255 - get_part(colstr, cc[2])),
k)
else:
return add_k((get_part(colstr, cc[0]),
get_part(colstr, cc[1]),
get_part(colstr, cc[2])),
k)
else:
return add_k((255, 255, 255), k)
else:
return C_TRANSPARENT
model = color_model(cstr)
line_color = color(cstr, model)
fill_color = color(cstr, model.upper())
line_thick = max(1, get_part(cstr, 't', 1)) * 0.001
return cls(line_color=line_color, fill_color=fill_color, line_thick=line_thick)
def __eq__(self, other):
for k, v in self.__dict__.items():
if other.__dict__[k] != v:
return False
return True
@property
def line_color(self):
"""pen line color as a :class:`color` instance, can be set."""
return self._line_color
@line_color.setter
def line_color(self, color):
if isinstance(color, Color):
self._line_color = color
else:
self._line_color = Color(color)
@property
    def fill_color(self):
        """pen fill color as a :class:`Color` instance, can be set."""
        return self._fill_color
    @fill_color.setter
    def fill_color(self, color):
if isinstance(color, Color):
self._fill_color = color
else:
self._fill_color = Color(color)
@property
def mapplot_string(self):
"""line/fill colour and thickness string suing mapplor format, eg. 'kR125B64t1000'"""
s = ''
if self._line_color.int_value != C_TRANSPARENT:
if self._line_color.int_value == C_BLACK:
s += 'k'
else:
c = self._line_color.rgb
s += 'r{}g{}b{}'.format(c[0], c[1], c[2])
if self._fill_color.int_value != C_TRANSPARENT:
            if self._fill_color.int_value == C_BLACK:
s += 'K'
else:
c = self._fill_color.rgb
s += 'R{}G{}B{}'.format(c[0], c[1], c[2])
return s + 't{}'.format(int(self.line_thick * 1000.))
class Color_symbols_group(Group):
"""
Data represented as colored symbols based on a :class:`Color_map`.
:Constructors:
============ =======================================
:func:`new` create a new symbol group in a view
:func:`open` open an existing symbol group in a view
============ =======================================
"""
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
def __del__(self):
if hasattr(self, '_gxcsymb'):
self._gxcsymb = None
if hasattr(self, '_close'):
self._close()
def __init__(self, view, group_name, **kwargs):
self._gxcsymb = None
super().__init__(view, group_name, **kwargs)
@classmethod
def new(cls,
view,
name,
data,
color_map,
symbol_def=None,
symbol=SYMBOL_CIRCLE,
mode=REPLACE,
**kwargs):
"""
Create a new color symbols group with color mapping. If the group exists a new unique name is
constructed.
:param view: the view in which to place the group
:param name: group name
        :param data: 2d numpy data array [[x, y, value], ...] or an iterable that yields
                     `((x, y), value)` or `((x, y, z), value, ...)`. Only the first `value` is used,
                     and in the case of an iterable that yields (x, y, z) the z is ignored.
:param color_map: symbol fill color :class:`Color_map`.
Symbols are filled with the color lookup using `data`.
:param symbol_def: :class:`Text_def` defines the symbol font to use, normally
`symbols.gfn` is expected, and if used the symbols defined by the `SYMBOL` manifest
are valid. For other fonts you will get the symbol requested. The default is
`Text_def(font='symbols.gfn', color='k', weight=FONT_WEIGHT_ULTRALIGHT)`
:param symbol: the symbol to plot, normally one of `SYMBOL`.
:param mode: REPLACE (default) or NEW, which creates a new unique name if group exists
:return: :class:`Color_symbols_group` instance
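        A minimal sketch, assuming `view` is an open view that accepts 2D groups
        (data values are illustrative only):
        .. code::
            import numpy as np
            data = np.array([[0., 0., 1.], [10., 10., 2.5], [20., 5., 4.]])
            cmap = Color_map()
            cmap.set_linear(0., 5.)
            csg = Color_symbols_group.new(view, 'example_symbols', data, cmap)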
.. versionadded:: 9.2
.. versionchanged:: 9.4 added support for passing data as a 2d numpy array
"""
def valid(xyd):
if xyd[0][0] is None or xyd[0][1] is None or xyd[1] is None:
return False
return True
cs = cls(view, name, mode=mode, **kwargs)
cs._gxcsymb = gxapi.GXCSYMB.create(color_map.save_file())
if symbol_def is None:
symbol_def = Text_def(font='geosoft.gfn',
height=(0.25 * view.units_per_map_cm),
weight=FONT_WEIGHT_ULTRALIGHT,
color=C_BLACK)
cs._gxcsymb.set_font(symbol_def.font, symbol_def.gfn, symbol_def.weight, symbol_def.italics)
cs._gxcsymb.set_static_col(symbol_def.color.int_value, 0)
cs._gxcsymb.set_scale(symbol_def.height)
cs._gxcsymb.set_number(symbol)
if isinstance(data, np.ndarray):
if data.ndim != 2 or data.shape[1] < 3:
raise GroupException(_t('data array must have shape (-1, 3)'))
cs._gxcsymb.add_data(gxvv.GXvv(data[:, 0]).gxvv,
gxvv.GXvv(data[:, 1]).gxvv,
gxvv.GXvv(data[:, 2]).gxvv)
else:
xy = gxgm.PPoint([xy[0] for xy in data if valid(xy)])
cs._gxcsymb.add_data(gxvv.GXvv(xy.x).gxvv,
gxvv.GXvv(xy.y).gxvv,
gxvv.GXvv([d[1] for d in data if valid(d)]).gxvv)
view.gxview.col_symbol(cs.name, cs._gxcsymb)
if cs.unit_of_measure:
color_map.unit_of_measure = cs.unit_of_measure
return cs
@classmethod
def open(cls,
view,
group_name):
"""
Open an existing color symbols group.
:param view: view that contains the group
:param group_name: name of the group, which must be a color symbols group
:return: :class:`Color_symbols_group` instance
.. versionadded:: 9.2
"""
cs = cls(view, group_name, mode=READ_ONLY)
group_number = view.gxview.find_group(group_name)
cs._gxcsymb = view.gxview.get_col_symbol(group_number)
return cs
def color_map(self):
"""
Return the :class:`geosoft.gxpy.group.Color_map` of a color symbol group.
.. versionadded:: 9.3
"""
itr = gxapi.GXITR.create()
self._gxcsymb.get_itr(itr)
cmap = geosoft.gxpy.group.Color_map(itr)
cmap.title = self.name
cmap.unit_of_measure = self.unit_of_measure
return cmap
class Aggregate_group(Group):
"""
Aggregate group in a view
:Constructors:
======== ================================
`open()` open an existing aggregate group
`new()` create a new aggregate group
======== ================================
:Properties:
:name: aggregate group name
:agg: :class:`gxpy.agg.Aggregate_image` instance
.. versionadded:: 9.2
"""
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
def __del__(self):
if hasattr(self, 'agg'):
self.agg = None
if hasattr(self, '_close'):
self._close()
def __init__(self, view, group_name, mode):
self.agg = None
super().__init__(view, group_name, mode=mode)
@classmethod
def new(cls, view, agg, name=None, mode=REPLACE, clip=True):
"""
Create a new aggregate group in a view.
:param view: `geosoft.gxpy.view.View` or `geosoft.gxpy.view.View_3d` instance
:param agg: `geosoft.gxpy.agg.Aggregate` instance.
:param name: group name, default is the aggregate name
:param mode: REPLACE (default) or NEW, which creates a unique name if the group exists
        :param clip: True to clip the aggregate to the view clip limits
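        A minimal sketch, assuming `view` is an open view and `agg` is an existing
        `geosoft.gxpy.agg.Aggregate_image` instance:
        .. code::
            agg_group = Aggregate_group.new(view, agg, name='example_agg')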
.. versionadded:: 9.2
.. versionchanged:: 9.3.1 added clip mode
"""
if name is None:
name = agg.name
agg_group = cls(view, name, mode=mode)
agg_group.agg = agg
view.clip = clip
view.gxview.aggregate(agg.gxagg, agg_group.name)
view.clip = False
return agg_group
@classmethod
def open(cls,
view,
group_name):
"""
Open an existing aggregate group in a view.
:param view: `geosoft.gxpy.view.View` or `geosoft.gxpy.view.View_3d` instance
:param group_name: group name (or number)
.. versionadded:: 9.2
"""
agg_group = cls(view, group_name, mode=READ_ONLY)
if isinstance(group_name, int):
group_number = group_name
else:
group_number = view.gxview.find_group(agg_group.name)
agg_group.agg = gxagg.Aggregate_image.open(view.gxview.get_aggregate(group_number))
return agg_group
class VoxDisplayGroup(Group):
"""
Vox display group in a view. Use class methods `new()` and `open()`
to create instances of `VoxDisplayGroup`.
:Constructors:
======== ==================================
`open()` open an existing vox_display group
`new()` create a new vox_display group
======== ==================================
.. versionadded:: 9.3.1
"""
def __exit__(self, exc_type, exc_val, exc_tb):
self.__del__()
def __del__(self):
if hasattr(self, '_voxd'):
self._voxd = None
if hasattr(self, '_close'):
self._close()
def __init__(self, view3d, group_name, mode=REPLACE):
self._voxd = None
if not view3d.is_3d:
raise GroupException(_t('View must be 3d'))
super().__init__(view3d, group_name, mode=mode)
@classmethod
def new(cls, view3d, voxd, name=None, mode=REPLACE):
"""
Add a VoxDisplay as a new group in the view
:param view3d: `geosoft.gxpy.view.View_3d` instance
:param voxd: `geosoft.gxpy.vox_display.VoxDisplay` instance
:param name: group name, default is the voxd name
:param mode: REPLACE (default) or NEW, which creates a unique name if the group exists
.. versionadded:: 9.3.1
"""
if name is None:
name = voxd.name
voxd_group = cls(view3d, name, mode=mode)
ext = voxd.vox.extent
if voxd.is_vector:
scale, height_base_ratio, max_base_size_ratio, max_cones = voxd.vector_cone_specs
if max_cones is None:
max_cones = gxapi.iDUMMY
minimum_value = voxd.shell_limits[0]
if minimum_value is None:
minimum_value = 0.
view3d.gxview.draw_vector_voxel_vectors(voxd.vox.gxvox,
name,
voxd.color_map.gxitr,
scale,
height_base_ratio,
max_base_size_ratio,
minimum_value,
max_cones)
# add to extent to make room for vectors
cell2 = min(min(voxd.vox.cells_x),
min(voxd.vox.cells_y),
min(voxd.vox.cells_z)) * 4.
ext = gxgm.Point2((ext.p0 - cell2, ext.p1 + cell2))
else:
view3d.gxview.voxd(voxd.gxvoxd, voxd_group.name)
view3d.add_extent(ext)
voxd_group._voxd = voxd
voxd_group.unit_of_measure = voxd.unit_of_measure
return voxd_group
@classmethod
def open(cls,
view,
group_name):
"""
Open an existing `VoxDisplayGroup` in a 3d view.
:param view: the 3d view
:param group_name: the name of the group to open, must be a `gxapi.GXVOXD` or
`gxapi.GXVECTOR3D`.
        .. versionadded:: 9.3.1
"""
voxd_group = cls(view, group_name, mode=READ_ONLY)
if view.gxview.is_group(group_name, gxapi.MVIEW_IS_VOXD):
voxd_group._voxd = gxvoxd.VoxDisplay.gxapi_gxvoxd(voxd_group.view.gxview.get_voxd(voxd_group.number))
elif view.gxview.is_group(group_name, gxapi.MVIEW_IS_VECTOR3D):
voxd_group._voxd = gxvoxd.VoxDisplay.gxapi_gxvoxd(voxd_group.view.gxview.get_vector_3d(voxd_group.number),
name=group_name + ".geosoft_vectorvoxel")
else:
raise GroupException('Group "{}" is not a GXVOXD or a GXVECTOR3D'.format(group_name))
return voxd_group
@property
def voxd(self):
"""
The `geosoft.gxpy.vox_display.VoxDisplay` for this vox group.
.. versionadded:: 9.3.1
"""
return self._voxd
class Color_map:
"""
Color map for establishing data color mapping for things like aggregates and color symbols.
:param cmap: the name of a Geosoft color map file (`.tbl, .zon, .itr, .agg`) from which to
establish the initial colors. If the file does not have zone values,
which is the case for a `.tbl` file, the Color_map will be uninitialized and you
can use one of the `set` methods to establish zone values.
                 You can also provide an `int`, which will create an uninitialized map of the
specified length, or a :class:`geosoft.gxapi.GXITR` instance.
If not specified the Geosoft default color table is used.
:param title: Color map title which is displayed in the color map legend.
:param unit_of_measure: Unit of measure to be displayed in a color map legend.
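    A minimal usage sketch (assumes the default Geosoft colour table is available):
    .. code::
        cmap = Color_map()                       # default colour table
        cmap.set_linear(0., 100.)                # spread the colours between 0 and 100
        colour_at_50 = cmap.color_of_value(50.)  # :class:`Color` for the zone containing 50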
.. versionadded:: 9.2
.. versionchanged:: 9.3
changed `units` to `unit_of_measure` for consistency across gxpy
"""
def __init__(self, cmap=None, title=None, unit_of_measure=None):
if cmap is None:
sr = gxapi.str_ref()
if gxapi.GXSYS.global_('MONTAJ.DEFAULT_COLOUR', sr) == 0:
cmap = sr.value
if not cmap:
cmap = 'colour'
if isinstance(cmap, str):
if cmap == 'color':
cmap = 'colour'
base, ext = os.path.splitext(cmap)
if not ext:
cmap = cmap + '.tbl'
self.file_name = cmap
self.gxitr = gxapi.GXITR.create_file(cmap)
elif isinstance(cmap, int):
self.gxitr = gxapi.GXITR.create()
self.gxitr.set_size(cmap)
for i in range(cmap):
self.__setitem__(i, (gxapi.rMAX, C_BLACK))
self.file_name = None
elif isinstance(cmap, gxapi.GXITR):
self.gxitr = cmap
else:
raise ValueError('Cannot make a color map from: {}'.format(cmap))
self._next = 0
self._title = title
self._units = unit_of_measure
def __iter__(self):
return self
def __next__(self):
if self._next >= self.length:
self._next = 0
raise StopIteration
else:
self._next += 1
return self.__getitem__(self._next - 1)
def __getitem__(self, item):
if item < 0 or item >= self.length:
raise IndexError
ir = gxapi.int_ref()
self.gxitr.get_zone_color(item, ir)
color = Color(ir.value)
if item < self.length - 1:
v = self.gxitr.get_zone_value(item)
else:
v = None
return v, color
def __setitem__(self, item, setting):
if item < 0 or item >= self.length:
raise IndexError
if not isinstance(setting[1], int):
setting = (setting[0], setting[1].int_value)
self.gxitr.set_zone_color(item, setting[1])
if item < self.length - 1:
self.gxitr.set_zone_value(item, setting[0])
def __eq__(self, other):
if self.length != other.length:
return False
for i in range(self.length):
if self[i] != other[i]:
return False
return True
@property
def title(self):
"""
Title, usually the name of the data from which the color bar was made or is intended.
None if no title
.. versionadded:: 9.2
"""
return self._title
@title.setter
def title(self, title):
if title:
self._title = str(title)
else:
self._title = None
@property
def unit_of_measure(self):
"""
Data unit of measure for the data from which the color bar was made or is intended.
None if the unit of measure is unknown.
.. versionadded:: 9.2
"""
return self._units
@unit_of_measure.setter
def unit_of_measure(self, units):
if units:
self._units = str(units)
else:
self._units = None
@property
def data_limits(self):
"""
Data limits of color map
The limit values are for information only. Applications will
assume that these values represent the largest and smallest
values in a population represented by the ITR. If they are
dummy, they have not been set.
:returns: min/max tuple
.. versionadded:: 9.4
"""
min = gxapi.float_ref()
max = gxapi.float_ref()
self.gxitr.get_data_limits(min, max)
return (min.value, max.value)
@data_limits.setter
def data_limits(self, limits):
        self.gxitr.set_data_limits(limits[0], limits[1])
@property
def length(self):
"""
Number of color zones in the map.
"""
return self.gxitr.get_size()
@property
def brightness(self):
"""
        Brightness is a value between -1 (black) and +1 (white). The default is 0.
:returns: brightness, -1 to +1
.. versionadded:: 9.2
"""
return self.gxitr.get_brightness()
@property
def color_map(self):
"""list of zone limts, colours in the color map"""
return [vc for vc in self]
@property
def color_map_rgb(self):
"""list of zone limits and (red, green, blue) colours"""
return [(vc[0], vc[1].rgb) for vc in self]
@brightness.setter
def brightness(self, value):
"""Map brightness between -1 (black ) and +1 (white. Can be set."""
self.gxitr.change_brightness(value)
@property
def model_type(self):
"""Geosoft colour model used in the Geosoft :class:`geosoft.gxapi.GXITR`"""
return self.gxitr.get_zone_model_type()
@property
def initialized(self):
"""
Returns True if the color_map has been initialized to have zone boundaries.
.. versionadded:: 9.2
"""
return self.length > 0 and self[0][0] != gxapi.rMAX
def set_sequential(self, start=0, increment=1):
"""
Set color map zones based on a start and increment between each color zone.
:param start: minimum zone boundary, values <= this value will have the first color
:param increment: increment between each color.
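        For example, a hypothetical call:
        .. code::
            cmap.set_sequential(start=0, increment=10)   # zone boundaries at 0, 10, 20, ...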
.. versionadded:: 9.2
"""
if increment <= 0:
raise ValueError(_t('increment must be > 0.'))
for i in range(self.length - 1):
self.gxitr.set_zone_value(i, start + i * increment)
def set_linear(self, minimum, maximum, inner_limits=True, contour_interval=None):
"""
Set the map boundaries based on a linear distribution between minimum and maximum.
:param minimum: minimum
:param maximum: maximum
:param inner_limits: True if the range specifies the inner limits of the color mappings, in which
case values less than or equal to the minimum are mapped to the first color
and colors greater than the maximum are mapped to the last color. If False,
the minimum and maximum are at the outer-edges of the color map.
        :param contour_interval: align color edges on this interval, which is useful for matching colors
                                 to a contour map, for example. The color map will be reduced in size by
                                 thinning out unneeded colors if necessary.
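        For example, a hypothetical call mapping 0 to 100 with colour edges aligned on intervals of 10:
        .. code::
            cmap.set_linear(0., 100., contour_interval=10.)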
.. versionadded:: 9.2
"""
if inner_limits:
if self.length < 3:
raise GroupException(_t("Colour map must have length >= 3 for inner edge linear range."))
delta = (maximum - minimum) / (self.length - 2)
minimum -= delta
maximum += delta
self.gxitr.linear(minimum, maximum,
gxapi.rDUMMY if contour_interval is None else contour_interval)
def set_logarithmic(self, minimum, maximum, contour_interval=None):
"""
Set the color boundaries based on a logarithmic distribution between minimum and maximum.
:param minimum: minimum, must be > 0
:param maximum: maximum
        :param contour_interval: align color edges on this interval, e.g. 10 for powers of 10. The color
                                 map will be reduced in size by thinning out unneeded colors if necessary.
.. versionadded:: 9.2
"""
self.gxitr.log_linear(minimum, maximum,
gxapi.rDUMMY if contour_interval is None else contour_interval)
def set_normal(self, standard_deviation, mean, expansion=1.0, contour_interval=None):
"""
Set the color boundaries using a normal distribution around a mean.
:param standard_deviation: the standard deviation of the normal distribution.
        :param mean: the mean of the normal distribution
:param expansion: expand by this factor around the mean
        :param contour_interval: align color edges on this interval, e.g. 10 for powers of 10. The color
                                 map will be reduced in size by thinning out unneeded colors if necessary.
.. versionadded:: 9.2
"""
self.gxitr.normal(standard_deviation,
mean,
expansion,
gxapi.rDUMMY if contour_interval is None else contour_interval)
def color_of_value(self, value):
"""
        Return the :class:`Color` of a value. The mapping is determined with exclusive minima, inclusive maxima
        for each color level. Values <= level [0] are assigned the [0] color, and values greater than
        the [n-2] level are assigned the [n-1] color.
:param value: data value
:returns: :class:`Color` instance
.. versionadded:: 9.2
"""
return Color(self.gxitr.color_value(value))
def save_file(self, file_name=None):
"""
Save to a Geosoft file, `.tbl`, `.itr` or `.zon`. If the file_name does not have an
extension and the color_map has not been initialized a `.tbl` file is created (colors only),
        otherwise an `.itr` is created, which contains both zone boundaries and colors.
:param file_name: file name, if None a temporary file is created
This is useful for gxapi methods that require a colour map to be loaded from a file. Say you
        have a Color_map instance named `cmap` and you want to create a GXCSYMB instance, which
        requires a colour map file:
.. code::
cs = gxapi.GXCSYMB.create(cmap.save_file())
.. versionadded:: 9.2
"""
if file_name is None:
file_name = gx.gx().temp_file()
fn, ext = os.path.splitext(file_name)
if not ext:
if self.initialized:
file_name = fn + '.itr'
else:
file_name = fn + '.tbl'
self.gxitr.save_file(file_name)
return file_name
|
{
"content_hash": "8c8b419ab988ae47f09c725a21451fca",
"timestamp": "",
"source": "github",
"line_count": 3109,
"max_line_length": 120,
"avg_line_length": 34.75361852685751,
"alnum_prop": 0.5325916945089728,
"repo_name": "GeosoftInc/gxpy",
"id": "c308e19c847ee60803b29447e8ffca2b1ea4c1f6",
"size": "108049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxpy/group.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
}
|
"""
AUTO-GENERATED BY `scripts/generate_protocol.py` using `data/browser_protocol.json`
and `data/js_protocol.json` as inputs! Please do not modify this file.
"""
import logging
from typing import Any, Optional, Union
from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase
log = logging.getLogger(__name__)
from chromewhip.protocol import runtime as Runtime
from chromewhip.protocol import network as Network
# LogEntry: Log entry.
class LogEntry(ChromeTypeBase):
def __init__(self,
source: Union['str'],
level: Union['str'],
text: Union['str'],
timestamp: Union['Runtime.Timestamp'],
url: Optional['str'] = None,
lineNumber: Optional['int'] = None,
stackTrace: Optional['Runtime.StackTrace'] = None,
networkRequestId: Optional['Network.RequestId'] = None,
workerId: Optional['str'] = None,
args: Optional['[Runtime.RemoteObject]'] = None,
):
self.source = source
self.level = level
self.text = text
self.timestamp = timestamp
self.url = url
self.lineNumber = lineNumber
self.stackTrace = stackTrace
self.networkRequestId = networkRequestId
self.workerId = workerId
self.args = args
# ViolationSetting: Violation configuration setting.
class ViolationSetting(ChromeTypeBase):
def __init__(self,
name: Union['str'],
threshold: Union['float'],
):
self.name = name
self.threshold = threshold
class Log(PayloadMixin):
""" Provides access to log entries.
"""
@classmethod
def clear(cls):
"""Clears the log.
"""
return (
cls.build_send_payload("clear", {
}),
None
)
@classmethod
def disable(cls):
"""Disables log domain, prevents further log entries from being reported to the client.
"""
return (
cls.build_send_payload("disable", {
}),
None
)
@classmethod
def enable(cls):
"""Enables log domain, sends the entries collected so far to the client by means of the
`entryAdded` notification.
"""
return (
cls.build_send_payload("enable", {
}),
None
)
@classmethod
def startViolationsReport(cls,
config: Union['[ViolationSetting]'],
):
"""start violation reporting.
:param config: Configuration for violations.
:type config: [ViolationSetting]
"""
return (
cls.build_send_payload("startViolationsReport", {
"config": config,
}),
None
)
@classmethod
def stopViolationsReport(cls):
"""Stop violation reporting.
"""
return (
cls.build_send_payload("stopViolationsReport", {
}),
None
)
class EntryAddedEvent(BaseEvent):
js_name = 'Log.entryAdded'
hashable = []
is_hashable = False
def __init__(self,
entry: Union['LogEntry', dict],
):
if isinstance(entry, dict):
entry = LogEntry(**entry)
self.entry = entry
@classmethod
def build_hash(cls):
raise ValueError('Unable to build hash for non-hashable type')
|
{
"content_hash": "fad45af1e85224c2c8d781341b1a3a63",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 95,
"avg_line_length": 27.472868217054263,
"alnum_prop": 0.5471218961625283,
"repo_name": "chuckus/chromewhip",
"id": "1bbf4e47e0efa0f6e714dca3d63944b579b47e66",
"size": "3597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromewhip/protocol/log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1951"
},
{
"name": "JavaScript",
"bytes": "261"
},
{
"name": "Makefile",
"bytes": "229"
},
{
"name": "Python",
"bytes": "2227857"
},
{
"name": "Shell",
"bytes": "2787"
}
],
"symlink_target": ""
}
|
from django import forms
from django.utils.translation import ugettext_lazy as _
#import settings
from cmsplugin_contact.nospam.forms import HoneyPotForm, RecaptchaForm, AkismetForm
class ContactForm(forms.Form):
email = forms.EmailField(label=_("Email"))
subject = forms.CharField(label=_("Subject"), required=False)
content = forms.CharField(label=_("Content"), widget=forms.Textarea())
template = "cmsplugin_contact/contact.html"
class HoneyPotContactForm(HoneyPotForm):
pass
class AkismetContactForm(AkismetForm):
akismet_fields = {
'comment_author_email': 'email',
'comment_content': 'content'
}
akismet_api_key = None
class RecaptchaContactForm(RecaptchaForm):
recaptcha_public_key = None
recaptcha_private_key = None
recaptcha_theme = None
|
{
"content_hash": "ddf4427834c6dae9c7934a25f3aa1df5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 83,
"avg_line_length": 30.40740740740741,
"alnum_prop": 0.7198538367844093,
"repo_name": "wlanslovenija/cmsplugin-contact",
"id": "d5be49c5406434fd4bc6be594586b88cb2b52aea",
"size": "821",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "cmsplugin_contact/forms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "57129"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import tarfile
import os
import logging
import C_Funct
from Parameters import *
def Loader(directory,sap,beam):
#------------------------------------------
# Creates a table for one .singlepulse file
#------------------------------------------
events = pd.DataFrame()
meta_data = pd.DataFrame()
name = os.path.basename(directory).split('_single')[0]
try:
#Open the file
tar_file = tarfile.open(directory)
events_file = tar_file.extractfile(name+'.singlepulse')
inf_file = tar_file.extractfile(name+'.inf')
#Load the events and meta-data tables
data = pd.read_csv(events_file, delim_whitespace=True, dtype=np.float32)
inf = pd.read_csv(inf_file, sep="=", dtype=str,error_bad_lines=False,warn_bad_lines=False,header=None,skipinitialspace=True)
tar_file.close()
events_file.close()
data.columns = ['DM','Sigma','Time','Sample','Downfact','Sampling','a','b','c']
data['Duration'] = data.Sampling*data.Downfact
data.Duration = data.Duration.astype(np.float32)
data.Sample = data.Sample.astype(np.int32)
data = data.ix[:,['DM','Sigma','Time','Duration','Sample','Downfact']]
data.index.name = 'idx'
data.insert(0,'BEAM',beam)
data.insert(0,'SAP',sap)
data.SAP = data.SAP.astype(np.uint8)
data.BEAM = data.BEAM.astype(np.uint8)
data['Pulse'] = 0
data.Pulse = data.Pulse.astype(np.int64)
inf = inf.iloc[[0,1,2,4,5,7],1]
inf.iloc[0] = inf.iloc[0].replace("_rfifind","")
inf = pd.DataFrame(inf).T
inf.columns=['File','Telescope','Instrument','RA','DEC','Epoch']
inf = inf.astype(str)
inf['File'] = inf.File.apply(lambda x: os.path.basename(x))
inf.insert(0,'BEAM',beam)
inf.insert(0,'SAP',sap)
inf.SAP = inf.SAP.astype(np.uint8)
inf.BEAM = inf.BEAM.astype(np.uint8)
#Append to the existing tables
events = data
meta_data = inf
except (IOError,pd.parser.CParserError):
#Handle missing beams
logging.warning("SAP "+str(sap)+" - BEAM "+str(beam)+" doesn't exist")
return events,meta_data
def Thresh(events):
#---------------------------------
# Applies thresholds to the events
#---------------------------------
#Remove events at the end of the observation
events = events[events.Time < DURATION - 10]
#Remove low-DM events
#events = events[events.DM > DM_MIN]
return events
def Group(events):
#-----------------------------------
# Assigns a pulse-code to each event
#-----------------------------------
C_Funct.Get_Group(events.DM.values, events.Sigma.values, events.Time.values, events.Duration.values, events.Pulse.values)
events.Pulse = (events.Pulse * 10 + events.SAP) * 1000 + events.BEAM
return
def TimeAlign(Time,DM):
#-------------------------------------------------
# Corrects for the time misalignment of the pulses
#-------------------------------------------------
# Quantifies the misalignment for a broad-band pulse
# Only the extreme frequencies are taken into account
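  # Standard (cold-plasma) dispersion delay between the band edges, with
  # frequencies in MHz and DM in pc cm^-3:
  #   full-band delay (s) = k * DM * (F_MIN**-2 - F_MAX**-2)
  # The factor 1/2 below shifts each event by half this delay, roughly aligning
  # pulses to the centre of the band.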
  k = 4148.808  # dispersion constant, s MHz^2 pc^-1 cm^3
delay = k * (F_MIN**-2 - F_MAX**-2)
Time += np.float32( delay * DM / 2 )
return Time
|
{
"content_hash": "7d470ea74b78971301a5fcc38a010f3a",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 128,
"avg_line_length": 28.557522123893804,
"alnum_prop": 0.5810350170436939,
"repo_name": "danielemichilli/LSPs",
"id": "a2b221cc3c938c450605eee088c9b0b43243a469",
"size": "3227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159123"
}
],
"symlink_target": ""
}
|
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VerveResponseTaskList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, status_code=None, message=None, list=None, data=None, records=None):
"""
VerveResponseTaskList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'status_code': 'str',
'message': 'str',
'list': 'list[Task]',
'data': 'Task',
'records': 'int'
}
self.attribute_map = {
'status_code': 'statusCode',
'message': 'message',
'list': 'list',
'data': 'data',
'records': 'records'
}
self._status_code = status_code
self._message = message
self._list = list
self._data = data
self._records = records
@property
def status_code(self):
"""
Gets the status_code of this VerveResponseTaskList.
:return: The status_code of this VerveResponseTaskList.
:rtype: str
"""
return self._status_code
@status_code.setter
def status_code(self, status_code):
"""
Sets the status_code of this VerveResponseTaskList.
:param status_code: The status_code of this VerveResponseTaskList.
:type: str
"""
self._status_code = status_code
@property
def message(self):
"""
Gets the message of this VerveResponseTaskList.
:return: The message of this VerveResponseTaskList.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this VerveResponseTaskList.
:param message: The message of this VerveResponseTaskList.
:type: str
"""
self._message = message
@property
def list(self):
"""
Gets the list of this VerveResponseTaskList.
:return: The list of this VerveResponseTaskList.
:rtype: list[Task]
"""
return self._list
@list.setter
def list(self, list):
"""
Sets the list of this VerveResponseTaskList.
:param list: The list of this VerveResponseTaskList.
:type: list[Task]
"""
self._list = list
@property
def data(self):
"""
Gets the data of this VerveResponseTaskList.
:return: The data of this VerveResponseTaskList.
:rtype: Task
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this VerveResponseTaskList.
:param data: The data of this VerveResponseTaskList.
:type: Task
"""
self._data = data
@property
def records(self):
"""
Gets the records of this VerveResponseTaskList.
:return: The records of this VerveResponseTaskList.
:rtype: int
"""
return self._records
@records.setter
def records(self, records):
"""
Sets the records of this VerveResponseTaskList.
:param records: The records of this VerveResponseTaskList.
:type: int
"""
self._records = records
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "b06c2f8c39cbbd0b96010d1e650cb5c6",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 186,
"avg_line_length": 25.9126213592233,
"alnum_prop": 0.5419632821281378,
"repo_name": "iEngage/python-sdk",
"id": "9a11ae50076bf15e3c0417140449373eed9a2788",
"size": "5355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iengage_client/models/verve_response_task_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2373684"
},
{
"name": "Shell",
"bytes": "1664"
}
],
"symlink_target": ""
}
|
from webmachine.auth.base import Auth, BasicAuth
|
{
"content_hash": "93a1e5f28d10a7933cdc93417f30883f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.8367346938775511,
"repo_name": "benoitc/dj-webmachine",
"id": "ae5e858eac6605409f48c1441bca6aa8a6744294",
"size": "183",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webmachine/auth/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1515"
},
{
"name": "HTML",
"bytes": "2446"
},
{
"name": "JavaScript",
"bytes": "18922"
},
{
"name": "Python",
"bytes": "120249"
}
],
"symlink_target": ""
}
|
import six
import asyncio
import signal
from six.moves.urllib.parse import urlparse
from autobahn.wamp.types import ComponentConfig
from autobahn.asyncio.rawsocket import WampRawSocketClientFactory
import txaio
# TODO - unify with previous class
class ApplicationRunnerRawSocket(object):
"""
This class is a convenience tool mainly for development and quick hosting
of WAMP application components.
    It can host a WAMP application component in a WAMP-over-RawSocket client
    connecting to a WAMP router.
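    A minimal usage sketch (``MyAppSession`` is a hypothetical
    :class:`autobahn.asyncio.wamp.ApplicationSession` subclass and the socket
    path is illustrative)::
        runner = ApplicationRunnerRawSocket(u"unix:/tmp/cb-raw.sock", u"realm1")
        runner.run(MyAppSession)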
"""
log=txaio.make_logger()
def __init__(self, url, realm, extra=None, serializer=None):
"""
        :param url: Raw socket URL (unicode) - either a path on the local server to a unix socket (or unix:/path)
or tcp://host:port for internet socket
:type url: unicode
:param realm: The WAMP realm to join the application session to.
:type realm: unicode
:param extra: Optional extra configuration to forward to the application component.
:type extra: dict
:param serializer: WAMP serializer to use (or None for default serializer).
:type serializer: `autobahn.wamp.interfaces.ISerializer`
"""
assert(type(url) == six.text_type)
assert(type(realm) == six.text_type)
assert(extra is None or type(extra) == dict)
self.url = url
self.realm = realm
self.extra = extra or dict()
self.serializer = serializer
def run(self, make, logging_level='info'):
"""
Run the application component.
:param make: A factory that produces instances of :class:`autobahn.asyncio.wamp.ApplicationSession`
when called with an instance of :class:`autobahn.wamp.types.ComponentConfig`.
:type make: callable
"""
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception:
self.log.failure("App session could not be created! ")
asyncio.get_event_loop().stop()
else:
return session
parsed_url = urlparse(self.url)
if parsed_url.scheme == 'tcp':
is_unix = False
if not parsed_url.hostname or not parsed_url.port:
raise ValueError('Host and port is required in URL')
elif parsed_url.scheme == 'unix' or parsed_url.scheme == '':
is_unix = True
if not parsed_url.path:
raise ValueError('Path to unix socket must be in URL')
transport_factory = WampRawSocketClientFactory(create, serializer=self.serializer)
loop = asyncio.get_event_loop()
if logging_level == 'debug':
loop.set_debug(True)
txaio.use_asyncio()
txaio.config.loop = loop
try:
loop.add_signal_handler(signal.SIGTERM, loop.stop)
except NotImplementedError:
# signals are not available on Windows
pass
def handle_error(loop, context):
self.log.error('Application Error: {err}', err=context)
loop.stop()
loop.set_exception_handler(handle_error)
if is_unix:
coro = loop.create_unix_connection(transport_factory, parsed_url.path)
else:
coro = loop.create_connection(transport_factory, parsed_url.hostname, parsed_url.port)
(_transport, protocol) = loop.run_until_complete(coro)
txaio.start_logging(level=logging_level) # @UndefinedVariable
try:
loop.run_forever()
except KeyboardInterrupt:
pass
self.log.debug('Left main loop waiting for completion')
# give Goodbye message a chance to go through, if we still
# have an active session
# it's not working now - because protocol is_closed must return Future
if protocol._session:
loop.run_until_complete(protocol._session.leave())
loop.close()
|
{
"content_hash": "4992d1b05d6449354531882eae6a1cca",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 107,
"avg_line_length": 35.839285714285715,
"alnum_prop": 0.6228201295465869,
"repo_name": "Jenselme/AutobahnPython",
"id": "211d4e582544909ecb250bc2c659142c67d9c0bb",
"size": "4014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/asyncio/wamp/rawsocket/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3849"
},
{
"name": "Python",
"bytes": "1065688"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
for root_path, dirs, files in os.walk(root):
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
path = os.path.join(root_path, file)
media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
|
{
"content_hash": "d776fc6af494ee101be3159e588a4d81",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 38.02325581395349,
"alnum_prop": 0.5755351681957187,
"repo_name": "brunogamacatao/portalsaladeaula",
"id": "c07dc64592615200699761759dd877e83be73d66",
"size": "1635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediagenerator/generators/copyfiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "84537"
},
{
"name": "JavaScript",
"bytes": "616811"
},
{
"name": "Python",
"bytes": "4545655"
},
{
"name": "Ruby",
"bytes": "2070"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
}
|
import urlparse
from webob import Request
from restkit.contrib.wsgi_proxy import HostProxy
import restkit
from restkit.conn import Connection
from socketpool import ConnectionPool
restkit.set_logging("debug")
pool = ConnectionPool(factory=Connection, max_size=10, backend="thread")
proxy = HostProxy("http://127.0.0.1:5984", pool=pool)
def application(environ, start_response):
req = Request(environ)
if 'RAW_URI' in req.environ:
# gunicorn so we use real path non encoded
u = urlparse.urlparse(req.environ['RAW_URI'])
req.environ['PATH_INFO'] = u.path
# do smth like adding oauth headers ..
resp = req.get_response(proxy)
# rewrite response
# do auth ...
return resp(environ, start_response)
|
{
"content_hash": "506409fdf1f01ade0622ea367ac14835",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 26.892857142857142,
"alnum_prop": 0.7078353253652059,
"repo_name": "benoitc/restkit",
"id": "ff1e062495c7634d750092d2e5c6bc5d4592fca1",
"size": "880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/couchdbproxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187462"
}
],
"symlink_target": ""
}
|
from nose.tools import * # noqa
import mock
import responses
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
import urlparse
from framework.auth.core import Auth
from website.addons.mendeley.tests.factories import (
MendeleyAccountFactory,
MendeleyUserSettingsFactory,
MendeleyNodeSettingsFactory
)
from website.addons.mendeley import views
from utils import mock_responses
API_URL = 'https://api.mendeley.com'
class MockNode(object):
addon = None
@property
def is_deleted(self):
return False
@property
def is_public(self):
return True
def get_addon(self, name):
if name == 'mendeley':
return self.addon
return None
class MendeleyViewsTestCase(OsfTestCase):
def setUp(self):
super(MendeleyViewsTestCase, self).setUp()
self.account = MendeleyAccountFactory()
self.user = AuthUserFactory(external_accounts=[self.account])
self.account.display_name = self.user.fullname
self.account.save()
self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account)
self.project = ProjectFactory(creator=self.user)
self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
self.node_addon.set_auth(external_account=self.account, user=self.user)
#self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'})
self.node = MockNode()
self.node.addon = self.node_addon
self.id_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_id')
self.secret_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_secret')
self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
self.id_patcher.start()
self.secret_patcher.start()
def tearDown(self):
self.id_patcher.stop()
self.secret_patcher.stop()
def test_serialize_settings_authorizer(self):
#"""dict: a serialized version of user-specific addon settings"""
res = self.app.get(
self.project.api_url_for('mendeley_get_config'),
auth=self.user.auth,
)
assert_true(res.json['nodeHasAuth'])
assert_true(res.json['userHasAuth'])
assert_true(res.json['userIsOwner'])
assert_equal(res.json['folder'], '')
assert_equal(res.json['ownerName'], self.user.fullname)
assert_true(res.json['urls']['auth'])
assert_true(res.json['urls']['config'])
assert_true(res.json['urls']['deauthorize'])
assert_true(res.json['urls']['folders'])
assert_true(res.json['urls']['importAuth'])
assert_true(res.json['urls']['settings'])
def test_serialize_settings_non_authorizer(self):
#"""dict: a serialized version of user-specific addon settings"""
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
res = self.app.get(
self.project.api_url_for('mendeley_get_config'),
auth=non_authorizing_user.auth,
)
assert_true(res.json['nodeHasAuth'])
assert_false(res.json['userHasAuth'])
assert_false(res.json['userIsOwner'])
assert_equal(res.json['folder'], '')
assert_equal(res.json['ownerName'], self.user.fullname)
assert_true(res.json['urls']['auth'])
assert_true(res.json['urls']['config'])
assert_true(res.json['urls']['deauthorize'])
assert_true(res.json['urls']['folders'])
assert_true(res.json['urls']['importAuth'])
assert_true(res.json['urls']['settings'])
def test_set_auth(self):
res = self.app.post_json(
self.project.api_url_for('mendeley_add_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
assert_true(res.json['result']['userHasAuth'])
assert_equal(
self.node_addon.user_settings,
self.user_addon
)
assert_equal(
self.node_addon.external_account,
self.account
)
def test_remove_user_auth(self):
self.node_addon.set_auth(self.account, self.user)
res = self.app.delete_json(
self.project.api_url_for('mendeley_remove_user_auth'),
{
'external_account_id': self.account._id,
},
auth=self.user.auth,
)
assert_equal(
res.status_code,
200
)
self.node_addon.reload()
assert_is_none(self.node_addon.user_settings)
assert_is_none(self.node_addon.external_account)
def test_set_config_owner(self):
# Settings config updates node settings
self.node_addon.associated_user_settings = []
self.node_addon.save()
res = self.app.put_json(
self.project.api_url_for('mendeley_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=self.user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
assert_equal(res.json, {})
def test_set_config_not_owner(self):
user = AuthUserFactory()
user.add_addon('mendeley')
self.project.add_contributor(user)
self.project.save()
res = self.app.put_json(
self.project.api_url_for('mendeley_set_config'),
{
'external_account_id': self.account._id,
'external_list_id': 'list',
},
auth=user.auth,
)
self.node_addon.reload()
assert_equal(self.user_addon, self.node_addon.user_settings)
assert_equal(res.json, {})
def test_mendeley_widget_view_complete(self):
# JSON: everything a widget needs
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user))
res = views.mendeley_widget(node_addon=self.node_addon,
project=self.project,
node=self.node,
nid=self.node_addon._id,
pid=self.project._id,
auth=self.user.auth)
assert_true(res['complete'])
assert_equal(res['list_id'], 'ROOT-ID')
def test_widget_view_incomplete(self):
# JSON: tell the widget when it hasn't been configured
assert_false(self.node_addon.complete)
assert_equal(self.node_addon.mendeley_list_id, None)
res = views.mendeley_widget(node_addon=self.node_addon,
project=self.project,
node=self.node,
nid=self.node_addon._id,
pid=self.project._id,
auth=self.user.auth)
assert_false(res['complete'])
assert_is_none(res['list_id'])
@responses.activate
def test_mendeley_citation_list_root(self):
responses.add(
responses.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list'),
auth=self.user.auth
)
root = res.json['contents'][0]
assert_equal(root['kind'], 'folder')
assert_equal(root['id'], 'ROOT')
assert_equal(root['parent_list_id'], '__')
@responses.activate
def test_mendeley_citation_list_non_root(self):
responses.add(
responses.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
responses.add(
responses.GET,
urlparse.urljoin(API_URL, 'documents'),
body=mock_responses['documents'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
auth=self.user.auth
)
children = res.json['contents']
assert_equal(len(children), 7)
assert_equal(children[0]['kind'], 'folder')
assert_equal(children[1]['kind'], 'file')
assert_true(children[1].get('csl') is not None)
@responses.activate
def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
non_authorizing_user = AuthUserFactory()
self.project.add_contributor(non_authorizing_user, save=True)
self.node_addon.mendeley_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f'
self.node_addon.save()
responses.add(
responses.GET,
urlparse.urljoin(API_URL, 'folders'),
body=mock_responses['folders'],
content_type='application/json'
)
responses.add(
responses.GET,
urlparse.urljoin(API_URL, 'documents'),
body=mock_responses['documents'],
content_type='application/json'
)
res = self.app.get(
self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
auth=non_authorizing_user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
|
{
"content_hash": "d34391da6191a5cec6e80cd22ee6bbba",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 102,
"avg_line_length": 34.30795847750865,
"alnum_prop": 0.5806354009077156,
"repo_name": "kushG/osf.io",
"id": "7f26274a7154f0215bcdeea3f2ac99c68e4494bf",
"size": "9940",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/addons/mendeley/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74880"
},
{
"name": "HTML",
"bytes": "34349"
},
{
"name": "JavaScript",
"bytes": "839000"
},
{
"name": "Mako",
"bytes": "465890"
},
{
"name": "Python",
"bytes": "2490642"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
"""Base class for RPC testing."""
import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
from typing import List
from .address import create_deterministic_address_bcrt1_p2tr_op_true
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
get_datadir_path,
initialize_datadir,
p2p_port,
wait_until_helper,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "bitcoin_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
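    A minimal subclass sketch (hypothetical test, following the conventions above):
        class ExampleTest(BitcoinTestFramework):
            def set_test_params(self):
                self.num_nodes = 1
            def run_test(self):
                self.log.info("Best block: %s" % self.nodes[0].getbestblockhash())
        if __name__ == '__main__':
            ExampleTest().main()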
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain: str = 'regtest'
self.setup_clean_chain: bool = False
self.nodes: List[TestNode] = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = True
self.bind_to_localhost_only = True
self.parse_args()
self.disable_syscall_sandbox = self.options.nosandbox or self.options.valgrind
self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
self.wallet_data_filename = "wallet.dat"
# Optional list of wallet names that can be set in set_test_params to
# create and import keys to. If unset, default is len(nodes) *
# [default_wallet_name]. If wallet names are None, wallet creation is
# skipped. If list is truncated, wallet creation is skipped and keys
# are not imported.
self.wallet_names = None
# By default the wallet is not required. Set to true by skip_if_no_wallet().
# When False, we ignore wallet_names regardless of what it is.
self.requires_wallet = False
# Disable ThreadOpenConnections by default, so that adding entries to
# addrman will not result in automatic connections to them.
self.disable_autoconnect = True
self.set_test_params()
assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
if self.options.timeout_factor == 0 :
self.options.timeout_factor = 99999
self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
try:
self.setup()
self.run_test()
except JSONRPCException:
self.log.exception("JSONRPC error")
self.success = TestStatus.FAILED
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
self.success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
self.success = TestStatus.FAILED
except KeyError:
self.log.exception("Key error")
self.success = TestStatus.FAILED
except subprocess.CalledProcessError as e:
self.log.exception("Called Process failed with '{}'".format(e.output))
self.success = TestStatus.FAILED
except Exception:
self.log.exception("Unexpected exception caught during testing")
self.success = TestStatus.FAILED
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
self.success = TestStatus.FAILED
finally:
exit_code = self.shutdown()
sys.exit(exit_code)
def parse_args(self):
previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--nosandbox", dest="nosandbox", default=False, action="store_true",
help="Don't use the syscall sandbox")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
help="Force test of previous releases (default: %(default)s)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown. valgrind 3.14 or later required. Forces --nosandbox.")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
group = parser.add_mutually_exclusive_group()
group.add_argument("--descriptors", action='store_const', const=True,
help="Run test using a descriptor wallet", dest='descriptors')
group.add_argument("--legacy-wallet", action='store_const', const=False,
help="Run test using legacy wallets", dest='descriptors')
self.add_options(parser)
# Running TestShell in a Jupyter notebook causes an additional -f argument
# To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument
# source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168
parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1")
self.options = parser.parse_args()
self.options.previous_releases_path = previous_releases_path
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
if self.options.descriptors is None:
# Prefer BDB unless it isn't available
if self.is_bdb_compiled():
self.options.descriptors = False
elif self.is_sqlite_compiled():
self.options.descriptors = True
else:
# If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter
# It still needs to exist and be None in order for tests to work however.
self.options.descriptors = None
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = self.config
fname_bitcoind = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"bitcoind" + config["environment"]["EXEEXT"],
)
fname_bitcoincli = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"bitcoin-cli" + config["environment"]["EXEEXT"],
)
self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind)
self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli)
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
def shutdown(self):
"""Call this method to shut down the test framework object."""
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
self.success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if self.success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif self.success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("")
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
self.log.error("")
self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
self.log.error("")
exit_code = TEST_EXIT_FAILED
# Logging.shutdown will not remove stream- and filehandlers, so we must
# do it explicitly. Handlers are removed so the next test run can apply
# different log handler settings.
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
for h in list(self.log.handlers):
h.flush()
h.close()
self.log.removeHandler(h)
rpc_logger = logging.getLogger("BitcoinRPC")
for h in list(rpc_logger.handlers):
h.flush()
rpc_logger.removeHandler(h)
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
self.nodes.clear()
return exit_code
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must override this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# self.connect_nodes(1, 2)
for i in range(self.num_nodes - 1):
self.connect_nodes(i + 1, i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = [[]] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
if self.requires_wallet:
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for i in range(self.num_nodes):
self.init_wallet(node=i)
def init_wallet(self, *, node):
wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[node] if node < len(self.wallet_names) else False
if wallet_name is not False:
n = self.nodes[node]
if wallet_name is not None:
n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=True)
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
def get_bin_from_version(version, bin_name, bin_default):
if not version:
return bin_default
return os.path.join(
self.options.previous_releases_path,
re.sub(
r'\.0$',
'', # remove trailing .0 for point releases
'v{}.{}.{}.{}'.format(
(version % 100000000) // 1000000,
(version % 1000000) // 10000,
(version % 10000) // 100,
(version % 100) // 1,
),
),
'bin',
bin_name,
)
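# For illustration: version=170000 formats as 'v0.17.0.0', the trailing '.0' is stripped by the
# re.sub above, and the returned path is <previous_releases_path>/v0.17.0/bin/bitcoind.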
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if self.is_syscall_sandbox_compiled() and not self.disable_syscall_sandbox:
for i in range(len(extra_args)):
if versions[i] is None or versions[i] >= 219900:
extra_args[i] = extra_args[i] + ["-sandbox=log-and-abort"]
if binary is None:
binary = [get_bin_from_version(v, 'bitcoind', self.options.bitcoind) for v in versions]
if binary_cli is None:
binary_cli = [get_bin_from_version(v, 'bitcoin-cli', self.options.bitcoincli) for v in versions]
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
test_node_i = TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
bitcoind=binary[i],
bitcoin_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
descriptors=self.options.descriptors,
)
self.nodes.append(test_node_i)
if not test_node_i.version_is_at_least(170000):
# adjust conf for pre 17
conf_file = test_node_i.bitcoinconf
with open(conf_file, 'r', encoding='utf8') as conf:
conf_data = conf.read()
with open(conf_file, 'w', encoding='utf8') as conf:
conf.write(conf_data.replace('[regtest]', ''))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
def stop_nodes(self, wait=0):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait, wait_until_stopped=False)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def connect_nodes(self, a, b):
from_connection = self.nodes[a]
to_connection = self.nodes[b]
ip_port = "127.0.0.1:" + str(p2p_port(b))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
# See comments in net_processing:
# * Must have a version message before anything else
# * Must have a verack message before anything else
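# (A verack message has an empty payload, so the 24 bytes checked below are exactly one P2P
# message header: 4-byte magic, 12-byte command, 4-byte length, 4-byte checksum.)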
wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['version'] != 0 for peer in to_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()))
def disconnect_nodes(self, a, b):
def disconnect_nodes_helper(from_connection, node_num):
def get_peer_ids():
result = []
for peer in from_connection.getpeerinfo():
if "testnode{}".format(node_num) in peer['subver']:
result.append(peer['id'])
return result
peer_ids = get_peer_ids()
if not peer_ids:
self.log.warning("disconnect_nodes: {} and {} were not connected".format(
from_connection.index,
node_num,
))
return
for peer_id in peer_ids:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until_helper(lambda: not get_peer_ids(), timeout=5)
disconnect_nodes_helper(self.nodes[a], b)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
self.disconnect_nodes(1, 2)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
self.connect_nodes(1, 2)
self.sync_all()
def no_op(self):
pass
def generate(self, generator, *args, sync_fun=None, **kwargs):
blocks = generator.generate(*args, invalid_call=False, **kwargs)
sync_fun() if sync_fun else self.sync_all()
return blocks
def generateblock(self, generator, *args, sync_fun=None, **kwargs):
blocks = generator.generateblock(*args, invalid_call=False, **kwargs)
sync_fun() if sync_fun else self.sync_all()
return blocks
def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs):
blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs)
sync_fun() if sync_fun else self.sync_all()
return blocks
def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs):
blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs)
sync_fun() if sync_fun else self.sync_all()
return blocks
def sync_blocks(self, nodes=None, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(b) for b in best_hash),
))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(m) for m in pool),
))
def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)
def wait_until(self, test_function, timeout=60):
return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs at INFO level and higher, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain
Afterward, create num_nodes copies from the cache."""
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
bitcoind=self.options.bitcoind,
bitcoin_cli=self.options.bitcoincli,
coverage_dir=None,
cwd=self.options.tmpdir,
descriptors=self.options.descriptors,
))
self.start_node(CACHE_NODE_ID)
cache_node = self.nodes[CACHE_NODE_ID]
# Wait for RPC connections to be ready
cache_node.wait_for_rpc_connection()
# Set a time in the past, so that blocks don't end up in the future
cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])
# Create a 199-block-long chain; each of the 3 first nodes
# gets 25 mature blocks and 25 immature.
# The 4th address gets 25 mature and only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
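# Arithmetic check: the 8 generation rounds below yield 7 * 25 + 24 = 199 blocks, matching the
# assert_equal(..., 199) that follows the loop.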
gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [create_deterministic_address_bcrt1_p2tr_op_true()[0]]
assert_equal(len(gen_addresses), 4)
for i in range(8):
self.generatetoaddress(
cache_node,
nblocks=25 if i != 7 else 24,
address=gen_addresses[i % len(gen_addresses)],
)
assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, self.chain, *paths)
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path()):
if entry not in ['chainstate', 'blocks', 'indexes']: # Keep only the chainstate, blocks and indexes folders
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
self.requires_wallet = True
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
if self.options.descriptors:
self.skip_if_no_sqlite()
else:
self.skip_if_no_bdb()
def skip_if_no_sqlite(self):
"""Skip the running test if sqlite has not been compiled."""
if not self.is_sqlite_compiled():
raise SkipTest("sqlite has not been compiled.")
def skip_if_no_bdb(self):
"""Skip the running test if BDB has not been compiled."""
if not self.is_bdb_compiled():
raise SkipTest("BDB has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if bitcoin-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("bitcoin-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if bitcoin-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bitcoin-cli has not been compiled.")
def skip_if_no_previous_releases(self):
"""Skip the running test if previous releases are not available."""
if not self.has_previous_releases():
raise SkipTest("previous releases not available or disabled")
def has_previous_releases(self):
"""Checks whether previous releases are present and enabled."""
if not os.path.isdir(self.options.previous_releases_path):
if self.options.prev_releases:
raise AssertionError("Force test of previous releases but releases missing: {}".format(
self.options.previous_releases_path))
return self.options.prev_releases
def skip_if_no_external_signer(self):
"""Skip the running test if external signer support has not been compiled."""
if not self.is_external_signer_compiled():
raise SkipTest("external signer support has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bitcoin-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")
def is_external_signer_compiled(self):
"""Checks whether external signer support was compiled."""
return self.config["components"].getboolean("ENABLE_EXTERNAL_SIGNER")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET")
def is_specified_wallet_compiled(self):
"""Checks whether wallet support for the specified type
(legacy or descriptor wallet) was compiled."""
if self.options.descriptors:
return self.is_sqlite_compiled()
else:
return self.is_bdb_compiled()
def is_wallet_tool_compiled(self):
"""Checks whether bitcoin-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
return self.config["components"].getboolean("ENABLE_ZMQ")
def is_sqlite_compiled(self):
"""Checks whether the wallet module was compiled with Sqlite support."""
return self.config["components"].getboolean("USE_SQLITE")
def is_bdb_compiled(self):
"""Checks whether the wallet module was compiled with BDB support."""
return self.config["components"].getboolean("USE_BDB")
def is_syscall_sandbox_compiled(self):
"""Checks whether the syscall sandbox was compiled."""
return self.config["components"].getboolean("ENABLE_SYSCALL_SANDBOX")
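# Illustrative sketch (not part of the framework): a minimal functional test built on
# BitcoinTestFramework. The class name ExampleTest and its test logic are hypothetical; they only
# show the contract enforced by BitcoinTestMetaClass (override set_test_params() and run_test(),
# never __init__() or main()).
#
#     class ExampleTest(BitcoinTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#
#         def run_test(self):
#             self.log.info("Mine a block and check that both nodes agree on the tip")
#             self.generate(self.nodes[0], 1)
#             assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
#
#     if __name__ == '__main__':
#         ExampleTest().main()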
|
{
"content_hash": "e57c782c0b8142bb894e31a23c0e40bb",
"timestamp": "",
"source": "github",
"line_count": 911,
"max_line_length": 312,
"avg_line_length": 45.12843029637761,
"alnum_prop": 0.607511188947266,
"repo_name": "mm-s/bitcoin",
"id": "8f75255caffb5fdfbda8981c32236357f8b858cc",
"size": "41326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/test_framework.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1228370"
},
{
"name": "C++",
"bytes": "9407360"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "541"
},
{
"name": "M4",
"bytes": "247147"
},
{
"name": "Makefile",
"bytes": "136414"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2661378"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56897"
},
{
"name": "Scheme",
"bytes": "24076"
},
{
"name": "Shell",
"bytes": "211674"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from cms.utils.page import _page_is_published
def get_article_from_slug(tree, slug, preview=False, draft=False):
"""
Resolves a slug to a single article object.
Returns None if article does not exist
"""
from ..models import Title
titles = Title.objects.select_related('article').filter(article__tree=tree)
published_only = (not draft and not preview)
if draft:
titles = titles.filter(publisher_is_draft=True)
elif preview:
titles = titles.filter(publisher_is_draft=False)
else:
titles = titles.filter(published=True, publisher_is_draft=False)
titles = titles.filter(slug=slug)
for title in titles.iterator():
if published_only and not _page_is_published(title.article):
continue
title.article.title_cache = {title.language: title}
return title.article
return
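# Illustrative usage sketch (the tree object and slug value are hypothetical):
#
#     article = get_article_from_slug(tree, 'hello-world')
#     if article is not None:
#         # article.title_cache now maps the matched language to its Title instance
#         ...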
|
{
"content_hash": "311c1f896a88ad4ec39bd88eb0f5ec32",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 79,
"avg_line_length": 30.633333333333333,
"alnum_prop": 0.675734494015234,
"repo_name": "misli/django-cms-articles",
"id": "96d64be16382a87baffb94d30a57d4deaf98cf2f",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cms_articles/utils/article.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "717"
},
{
"name": "HTML",
"bytes": "12453"
},
{
"name": "JavaScript",
"bytes": "2262"
},
{
"name": "Python",
"bytes": "171047"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('indicators', '0078_auto_20191107_1339'),
]
operations = [
migrations.AlterField(
model_name='disaggregationtype',
name='disaggregation_type',
field=models.CharField(max_length=135, verbose_name='Disaggregation'),
),
]
|
{
"content_hash": "f73d22be33bcd39b4bb25d8c6dd7296a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 24.8125,
"alnum_prop": 0.6171284634760705,
"repo_name": "mercycorps/TolaActivity",
"id": "24c9b7f59e7a5fa46717893eab2060474e91a35c",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indicators/migrations/0079_auto_20191107_1344.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
}
|
from model import Magikarp
import numpy as np
import tensorflow as tf
config = {}
config['batch_size'] = 64
config['datafile'] = '../Data/training_data.hdf5'
config['p_datafile'] = '../Data/tal_data.hdf5'
config['full_boards_file'] = '../Data/full_boards.pkl'
config['num_epochs'] = 10
config['save_file'] = 'trained_model/trained_genadv.ckpt'
with tf.Session() as sess:
magikarp = Magikarp(config, sess)
magikarp.train()
|
{
"content_hash": "3ac9fdd11d86f57d3e79cd0527054ece",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 28.4,
"alnum_prop": 0.704225352112676,
"repo_name": "2014mchidamb/AdversarialChess",
"id": "039a6144065ebf391f0c1374bca339cb38d29546",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17319"
}
],
"symlink_target": ""
}
|
class PdfObject(str):
''' A PdfObject is a textual representation of any PDF file object
other than an array, dict or string. It has an indirect attribute
which defaults to False.
'''
indirect = False
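# Illustrative sketch: instances behave like ordinary strings plus the extra flag, e.g.
#
#     obj = PdfObject('true')
#     assert obj == 'true' and obj.indirect is False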
|
{
"content_hash": "d9cda09bbccd73bab9bfbe19eb6e5d06",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 38.166666666666664,
"alnum_prop": 0.6724890829694323,
"repo_name": "tajtiattila/pdfrw",
"id": "8a0fc88d991b689aeadefd9569f8d6eabc7ffd18",
"size": "372",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pdfrw/objects/pdfobject.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105035"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from activitystreams.models import Source, Action, Activity, ActivityObject, Interest, UserObject
class SourceAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'slug',)
list_editable = ('slug',)
class ActionAdmin(admin.ModelAdmin):
list_display = ('source', 'slug',)
list_editable = ('slug',)
admin.site.register(Source, SourceAdmin)
admin.site.register(Action, ActionAdmin)
admin.site.register(Activity)
admin.site.register(ActivityObject)
admin.site.register(Interest)
admin.site.register(UserObject)
|
{
"content_hash": "4d00260ab8d42ccfeb4ce57434e9e434",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 97,
"avg_line_length": 31.63157894736842,
"alnum_prop": 0.7437603993344426,
"repo_name": "nowells/django-activitystreams",
"id": "215441e152dffaa2d5484a6b83e358e4564bd8d4",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "activitystreams/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23512"
}
],
"symlink_target": ""
}
|
"""Support for THOMSON routers."""
import logging
import re
import telnetlib
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r"(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s"
r"(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+"
r"(?P<status>([^\s]+))\s+"
r"(?P<type>([^\s]+))\s+"
r"(?P<intf>([^\s]+))\s+"
r"(?P<hwintf>([^\s]+))\s+"
r"(?P<host>([^\s]+))"
)
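# For illustration, a hypothetical "hostmgr list" row that _DEVICES_REGEX would match:
#   00:11:22:33:44:55 192.168.1.23 C DHCP LocalNetwork ethport1 my-laptop
# giving the named groups mac, ip, status ('C' = connected), type, intf, hwintf and host.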
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
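# Illustrative configuration.yaml entry for this platform (host and credentials are placeholders):
#
#   device_tracker:
#     - platform: thomson
#       host: 192.168.1.1
#       username: Administrator
#       password: YOUR_PASSWORD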
def get_scanner(hass, config):
"""Validate the configuration and return a THOMSON scanner."""
scanner = ThomsonDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ThomsonDeviceScanner(DeviceScanner):
"""This class queries a router running THOMSON firmware."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.last_results = {}
# Test the router is accessible.
data = self.get_thomson_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client["mac"] for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client["mac"] == device:
return client["host"]
return None
def _update_info(self):
"""Ensure the information from the THOMSON router is up to date.
Return boolean if scanning successful.
"""
if not self.success_init:
return False
_LOGGER.info("Checking ARP")
data = self.get_thomson_data()
if not data:
return False
# Flag C stands for CONNECTED
active_clients = [
client for client in data.values() if client["status"].find("C") != -1
]
self.last_results = active_clients
return True
def get_thomson_data(self):
"""Retrieve data from THOMSON and return parsed result."""
try:
telnet = telnetlib.Telnet(self.host)
telnet.read_until(b"Username : ")
telnet.write((self.username + "\r\n").encode("ascii"))
telnet.read_until(b"Password : ")
telnet.write((self.password + "\r\n").encode("ascii"))
telnet.read_until(b"=>")
telnet.write(("hostmgr list\r\n").encode("ascii"))
devices_result = telnet.read_until(b"=>").split(b"\r\n")
telnet.write("exit\r\n".encode("ascii"))
except EOFError:
_LOGGER.exception("Unexpected response from router")
return
except ConnectionRefusedError:
_LOGGER.exception("Connection refused by router. Telnet enabled?")
return
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode("utf-8"))
if match:
devices[match.group("ip")] = {
"ip": match.group("ip"),
"mac": match.group("mac").upper(),
"host": match.group("host"),
"status": match.group("status"),
}
return devices
|
{
"content_hash": "1fd6d039fa9bb267bb007f452acd818b",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 82,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.5724191063174114,
"repo_name": "Teagan42/home-assistant",
"id": "1f3fda6cc72c7555fc25acc6a79ac578983c2c1a",
"size": "3894",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/thomson/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
import utils # Common helper functions
import praw # Reddit API wrapper
import pymysql # To interact with the database
import time
import datetime
SUB_LIMIT = 2000000 # Maximum number of subreddits captured with getSubreddits()
TOP_SUB_LIMIT = 1500 # Number of subreddits from which submissions are extracted
UPDATE_SUBREDDITS = 1000 # Top subreddits whose subscriber counts are refreshed by getSubreddits(updateTop=True); value assumed from that function's docstring
MAX_SUBMISSIONS = 100 # Maximum number of submissions captured in a single API request
BELOW_MAX_SUBMISSIONS = 75 # A minimum number of submissions per request is needed to keep the process efficient
MIN_SCORE = 500 # Minimum score for a submission to be stored in the database
# IMPORTANT: IF THIS IS CHANGED, ESPECIALLY DOWNWARDS (<500), ALL SUBREDDITS
# WILL HAVE TO BE EXPLORED AGAIN. THE FIRST 500 TOOK 22 DAYS.
UPDATE_WAIT = ( 5 * 24 * 3600 ) # Wait time in seconds between updates (> MAX_UPPER_OFFSET)
# In production, this wait time should be reviewed to check whether it is excessive.
WAIT_FRACTION = ( 2 * 3600 ) # Fraction of time between messages announcing that
# the script is waiting to start a new iteration.
START_DATE = None # Date from which to start capturing submissions
# None captures every submission since the subreddit was created,
# or since the date of the last capture
# 31536000 equals one year in seconds
MAX_UPPER = None # End date up to which submissions are captured (reset in start() before each iteration)
MAX_UPPER_OFFSET = ( 3 * 24 * 3600 ) # Offset of the end date (seconds). By default, 3 days before the moment each
# iteration starts, to give submissions time to gather votes.
GET_SUBS_INTERVAL = 3600 # Initial time interval used to capture submissions
MAXIMUM_EXPANSION_MULTIPLIER = 2 # Safe cap when expanding intervals that returned too few results
validRequests = 0 # Total number of calls that return between 1 and 99 submissions
belowRequests = 0 # Total number of calls that return fewer than 75 submissions
aboveRequests = 0 # Total number of calls that return more than 99 submissions
totalSubmissions = 0 # Total number of submissions stored in the database
totalRounds = 1 # Total number of iterations of the start() loop
absoluteStartTime = time.time() # Time at which the script starts
def start():
''' Controls how the bot runs. Calls the functions that capture the subreddit
names, extracts the submissions and stores everything in a database.
'''
global totalRounds, MAX_UPPER
subredditsUpdated = True # Tracks whether getSubreddits() finished its work correctly
while True:
# End date and time up to which submissions are captured. The offset is in seconds.
MAX_UPPER = time.time() - MAX_UPPER_OFFSET
(r, db) = utils.rwlogin() # Connects to the API and the database
print('Consultant el nombre de subreddits emmagatzemats')
if (db.cur.execute('SELECT count(*) FROM subreddits')):
storedSubreddits = db.cur.fetchone()[0]
else:
storedSubreddits = 0
print('Hi han {0} subreddits.'.format(storedSubreddits))
# Subreddit update (~3h)
while subredditsUpdated:
subredditsUpdated = getSubreddits(False, False, r, db)
# Explore submissions from the selected subreddits
getSubmissions(False, r, db)
# Close the connections
db.con.close()
del db.cur
del db.con
print('Connexions tancades.')
totalRounds += 1
# Wait a set amount of time before running the next iteration
utils.updateWait(UPDATE_WAIT, WAIT_FRACTION)
def getSubreddits(manual=True, updateTop=False, r=None, db=None):
''' Extracts information from the page http://www.reddit.com/reddits
Specifically, it extracts a list with every subreddit and its number of
subscribers, among other data, and stores it all in an SQL table named
'subreddits'. If the subreddit already exists in the table, its subscriber
count is updated.
To use it, import it and call the function; the call can be made in two ways:
From the command line -> getSubreddits()
From the main script -> getSubreddits(False, r, db)
In the first case it connects to the API and the database; in the second,
the already created connections are passed in.
If True is passed as updateTop, the only thing it will do is look up the 1000
subreddits with the most subscribers and update their counts.
:param manual: True or False (optional)
:param updateTop: True or False (optional)
:param r: class 'praw.Reddit' (optional)
:param db: class 'utils.baseDades' (optional)
:return: False if there were no problems, True if the capture process must be repeated.
:rType: Boolean
'''
startTime = time.time() # to measure how long it takes
newSubs = 0 # to keep count of the subreddits processed
updatedSubs = 0
totalSubs = 0
# In theory subreddit names must be unique, but a subreddit with a duplicated
# name has been detected. It is discarded because it causes problems.
discarted = ('ColumbusBlueJackets',)
# If the function is called manually (the default), it connects to the database
# and the API. This distinguishes calls from the command line from calls made
# by the main script, where a connection already exists.
if manual:
(r, db) = utils.rwlogin() # connection to the API and the database
if not updateTop:
subreddits = r.get_popular_subreddits(limit=SUB_LIMIT)
print('Capturant subreddits...')
else:
db.cur.execute('SELECT display_name FROM subreddits ORDER BY subscribers DESC LIMIT {0}'.format(UPDATE_SUBREDDITS))
subredditsNames = db.cur.fetchall()
subreddits = []
print('Actualitzant els subscriptors dels {0} subreddits mes poblats...'.format(UPDATE_SUBREDDITS))
for name in subredditsNames:
# A list of PRAW subreddit objects is built from the subreddit names
subreddits.append(r.get_subreddit(name[0]))
# Iterate over the content
try:
for subreddit in subreddits:
# Check that the subreddit does not already exist in the database
if not (db.cur.execute("SELECT 1 FROM subreddits WHERE idstr = '{0}' LIMIT 1".format(subreddit.id))):
# Check whether it is one of reddit's 'errors', a subreddit with a duplicated name
if (subreddit.display_name not in discarted):
# If it does not exist and is not a duplicate, a new row is created in the database
db.cur.execute('INSERT INTO subreddits (idstr, idint, display_name, created_utc,' +
'description, subscribers, over18) VALUES(%s, %s, %s, %s, %s, %s, %s)',
((subreddit.id,),(utils.b36(subreddit.id),),(subreddit.display_name,),
(int(subreddit.created_utc),),(subreddit.public_description,),
(subreddit.subscribers,),(subreddit.over18,)))
newSubs += 1
# If it already exists, it is updated
else:
db.cur.execute("UPDATE subreddits SET subscribers = %s, over18 = %s WHERE idstr = '%s' LIMIT 1"
% (subreddit.subscribers, subreddit.over18, subreddit.id))
updatedSubs += 1
# Update the table that records changes in the number of subscribers
if (subreddit.display_name not in discarted):
db.cur.execute('INSERT INTO subscribers (idsub, subscribers) VALUES({0},{1})'.format(utils.b36(subreddit.id), subreddit.subscribers))
totalSubs = newSubs + updatedSubs
# Every X subreddits, commit to the database
if not (totalSubs % 100):
db.con.commit()
print('Últim subreddit processat: %s. Total processats: %s' % (subreddit.id, totalSubs))
# End for subreddit in subreddits
except Exception as e:
text = 'getSubreddits():Actualització: {4}\nSubreddit on ha fallat: {2} ({3})\nEXCEPCIO: {0}\nMISSATGE: {1}'.format(
e.__doc__, str(e), subreddit.display_name, subreddit.id, updateTop)
utils.storeExcept(text, db.cur, db.con)
if(manual):
db.con.close()
del db.cur
del db.con
print('Connexio amb la BBDD tancada.')
print(text)
return(True) # Problems capturing subreddits, the process is repeated
db.con.commit() # One last commit to make sure the remaining rows also reach the database
if(manual):
db.con.close()
del db.cur
del db.con
print('Connexio amb la BBDD tancada.')
# Show the total time taken
utils.printGetSubredditsStats(startTime, newSubs, updatedSubs)
return False # No problems, continue with the submission capture
def getSubmissions(manual=True, r=None, db=None):
''' Captures a number of subreddits determined by TOP_SUB_LIMIT, ordered by number
of subscribers. Then, one by one, it captures their submissions.
:param manual: True or False (optional)
:param r: class 'praw.Reddit' (optional)
:param db: class 'utils.baseDades' (optional)
:return: The number of new and updated submissions
:rtype: str and str
'''
totalNewposts = 0 # Total number of new posts added (session)
totalUpdates = 0 # Total number of posts updated (session)
newposts = 0 # Number of new posts (subreddit)
updates = 0 # Number of updated posts (subreddit)
subsCount = 1 # Counter to track how many subreddits have been processed
totalTime = 0
print('Capturant publicacions...\n')
if(manual):
(r, db) = utils.rwlogin() # connection to the API and the database
try:
# A query is run to capture the ids and names of the subreddits
select = db.cur.execute('SELECT idint, display_name FROM subreddits ORDER BY subscribers ' +
'DESC LIMIT {0}'.format(TOP_SUB_LIMIT))
if (select == TOP_SUB_LIMIT): # was the expected number of names captured?
subreddits = db.cur.fetchall() # the subreddit names are stored
# If the query returns a value different from the expected one, an EXCEPTION is raised
else:
raise pymysql.MySQLError('S\'esperaven {0} files i se n\'han extret {1}'.format(
TOP_SUB_LIMIT, select))
# The subreddits are processed one by one
for subreddit in subreddits:
subredditSubmissions = utils.getNumberSubmissions(subreddit[0], db)
(newposts, updates, subsCount, submissionsChrono) = get_all_posts(
subredditSubmissions,
idint=subreddit[0],
subreddit=subreddit[1],
db=db,
r=r,
subsCount=subsCount
)
totalNewposts += newposts
totalUpdates += updates
totalTime += submissionsChrono
except pymysql.MySQLError as e:
text = 'getSubmissions():Select noms subreddits\n{2}\nEXCEPCIO: {0}\nMISSATGE: {1}'.format(e.__doc__, str(e), e)
utils.storeExcept(text, db.cur, db.con)
print(text)
raise SystemExit # Exit
if(manual):
db.con.close()
del db.cur
del db.con
print('Connexio amb la BBDD tancada.')
# Show the results of the session
utils.printSQLStats(None, totalNewposts, totalUpdates, time=totalTime)
def get_all_posts(subredditSubmissions, idint, subreddit, db, r, subsCount, lower=START_DATE,
maxupper=MAX_UPPER, interval=GET_SUBS_INTERVAL):
''' Function taken from:
https://github.com/voussoir/reddit/blob/master/Prawtimestamps/timesearch.py
Author: Ethan Dalool (Voussoir)
Modified to adapt it to the needs of this project.
Captures the submissions of the subreddit passed as a parameter. Lower and
upper date limits can be given; if they are not, every submission of the
subreddit is captured.
:param idint: the base-10 id of the subreddit
:param subreddit: the name of the subreddit to explore, as a string.
:param db: baseDades object
:param r: PRAW object
:param subsCount: int counting how many subreddits have been processed
:param lower: lower date from which to search. UNIX format.
None - By default, the creation date of the subreddit.
update - captures subreddit posts starting from
the date of the last one captured.
:param maxupper: upper date. By default, the present.
:param interval: partial interval in which to search for submissions.
The function shrinks or grows this interval depending on
whether it finds more than 100 submissions or fewer than 75.
:param subredditSubmissions: number of submissions at the moment the update
starts.
:return: The number of new submissions and of those that were updated
because they already existed, plus the number of the subreddit processed.
As a fourth value, it returns the elapsed seconds.
:rtype: int, int, int, float
'''
chrono = 0
queryNewposts = 0 # New submissions, partial value
queryUpdates = 0 # Updated submissions, partial value
newposts = 0 # New submissions, subreddit total
updates = 0 # Updated submissions, subreddit total
offset = -time.timezone # Compensation for the time zone
startTime = time.time() # Time at which submission capture starts
global validRequests # Total number of calls that return between 1 and 99 submissions
global belowRequests # Total number of calls that return fewer than 75 submissions
global aboveRequests # Total number of calls that return more than 99 submissions
global totalSubmissions # Total number of submissions stored in the database
global totalRounds # Total number of iterations of the start() loop
# First check whether lower has a value. If it does, that value is kept.
# If it is None, the end date of the last exploration of this subreddit is
# fetched. If no such date exists, lower is not modified and stays None.
# Otherwise, that date is assigned to it.
if lower is None:
# Keep capturing posts starting from the last one found during the
# previous iteration of the script.
lastDate = utils.getLastDate(idint, db)
if lastDate:
lower = lastDate + 1 # Last date reached in a previous iteration,
# plus one second
# Check again whether lower is None; if it is, assign it the creation
# date of the subreddit being explored.
if lower is None:
# The minimum date from which to start capturing posts
# will be the creation date of the subreddit
lower = r.get_subreddit(subreddit).created_utc
if maxupper is None:
nowstamp = datetime.datetime.now(datetime.timezone.utc).timestamp()
maxupper = nowstamp
maxupper -= offset # Adjust the value to the time zone
upper = lower + interval # Interval - upper cut-off
itemcount = 0
toomany_inarow = 0 # Adjusts how fast the intervals are changed
intervalDiff = 0 # To display the difference between the current and the previous interval
# Starts as an int, ends as a string
while (lower < maxupper):
while True: # Search for submissions within an interval
try:
query = 'timestamp:%d..%d' % (lower, upper)
searchresults = list(r.search(query, subreddit=subreddit, sort='new',
limit=MAX_SUBMISSIONS, syntax='cloudsearch'))
break
except Exception as e:
text = 'get_all_posts():r.search. Query: {2}\nEXCEPCIO: {0}\nMISSATGE: {1}'.format(e.__doc__, str(e), query)
utils.storeExcept(text, db.cur, db.con)
time.sleep(5)
continue
# End while True
searchresults.reverse()
itemsfound = len(searchresults)
itemcount += itemsfound
# The following code counts the submissions found in the time interval used;
# if there are more than MAX_SUBMISSIONS or fewer than 75% of that value,
# it adjusts the interval and searches again.
# When it finds an acceptable number of submissions, it inserts them into
# the database.
# The toomany_inarow variable helps speed up the interval adjustment when
# several consecutive queries return too many results.
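# Worked example (illustrative): with BELOW_MAX_SUBMISSIONS = 75, a query returning
# 30 submissions gives diff = 2 - 30/75 = 1.6, so the next interval is 1.6 times longer;
# a query returning 100 or more shrinks the interval to 80% of its size (minus a further
# 5% for each consecutive overflow).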
# Values used to print the statistics of each iteration
printInterval = interval
printLower = lower
printUpper = upper
textResults = ''
if (itemsfound < BELOW_MAX_SUBMISSIONS):
diff = 2 - (itemsfound / BELOW_MAX_SUBMISSIONS)
diff = min(MAXIMUM_EXPANSION_MULTIPLIER, diff)
interval = int(interval * diff)
belowRequests += 1
if (itemsfound > (MAX_SUBMISSIONS - 1)):
# Intentionally not elif.
# If few results are obtained, they are still stored by the else branch.
interval = int(interval * (0.8 - (0.05 * toomany_inarow)))
toomany_inarow += 1
aboveRequests += 1
else:
(queryNewposts, queryUpdates, subredditSubmissions) = utils.smartinsert(
db.con, db.cur, searchresults, idint, MIN_SCORE, subredditSubmissions)
# The last date is stored; once the search reaches the present,
# the date will correspond to MAX_UPPER
utils.storeLastDate(idint, upper, db)
lower = upper
toomany_inarow = max(0, toomany_inarow-1)
newposts += queryNewposts
updates += queryUpdates
validRequests += 1
totalSubmissions += itemsfound
# Subreddit title and time counter
textSubreddit = ' Subreddit: {0} ({1} de {2}). Iteracio: {3} '.format(subreddit,
subsCount, TOP_SUB_LIMIT, totalRounds)
chrono = time.time() - startTime
absChrono = time.time() - absoluteStartTime
# Show information for each request
utils.gapStats(textSubreddit, chrono, printInterval, intervalDiff, printLower,
printUpper, validRequests, totalSubmissions, itemsfound,
belowRequests, aboveRequests, MAX_SUBMISSIONS,
BELOW_MAX_SUBMISSIONS, absChrono, absoluteStartTime)
upper = lower + interval
# Upper cannot exceed maxupper or the variable holding the total number of posts
# would be wrong. If this if runs, the search is in its last interval, just before
# the date and time fixed as the maximum limit.
if (upper > maxupper):
upper = maxupper
intervalDiff = printInterval - interval
print()
# End while lower < maxupper
utils.printSQLStats(subreddit, newposts, updates, time=None)
return(newposts, updates, subsCount+1, chrono)
if (__name__ == '__main__'):
start()
|
{
"content_hash": "49e8b58b939b60af3a4304927d1ac300",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 149,
"avg_line_length": 50.023529411764706,
"alnum_prop": 0.5951552210724365,
"repo_name": "rroyo/redditWhoBot",
"id": "a68be053f5c32731ab14a98cc1aa6e5f127ee57d",
"size": "23000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redditWhoBot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41763"
}
],
"symlink_target": ""
}
|
import cgi
import json
import sqlite3 as lite
class GetAllPlaylistSongsFromDB():
def __init__(self):
amp_db_loc = "/usr/share/ampyche/db/ampyche.db"
self.amp_db_loc = amp_db_loc
pll_db_loc = "/usr/share/ampyche/db/ampychePlaylist.db"
self.pll_db_loc = pll_db_loc
def _make_a_list(self, genlist):
agen = []
for gen in genlist:
gen = ' '.join(gen)
agen.append(gen)
agen.sort()
return agen
def _format_li(self, a_list):
meeco = "<ul data-role='listview' data-mini='true' data-inset='true' data-split-icon='gear'>"
for a in a_list:
a = "<li><a href='#' class='selpl'>%s</a><a href='#' class='deletepl'></a></li>" %a
meeco = meeco + a
meeco = meeco + "</ul>"
return meeco
def _get_all_playlists(self):
con = lite.connect(self.pll_db_loc)
cur = con.cursor()
cur.execute("SELECT playlistname FROM playlists")
plists = cur.fetchall()
con.close()
plists = self._make_a_list(plists)
playlistsongs = []
for p in plists:
con = lite.connect(self.pll_db_loc)
cur = con.cursor()
cur.execute("SELECT song FROM playlistsongs WHERE playlistname=?", [p])
plsongs = cur.fetchall()
con.close()
plsongs = self._make_a_list(plsongs)
plsongs = self._format_li(plsongs)
pp = "<h6 class='playname'>%s</h6>" %p
zero = pp, plsongs
playlistsongs.append(zero)
return playlistsongs
print("Content-Type: application/json\n\n")
taz = GetAllPlaylistSongsFromDB()
pluto = taz._get_all_playlists()
print(json.dumps(pluto, sort_keys=True, indent=4))
|
{
"content_hash": "1b85889377d6f5c27455978ef00c490f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 95,
"avg_line_length": 28.60377358490566,
"alnum_prop": 0.6569920844327177,
"repo_name": "ampyche/ampyche",
"id": "3839e2dbfbc62ea845fceebe129c12b151ff461e",
"size": "1536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amp-cgi-bin/getters/getallplaylistsongs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "501376"
},
{
"name": "JavaScript",
"bytes": "1328718"
},
{
"name": "PHP",
"bytes": "59697"
},
{
"name": "Python",
"bytes": "82806"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
import sensor
sensor.set_contrast(1)
sensor.set_gainceiling(8)
sensor.set_framesize(sensor.QQVGA)
sensor.set_pixformat(sensor.RGB565)
image = sensor.snapshot()
image.save("/test.ppm")
|
{
"content_hash": "7f1cc5cdd0d08c0d67e630eeaaae56e5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 35,
"avg_line_length": 23.125,
"alnum_prop": 0.7837837837837838,
"repo_name": "dhylands/openmv",
"id": "d701c9d885ac688f56c793e9647048dacf5e3ccb",
"size": "185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "usr/examples/write_ppm.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
def simpleFunction(param):
return "Hello %s" % param
def helloFunction():
return "Hello"
def exceptionalFunction(param):
raise Exception(param)
class PythonPathTests(unittest.TestCase):
def setUp(self):
pass
def _runTest_(self, txt, result, errMsg="Error", allowPythonPath=0):
self.context = simpleTALES.Context(allowPythonPath=allowPythonPath)
self.context.addGlobal('top', 'Hello from the top')
self.context.addGlobal('exceptFunc', exceptionalFunction)
self.context.addGlobal('helloFunc', simpleFunction)
self.context.addGlobal(
'helloPath', simpleTALES.PathFunctionVariable(simpleFunction))
self.context.addGlobal('helloFunction', helloFunction)
self.context.addGlobal('myList', [1, 2, 3, 4, 5, 6])
self.context.addGlobal('testing', 'testing')
self.context.addGlobal('map', {'test': 'maptest'})
self.context.addGlobal('data', {'one': 1, 'zero': 0})
template = simpleTAL.compileHTMLTemplate(txt)
file = io.StringIO()
template.expand(self.context, file)
realResult = file.getvalue()
self.assertEqual(
realResult, result,
"%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
% (errMsg, txt, realResult, result, template))
def testPythonPathException(self):
self._runTest_(
"""<html tal:content="python:exceptFunc ('Test exception!')">Exists</html>""",
'<html>Exception: Test exception!</html>',
'Exception thrown during python path not handled!',
allowPythonPath=1)
def testPythonPathDisabled(self):
self._runTest_(
"""<html tal:content="python:helloFunc ('Colin!')">Exists</html>""",
'<html>0</html>',
'Python path with allowPythonPath=false still expanded!',
allowPythonPath=0)
def testPythonPathFuncSuccess(self):
self._runTest_(
"""<html tal:content="python:helloFunc ('Colin!')">Exists</html>""",
'<html>Hello Colin!</html>',
'Python path with function failed.',
allowPythonPath=1)
def testPythonPathSliceSuccess(self):
self._runTest_(
"""<html tal:repeat="num python:myList[2:4]" tal:content="num">Exists</html>""",
'<html>3</html><html>4</html>',
'Python path with slice failed.',
allowPythonPath=1)
def testPythonStringCompare(self):
self._runTest_(
"""<html tal:condition="python: testing=='testing'">Passed.</html>""",
'<html>Passed.</html>',
'Python string compare failed.',
allowPythonPath=1)
def testPythonPathFunc(self):
self._runTest_(
"""<html tal:content="python: path ('map/test')">Passed.</html>""",
'<html>maptest</html>',
'Python path function call failed',
allowPythonPath=1)
def testPythonStringFunc(self):
self._runTest_(
"""<html tal:content="python: string ('Hello ${map/test} there')">Passed.</html>""",
'<html>Hello maptest there</html>',
'Python string function call failed',
allowPythonPath=1)
def testPythonExistsFunc1(self):
self._runTest_(
"""<html tal:condition="python: exists ('map/test')">Passed.</html>""",
'<html>Passed.</html>',
'Python exists function call failed',
allowPythonPath=1)
def testPythonExistsFunc2(self):
self._runTest_(
"""<html tal:condition="python: exists ('map/nosuchpath')">Passed.</html>""",
'',
'Python exists function call failed',
allowPythonPath=1)
def testPythonNocallFunc(self):
self._runTest_(
"""<html tal:condition="python: callable (nocall ('helloFunc'))">Passed.</html>""",
'<html>Passed.</html>',
'Python nocall function call failed',
allowPythonPath=1)
def testPythonPathFuncWithFunc(self):
self._runTest_(
"""<html tal:condition="python: path ('helloFunction')=='Hello'">Passed.</html>""",
'<html>Passed.</html>',
'Python path function using a function failed',
allowPythonPath=1)
def testPythonPathFuncWithPath(self):
self._runTest_(
"""<html tal:condition="python: helloPath ('helloFunction')=='Hello helloFunction'">Passed.</html>""",
'<html>Passed.</html>',
'Python path function wrapped in a PathFunctionVariable failed',
allowPythonPath=1)
def testTestFunctionDefault(self):
self._runTest_(
"""<html tal:condition="python: test (path ('data/one'))">Passed.</html>""",
'<html>Passed.</html>',
'Test function failed to use default.',
allowPythonPath=1)
def testTestFunctionTwoArgs(self):
self._runTest_(
"""<html tal:condition="python: test (0,1)">Passed.</html>""",
'',
'Test function failed to use default of false.',
allowPythonPath=1)
def testTestFunctionThreeArgs(self):
self._runTest_(
"""<html tal:content="python: test (0,1,2)">Passed.</html>""",
'<html>2</html>',
'Test function failed to use default.',
allowPythonPath=1)
def testTestFunctionFiveArgs(self):
self._runTest_(
"""<html tal:content="python: test (0,1,0,2,5)">Passed.</html>""",
'<html>5</html>',
'Test function failed to use default.',
allowPythonPath=1)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "21a696bca7edc196cab85074ecb0bbf2",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 114,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.5845570037947534,
"repo_name": "janbrohl/SimpleTAL",
"id": "2ef69447cd2162ed9b9cddb2caea4e6095ab5491",
"size": "7822",
"binary": false,
"copies": "1",
"ref": "refs/heads/six",
"path": "tests/TALESTests/PythonPathTests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "412454"
}
],
"symlink_target": ""
}
|
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.descriptions import DescriptionMixin, RenderMode
from indico.core.db.sqlalchemy.protection import ProtectionManagersMixin
from indico.util.locators import locator_property
from indico.util.string import format_repr, text_to_repr
def get_next_position(context):
from indico.modules.events.tracks.models.groups import TrackGroup
event_id = context.current_parameters['event_id']
track_max_position = (db.session.query(db.func.max(Track.position))
.filter(Track.event_id == event_id)
.scalar())
track_group_max_position = (db.session.query(db.func.max(TrackGroup.position))
.filter(TrackGroup.event_id == event_id)
.scalar())
pos = max(track_max_position or 0, track_group_max_position or 0)
return pos + 1
class Track(DescriptionMixin, ProtectionManagersMixin, db.Model):
__tablename__ = 'tracks'
__table_args__ = {'schema': 'events'}
disable_protection_mode = True
is_track_group = False
possible_render_modes = {RenderMode.markdown}
default_render_mode = RenderMode.markdown
id = db.Column(
db.Integer,
primary_key=True
)
title = db.Column(
db.String,
nullable=False
)
code = db.Column(
db.String,
nullable=False,
default=''
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
position = db.Column(
db.Integer,
nullable=False,
default=get_next_position
)
default_session_id = db.Column(
db.Integer,
db.ForeignKey('events.sessions.id'),
index=True,
nullable=True
)
track_group_id = db.Column(
db.Integer,
db.ForeignKey('events.track_groups.id', ondelete='SET NULL'),
index=True,
nullable=True
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'tracks',
cascade='all, delete-orphan',
lazy=True,
order_by=position
)
)
acl_entries = db.relationship(
'TrackPrincipal',
lazy=True,
cascade='all, delete-orphan',
collection_class=set,
backref='track'
)
default_session = db.relationship(
'Session',
lazy=True,
backref='default_for_tracks'
)
track_group = db.relationship(
'TrackGroup',
lazy=True,
backref=db.backref(
'tracks',
order_by=position,
lazy=True,
passive_deletes=True
)
)
# relationship backrefs:
# - abstract_reviews (AbstractReview.track)
# - abstracts_accepted (Abstract.accepted_track)
# - abstracts_reviewed (Abstract.reviewed_for_tracks)
# - abstracts_submitted (Abstract.submitted_for_tracks)
# - contributions (Contribution.track)
# - proposed_abstract_reviews (AbstractReview.proposed_tracks)
@property
def short_title(self):
return self.code if self.code else self.title
@property
def full_title(self):
return f'{self.code} - {self.title}' if self.code else self.title
@property
def title_with_group(self):
return f'{self.track_group.title}: {self.title}' if self.track_group else self.title
@property
def short_title_with_group(self):
return f'{self.track_group.title}: {self.short_title}' if self.track_group else self.short_title
@property
def full_title_with_group(self):
return f'{self.track_group.title}: {self.full_title}' if self.track_group else self.full_title
@locator_property
def locator(self):
return dict(self.event.locator, track_id=self.id)
def __repr__(self):
return format_repr(self, 'id', _text=text_to_repr(self.title))
def can_delete(self, user):
return self.event.can_manage(user) and not self.abstracts_accepted
def can_review_abstracts(self, user):
if not user:
return False
elif not self.event.can_manage(user, permission='abstract_reviewer', explicit_permission=True):
return False
elif self.event.can_manage(user, permission='review_all_abstracts', explicit_permission=True):
return True
return self.can_manage(user, permission='review', explicit_permission=True)
def can_convene(self, user):
if not user:
return False
elif not self.event.can_manage(user, permission='track_convener', explicit_permission=True):
return False
elif self.event.can_manage(user, permission='convene_all_abstracts', explicit_permission=True):
return True
return self.can_manage(user, permission='convene', explicit_permission=True)
|
{
"content_hash": "9443853732095f39550e68c05dd6cb93",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 104,
"avg_line_length": 31.93548387096774,
"alnum_prop": 0.6191919191919192,
"repo_name": "pferreir/indico",
"id": "49c5ae05d1aa59fc6801919269920b68c2aa929d",
"size": "5164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/tracks/models/tracks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""
Flex compatibility tests.
@author: U{Nick Joyce<mailto:nick@boxdesign.co.uk>}
@since: 0.1.0
"""
import unittest
import pyamf
from pyamf import flex, util, amf3, amf0
class ArrayCollectionTestCase(unittest.TestCase):
def test_create(self):
ac = flex.ArrayCollection([1, 2, 3])
self.assertEquals(ac, {0: 1, 1: 2, 2: 3})
ac = flex.ArrayCollection(('a', 'b', 'b'))
self.assertEquals(ac, {0: 'a', 1: 'b', 2: 'b'})
ac = flex.ArrayCollection({'first': 'Matt', 'last': 'Matthews'})
self.assertEquals(ac, {'first': 'Matt', 'last': 'Matthews'})
def test_encode(self):
stream = util.BufferedByteStream()
encoder = amf3.Encoder(stream)
x = flex.ArrayCollection()
x['spam'] = 'eggs'
encoder.writeElement(x)
self.assertEquals(stream.getvalue(),
'\n\x07Cflex.messaging.io.ArrayCollection'
'\t\x01\tspam\x06\teggs\x01')
stream = util.BufferedByteStream()
encoder = amf0.Encoder(stream)
x = flex.ArrayCollection()
x['spam'] = 'eggs'
encoder.writeElement(x)
self.assertEquals(stream.getvalue(),
'\x11\n\x07Cflex.messaging.io.ArrayCollection\t\x01\tspam\x06\x09'
'eggs\x01')
def test_decode(self):
stream = util.BufferedByteStream(
'\n\x07Cflex.messaging.io.ArrayCollection'
'\t\x01\x09spam\x06\x09eggs\x01')
decoder = amf3.Decoder(stream)
x = decoder.readElement()
self.assertEquals(x.__class__, flex.ArrayCollection)
self.assertEquals(x.keys(), ['spam'])
self.assertEquals(x.items(), [('spam', u'eggs')])
class ObjectProxyTestCase(unittest.TestCase):
def test_encode(self):
stream = util.BufferedByteStream()
encoder = amf3.Encoder(stream)
x = flex.ObjectProxy(pyamf.MixedArray(a='spam', b=5))
encoder.writeElement(x)
self.assertEquals(stream.getvalue(),
'\x0a\x07;flex.messaging.io.ObjectProxy\x09\x01\x03a\x06\x09spam'
'\x03b\x04\x05\x01')
def test_decode(self):
stream = util.BufferedByteStream(
'\x0a\x07;flex.messaging.io.ObjectProxy\x09\x01\x03a\x06\x09spam'
'\x03b\x04\x05\x01')
decoder = amf3.Decoder(stream)
x = decoder.readElement()
self.assertEquals(x.__class__, flex.ObjectProxy)
self.assertEquals(x._amf_object, {'a': 'spam', 'b': 5})
def test_get_attrs(self):
x = flex.ObjectProxy()
self.assertEquals(x._amf_object, pyamf.ASObject())
x._amf_object = None
self.assertEquals(x._amf_object, None)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ArrayCollectionTestCase))
suite.addTest(unittest.makeSuite(ObjectProxyTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
{
"content_hash": "65f58cbeb62bcbb0d3ff55f1901abd92",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 78,
"avg_line_length": 27.885714285714286,
"alnum_prop": 0.608948087431694,
"repo_name": "jamesward-demo/air-quick-fix",
"id": "74e2a075429691705909eaafe2c9aa8ae2c7db56",
"size": "3027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIRQuickFixServer/pyamf/tests/test_flex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "573"
},
{
"name": "Python",
"bytes": "474510"
}
],
"symlink_target": ""
}
|
import unittest
from fysom import Fysom
class FysomSameDstTransitionTests(unittest.TestCase):
def test_if_src_not_specified_then_is_wildcard(self):
fsm = Fysom({
'initial': 'hungry',
'events': [
{'name': 'eat', 'src': 'hungry', 'dst': 'satisfied'},
{'name': 'eat', 'src': 'satisfied', 'dst': 'full'},
{'name': 'eat', 'src': 'full', 'dst': 'sick'},
{'name': 'eat', 'src': 'sick', 'dst': '='},
{'name': 'rest', 'src': '*', 'dst': 'hungry'},
{'name': 'walk', 'src': '*', 'dst': '='},
{'name': 'run', 'src': ['hungry', 'sick'], 'dst': '='},
{'name': 'run', 'src': 'satisfied', 'dst': 'hungry'},
{'name': 'run', 'src': 'full', 'dst': 'satisfied'}
]
})
fsm.walk()
self.assertEqual(fsm.current, 'hungry')
fsm.run()
self.assertEqual(fsm.current, 'hungry')
fsm.eat()
self.assertEqual(fsm.current, 'satisfied')
fsm.walk()
self.assertEqual(fsm.current, 'satisfied')
fsm.run()
self.assertEqual(fsm.current, 'hungry')
fsm.eat()
self.assertEqual(fsm.current, 'satisfied')
fsm.eat()
self.assertEqual(fsm.current, 'full')
fsm.walk()
self.assertEqual(fsm.current, 'full')
fsm.eat()
self.assertEqual(fsm.current, 'sick')
fsm.walk()
self.assertEqual(fsm.current, 'sick')
fsm.run()
self.assertEqual(fsm.current, 'sick')
fsm.rest()
self.assertEqual(fsm.current, 'hungry')
fsm.eat()
self.assertEqual(fsm.current, 'satisfied')
fsm.run()
self.assertEqual(fsm.current, 'hungry')
|
{
"content_hash": "029b5a25b2e1225a5744932a525ea3d5",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 71,
"avg_line_length": 37.083333333333336,
"alnum_prop": 0.49382022471910114,
"repo_name": "mriehl/fysom",
"id": "d9a4e68405a90b708fd79158ec9f3af9cf0e92e8",
"size": "3180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/unittest/python/test_same_dst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84485"
}
],
"symlink_target": ""
}
|
"""
Contains some base primitives that provide basic flow logic for the messages.
Based on a stripped down version of base.py in ActivityWatch.
"""
from abc import abstractmethod, ABCMeta
import threading
from queue import Queue, Empty
from . import Event
from typing import Optional
class Agent(threading.Thread):
"""
Base class for all agents.
"""
__metaclass__ = ABCMeta
nrAgents = 1 # Monotonically increasing count of agents created
def __init__(self, event_timeout=0.2):
self.id = self.nrAgents
Agent.nrAgents += 1  # increment the class-level count of agents created
threading.Thread.__init__(self, name=self.identifier, daemon=True)
self._mailbox = Queue()
self._enabled = False
self._wait_for_event_timeout = event_timeout
self._processed_events = 0
# TODO: Rename "identifier" to something less like the class attribute id
@property
def identifier(self) -> str:
"""Identifier for agent, used in settings and as a module name shorter than the class name"""
return "{}[{}]".format(self.__class__.__name__, self.id)
@property
def running(self) -> bool:
"""Returns true if the agent is actually running, otherwise false"""
return self.is_alive()
@property
def enabled(self) -> bool:
"""Returns true if the agent is supposed to be running, otherwise false"""
return self._enabled
@property
def queue_size(self) -> int:
"""Doesn't guarrantee the exact size of the queue, see Queue.qsize() docs for more info"""
return self._mailbox.qsize()
@property
def processed_events(self) -> int:
"""Returns how many events have been removed from the queue"""
return self._processed_events
def start(self, *args, **kwargs):
self._enabled = True
threading.Thread.start(self, *args, **kwargs)
def run(self):
try:
while self.enabled:
try:
event = self.next_event(timeout=self._wait_for_event_timeout)
self.handle_event(event)
except Empty:
pass
finally:
self.cleanup()
def stop(self):
"""
Stops the agent.
Not guaranteed to stop since it relies on the implementation of the specific agent.
"""
self._enabled = False
# TODO: Decide on the final name for this method: "post", "post_event", "put_event" or something else?
def put_event(self, event: Event):
self._mailbox.put(event)
def next_event(self, timeout: Optional[float] = None) -> Optional[Event]:
"""
Retrieves the next event in the queue.
Will block until event is available, unless timeout is set in which
case it will wait a maximum amount of time and then return None.
"""
if timeout is None:
event = self._mailbox.get()
else:
event = self._mailbox.get(True, timeout=timeout)
if event is not None:
self._processed_events += 1
return event
# TODO: make @abstractmethod without breaking everything
def handle_event(self, event):
raise NotImplementedError
# TODO: make @abstractmethod without breaking everything
def cleanup(self):
"""
Performs final cleanup. Called when an agent has been stopped or encounters an unhandled exception.
Can be overridden to perform cleanup tasks, just don't forget to call the superclass's cleanup method first!
"""
pass
def to_json_dict(self):
"""Dumps information about the agent in a JSON-serializable format"""
obj = {"name": self.identifier, "id": self.id,
"status": self.running, "enabled": self.enabled,
"queue_size": self.queue_size, "processed_events": self.processed_events}
return obj
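# A minimal sketch (not part of the original module) of a concrete agent: the
# EchoAgent name and its `seen` list are illustrative assumptions; events are
# whatever objects callers pass to put_event().
class EchoAgent(Agent):
    """Example agent that records every event pulled from its mailbox."""
    def __init__(self, **kwargs):
        Agent.__init__(self, **kwargs)
        self.seen = []
    def handle_event(self, event):
        # run() calls this once for every event retrieved via next_event()
        self.seen.append(event)
# Typical usage: agent = EchoAgent(); agent.start(); agent.put_event(event); agent.stop()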
class PausableAgent(Agent):
"""
A simple attempt at a pausable agent.
A pausable agent can simply pause its event processing and resume it
at a later point in time without the need to instantiate a new thread.
"""
# WARNING: NOT TESTED! PROBABLY NOT WORKING!
def __init__(self, **kwargs):
Agent.__init__(self, **kwargs)
self._not_paused_flag = threading.Event()
self._not_paused_flag.set()
raise NotImplementedError
def run(self):
try:
while self.running:
# If paused, wait for pause to end
self._not_paused_flag.wait()
try:
event = self.next_event(timeout=self._wait_for_event_timeout)
self.handle_event(event)
except Empty:
pass
finally:
self.cleanup()
@property
def is_paused(self):
return not self._not_paused_flag.is_set()
def pause(self):
self._not_paused_flag.clear()
def unpause(self):
self._not_paused_flag.set()
|
{
"content_hash": "6ca1e34ef69de9bc601b0c0960c73468",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 116,
"avg_line_length": 31.841772151898734,
"alnum_prop": 0.6060425362750944,
"repo_name": "Homebrain/Homebrain",
"id": "527b5a34ff734738d5bdb6f2320c01727ced6cb5",
"size": "5031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homebrain/core/agents.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "656"
},
{
"name": "HTML",
"bytes": "10116"
},
{
"name": "JavaScript",
"bytes": "6026"
},
{
"name": "Python",
"bytes": "54990"
},
{
"name": "Shell",
"bytes": "732"
}
],
"symlink_target": ""
}
|
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module has been tested with Twisted versions 11.0.0 and newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error
import twisted.names.cache
import twisted.names.client
import twisted.names.hosts
import twisted.names.resolve
from zope.interface import implementer
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
"""DelayedCall object for Tornado."""
def __init__(self, reactor, seconds, f, *args, **kw):
self._reactor = reactor
self._func = functools.partial(f, *args, **kw)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
self._active = True
def _called(self):
self._active = False
self._reactor._removeDelayedCall(self)
try:
self._func()
except:
app_log.error("_called caught exception", exc_info=True)
def getTime(self):
return self._time
def cancel(self):
self._active = False
self._reactor._io_loop.remove_timeout(self._timeout)
self._reactor._removeDelayedCall(self)
def delay(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time += seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def reset(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def active(self):
return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call ``IOLoop.current().start()``
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.READ)
def addWriter(self, writer):
if writer in self._writers:
return
fd = writer.fileno()
self._writers[writer] = fd
if fd in self._fds:
(reader, _) = self._fds[fd]
self._fds[fd] = (reader, writer)
if reader:
# We already registered this fd for read events,
# update it for write events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (None, writer)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.WRITE)
def removeReader(self, reader):
if reader in self._readers:
fd = self._readers.pop(reader)
(_, writer) = self._fds[fd]
if writer:
# We have a writer so we need to update the IOLoop for
# write events only.
self._fds[fd] = (None, writer)
self._io_loop.update_handler(fd, IOLoop.WRITE)
else:
# Since we have no writer registered, we remove the
# entry from _fds and unregister the handler from the
# IOLoop
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeWriter(self, writer):
if writer in self._writers:
fd = self._writers.pop(writer)
(reader, _) = self._fds[fd]
if reader:
# We have a reader so we need to update the IOLoop for
# read events only.
self._fds[fd] = (reader, None)
self._io_loop.update_handler(fd, IOLoop.READ)
else:
# Since we have no reader registered, we remove the
# entry from the _fds and unregister the handler from
# the IOLoop.
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeAll(self):
return self._removeAll(self._readers, self._writers)
def getReaders(self):
return self._readers.keys()
def getWriters(self):
return self._writers.keys()
# The following functions are mainly used in twisted-style test cases;
# it is expected that most users of the TornadoReactor will call
# IOLoop.start() instead of Reactor.run().
def stop(self):
PosixReactorBase.stop(self)
fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
self._io_loop.add_callback(fire_shutdown)
def crash(self):
PosixReactorBase.crash(self)
self._io_loop.stop()
def doIteration(self, delay):
raise NotImplementedError("doIteration")
def mainLoop(self):
# Since this class is intended to be used in applications
# where the top-level event loop is ``io_loop.start()`` rather
# than ``reactor.run()``, it is implemented a little
# differently than other Twisted reactors. We override
# ``mainLoop`` instead of ``doIteration`` and must implement
# timed call functionality on top of `.IOLoop.add_timeout`
# rather than using the implementation in
# ``PosixReactorBase``.
self._io_loop.start()
class _TestReactor(TornadoReactor):
"""Subclass of TornadoReactor for use in unittests.
This can't go in the test.py file because of import-order dependencies
with the Twisted reactor test builder.
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenTCP(
port, factory, backlog=backlog, interface=interface)
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenUDP(
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
"""Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process,
before most other twisted-related imports. Conversely, because it
initializes the `.IOLoop`, it cannot be called before
`.fork_processes` or multi-process `~.TCPServer.start`. These
conflicting requirements make it difficult to use `.TornadoReactor`
in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
def __init__(self, fd, fileobj, handler):
self.fd = fd
self.fileobj = fileobj
self.handler = handler
self.reading = False
self.writing = False
self.lost = False
def fileno(self):
return self.fd
def doRead(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
def doWrite(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
def connectionLost(self, reason):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
def logPrefix(self):
return ''
class TwistedIOLoop(tornado.ioloop.IOLoop):
"""IOLoop implementation that runs on Twisted.
`TwistedIOLoop` implements the Tornado IOLoop interface on top of
the Twisted reactor. Recommended usage::
from tornado.platform.twisted import TwistedIOLoop
from twisted.internet import reactor
TwistedIOLoop().install()
# Set up your tornado application as usual using `IOLoop.instance`
reactor.run()
Uses the global Twisted reactor by default. To create multiple
``TwistedIOLoops`` in the same process, you must pass a unique reactor
when constructing each one.
Not compatible with `tornado.process.Subprocess.set_exit_callback`
because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
with each other.
"""
def initialize(self, reactor=None, **kwargs):
super(TwistedIOLoop, self).initialize(**kwargs)
if reactor is None:
import twisted.internet.reactor
reactor = twisted.internet.reactor
self.reactor = reactor
self.fds = {}
def close(self, all_fds=False):
fds = self.fds
self.reactor.removeAll()
for c in self.reactor.getDelayedCalls():
c.cancel()
if all_fds:
for fd in fds.values():
self.close_fd(fd.fileobj)
def add_handler(self, fd, handler, events):
if fd in self.fds:
raise ValueError('fd %s added twice' % fd)
fd, fileobj = self.split_fd(fd)
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
if events & tornado.ioloop.IOLoop.READ:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & tornado.ioloop.IOLoop.READ:
if not self.fds[fd].reading:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
else:
if self.fds[fd].reading:
self.fds[fd].reading = False
self.reactor.removeReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
if not self.fds[fd].writing:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
else:
if self.fds[fd].writing:
self.fds[fd].writing = False
self.reactor.removeWriter(self.fds[fd])
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.fds:
return
self.fds[fd].lost = True
if self.fds[fd].reading:
self.reactor.removeReader(self.fds[fd])
if self.fds[fd].writing:
self.reactor.removeWriter(self.fds[fd])
del self.fds[fd]
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.reactor.crash()
def add_timeout(self, deadline, callback, *args, **kwargs):
# This method could be simplified (since tornado 4.0) by
# overriding call_at instead of add_timeout, but we leave it
# for now as a test of backwards-compatibility.
if isinstance(deadline, numbers.Real):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % (deadline,))
return self.reactor.callLater(
delay, self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
if timeout.active():
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
self.reactor.callFromThread(
self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
"""Twisted-based asynchronous resolver.
This is a non-blocking and non-threaded resolver. It is
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
reactor=self.reactor)
self.resolver = twisted.names.resolve.ResolverChain(
[host_resolver, cache_resolver, real_resolver])
@gen.coroutine
def resolve(self, host, port, family=0):
# getHostByName doesn't accept IP addresses, so if the input
# looks like an IP address just return it immediately.
if twisted.internet.abstract.isIPAddress(host):
resolved = host
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(host):
resolved = host
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
if isinstance(resolved, failure.Failure):
resolved.raiseException()
elif twisted.internet.abstract.isIPAddress(resolved):
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(resolved):
resolved_family = socket.AF_INET6
else:
resolved_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != resolved_family:
raise Exception('Requested socket family %d but got %d' %
(family, resolved_family))
result = [
(resolved_family, (resolved, port)),
]
raise gen.Return(result)
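# Illustrative usage sketch for TwistedResolver (comments only; assumes the
# standard Resolver.configure hook from tornado.netutil):
#
#   from tornado.netutil import Resolver
#   Resolver.configure('tornado.platform.twisted.TwistedResolver')
#   resolver = Resolver()
#   addrinfo = yield resolver.resolve('example.com', 80)   # inside a coroutine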
if hasattr(gen.convert_yielded, 'register'):
@gen.convert_yielded.register(Deferred)
def _(d):
f = Future()
def errback(failure):
try:
failure.raiseException()
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f
|
{
"content_hash": "1fc4beae2efcada1607c1efd9985015b",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 80,
"avg_line_length": 36.770577933450085,
"alnum_prop": 0.6072108973137741,
"repo_name": "ColorFuzzy/tornado",
"id": "d3a4e75d1c95bd8e8df8fc2a08babfcb99759734",
"size": "21586",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "tornado/platform/twisted.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1078"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12417"
},
{
"name": "JavaScript",
"bytes": "6073"
},
{
"name": "Python",
"bytes": "1505229"
},
{
"name": "Ruby",
"bytes": "1733"
},
{
"name": "Shell",
"bytes": "4881"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from functools import wraps
import os
db_session = None
Model = declarative_base()
def entry_point(func):
@wraps(func)
def internal(*args, **kwargs):
result = None
cache_db_path = os.path.join(args[0].cache_path, "cache.db")
engine = create_engine("sqlite:///" + cache_db_path,
convert_unicode=True)
global db_session
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
global Model
Model.query = db_session.query_property()
from . import models
Model.metadata.create_all(bind=engine)
try:
result = func(*args, **kwargs)
finally:
db_session.remove()
return result
return internal
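# Hypothetical usage sketch (comments only): entry_point only requires that the
# first positional argument expose a `cache_path` attribute; the Compiler name
# below is an assumption, not part of this module.
#
#   class Compiler(object):
#       def __init__(self, cache_path):
#           self.cache_path = cache_path
#       @entry_point
#       def process(self, source):
#           ...  # may use db_session and the models while the session is open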
|
{
"content_hash": "fd32bce2763324f9443c5bc170d42460",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 31.303030303030305,
"alnum_prop": 0.5769603097773476,
"repo_name": "aspyatkin/assetoolz",
"id": "8dc6195911eeb891109c836ebeb5a3bf86c7ccee",
"size": "1033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assetoolz/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44126"
}
],
"symlink_target": ""
}
|
"""Twitch.tv uses OAuth2 for authorization.
We use the Implicit Grant Workflow.
The user has to visit an authorization site, login, authorize
PyTwitcher. Once he allows PyTwitcher, twitch will redirect him to
:data:`pytwitcherapi.REDIRECT_URI`.
In the url fragment, there is the access token.
This module features a server, that will respond to the redirection of
the user. So if twitch is redirecting to :data:`pytwitcherapi.REDIRECT_URI`,
the server will send a website, which will extract the access token,
send it as a post request, and give the user a response
that everything worked.
"""
import logging
import os
import sys
import oauthlib.oauth2
import pkg_resources
from pytwitcherapi import constants
if sys.version_info[0] == 3:
from http import server
else:
import BaseHTTPServer as server
log = logging.getLogger(__name__)
class RedirectHandler(server.BaseHTTPRequestHandler):
"""This request handler will handle the redirection of the user
when he grants authorization to PyTwitcher and twitch redirects him.
"""
extract_site_url = '/'
success_site_url = '/success'
def _set_headers(self):
"""Set the response and headers
:returns: None
:raises: None
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self, ):
"""Handle GET requests
If the path is '/', a site which extracts the token will be generated.
This will redirect the user to the '/sucess' page, which shows
a success message.
:returns: None
:rtype: None
:raises: None
"""
urld = {self.extract_site_url: 'extract_token_site.html',
self.success_site_url: 'success_site.html'}
site = urld.get(self.path)
if not site:
log.debug("Requesting false url on login server.")
self.send_error(404)
return
log.debug('Requesting the login server. Responding with %s.', site)
self._set_headers()
self._write_html(site)
def _write_html(self, filename):
"""Read the html site with the given filename
from the data directory and write it to :data:`RedirectHandler.wfile`.
:param filename: the filename to read
:type filename: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
datapath = os.path.join('html', filename)
sitepath = pkg_resources.resource_filename('pytwitcherapi', datapath)
with open(sitepath, 'r') as f:
html = f.read()
self.wfile.write(html.encode('utf-8'))
def do_POST(self, ):
"""Handle POST requests
When the user is redirected, this handler will respond with a website
which will send a post request with the url fragment as parameters.
This will get the parameters and store the original redirection
url and fragments in :data:`LoginServer.tokenurl`.
:returns: None
:rtype: None
:raises: None
"""
log.debug('POST')
self._set_headers()
# convert the parameters back to the original fragment
# because we need to send the original uri to set_token
# url fragments will not show up in self.path though.
# thats why we make the hassle to send it as a post request.
# Note: oauth does not allow for http connections
# but twitch does, so we fake it
ruri = constants.REDIRECT_URI.replace('http://', 'https://')
self.server.set_token(ruri + self.path.replace('?', '#'))
class LoginServer(server.HTTPServer):
"""This server responds to the redirection of the user
after he granted authorization.
"""
def __init__(self, session):
"""Initialize a new server.
The server will be on :data:`constants.LOGIN_SERVER_ADRESS`.
:param session: the session that needs a token
:type session: :class:`requests_oauthlib.OAuth2Session`
:raises: None
"""
server.HTTPServer.__init__(self,
constants.LOGIN_SERVER_ADRESS,
RedirectHandler)
self.session = session
"""The session that needs a token"""
def set_token(self, redirecturl):
"""Set the token on the session
:param redirecturl: the original full redirect url
:type redirecturl: :class:`str`
:returns: None
:rtype: None
:raises: None
"""
log.debug('Setting the token on %s.' % self.session)
self.session.token_from_fragment(redirecturl)
class TwitchOAuthClient(oauthlib.oauth2.MobileApplicationClient):
"""This is a client needed for :class:`oauthlib.oauth2.OAuth2Session`.
It fixes the Authorization header for twitch.
Usually the Authorization Header looks like this::
{'Authorization': 'Bearer <<token>>'}
But Twitch needs it to be like this::
{'Authorization': 'OAuth <<token>>'}
So we override :meth:`TwitchOAuthClient._add_bearer_token` to fix the header.
"""
def _add_bearer_token(self, *args, **kwargs):
"""Add a bearer token to the request uri, body or authorization header.
This is overwritten to change the headers slightly.
"""
s = super(TwitchOAuthClient, self)
uri, headers, body = s._add_bearer_token(*args, **kwargs)
authheader = headers.get('Authorization')
if authheader:
headers['Authorization'] = authheader.replace('Bearer', 'OAuth')
return uri, headers, body
|
{
"content_hash": "e300572e47d224599d55c9ab284b9ae5",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 81,
"avg_line_length": 33.32941176470588,
"alnum_prop": 0.6343099188139781,
"repo_name": "Pytwitcher/pytwitcherapi",
"id": "cfd53d8f838531fa8a3ff25acb5973746c4469c2",
"size": "5666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pytwitcherapi/oauth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "315"
},
{
"name": "Makefile",
"bytes": "1318"
},
{
"name": "Python",
"bytes": "146876"
}
],
"symlink_target": ""
}
|
from . import axis
from nibabel import cifti2
import numpy as np
def get_axes(file):
"""
Extracts the axes from a CIFTI file header
Parameters
----------
file : str
filename or an already opened Cifti2Image
Returns
-------
tuple of N Axes describing the rows/columns along each of the dimensions
The type of the axes will be determined by the information in the CIFTI file itself,
not the extension of the CIFTI file
"""
if isinstance(file, cifti2.Cifti2Image):
img = file
else:
img = cifti2.Cifti2Image.from_filename(file)
dim = 0
shape = img.shape
only_size_1_dimensions = True
axes = []
while dim != len(shape):
axes.append(axis.from_mapping(img.header.matrix.get_index_map(dim)))
if len(axes[-1]) != 1:
only_size_1_dimensions = False
if len(axes[-1]) != shape[dim]:
if only_size_1_dimensions:
shape = (1, ) + shape
else:
raise ValueError("CIFTI header expects %i elements along dimension %i, but %i were found" %
(len(axes[-1]), dim, shape[dim]))
dim += 1
try:
img.header.matrix.get_index_map(dim)
raise ValueError("CIFTI header contains definition for dimension %i, but array is only %i-dimensional" %
(dim, dim))
except cifti2.Cifti2HeaderError:
pass
return tuple(axes)
def read(file):
"""
Loads a CIFTI file
Parameters
----------
file : str
filename or an already opened file
Returns
-------
- memory-mapped N-dimensional array with the actual data
- tuple of N Axes describing the rows/columns along each of the dimensions
The type of the axes will be determined by the information in the CIFTI file itself,
not the extension of the CIFTI file
"""
if isinstance(file, cifti2.Cifti2Image):
img = file
else:
img = cifti2.Cifti2Image.from_filename(file)
axes = get_axes(img)
arr = img.get_data()
while arr.ndim < len(axes):
arr = arr[None, ...]
return arr, axes
def write(filename, arr, axes):
"""
Saves a CIFTI file
Parameters
----------
filename : str
name of output CIFTI file
arr : array
data to be stored as vector or matrix
axes : list[axis.Axis]
axis explaining each of the dimensions in the arr
"""
arr = np.asarray(arr)
if len(axes) != arr.ndim:
raise ValueError("Number of defined CIFTI axes (%i) does not match dimensionality of array (%i)" %
(len(axes), arr.ndim))
for dim, ax, len_arr in zip(range(arr.ndim), axes, arr.shape):
if len(ax) != len_arr:
raise ValueError("Size of CIFTI axes (%i) does not match array size (%i) for dimension %i" %
(len(ax), len_arr, dim))
img = cifti2.Cifti2Image(arr, axis.to_header(axes))
img.to_filename(filename)
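# A small round-trip sketch using the read/write helpers above (comments only;
# the filenames are placeholders):
#
#   arr, axes = read('input.dscalar.nii')
#   write('output.dscalar.nii', arr, axes)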
|
{
"content_hash": "5669d46cc5f84e238defc846e3cba62d",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 112,
"avg_line_length": 30,
"alnum_prop": 0.5887788778877888,
"repo_name": "MichielCottaar/cifti",
"id": "887fc9f1dfbedeeb210e7077b819efdd6bb331f6",
"size": "3030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cifti/io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59680"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
}
|
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import ( # noqa: F401
SplineExactKnotsFitter,
SplineInterpolateFitter,
SplineSmoothingFitter,
SplineSplrepFitter,
)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = [
"LinearLSQFitter",
"LevMarLSQFitter",
"TRFLSQFitter",
"DogBoxLSQFitter",
"LMLSQFitter",
"FittingWithOutlierRemoval",
"SLSQPLSQFitter",
"SimplexLSQFitter",
"JointFitter",
"Fitter",
"ModelLinearityError",
"ModelsError",
]
# Statistic functions implemented in `astropy.modeling.statistic.py`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers.py`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
Error raised when attempting to use a non-finite value
"""
class Covariance:
"""Class for covariance matrix calculated by fitter."""
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = "parameter variances / covariances \n"
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += fstring.replace(" " * len(param), param, 1).format(
repr(np.round(row[: i + 1], round_val))[7:-2]
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError("Covariance must be indexed by two values.")
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(
params[1]
)
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError(
"Covariance can be indexed by two parameter names or integer indices."
)
return self.cov_matrix[i1][i2]
class StandardDeviations:
"""Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
# sometimes scipy lstsq returns nonsensical negative values in the
# diagonals of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = "standard deviations\n"
for i, std in enumerate(self.stds):
if i <= max_lines - 1:
param = self.param_names[i]
ret_str += (
f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n"
)
else:
ret_str += "..."
return ret_str.rstrip()
def __repr__(self):
return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError(
"Standard deviation can be indexed by parameter name or integer."
)
return self.stds[i]
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
"""Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith("_"):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop("equivalencies", None)
data_has_units = (
isinstance(x, Quantity)
or isinstance(y, Quantity)
or isinstance(z, Quantity)
)
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies
)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(
model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]],
)
if isinstance(y, Quantity) and z is not None:
y = y.to(
model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]],
)
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data["z"] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data["_left_kwargs"] = model[1]
rename_data["_right_kwargs"] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError(
"This model does not support being fit to data with units."
)
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
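# Illustrative sketch (not part of the original module): because the fitters
# defined below wrap __call__ with @fitter_unit_support, data carrying astropy
# units can be passed straight to a model that supports unit fitting; units are
# stripped before optimization and restored on the returned parameters.  The
# data values below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy import units as u
#     >>> from astropy.modeling import models
#     >>> x = np.linspace(1., 5., 50) * u.um
#     >>> g_init = models.Gaussian1D(amplitude=3. * u.Jy, mean=2.5 * u.um,
#     ...                            stddev=0.5 * u.um)
#     >>> y = g_init(x)
#     >>> g_fit = LevMarLSQFitter()(g_init, x, y)
#     >>> g_fit.mean   # fitted parameters come back as Quantities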
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
When available, calculate and sets the parameter covariance matrix
(model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
"""
supported_constraints = ["fixed"]
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {
"residuals": None,
"rank": None,
"singular_values": None,
"params": None,
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None):
"""
Calculate the parameter covariance matrix and standard deviations
and set the `cov_matrix` and `stds` attributes.
"""
x_dot_x_prime = np.dot(a.T, a)
masked = hasattr(y, "mask")
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1 / (xx.count() - n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append(
(1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2)
)
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn(
"Calculation of fitting uncertainties "
"for 2D models with masked values not "
"currently supported.\n",
AstropyUserWarning,
)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1 / (len(xx) - n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append(
[(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)]
)
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, "domain") and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, "window") and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, "x_domain") and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, "y_domain") and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, "x_window") and model.x_window is None:
model.x_window = [-1.0, 1.0]
if hasattr(model, "y_window") and model.y_window is None:
model.y_window = [-1.0, 1.0]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError(
"Model is not linear in parameters, "
"linear fit methods should not be used."
)
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(
x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis
)
has_fixed = any(model_copy.fixed.values())
# This is also done by _convert_inputs, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [
idx
for idx in range(len(model_copy.param_names))
if idx not in fitparam_indices
]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray(
[
getattr(model_copy, model_copy.param_names[idx]).value
for idx in fixparam_indices
]
)
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x,
weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "domain"):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x
)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x,
y,
weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis,
)
# map domain into window
if hasattr(model_copy, "x_domain"):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = np.asarray(
self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y)
)
fixderivs = self._deriv_with_constraints(
model_copy, fixparam_indices, x=x, y=y
)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(
f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y"
)
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError(
"Found NaNs in the coefficient matrix, which "
"should not happen and would crash the lapack "
"routine. Maybe check that weights are not null."
)
a = None  # needed for calculating covariance
if (masked and len(model_copy) > 1) or (
weights is not None and weights.ndim > 1
):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(
model_lhs, model_rhs, rcond
)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond)
self.fit_info["residuals"] = resids
self.fit_info["rank"] = rank
self.fit_info["singular_values"] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info["params"] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (
hasattr(model_copy, "_order")
and len(model_copy) == 1
and not has_fixed
and rank != model_copy._order
):
warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(
model_copy, a * scl, len(lacoef), x, y, z, resids
)
model_copy.sync_constraints = True
return model_copy
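# Illustrative sketch (not part of the original module): a minimal
# LinearLSQFitter run with uncertainties requested, so that the fitted model
# carries the Covariance/StandardDeviations objects defined above.  The sample
# data below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy.modeling import models
#     >>> x = np.linspace(0., 10., 50)
#     >>> y = 2.0 * x + 1.0
#     >>> fitter = LinearLSQFitter(calc_uncertainties=True)
#     >>> line = fitter(models.Linear1D(), x, y)
#     >>> line.slope.value, line.intercept.value   # approximately 2.0 and 1.0
#     >>> line.stds['slope'], line.stds[0]         # standard deviation, two ways
#     >>> fitter.fit_info['rank']                  # auxiliary lstsq information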
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {"niter": None}
def __str__(self):
return (
f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}"
)
def __repr__(self):
return (
f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})"
)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (
not hasattr(self.fitter, "supports_masked_input")
or self.fitter.supports_masked_input is not True
):
raise ValueError(
f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values"
)
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x,)
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if "axis" not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs["axis"] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop("axis", None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True,
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(
data_T, mask_T, model_vals_T
):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn(
"outlier_func did not accept axis argument; "
"reverted to slow loop over models.",
AstropyUserWarning,
)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(
fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights,
**kwargs,
)
else:
fitted_model = self.fitter(
fitted_model,
*coords,
filtered_data,
weights=filtered_weights,
**kwargs,
)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {"niter": n}
self.fit_info.update(getattr(self.fitter, "fit_info", {}))
return fitted_model, filtered_data.mask
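# Illustrative sketch (not part of the original module): pairing a fitter with
# sigma clipping through FittingWithOutlierRemoval.  Assumes
# astropy.stats.sigma_clip as the outlier function; the contaminated data
# below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy.modeling import models
#     >>> from astropy.stats import sigma_clip
#     >>> x = np.linspace(0., 10., 100)
#     >>> y = 3.0 * x + 1.0
#     >>> y[10] = y[60] = 100.0                        # inject two outliers
#     >>> fit = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
#     ...                                 niter=3, sigma=3.0)
#     >>> fitted_line, outlier_mask = fit(models.Linear1D(), x, y)
#     >>> outlier_mask.sum()                           # flags the injected points
#     >>> fit.fit_info['niter']                        # iterations actually run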
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters
Parameters
----------
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
Whether the parameter bounds set on the model will be enforced for each
parameter while fitting via a simple min/max condition.
Default: True
"""
supported_constraints = ["fixed", "tied", "bounds"]
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2:-1]) - meas)
else:
value = np.ravel(weights * (model(*args[2:-1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError(
"Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before "
"fitting to avoid this error."
)
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [
x
for x in model.fixed
if (model.fixed[x] is False) and (model.tied[x] is False)
]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array(
[np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)]
)
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars], True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
fit_deriv = np.array(model.fit_deriv(x, *params))
try:
output = np.array(
[np.ravel(_) for _ in np.array(weights) * fit_deriv]
)
if output.shape != fit_deriv.shape:
output = np.array(
[np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv]
)
return output
except ValueError:
return np.array(
[
np.ravel(_)
for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0)
]
).transpose()
else:
if not model.col_fit_deriv:
return [
np.ravel(_)
for _ in (
np.ravel(weights)
* np.array(model.fit_deriv(x, y, *params)).T
).T
]
return [
np.ravel(_)
for _ in weights * np.array(model.fit_deriv(x, y, *params))
]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2)
dof = len(y) - len(init_values)
self.fit_info["param_cov"] = cov_x * sum_sqrs / dof
else:
self.fit_info["param_cov"] = None
if self._calc_uncertainties is True:
if self.fit_info["param_cov"] is not None:
self._add_fitting_uncertainties(model, self.fit_info["param_cov"])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
x, y, z : ndarrays
x, y, and z with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], z[mask]
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
maxiter=DEFAULT_MAXITER,
acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS,
estimate_jacobian=False,
filter_non_finite=False,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z = self._filter_non_finite(x, y, z)
farg = (
model_copy,
weights,
) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(
model_copy, farg, maxiter, acc, epsilon, estimate_jacobian
)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {
"nfev": None,
"fvec": None,
"fjac": None,
"ipvt": None,
"qtf": None,
"message": None,
"ierr": None,
"param_jac": None,
"param_cov": None,
}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function,
init_values,
args=farg,
Dfun=dfunc,
col_deriv=model.col_fit_deriv,
maxfev=maxiter,
epsfcn=epsilon,
xtol=acc,
full_output=True,
)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info["cov_x"] = cov_x
self.fit_info["message"] = mess
self.fit_info["ierr"] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return init_values, fitparams, cov_x
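# Illustrative sketch (not part of the original module): a basic non-linear fit
# with LevMarLSQFitter, requesting the parameter covariance.  The noisy data
# below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy.modeling import models
#     >>> rng = np.random.default_rng(42)
#     >>> x = np.linspace(-5., 5., 200)
#     >>> y = 3.0 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
#     >>> y += rng.normal(0., 0.05, x.size)
#     >>> fitter = LevMarLSQFitter(calc_uncertainties=True)
#     >>> g_fit = fitter(models.Gaussian1D(amplitude=1., mean=0.5, stddev=1.), x, y)
#     >>> g_fit.parameters                 # close to [3.0, 1.3, 0.8]
#     >>> fitter.fit_info['param_cov']     # covariance matrix of the fit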
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
- Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
Whether the parameter bounds set on the model will be enforced for each
parameter while fitting via a simple min/max condition. A True setting
replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = "2-point"
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(
self._wrap_deriv(params, model, weights, x, y, z)
)
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note, if use_min_max_bounds is True we are defaulting to enforcing bounds
# using the old method employed by LevMarLSQFitter, this is different
# from the method that optimize.least_squares employs to enforce bounds
# thus we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function,
init_values,
args=farg,
jac=dfunc,
max_nfev=maxiter,
diff_step=np.sqrt(epsilon),
xtol=acc,
method=self._method,
bounds=bounds,
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[: s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn(
f"The fit may be unsuccessful; check: \n {self.fit_info.message}",
AstropyUserWarning,
)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
Whether the parameter bounds set on the model will be enforced for each
parameter while fitting via a simple min/max condition. A True setting
replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("trf", calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
Whether the parameter bounds set on the model will be enforced for each
parameter while fitting via a simple min/max condition. A True setting
replicates how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__("dogbox", calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
Whether the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` class which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False):
super().__init__("lm", calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
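# Illustrative sketch (not part of the original module): the optimizer-based
# fitters above share the same calling convention; extra keyword arguments are
# forwarded to the optimizer.  The data below are hypothetical.
#
#     >>> import numpy as np
#     >>> from astropy.modeling import models
#     >>> x = np.linspace(-5., 5., 200)
#     >>> y = 3.0 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
#     >>> g_init = models.Gaussian1D(amplitude=1., mean=0.5, stddev=1.)
#     >>> g_slsqp = SLSQPLSQFitter()(g_init, x, y, verblevel=0)
#     >>> g_simplex = SimplexLSQFitter()(g_init, x, y, maxiter=500)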
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]["slice"]
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of an one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[: model.n_inputs + 1]
del lstsqargs[: model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError(f"Expected >1 models, {len(self.models)} is given")
if len(self.jointparams.keys()) < 2:
raise TypeError(
"At least two parameters are expected, "
f"{len(self.jointparams.keys())} is given"
)
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(
f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected"
)
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(
f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided"
)
self.fitparams[:], _ = optimize.leastsq(
self.objective_function, self.fitparams, args=args
)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]["slice"]
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
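# Illustrative sketch (not part of the original module): JointFitter fitting
# two Gaussians to two hypothetical data sets while keeping 'amplitude' common.
#
#     >>> import numpy as np
#     >>> from astropy.modeling import models
#     >>> x = np.linspace(-5., 5., 200)
#     >>> g1 = models.Gaussian1D(amplitude=10., mean=-1.0, stddev=1.0)
#     >>> g2 = models.Gaussian1D(amplitude=10., mean=2.0, stddev=0.5)
#     >>> y1, y2 = g1(x), g2(x)
#     >>> jf = JointFitter([g1, g2],
#     ...                  {g1: ['amplitude'], g2: ['amplitude']},
#     ...                  [10.0])
#     >>> jf(x, y1, x, y2)     # updates g1 and g2 in place, sharing 'amplitude'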
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :]
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
use_min_max_bounds : bool
Whether the parameter bounds set on the model will be enforced on each
parameter that has bounds.
Default: True
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]["slice"]
shape = param_metrics[name]["shape"]
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset : offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]["slice"]
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated("5.1", "private method: _fitter_to_model_params has been made public now")
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]["slice"]
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
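# Illustrative sketch (not part of the original module): the round trip between
# a constrained model and the flat parameter vector seen by an optimizer.  The
# model below is hypothetical.
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(amplitude=2., mean=1., stddev=0.5,
#     ...                       fixed={'mean': True})
#     >>> free_vals, free_idx, bounds = model_to_fit_params(g)
#     >>> free_vals                     # array([2. , 0.5]); 'mean' is excluded
#     >>> fitter_to_model_params(g, [3.0, 0.7])
#     >>> g.parameters                  # array([3. , 1. , 0.7]); 'mean' untouched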
@deprecated("5.1", "private method: _model_to_fit_params has been made public now")
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = "Optimizer cannot handle {0} constraints."
if any(model.fixed.values()) and "fixed" not in supported_constraints:
raise UnsupportedConstraintError(message.format("fixed parameter"))
if any(model.tied.values()) and "tied" not in supported_constraints:
raise UnsupportedConstraintError(message.format("tied parameter"))
if (
any(tuple(b) != (None, None) for b in model.bounds.values())
and "bounds" not in supported_constraints
):
raise UnsupportedConstraintError(message.format("bound parameter"))
if model.eqcons and "eqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("equality"))
if model.ineqcons and "ineqcons" not in supported_constraints:
raise UnsupportedConstraintError(message.format("inequality"))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn(
"Model is linear in parameters; consider using linear fitting methods.",
AstropyUserWarning,
)
elif len(model) != 1:
# for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
This provides a means of inserting a fitting routine without requiring
it to be merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
entry_points are objects which encapsulate importable objects and
are defined on the installation of a package.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(
AstropyUserWarning(
f"{type(e).__name__} error occurred in entry point {name}."
)
)
else:
if not inspect.isclass(entry_point):
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to be a Class."
)
)
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(
AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"
)
)
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, "select"):
populate_entry_points(ep.select(group="astropy.modeling"))
else:
populate_entry_points(ep.get("astropy.modeling", []))
_populate_ep()
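# Illustrative sketch (not part of the original module): a third-party package
# could register its own Fitter subclass under the "astropy.modeling"
# entry-point group so that _populate_ep() injects it into this namespace on
# import.  The package and class names below are hypothetical (setuptools-style
# setup() shown).
#
#     setup(
#         name="my_package",
#         entry_points={
#             "astropy.modeling": [
#                 "MyFitter = my_package.fitting:MyFitter",
#             ],
#         },
#     )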
|
{
"content_hash": "2ed04d504a8c23cf538e599558d2c9c8",
"timestamp": "",
"source": "github",
"line_count": 2202,
"max_line_length": 122,
"avg_line_length": 37.426884650317895,
"alnum_prop": 0.5652923047055112,
"repo_name": "pllim/astropy",
"id": "484bd01bb9d33df0e540ab0beef9f167a98d6432",
"size": "82493",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/modeling/fitting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
import os, stat, time
import pygtk
pygtk.require('2.0')
import gtk
folderxpm = [
"17 16 7 1",
" c #000000",
". c #808000",
"X c yellow",
"o c #808080",
"O c #c0c0c0",
"+ c white",
"@ c None",
"@@@@@@@@@@@@@@@@@",
"@@@@@@@@@@@@@@@@@",
"@@+XXXX.@@@@@@@@@",
"@+OOOOOO.@@@@@@@@",
"@+OXOXOXOXOXOXO. ",
"@+XOXOXOXOXOXOX. ",
"@+OXOXOXOXOXOXO. ",
"@+XOXOXOXOXOXOX. ",
"@+OXOXOXOXOXOXO. ",
"@+XOXOXOXOXOXOX. ",
"@+OXOXOXOXOXOXO. ",
"@+XOXOXOXOXOXOX. ",
"@+OOOOOOOOOOOOO. ",
"@ ",
"@@@@@@@@@@@@@@@@@",
"@@@@@@@@@@@@@@@@@"
]
folderpb = gtk.gdk.pixbuf_new_from_xpm_data(folderxpm)
filexpm = [
"12 12 3 1",
" c #000000",
". c #ffff04",
"X c #b2c0dc",
"X XXX",
"X ...... XXX",
"X ...... X",
"X . ... X",
"X ........ X",
"X . .... X",
"X ........ X",
"X . .. X",
"X ........ X",
"X . .. X",
"X ........ X",
"X X"
]
filepb = gtk.gdk.pixbuf_new_from_xpm_data(filexpm)
class FileListingCellDataExample:
column_names = ['Name', 'Size', 'Mode', 'Last Changed']
def delete_event(self, widget, event, data=None):
gtk.main_quit()
return False
def __init__(self, dname = None):
cell_data_funcs = (None, self.file_size, self.file_mode,
self.file_last_changed)
# Create a new window
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_size_request(400, 300)
self.window.connect("delete_event", self.delete_event)
listmodel = self.make_list(dname)
# create the TreeView
self.treeview = gtk.TreeView()
# create the TreeViewColumns to display the data
self.tvcolumn = [None] * len(self.column_names)
cellpb = gtk.CellRendererPixbuf()
self.tvcolumn[0] = gtk.TreeViewColumn(self.column_names[0], cellpb)
self.tvcolumn[0].set_cell_data_func(cellpb, self.file_pixbuf)
cell = gtk.CellRendererText()
self.tvcolumn[0].pack_start(cell, False)
self.tvcolumn[0].set_cell_data_func(cell, self.file_name)
self.treeview.append_column(self.tvcolumn[0])
for n in range(1, len(self.column_names)):
cell = gtk.CellRendererText()
self.tvcolumn[n] = gtk.TreeViewColumn(self.column_names[n], cell)
if n == 1:
cell.set_property('xalign', 1.0)
self.tvcolumn[n].set_cell_data_func(cell, cell_data_funcs[n])
self.treeview.append_column(self.tvcolumn[n])
self.treeview.connect('row-activated', self.open_file)
self.scrolledwindow = gtk.ScrolledWindow()
self.scrolledwindow.add(self.treeview)
self.window.add(self.scrolledwindow)
self.treeview.set_model(listmodel)
self.window.show_all()
return
def make_list(self, dname=None):
if not dname:
self.dirname = os.path.expanduser('~')
else:
self.dirname = os.path.abspath(dname)
self.window.set_title(self.dirname)
        files = [f for f in os.listdir(self.dirname) if f[0] != '.']
files.sort()
files = ['..'] + files
listmodel = gtk.ListStore(object)
for f in files:
listmodel.append([f])
return listmodel
def open_file(self, treeview, path, column):
model = treeview.get_model()
iter = model.get_iter(path)
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
if stat.S_ISDIR(filestat.st_mode):
new_model = self.make_list(filename)
treeview.set_model(new_model)
return
def file_pixbuf(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
if stat.S_ISDIR(filestat.st_mode):
pb = folderpb
else:
pb = filepb
cell.set_property('pixbuf', pb)
return
def file_name(self, column, cell, model, iter):
cell.set_property('text', model.get_value(iter, 0))
return
def file_size(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
cell.set_property('text', filestat.st_size)
return
def file_mode(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
cell.set_property('text', oct(stat.S_IMODE(filestat.st_mode)))
return
def file_last_changed(self, column, cell, model, iter):
filename = os.path.join(self.dirname, model.get_value(iter, 0))
filestat = os.stat(filename)
cell.set_property('text', time.ctime(filestat.st_mtime))
return
def main():
gtk.main()
if __name__ == "__main__":
flcdexample = FileListingCellDataExample()
main()
|
{
"content_hash": "c2a47abdf5bf4878db7fa5cff0faaee9",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 77,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.5490118577075099,
"repo_name": "certik/pyjamas",
"id": "3f1cfe8468c336fe6baa1a9ca2739a9274ae070b",
"size": "5083",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pygtkweb/demos/045-filelisting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "401884"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "4074658"
},
{
"name": "Shell",
"bytes": "14552"
}
],
"symlink_target": ""
}
|
from models.game.bots.Bot import Bot
from models.game.Game import Game
from models.data.GameDataModel import GameDataModel
from models.data import DatabaseConnection as DB
class Experiment(object):
def __init__(self, player1, player2, iterations, record=True):
""" An Experiment is a sequence of several games between two bots. Results can be saved or discarded
:param player1: the Bot playing as 'X'
:param player2: the Bot playing as 'O'
:param iterations: the number of games to play for this experiment
:param record: boolean indicator - should the result of games be recorded or not?
"""
if not isinstance(player1, Bot) or not isinstance(player2, Bot):
raise Exception("Invalid Experiment: both players must be bots")
self.p1 = player1
self.p2 = player2
self.iterations = iterations
self.completed_iterations = 0
self.p1_wins = 0
self.p2_wins = 0
self.ties = 0
self.record_result = record
self.finished = False
def run(self, callback=None):
""" Runs the current experiment. The callback function will be called after each game is finished.
:param callback: a function to call at the termination of each game. The iteration number and winner will be passed as arguments
:return: None
"""
db_insertion_scripts = []
        for i in range(self.iterations):
game = Game(self.p1, self.p2)
game.finish_game()
self.completed_iterations += 1
winner = game.get_winner()
if winner == self.p1.number:
self.p1_wins += 1
elif winner == self.p2.number:
self.p2_wins += 1
else:
self.ties += 1
if self.record_result:
game_dm = GameDataModel(game)
db_insertion_scripts.append(game_dm.get_save_script())
if callback is not None:
callback(i+1, game.get_winner())
if self.record_result:
insertion_script = "\n".join(db_insertion_scripts)
DB.execute(insertion_script)
self.finished = True
def get_p1_win_rate(self):
if self.completed_iterations == 0:
return 0
return self.p1_wins / self.completed_iterations
def get_p2_win_rate(self):
if self.completed_iterations == 0:
return 0
return self.p2_wins / self.completed_iterations
def get_tie_rate(self):
if self.completed_iterations == 0:
return 0
return self.ties / self.completed_iterations
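# A minimal usage sketch. The RandomBot import path and its constructor
# arguments are hypothetical -- any two Bot subclasses from this project
# would work the same way:
#
#   from models.game.bots.RandomBot import RandomBot
#
#   experiment = Experiment(RandomBot(1), RandomBot(2), iterations=100, record=False)
#   experiment.run(callback=lambda i, winner: print("game %d won by %s" % (i, winner)))
#   print(experiment.get_p1_win_rate(), experiment.get_p2_win_rate(), experiment.get_tie_rate())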
|
{
"content_hash": "093b1bbbff361ae44a6c0cfa7436168e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 137,
"avg_line_length": 34.8051948051948,
"alnum_prop": 0.6037313432835821,
"repo_name": "zachdj/ultimate-tic-tac-toe",
"id": "745ebd4d144ee82b00c7f5dab11bf339e44e7e19",
"size": "2680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/game/Experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143310"
}
],
"symlink_target": ""
}
|
"""
WSGI config for RenderProgressTracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RenderProgressTracker.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "e4a7c20fdc29ef5e6f5fa871ab7da749",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 81,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.7889688249400479,
"repo_name": "trailbehind/RenderProgressTracker",
"id": "d091234c728bef0efb214537dfc95f9fbd2e66e4",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RenderProgressTracker/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10161"
},
{
"name": "JavaScript",
"bytes": "223044"
},
{
"name": "Python",
"bytes": "12325"
}
],
"symlink_target": ""
}
|
from .array import DecimalArray, DecimalDtype, make_data, to_decimal
__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"]
|
{
"content_hash": "6b5d9941a3d5a92a1a15c896f72b3bd1",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 69,
"avg_line_length": 46.666666666666664,
"alnum_prop": 0.7142857142857143,
"repo_name": "jreback/pandas",
"id": "8194327f8812ea85f248964e1e60d6797ef1b936",
"size": "140",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/tests/extension/decimal/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4879"
},
{
"name": "C",
"bytes": "406353"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "14930989"
},
{
"name": "Shell",
"bytes": "29317"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import collections
import os
import shlex
import subprocess
import sys
import pytest
from setuptools.dist import Distribution
import setuptools_golang
@pytest.fixture(autouse=True, scope='session')
def enable_coverage_subprocesses():
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.environ['COVERAGE_PROCESS_START'] = os.path.join(here, 'setup.cfg')
def auto_namedtuple(**kwargs):
return collections.namedtuple('auto_namedtuple', kwargs.keys())(**kwargs)
def run(*cmd, **kwargs):
returncode = kwargs.pop('returncode', 0)
proc = subprocess.Popen(cmd, **kwargs)
out, err = proc.communicate()
out = out.decode('UTF-8').replace('\r', '') if out is not None else None
err = err.decode('UTF-8').replace('\r', '') if err is not None else None
if returncode is not None:
if proc.returncode != returncode:
raise AssertionError(
f'{cmd!r} returned {proc.returncode} (expected {returncode})\n'
f'out:\n{out}\nerr:\n{err}\n',
)
return auto_namedtuple(returncode=proc.returncode, out=out, err=err)
def run_output(*cmd, **kwargs):
return run(*cmd, stdout=subprocess.PIPE, **kwargs).out
def test_sets_cmdclass():
dist = Distribution()
assert not dist.cmdclass.get('build_ext')
setuptools_golang.set_build_ext(
dist, 'build_golang', {'root': 'github.com/asottile/fake'},
)
assert dist.cmdclass['build_ext']
@pytest.fixture(scope='session')
def venv(tmpdir_factory):
    """A shared virtualenv fixture; be careful not to install two of the same
    package into this -- or sadness...
"""
bin = 'Scripts' if sys.platform == 'win32' else 'bin'
venv = tmpdir_factory.mktemp('venv').join('venv')
pip = venv.join(bin, 'pip').strpath
python = venv.join(bin, 'python').strpath
# Make sure this virtualenv has the same executable
run('virtualenv', venv.strpath, '-p', sys.executable)
# Install this so we can get coverage
run(pip, 'install', 'covdefaults>=1.2.0', 'coverage-enable-subprocess')
# Install us!
run(pip, 'install', '-e', '.')
yield auto_namedtuple(venv=venv, pip=pip, python=python)
SUM = 'import {0}; print({0}.sum(1, 2))'
@pytest.mark.parametrize(
('pkg', 'mod'),
(
(os.path.join('testing', 'sum'), 'sum'),
(os.path.join('testing', 'sum_pure_go'), 'sum_pure_go'),
(os.path.join('testing', 'sum_sub_package'), 'sum_sub_package.sum'),
),
)
def test_sum_integration(venv, pkg, mod):
run(venv.pip, 'install', '-v', pkg)
out = run_output(venv.python, '-c', SUM.format(mod))
assert out == '3\n'
HELLO_WORLD = 'import project_with_c; print(project_with_c.hello_world())'
def test_integration_project_with_c(venv):
test_sum_integration(
venv,
os.path.join('testing', 'project_with_c'), 'project_with_c_sum.sum',
)
out = run_output(venv.python, '-c', HELLO_WORLD)
assert out == 'hello world\n'
RED = 'import red; print(red.red(u"ohai"))'
def test_integration_imports_gh(venv):
run(venv.pip, 'install', os.path.join('testing', 'imports_gh'))
out = run_output(venv.python, '-c', RED)
assert out == '\x1b[0;31mohai\x1b[0m\n'
GOMOD = 'import gomodules; gomodules.reversemsg()'
def test_integration_gomodules(venv):
run(venv.pip, 'install', os.path.join('testing', 'gomodules'))
out = run_output(venv.python, '-c', GOMOD)
assert out == 'test example'
def test_integration_notfound(venv):
ret = run(
venv.pip, 'install', os.path.join('testing', 'notfound'),
returncode=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
assert ret.returncode != 0
assert (
'Error building extension `notfound`: notfound.go does not exist' in
ret.out
)
def test_integration_multidir(venv):
ret = run(
venv.pip, 'install', os.path.join('testing', 'multidir'),
returncode=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
assert ret.returncode != 0
assert (
'Error building extension `multidir`: '
'sources must be a single file in the `main` package.' in ret.out
)
OHAI = 'import hello_lib; print(hello_lib.ohai(u"Anthony"))'
def test_integration_internal_imports(venv):
run(venv.pip, 'install', os.path.join('testing', 'internal_imports'))
out = run_output(venv.python, '-c', OHAI)
assert out == 'ohai, Anthony\n'
def test_integration_user_gopath(venv, tmpdir):
testdir = os.path.join('testing', 'gomodules')
gopath = str(tmpdir.join('gopath'))
env = {**os.environ, 'SETUPTOOLS_GOLANG_GOPATH': gopath}
ret = run(
venv.pip, 'install', '-v', testdir,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
assert f'$ GOPATH={shlex.quote(gopath)} go get -d' in ret.out
def test_integration_defines(venv):
run(venv.pip, 'install', os.path.join('testing', 'defines'))
out = run_output(venv.python, '-c', SUM.format('sum'))
assert out == '3\n'
def test_regression_dangling_symlink(venv):
# this raises an error because of a dangling symlink
run(venv.pip, 'install', os.path.join('testing', 'dangling_symlink'))
|
{
"content_hash": "d6c6faad9754cfcd2831d5e702b6a3c2",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 30.270114942528735,
"alnum_prop": 0.6407822289728499,
"repo_name": "asottile/setuptools-golang",
"id": "c9c181910e5d6c0286951b8cfdeca8d8773d33d9",
"size": "5267",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/setuptools_golang_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4128"
},
{
"name": "Go",
"bytes": "5177"
},
{
"name": "Python",
"bytes": "18858"
}
],
"symlink_target": ""
}
|
from keystoneclient.auth.identity import v3
class OAuth2Method(v3.AuthMethod):
_method_parameters = ['access_token']
def __init__(self, **kwargs):
"""Construct an OAuth based authentication method.
:param string access_token: Access token id.
"""
super(OAuth2Method, self).__init__(**kwargs)
def get_auth_data(self, session, auth, headers, **kwargs):
# Build the data for our custom auth method. Check the OAuth2.0 keystone
# auth plugin for more info: https://www.github.com/ging/keystone
auth_data = {
            'access_token_id': self.access_token
}
name = 'oauth2'
return name, auth_data
class OAuth2(v3.AuthConstructor):
_auth_method_class = OAuth2Method
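# A rough usage sketch (the auth_url and token values are placeholders; the
# Session wiring follows the usual keystoneclient v3 plugin pattern):
#
#   from keystoneclient import session
#   from keystoneclient.v3 import client
#
#   auth = OAuth2(auth_url='https://keystone.example.org:5000/v3',
#                 access_token='<oauth2-access-token>')
#   sess = session.Session(auth=auth)
#   keystone = client.Client(session=sess)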
|
{
"content_hash": "bc34e4cd6ce82b01a1efd886a07d4673",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 29.423076923076923,
"alnum_prop": 0.6352941176470588,
"repo_name": "ging/python-keystoneclient",
"id": "bccedfa0f14ca202c0631cc6a0c496513f868ee0",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/v3/contrib/oauth2/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1480821"
},
{
"name": "Shell",
"bytes": "7148"
}
],
"symlink_target": ""
}
|
"""
Problem 89
18 February 2005
For a number written in Roman numerals to be considered valid there are basic rules which must be followed.
Even though the rules allow some numbers to be expressed in more than one way there is always a "best" way
of writing a particular number.
For example, it would appear that there are at least six ways of writing the number sixteen:
IIIIIIIIIIIIIIII
VIIIIIIIIIII
VVIIIIII
XIIIIII
VVVI
XVI
However, according to the rules only XIIIIII and XVI are valid, and the last example is considered to be the most
efficient, as it uses the least number of numerals.
The 11K text file, roman.txt (right click and 'Save Link/Target As...'), contains one thousand numbers written in
valid, but not necessarily minimal, Roman numerals; see About... Roman Numerals for the definitive rules for
this problem.
Find the number of characters saved by writing each of these in their minimal form.
Note: You can assume that all the Roman numerals in the file contain no more than four consecutive identical units.
----------------------------------------------------------
Created on 10.02.2015
@author: ahallmann
"""
import unittest
import timeit
mapping = {
'I': 1,
'V': 5,
'X': 10,
'L': 50,
'C': 100,
'D': 500,
'M': 1000
}
order = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
def read_numerals(filename):
f = open(filename, 'r')
numerals = []
for line in f.readlines():
numerals.append(line.strip())
f.close()
return numerals
def format_roman_numeral(n):
global mapping, order
sub_numerals = {'I': 10, 'X': 100, 'C': 1000}
s = ''
number_index = 0
while number_index < len(order):
numeral_char = order[number_index]
numeral_value = mapping[numeral_char]
for sub_char in sub_numerals:
sub_value = mapping[sub_char]
if 1 <= numeral_value - sub_value <= n < numeral_value <= sub_numerals[sub_char]:
s += sub_char
n += sub_value
if n >= numeral_value:
            s += numeral_char * (n // numeral_value)
n %= numeral_value
if n <= 0:
break
number_index -= 1
number_index += 1
return s
def format_roman_numeral_old(number):
s = ''
if 900 <= number < 1000:
s += 'C'
number += 100
while number >= 1000:
s += 'M'
number -= 1000
if 900 <= number < 1000:
s += 'C'
number += 100
if 400 <= number < 500:
s += 'C'
number += 100
if number >= 500:
s += 'D'
number -= 500
if 90 <= number < 100:
s += 'X'
number += 10
while number >= 100:
s += 'C'
number -= 100
if 90 <= number < 100:
s += 'X'
number += 10
if 40 <= number < 50:
s += 'X'
number += 10
if number >= 50:
s += 'L'
number -= 50
if number == 9:
s += 'I'
number += 1
while number >= 10:
s += 'X'
number -= 10
if number == 9:
s += 'I'
number += 1
if number == 4:
s += 'I'
number += 1
if number >= 5:
s += 'V'
number -= 5
while number >= 1:
s += 'I'
number -= 1
return s
def parse_roman_numeral(numeral):
global mapping, order
last_order = -1
s = 0
for c in numeral:
if c not in mapping:
raise ValueError("unknown char: " + c)
current_order = order.index(c)
if current_order < last_order:
s -= mapping[order[last_order]] * 2
last_order = current_order
s += mapping[c]
return s
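# Worked example: parse_roman_numeral('XIX') walks X (+10), I (+1), then X;
# because X outranks the preceding I, the running total is corrected by -2*1
# before adding 10, giving 10 + 1 - 2 + 10 = 19, matching the 'XIX': 19 test case.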
def optimize_roman_numeral(numeral):
return format_roman_numeral(parse_roman_numeral(numeral))
def solve():
numerals = read_numerals("data/problem089.txt")
savings = 0
for numeral in numerals:
o = optimize_roman_numeral(numeral)
savings += len(numeral) - len(o)
return savings
class Test(unittest.TestCase):
def test_samples(self):
test_cases = {
'I': 1,
'II': 2,
'IV': 4,
'XVI': 16,
'XIX': 19,
'XLIX': 49,
'XL': 40,
'XC': 90,
'XCV': 95,
'CD': 400,
'CM': 900,
'MMMMDXCV': 4595,
'MMMMCMXCIX': 4999
}
for c in test_cases:
self.assertEqual(c, format_roman_numeral(test_cases[c]))
def test_parse_format_compatibility(self):
for i in range(5000):
self.assertEqual(i, parse_roman_numeral(format_roman_numeral(i)))
def test_old_new_compatibility(self):
for i in range(5000):
self.assertEqual(format_roman_numeral_old(i), format_roman_numeral(i))
def test_answer(self):
self.assertEqual(743, solve())
pass
# -----------------------------------------
def run():
return solve()
if __name__ == '__main__':
unittest.main()
# if __name__ == '__main__':
# t = timeit.Timer("run()", "from __main__ import run")
# count = 1
# print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
|
{
"content_hash": "754eb0c8f5b6d288aeda43730c46c00c",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 115,
"avg_line_length": 25.38425925925926,
"alnum_prop": 0.507021703447018,
"repo_name": "arturh85/projecteuler",
"id": "641cc23a9776c77e519d71ca64af5d95a4f79c1c",
"size": "5499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/problem089.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "180515"
}
],
"symlink_target": ""
}
|
"""Support for MyQ-Enabled Garage Doors."""
from typing import Any
from pymyq.const import DEVICE_TYPE_GATE as MYQ_DEVICE_TYPE_GATE
from pymyq.errors import MyQError
from homeassistant.components.cover import (
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import MyQEntity
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY, MYQ_TO_HASS
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up MyQ covers."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
async_add_entities(
[MyQCover(coordinator, device) for device in myq.covers.values()]
)
class MyQCover(MyQEntity, CoverEntity):
"""Representation of a MyQ cover."""
_attr_supported_features = CoverEntityFeature.OPEN | CoverEntityFeature.CLOSE
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
super().__init__(coordinator, device)
self._device = device
if device.device_type == MYQ_DEVICE_TYPE_GATE:
self._attr_device_class = CoverDeviceClass.GATE
else:
self._attr_device_class = CoverDeviceClass.GARAGE
self._attr_unique_id = device.device_id
@property
def is_closed(self) -> bool:
"""Return true if cover is closed, else False."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSED
@property
def is_closing(self) -> bool:
"""Return if the cover is closing or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSING
@property
    def is_open(self) -> bool:
        """Return if the cover is open or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_OPEN
@property
def is_opening(self) -> bool:
"""Return if the cover is opening or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_OPENING
async def async_close_cover(self, **kwargs: Any) -> None:
"""Issue close command to cover."""
if self.is_closing or self.is_closed:
return
try:
wait_task = await self._device.close(wait_for_state=False)
except MyQError as err:
raise HomeAssistantError(
f"Closing of cover {self._device.name} failed with error: {err}"
) from err
# Write closing state to HASS
self.async_write_ha_state()
result = wait_task if isinstance(wait_task, bool) else await wait_task
# Write final state to HASS
self.async_write_ha_state()
if not result:
raise HomeAssistantError(f"Closing of cover {self._device.name} failed")
async def async_open_cover(self, **kwargs: Any) -> None:
"""Issue open command to cover."""
if self.is_opening or self.is_open:
return
try:
wait_task = await self._device.open(wait_for_state=False)
except MyQError as err:
raise HomeAssistantError(
f"Opening of cover {self._device.name} failed with error: {err}"
) from err
# Write opening state to HASS
self.async_write_ha_state()
result = wait_task if isinstance(wait_task, bool) else await wait_task
# Write final state to HASS
self.async_write_ha_state()
if not result:
raise HomeAssistantError(f"Opening of cover {self._device.name} failed")
|
{
"content_hash": "3dd185eed65b7e42d75da0fdf27cf060",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 86,
"avg_line_length": 33.35344827586207,
"alnum_prop": 0.6484879813905402,
"repo_name": "nkgilley/home-assistant",
"id": "51d0b3290a6acc029464f9437d196aef3614e233",
"size": "3869",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/myq/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Const for Plaato."""
from datetime import timedelta
DOMAIN = "plaato"
PLAATO_DEVICE_SENSORS = "sensors"
PLAATO_DEVICE_ATTRS = "attrs"
SENSOR_SIGNAL = f"{DOMAIN}_%s_%s"
CONF_USE_WEBHOOK = "use_webhook"
CONF_DEVICE_TYPE = "device_type"
CONF_DEVICE_NAME = "device_name"
CONF_CLOUDHOOK = "cloudhook"
PLACEHOLDER_WEBHOOK_URL = "webhook_url"
PLACEHOLDER_DOCS_URL = "docs_url"
PLACEHOLDER_DEVICE_TYPE = "device_type"
PLACEHOLDER_DEVICE_NAME = "device_name"
DOCS_URL = "https://www.home-assistant.io/integrations/plaato/"
PLATFORMS = ["sensor", "binary_sensor"]
SENSOR_DATA = "sensor_data"
COORDINATOR = "coordinator"
DEVICE = "device"
DEVICE_NAME = "device_name"
DEVICE_TYPE = "device_type"
DEVICE_ID = "device_id"
UNDO_UPDATE_LISTENER = "undo_update_listener"
DEFAULT_SCAN_INTERVAL = 5
MIN_UPDATE_INTERVAL = timedelta(minutes=1)
EXTRA_STATE_ATTRIBUTES = {
"beer_name": "beer_name",
"keg_date": "keg_date",
"mode": "mode",
"original_gravity": "original_gravity",
"final_gravity": "final_gravity",
"alcohol_by_volume": "alcohol_by_volume",
}
|
{
"content_hash": "55d19d58be074fa7e7111abee2035fc4",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.7058270676691729,
"repo_name": "Danielhiversen/home-assistant",
"id": "2d8cf40c91e1e6127eb63c4066d6b55d55f9f8b8",
"size": "1064",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/plaato/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
from django.template import RequestContext, loader
from models import Artist, Song
from django.shortcuts import render, redirect
from forms import SongForm
from django.http import HttpResponse
from justAnalyze import analyze
def index(request):
song_list = Song.objects.all()
context = {
'song_list': song_list,
}
return render(request, 'lyrics/index.html', context=context)
def new_song(request):
if request.method == "POST":
form = SongForm(request.POST)
if form.is_valid():
data = analyze(form.cleaned_data['lyrics'])
            try:
                artist = Artist.objects.get(name=form.cleaned_data['artist_name'])
            except Artist.DoesNotExist:
                artist = Artist(name=form.cleaned_data['artist_name'])
                artist.save()
song = Song(name=form.cleaned_data['song_name'], lyrics = form.cleaned_data['lyrics'], artist=artist,
number_of_words= data['total_words'], number_unique_words=data['unique_words'],
unique_word_percent=data['percentage'], repeated_rhymes=data['repeated_rhymes'],
bad_words=data['bad_words'], thug_rating=data['thug_rating'], avg_syllables=data['thug_rating'])
song.save()
return redirect("/")
form = SongForm()
return render(request, 'lyrics/new_song.html', { 'form': form })
|
{
"content_hash": "b9cd08f742e750903a501f22e30c6799",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 120,
"avg_line_length": 43.78125,
"alnum_prop": 0.6174161313347609,
"repo_name": "stuntman723/rap-analyzer",
"id": "4b2eb3f08275f1e793908918bea658662a303bff",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lyrics/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "HTML",
"bytes": "73763"
},
{
"name": "JavaScript",
"bytes": "104857"
},
{
"name": "Python",
"bytes": "6204255"
},
{
"name": "Shell",
"bytes": "3807"
}
],
"symlink_target": ""
}
|
"""
Manage manipulations such as swapping reaction cofactors, over-express or down-regulate genes and reactions.
"""
from cobra import Reaction
def increase_flux(reaction, ref_value, value):
"""
lb 0 ub
|--------------------------- ' ---------------------------|
<- - -|----------'
'----------|- - - ->
Parameters
----------
reaction: cobra.Reaction
The reaction to over-express.
ref_value: float
The flux value to come from.
value: float
The flux value to achieve.
"""
if abs(value) < abs(ref_value):
raise ValueError("'value' is lower than 'ref_value', this is increase_flux (%f < %f)" % (value, ref_value))
if value > 0:
reaction.lower_bound = value
elif value < 0:
reaction.upper_bound = value
else:
reaction.knock_out()
def decrease_flux(reaction, ref_value, value):
"""
lb 0 ub
|--------------------------- ' ---------------------------|
|- - >----------'
'----------<- - - -|
Parameters
----------
reaction: cobra.Reaction
The reaction to down_regulate.
ref_value: float
The flux value to come from.
value: float
The flux value to achieve.
"""
if abs(value) > abs(ref_value):
        raise ValueError("'value' is higher than 'ref_value', this is decrease_flux (%f > %f)" % (value, ref_value))
if value > 0:
reaction.upper_bound = value
elif value < 0:
reaction.lower_bound = value
else:
reaction.knock_out()
def reverse_flux(reaction, ref_value, value):
"""
Forces a reaction to have a minimum flux level in the opposite direction of a reference state.
lb 0 ub
|--------------------------- ' ---------------------------|
<----------'- - - - - - - ->
Parameters
----------
reaction: cobra.Reaction
The reaction that will be inverted.
ref_value: float
The flux value to come from.
value: float
The flux value to achieve.
"""
if (value >= 0) == (ref_value >= 0):
raise ValueError("'value' and 'ref_value' cannot have the same sign (%.5f, %.5f)" % (value, ref_value))
if value > 0:
reaction.upper_bound = value
elif value < 0:
reaction.lower_bound = value
else:
reaction.knock_out()
def swap_cofactors(reaction, model, swap_pairs, inplace=True):
"""
Swaps the cofactors of a reaction. For speed, it can be done inplace which just changes the coefficients.
If not done inplace, it will create a new Reaction, add it to the model, and knockout the original reaction.
Parameters
----------
reaction: cobra.Reaction
The reaction to swap.
model: cameo.cobra.Model
A constraint-based model.
swap_pairs: tuple
A tuple of (cofactors, equivalent_cofactors)
inplace: bool
If replace is done inplace, it changes the coefficients in the matrix. Otherwise, it creates a new reaction
with the other cofactors and adds it to the model.
Returns
-------
Reaction
A reaction with swapped cofactors (the same if inplace).
"""
if all(reaction.metabolites.get(met, False) for met in swap_pairs[0]):
new_coefficients = {met: -reaction.metabolites[met] for met in swap_pairs[0]}
new_coefficients.update({new_met: reaction.metabolites[met] for met, new_met in zip(*swap_pairs)})
elif all(reaction.metabolites.get(met, False) for met in swap_pairs[1]):
new_coefficients = {met: -reaction.metabolites[met] for met in swap_pairs[1]}
new_coefficients.update({new_met: reaction.metabolites[met] for new_met, met in zip(*swap_pairs)})
else:
raise ValueError("%s: Invalid swap pairs %s (%s)" % (reaction.id, str(swap_pairs), reaction.reaction))
def _inplace(rxn, stoichiometry):
rxn.add_metabolites(stoichiometry, combine=True)
def _replace(rxn, stoichiometry):
new_reaction = Reaction(id="%s_swap" % rxn.id, name=rxn.name,
lower_bound=rxn.lower_bound, upper_bound=rxn.upper_bound)
new_reaction.stoichiometry = rxn
new_reaction.add_metabolites(stoichiometry)
return new_reaction
if inplace:
_inplace(reaction, new_coefficients)
return reaction
else:
new_reaction = _replace(reaction, new_coefficients)
model.add_reactions([new_reaction])
reaction.knock_out()
return new_reaction
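# A minimal usage sketch. The model name and reaction ids are illustrative
# placeholders and only show the calling convention of the helpers above:
#
#   from cameo import load_model
#
#   model = load_model("iJO1366")
#   pgi = model.reactions.get_by_id("PGI")
#   increase_flux(pgi, ref_value=2.5, value=5.0)    # force at least 5 flux units forward
#   decrease_flux(model.reactions.get_by_id("PFK"), ref_value=8.0, value=1.0)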
|
{
"content_hash": "3d805e1c14c05027da6517fa2dc6e4fc",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 116,
"avg_line_length": 33.35664335664335,
"alnum_prop": 0.5526205450733752,
"repo_name": "biosustain/cameo",
"id": "b860dc63e4b4ffa5115caf42b0b69aa35a0f9f62",
"size": "5389",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "cameo/core/manipulation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "847489"
},
{
"name": "Roff",
"bytes": "259"
},
{
"name": "Shell",
"bytes": "690"
}
],
"symlink_target": ""
}
|
from unittest import TestCase, main
from eventsource_parser import Event, parse
class EventSourceTests(TestCase):
def testParseData(self):
event, extra = parse("data:somedata\n\n")
self.assertEqual(event, Event(None, None, 'somedata'))
self.assertEqual(extra, '')
def testParseMultiLinedata(self):
event, extra = parse("data:some\ndata:data\n\n")
self.assertEqual(event, Event(None, None, 'some\ndata'))
self.assertEqual(extra, '')
def testParseType(self):
event, extra = parse("event: event-type\ndata: somedata\n\n")
self.assertEqual(event, Event(None, "event-type", "somedata"))
        self.assertEqual(extra, '')
def testParseId(self):
event, extra = parse("id: 1234\ndata: somedata\n\n")
self.assertEqual(event, Event("1234", None, "somedata"))
        self.assertEqual(extra, '')
def testParseMultipleEvents(self):
event, extra = parse("data: an event\n\ndata: another event\n\n")
self.assertEqual(event, Event(None, None, "an event"))
        self.assertEqual(extra, 'data: another event\n\n')
event, extra = parse(extra)
self.assertEqual(event, Event(None, None, "another event"))
        self.assertEqual(extra, '')
def testRetryParsing(self):
event, extra = parse("retry: 10\ndata: an event\n\ndata: last event\n\n")
self.assertEqual(event, Event(None, None, "an event"))
        self.assertEqual(extra, 'retry: 10\n\ndata: last event\n\n')
event, extra = parse(extra)
self.assertEqual(event, Event(None, 'retry', 10))
        self.assertEqual(extra, 'data: last event\n\n')
def testIncompleteEvent(self):
event, extra = parse("data: miss an empty line to dispatch\n")
self.assertEqual(event, None)
self.assertEqual(extra, "data: miss an empty line to dispatch\n")
def testEmptyFields(self):
event, extra = parse("nothing")
self.assertEqual(event, None)
self.assertEqual(extra, "nothing")
def testUnicode(self):
event, extra = parse(u"data: cet été il a fait beau à Lyon\n\n")
self.assertEqual(event, Event(None, None, u"cet été il a fait beau à Lyon"))
self.assertEqual(extra, u"")
def testEmptyEvent(self):
event, extra = parse(":comment\n\ndata: an event\n\n")
self.assertEqual(event, None)
self.assertEqual(extra, "data: an event\n\n")
if __name__ == '__main__':
    main()
|
{
"content_hash": "94b808319dbfefed8dddcda22a18d110",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 84,
"avg_line_length": 39,
"alnum_prop": 0.6357346357346357,
"repo_name": "tOkeshu/eventsource-parser",
"id": "eee84d9dadd60d76c2e45df3fe5d10ce1ca70969",
"size": "2481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4585"
}
],
"symlink_target": ""
}
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from django_powerbank.core.validators import MsisdnValidator
class MsisdnValidatorTests(TestCase):
def test_no_plus(self):
with self.assertRaises(ValidationError):
MsisdnValidator()("48 601 123 123")
with self.assertRaises(ValidationError):
MsisdnValidator()("48601123123")
def test_pl(self):
MsisdnValidator()("+48 601 123 123")
MsisdnValidator()("+48601123123")
MsisdnValidator()("+48 22 751 12 12")
MsisdnValidator()("+48 801 123 123")
MsisdnValidator()("+48 22 234 7497")
def test_usa(self):
MsisdnValidator()("+1 310 997 62 24")
|
{
"content_hash": "a7b2132afda570e82886d0c090c50c7f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 32.86363636363637,
"alnum_prop": 0.6666666666666666,
"repo_name": "wooyek/django-powerbank",
"id": "d833b46e70451b48beec6f51a2ee5aee1c0d8fb6",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/forms_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5257"
},
{
"name": "Makefile",
"bytes": "3746"
},
{
"name": "Python",
"bytes": "65553"
},
{
"name": "Shell",
"bytes": "1936"
}
],
"symlink_target": ""
}
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
from __future__ import unicode_literals
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import iri_to_uri, force_bytes, force_text
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return '%s=%s' % (t[0], t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict(dictitem(el) for el in cc)
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join(dictvalue(el) for el in cc.items())
response['Cache-Control'] = cc
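# A quick sketch of the transformation described in the docstring above: given a
# response whose header is already "Cache-Control: max-age=7200", the call
#
#   patch_cache_control(response, max_age=3600, no_transform=True, public=True)
#
# would produce something like "max-age=3600, no-transform, public" -- the lower
# of the two max-age values wins, underscores become hyphens, and True-valued
# flags are emitted without a value (item order may vary).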
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict(_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control']))
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def _set_response_etag(response):
if not response.streaming:
response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
return response
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(_set_response_etag)
else:
response = _set_response_etag(response)
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set(header.lower() for header in vary_headers)
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
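# A quick usage sketch for patch_vary_headers (render_compressed_or_plain is a
# hypothetical helper): a view that serves compressed or uncompressed content
# depending on the client would add "Accept-Encoding" to Vary so caches key on it.
#
#   def my_view(request):
#       response = render_compressed_or_plain(request)
#       patch_vary_headers(response, ('Accept-Encoding',))
#       return response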
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set(header.lower() for header in vary_headers)
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, adds the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
# The datetime module doesn't restrict the output of tzname().
        # Windows is known to use non-standard, locale-dependent names.
# User-defined tzinfo classes may return absolutely anything.
# Hence this paranoid conversion to create a valid cache key.
tz_name = force_text(get_current_timezone_name(), errors='ignore')
cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(force_bytes(value))
path = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
# If i18n or l10n are used, the generated cache key will be suffixed
# with the current locale. Adding the raw value of Accept-Language is
# redundant in that case and would result in storing the same content
# under multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response['Vary']):
header = header.upper().replace('-', '_')
if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
continue
headerlist.append('HTTP_' + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
    t = s.split('=', 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
{
"content_hash": "02530c30b899a49cf97b967c9423349c",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 96,
"avg_line_length": 41.996212121212125,
"alnum_prop": 0.6702444304139984,
"repo_name": "denisenkom/django",
"id": "7a77f911cedf93e744211e1ef7bf47a83893ab22",
"size": "11087",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/utils/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50381"
},
{
"name": "JavaScript",
"bytes": "100648"
},
{
"name": "Python",
"bytes": "8801295"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
import maya.cmds as cmds
cmds.window()
cmds.columnLayout()
cmds.rowColumnLayout( numberOfColumns=1)
cmds.intSliderGrp( field=True, label='Radius',
min=1, max=10, step=1,
columnWidth3=[50, 50, 100],
columnAlign3=['left', 'left', 'left'],
adjustableColumn3=3)
cmds.intSliderGrp( field=True, label='Height',
min=1, max=100, value=3, step=1,
columnWidth3=[50, 50, 100],
columnAlign3=['left', 'left', 'left'],
adjustableColumn3=3)
cmds.rowColumnLayout( numberOfColumns=2,
columnSpacing=[(2, 77)],
columnWidth=[(2, 100)])
cmds.text(label='Users')
cmds.intSlider()
cmds.showWindow()
|
{
"content_hash": "4e97a343bc27c9a25afb1a401b9bfe1e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 31.52,
"alnum_prop": 0.5342639593908629,
"repo_name": "satishgoda/Maya",
"id": "e9930fcb14a39e4d74c48bb53413d24c89f37d12",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "concepts/ui/intSlider/intSliderAndGrp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "399766"
},
{
"name": "Python",
"bytes": "14689"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("pos", "0016_transaction_attended")]
operations = [
migrations.AlterField(
model_name="pospayment",
name="person",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="members.Person",
),
),
migrations.AlterField(
model_name="transaction",
name="creator",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="transaction",
name="person",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="members.Person",
),
),
]
|
{
"content_hash": "ee3c8368a5d09d938416fc70e3342fcb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 61,
"avg_line_length": 28.404761904761905,
"alnum_prop": 0.5205364626990779,
"repo_name": "ianastewart/cwltc-admin",
"id": "ebaa8c434ef63963eb4218463b840c026dcdbc1d",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pos/migrations/0017_auto_20180411_1744.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "945975"
},
{
"name": "Dockerfile",
"bytes": "882"
},
{
"name": "HTML",
"bytes": "526368"
},
{
"name": "JavaScript",
"bytes": "843481"
},
{
"name": "Python",
"bytes": "8389886"
},
{
"name": "Shell",
"bytes": "1023"
}
],
"symlink_target": ""
}
|
"""
WebPortfolio View
"""
from flask import redirect, request, url_for, session, abort
from webportfolio import (WebPortfolio, route, flash_error, flash_success,
flash_info, flash_data, get_flashed_data,
ModelError, ViewError)
from webportfolio.decorators import (render_as_json, render_as_xml,
nav_menu, with_user_roles, login_required,
no_login_required, extends)
from webportfolio.ext import (mailer, cache, storage, recaptcha, csrf)
from webportfolio.packages import contact_page, user, cms
from application import model
# ------------------------------------------------------------------------------
# /
# This is the entry point of the site
# All root based (/) endpoint could be placed in here
#
# It extends the contact_page module, to be accessed at '/contact'
#
@extends(contact_page.contact_page)
class Index(WebPortfolio):
route_base = "/"
@nav_menu("Home", order=1)
def index(self):
self.meta_(title="Hello WebPortfolio!")
return {}
# ------------------------------------------------------------------------------
# /admin
# This is the admin view
# It extends the 'cms.admin' module to manage posts
# It extends the 'user.admin' module to manage users
#
@nav_menu("Admin", group="admin")
@extends(cms.admin, model=model)
@extends(user.admin, model=model)
class Admin(WebPortfolio):
LAYOUT = "admin-layout.html"
route_base = "admin"
@nav_menu("Home")
def index(self):
self.meta_(title="My Admin Home Page")
return {}
@nav_menu("Page 2")
def page_2(self):
self.meta_(title="My 2nd Page")
return {}
# ------------------------------------------------------------------------------
# /account
# This is a User Account section
# It extends the 'user.account' module, which automatically requires the
# endpoint to be authenticated
# If you don't want an endpoint to be authenticated, just add the decorator:
# '@no_login_required'
#
@nav_menu("My Account", group="account", order=3, align_right=True)
@extends(user.account, model=model)
class Account(WebPortfolio):
@nav_menu("My Account", order=1)
def index(self):
self.meta_(title="My Account")
return {}
@nav_menu("Upload Image Demo", order=2)
@route("upload", methods=["GET", "POST"])
def upload(self):
self.meta_(title="Upload Demo")
if request.method == "POST":
try:
_file = request.files.get('file')
if _file:
my_object = storage.upload(_file,
prefix="demo/",
public=True,
allowed_extensions=["gif", "jpg", "jpeg", "png"])
if my_object:
return redirect(url_for("Account:upload", object_name=my_object.name))
except Exception as e:
flash_error(e.message)
return redirect(url_for("Account:upload"))
my_object = None
object_name = request.args.get("object_name")
if object_name:
my_object = storage.get(object_name=object_name)
return dict(my_object=my_object)
@nav_menu("No Login", order=3)
@no_login_required
def no_login(self):
self.meta_(title="No Login")
return {}
# ------------------------------------------------------------------------------
# /blog
# This is a blog endpoint
# It extends the 'cms.post' module to fetch posts with 'blog' category
#
@extends(cms.post,
model=model,
query={
"types": ["blog"],
"order_by": "published_at desc",
},
endpoints={
"index": {"menu": "Blog", "endpoint": "/", "per_page": 10,
"post_title": "My Blog",
"post_header": "My Blog",
"post_subheader": ""},
"single": {"menu": "Read", "endpoint": "/:slug"},
"authors": {"menu": "Authors", "endpoint": "authors", "show_menu": False},
"archive": {"menu": "Archive", "endpoint": "archive", "show_menu": False},
})
class Blog(WebPortfolio):
pass
# ------------------------------------------------------------------------------
# /page
# This is a page endpoint
# It extends the 'cms.post' module to fetch posts with 'page' category
#
@extends(cms.post,
model=model,
query={
"types": ["page"],
"order_by": "title asc"
},
endpoints={
"index": {"menu": "Pages",
"endpoint": "/",
"post_title": "Pages",
"post_show_byline": False},
"single": {"menu": "Documents", "endpoint": "/:slug", "post_show_byline": False},
"archive": {"menu": "Archive", "endpoint": "archive", "show_menu": False},
"authors": {"menu": "Authors", "endpoint": "authors", "show_menu": False}
})
class Page(WebPortfolio):
pass
# ------------------------------------------------------------------------------
# /docs
# This is a document endpoint
# It extends the 'cms.post' module to fetch posts with 'document' category
#
@extends(cms.post,
model=model,
query={
"types": ["document"],
"order_by": "title asc"
},
endpoints={
"index": {"menu": "Documents",
"endpoint": "/",
"post_title": "Documents",
"post_show_byline": False},
"single": {"menu": "Documents", "endpoint": "/:slug", "post_show_byline": False},
"archive": {"menu": "Archive", "endpoint": "archive", "show_menu": False},
"authors": {"menu": "Authors", "endpoint": "authors", "show_menu": False}
})
class Document(WebPortfolio):
route_base = "docs"
|
{
"content_hash": "fdcf47844eed927690f01cf454b96fe6",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 96,
"avg_line_length": 34.502857142857145,
"alnum_prop": 0.49619079165286517,
"repo_name": "mardix/webportfolio",
"id": "a08b6649cde9df93aa2c7c67b41caf79c23be672",
"size": "6038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webportfolio/app-skel/app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "232980"
},
{
"name": "HTML",
"bytes": "156586"
},
{
"name": "JavaScript",
"bytes": "221737"
},
{
"name": "Python",
"bytes": "224634"
},
{
"name": "Shell",
"bytes": "88"
}
],
"symlink_target": ""
}
|
from collections import deque
from .skip import skip
from .window import window
from .chain import chain
def split(pipe, splitter, skip_empty=False):
    ''' this function works a lot like groupby but splits on a given pattern,
        much the same way str.split does. if skip_empty is True,
        split only yields pieces that have contents
Example:
splitting 1011101010101
by 10
returns ,11,,,,1
Or if skip_empty is True
splitting 1011101010101
by 10
returns 11,1
'''
splitter = tuple(splitter)
len_splitter = len(splitter)
pipe=iter(pipe)
current = deque()
tmp = []
windowed = window(pipe, len(splitter))
for i in windowed:
if i == splitter:
skip(windowed, len(splitter)-1)
yield list(current)
current.clear()
tmp = []
else:
current.append(i[0])
tmp = i
if len(current) or len(tmp):
yield list(chain(current,tmp))
if __name__ == '__main__':
print(list(split(''.join(map(str,range(10))),'345')))
|
{
"content_hash": "758228e402ebe00ad8a5da883e5c02ae",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 26.88372093023256,
"alnum_prop": 0.5631487889273357,
"repo_name": "CodyKochmann/generators",
"id": "d6add0ad2ceb7473d2925153c5606290018da7d8",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generators/split.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85464"
}
],
"symlink_target": ""
}
|
import datetime
import decimal
import random
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.pool import NullPool
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing import provision
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
def test_basic_reflection(self, metadata, connection):
meta = metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all(connection)
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload_with=connection
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload_with=connection,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.combinations(
(mssql.XML, "XML"),
(mssql.IMAGE, "IMAGE"),
(mssql.MONEY, "MONEY"),
(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)"),
(mssql.FLOAT, "FLOAT(53)"),
(mssql.REAL, "REAL"),
# FLOAT(5) comes back as REAL
(mssql.FLOAT(5), "REAL"),
argnames="type_obj,ddl",
)
def test_assorted_types(self, metadata, connection, type_obj, ddl):
table = Table("type_test", metadata, Column("col1", type_obj))
table.create(connection)
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=connection)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_identity(self, metadata, connection):
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
with testing.expect_deprecated(
"The dialect options 'mssql_identity_start' and"
):
table.create(connection)
meta2 = MetaData()
table2 = Table("identity_test", meta2, autoload_with=connection)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], None)
eq_(
table2.c["col1"].dialect_options["mssql"]["identity_increment"],
None,
)
eq_(table2.c["col1"].identity.start, 2)
eq_(table2.c["col1"].identity.increment, 3)
def test_skip_types(self, connection):
connection.exec_driver_sql(
"create table foo (id integer primary key, data xml)"
)
with mock.patch.object(
connection.dialect, "ischema_names", {"int": mssql.INTEGER}
):
with testing.expect_warnings(
"Did not recognize type 'xml' of column 'data'"
):
eq_(
inspect(connection).get_columns("foo"),
[
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.INTEGER),
"nullable": False,
"default": None,
"autoincrement": False,
},
{
"name": "data",
"type": testing.eq_type_affinity(
sqltypes.NullType
),
"nullable": True,
"default": None,
"autoincrement": False,
},
],
)
def test_cross_schema_fk_pk_name_overlaps(self, metadata, connection):
# test for issue #4228
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
"options": {},
}
],
)
def test_table_name_that_is_greater_than_16_chars(
self, metadata, connection
):
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all(connection)
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=connection
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_temporary_table(self, metadata, connection, table_name, exists):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("txt", mssql.NVARCHAR(50)),
Column("dt2", mssql.DATETIME2),
)
tt.create(connection)
connection.execute(
tt.insert(),
[
{
"id": 1,
"txt": u"foo",
"dt2": datetime.datetime(2020, 1, 1, 1, 1, 1),
},
{
"id": 2,
"txt": u"bar",
"dt2": datetime.datetime(2020, 2, 2, 2, 2, 2),
},
],
)
if not exists:
with expect_raises(exc.NoSuchTableError):
Table(
table_name,
metadata,
autoload_with=connection,
)
else:
tmp_t = Table(table_name, metadata, autoload_with=connection)
result = connection.execute(
tmp_t.select().where(tmp_t.c.id == 2)
).fetchall()
eq_(
result,
[(2, "bar", datetime.datetime(2020, 2, 2, 2, 2, 2))],
)
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_has_table_temporary(
self, metadata, connection, table_name, exists
):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer),
)
tt.create(connection)
found_it = testing.db.dialect.has_table(connection, table_name)
eq_(found_it, exists)
def test_has_table_temp_not_present_but_another_session(self):
"""test #6910"""
with testing.db.connect() as c1, testing.db.connect() as c2:
try:
with c1.begin():
c1.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
assert not c2.dialect.has_table(
c2, "#myveryveryuniquetemptablename"
)
finally:
with c1.begin():
c1.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
def test_has_table_temp_temp_present_both_sessions(self):
"""test #7168, continues from #6910"""
with testing.db.connect() as c1, testing.db.connect() as c2:
try:
with c1.begin():
c1.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
with c2.begin():
c2.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
assert c2.dialect.has_table(
c2, "#myveryveryuniquetemptablename"
)
finally:
with c1.begin():
c1.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
with c2.begin():
c2.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
@testing.fixture
def temp_db_alt_collation_fixture(
self, connection_no_trans, testing_engine
):
temp_db_name = "%s_different_collation" % (
provision.FOLLOWER_IDENT or "default"
)
cnxn = connection_no_trans.execution_options(
isolation_level="AUTOCOMMIT"
)
cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name)
cnxn.exec_driver_sql(
"CREATE DATABASE %s COLLATE Danish_Norwegian_CI_AS" % temp_db_name
)
eng = testing_engine(
url=testing.db.url.set(database=temp_db_name),
options=dict(poolclass=NullPool, future=True),
)
yield eng
cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name)
def test_global_temp_different_collation(
self, temp_db_alt_collation_fixture
):
"""test #8035"""
tname = "##foo%s" % (random.randint(1, 1000000),)
with temp_db_alt_collation_fixture.connect() as conn:
conn.exec_driver_sql(
"CREATE TABLE %s (id int primary key)" % (tname,)
)
conn.commit()
eq_(
inspect(conn).get_columns(tname),
[
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.INTEGER),
"nullable": False,
"default": None,
"autoincrement": False,
}
],
)
Table(tname, MetaData(), autoload_with=conn)
def test_db_qualified_items(self, metadata, connection):
Table("foo", metadata, Column("id", Integer, primary_key=True))
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
)
metadata.create_all(connection)
dbname = connection.exec_driver_sql("select db_name()").scalar()
owner = connection.exec_driver_sql("SELECT user_name()").scalar()
referred_schema = "%(dbname)s.%(owner)s" % {
"dbname": dbname,
"owner": owner,
}
inspector = inspect(connection)
bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
eq_(
bar_via_db,
[
{
"referred_table": "foo",
"referred_columns": ["id"],
"referred_schema": referred_schema,
"name": "fkfoo",
"constrained_columns": ["foo_id"],
"options": {},
}
],
)
assert inspect(connection).has_table("bar", schema=referred_schema)
m2 = MetaData()
Table(
"bar",
m2,
schema=referred_schema,
autoload_with=connection,
)
eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
def test_fk_on_unique_index(self, metadata, connection):
# test for issue #7160
Table(
"uidx_parent",
metadata,
Column("id", Integer, primary_key=True),
Column("uidx_col1", Integer, nullable=False),
Column("uidx_col2", Integer, nullable=False),
Index(
"UIDX_composite",
"uidx_col1",
"uidx_col2",
unique=True,
),
)
Table(
"uidx_child",
metadata,
Column("id", Integer, primary_key=True),
Column("parent_uidx_col1", Integer, nullable=False),
Column("parent_uidx_col2", Integer, nullable=False),
ForeignKeyConstraint(
["parent_uidx_col1", "parent_uidx_col2"],
["uidx_parent.uidx_col1", "uidx_parent.uidx_col2"],
name="FK_uidx_parent",
),
)
metadata.create_all(connection)
inspector = inspect(connection)
fk_info = inspector.get_foreign_keys("uidx_child")
eq_(
fk_info,
[
{
"referred_table": "uidx_parent",
"referred_columns": ["uidx_col1", "uidx_col2"],
"referred_schema": None,
"name": "FK_uidx_parent",
"constrained_columns": [
"parent_uidx_col1",
"parent_uidx_col2",
],
"options": {},
}
],
)
def test_indexes_cols(self, metadata, connection):
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
def test_indexes_cols_with_commas(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
def test_indexes_cols_with_spaces(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
def test_indexes_with_filtered(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", types.String(20)),
Column("y", types.Integer),
)
Index("idx_x", t1.c.x, mssql_where=t1.c.x == "test")
Index("idx_y", t1.c.y, mssql_where=t1.c.y >= 5)
metadata.create_all(connection)
ind = testing.db.dialect.get_indexes(connection, "t", None)
filtered_indexes = []
for ix in ind:
if "dialect_options" in ix:
filtered_indexes.append(ix["dialect_options"]["mssql_where"])
eq_(sorted(filtered_indexes), ["([x]='test')", "([y]>=(5))"])
t2 = Table("t", MetaData(), autoload_with=connection)
idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
self.assert_compile(
CreateIndex(idx), "CREATE INDEX idx_x ON t (x) WHERE ([x]='test')"
)
def test_max_ident_in_varchar_not_present(self, metadata, connection):
"""test [ticket:3504].
Here we are testing not just that the "max" token comes back
as None, but also that these types accept "max" as the value
of "length" on construction, which isn't a directly documented
pattern however is likely in common use.
"""
Table(
"t",
metadata,
Column("t1", types.String),
Column("t2", types.Text("max")),
Column("t3", types.Text("max")),
Column("t4", types.LargeBinary("max")),
Column("t5", types.VARBINARY("max")),
)
metadata.create_all(connection)
for col in inspect(connection).get_columns("t"):
is_(col["type"].length, None)
in_("max", str(col["type"].compile(dialect=connection.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
@classmethod
def define_tables(cls, metadata):
col_num = 150
t = Table(
"base_table",
metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(col_num)
]
)
cls.view_str = (
view_str
) = "CREATE VIEW huge_named_view AS SELECT %s FROM base_table" % (
",".join("long_named_column_number_%d" % i for i in range(col_num))
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
def test_inspect_view_definition(self):
inspector = inspect(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
class OwnerPlusDBTest(fixtures.TestBase):
def test_default_schema_name_not_interpreted_as_tokenized(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2014_VERSION
mock_connection = mock.Mock(scalar=lambda sql: "Jonah.The.Whale")
schema_name = dialect._get_default_schema_name(mock_connection)
eq_(schema_name, "Jonah.The.Whale")
eq_(
base._owner_plus_db(dialect, schema_name),
(None, "Jonah.The.Whale"),
)
def test_owner_database_pairs_dont_use_for_same_db(self):
dialect = mssql.dialect()
identifier = "my_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[mock.call.exec_driver_sql("select db_name()")],
)
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
        )
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs_switch_for_different_db(self):
dialect = mssql.dialect()
identifier = "my_other_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
        eq_(
            mock_connection.mock_calls,
            [
                mock.call.exec_driver_sql("select db_name()"),
                mock.call.exec_driver_sql("use my_other_db"),
                mock.call.exec_driver_sql("use my_db"),
            ],
        )
        eq_(
            mock_connection.exec_driver_sql.return_value.mock_calls,
            [mock.call.scalar()],
        )
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
@testing.combinations(
("foo", None, "foo", "use foo"),
("foo.bar", "foo", "bar", "use foo"),
("Foo.Bar", "Foo", "Bar", "use [Foo]"),
("[Foo.Bar]", None, "Foo.Bar", "use [Foo.Bar]"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat", "use [Foo.Bar]"),
(
"[foo].]do something; select [foo",
"foo",
"do something; select foo",
"use foo",
),
(
"something; select [foo].bar",
"something; select foo",
"bar",
"use [something; select foo]",
),
(
"[abc].[def].[efg].[hij]",
"[abc].[def].[efg]",
"hij",
"use [abc].[def].[efg]",
),
("abc.def.efg.hij", "abc.def.efg", "hij", "use [abc.def.efg]"),
)
def test_owner_database_pairs(
self, identifier, expected_schema, expected_owner, use_stmt
):
dialect = mssql.dialect()
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(
scalar=mock.Mock(return_value="Some Database")
)
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
if schema is None:
eq_(mock_connection.mock_calls, [])
else:
eq_(
mock_connection.mock_calls,
[
mock.call.exec_driver_sql("select db_name()"),
mock.call.exec_driver_sql(use_stmt),
mock.call.exec_driver_sql("use [Some Database]"),
],
)
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
class IdentityReflectionTest(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
__requires__ = ("identity_columns",)
@classmethod
def define_tables(cls, metadata):
for i, col in enumerate(
[
Column(
"id1",
Integer,
Identity(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
),
Column("id2", Integer, Identity()),
Column("id3", sqltypes.BigInteger, Identity()),
Column("id4", sqltypes.SmallInteger, Identity()),
Column("id5", sqltypes.Numeric, Identity()),
]
):
Table("t%s" % i, metadata, col)
def test_reflect_identity(self, connection):
insp = inspect(connection)
cols = []
for t in self.tables_test_metadata.tables.keys():
cols.extend(insp.get_columns(t))
for col in cols:
is_true("dialect_options" not in col)
is_true("identity" in col)
if col["name"] == "id1":
eq_(col["identity"], {"start": 2, "increment": 3})
elif col["name"] == "id2":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), int)
eq_(type(col["identity"]["increment"]), int)
elif col["name"] == "id3":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), util.compat.long_type)
eq_(type(col["identity"]["increment"]), util.compat.long_type)
elif col["name"] == "id4":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), int)
eq_(type(col["identity"]["increment"]), int)
elif col["name"] == "id5":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), decimal.Decimal)
eq_(type(col["identity"]["increment"]), decimal.Decimal)
@testing.requires.views
def test_reflect_views(self, connection):
connection.exec_driver_sql("CREATE VIEW view1 AS SELECT * FROM t1")
insp = inspect(connection)
for col in insp.get_columns("view1"):
is_true("dialect_options" not in col)
is_true("identity" in col)
eq_(col["identity"], {})
|
{
"content_hash": "6eab1f16b1dbe8dafe6d65da40c1d91d",
"timestamp": "",
"source": "github",
"line_count": 861,
"max_line_length": 79,
"avg_line_length": 33.88501742160279,
"alnum_prop": 0.5031362467866324,
"repo_name": "j5int/sqlalchemy",
"id": "4c5a5398164ae46357ff104499a4d0c2a53be437",
"size": "29197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/dialect/mssql/test_reflection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "63151"
},
{
"name": "Python",
"bytes": "15339979"
}
],
"symlink_target": ""
}
|
__author__ = "Tom Goldenberg <thomasagoldenberg@gmail.com>"
__date__ = "$February 3, 2016"
import sys
from collections import defaultdict
# iterate over file lines
def corpus_iterator(corpus_file):
l = corpus_file.readline() # pop off a line of the file
while l: # while there are lines left
line = l.strip() # strip the whitespace from the line
if line: # if line is not null
fields = line.split(" ") #split string into an array
ne_tag = fields[-1] # set end word as GENE tag
word = " ".join(fields[:-1]) # actual word is preceding part of string
yield(word, ne_tag) #yield the string and its tag
else:
yield (None, None) # yield null if null
l = corpus_file.readline() # pop off another line from the file
class Replacer(object):
def __init__(self): # initialize class
self.counts = defaultdict(int) # counts = {} with default value set to zero
def word_count(self, corpus_file, output): # assign each word to a count in count object
iterator = corpus_iterator(corpus_file) # iterate through file lines
for word, ne_tag in iterator: # refer to word and tag
if word: # if word is not null
self.counts[word] += 1 # word count adds 1
for word in self.counts.keys(): # iterate through
if self.counts[word] > 4: # if word counts is greater than 4
del self.counts[word] # delete the word field from the object
print self.counts
def replace_rare(self, corpus_file, output):
iterator = corpus_iterator(corpus_file) # iterate through file lines
for word, ne_tag in iterator: # access word and tag
if word is None: # if word is null
                output.write('\n') # write an empty line
else:
if self.counts[word] > 0: # if count is greater than zero
output.write('%s %s\n' %('_RARE_', ne_tag)) # replace the word with _RARE_
else: # if count is zero
output.write('%s %s\n' %(word, ne_tag)) # write the word and tag
def usage():
print """
python replace_rare.py [input_file] > [output_file]
Read in a gene tagged training input file and
output its content with rare words replaced with _RARE_.
"""
if __name__ == "__main__":
if len(sys.argv) != 2: # command should have 2 arguments - input and output files
usage() # exit the program if no args
sys.exit(2)
    try:
        input1 = file(sys.argv[1], "r") # open the input file twice: one pass to count words, one to replace them
        input2 = file(sys.argv[1], "r")
    except IOError: # write error if cannot process files
        sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv[1])
        sys.exit(1)
replacer = Replacer() # initialize class Replacer with defaultdict self.counts
replacer.word_count(input1, sys.stdout) # create counter of word frequency
replacer.replace_rare(input2, sys.stdout) # replace rare words with _RARE_
|
{
"content_hash": "6e54cf090b5a608b86564b0aa59351cc",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 94,
"avg_line_length": 43.5,
"alnum_prop": 0.6137931034482759,
"repo_name": "tgoldenberg/nlp",
"id": "a48a207d0b2f5aa0da65154e6f6a5db0c42d3055",
"size": "3045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nlp_projects/h1-p/simple_tagger/replace_rare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "34049"
},
{
"name": "Python",
"bytes": "114857"
}
],
"symlink_target": ""
}
|
import os
import re
import cx_Oracle
import collections
import datetime
kneeDiagnosisCode = [5164,5165,5163,5162,5161,5256,5258,5257,5313,5314,5315,5055,5261,5260,5259,5262,5263,5264]
#Primary query, Look for all decisions where a claim has been processed already. Much of the filtering is based on the engineering notebook,
# the only add on is the prev_evaltn_ind which is a poor flag mechanism in Ratings.
#Because of historical copies being represented across all rating profiles grouping is required to clean up the data.
#Organize them based first by participant id, then profile date, disability id, begin date, then code and percent.
SQL="select rd.ptcpnt_vet_id, rd.prfil_dt, rd.begin_dt, rd.end_dt, rd.prmlgn_dt, rd.dgnstc_txt, rd.dsblty_id, rd.diagnosis_code, rd.hypntd_dgnstc_type_cd, rd.prcnt_nbr \
from AH4929_RATING_DECISION rd \
inner join KNEE_AGGREGATE_CONTENTION ac on ac.vet_id = rd.ptcpnt_vet_id \
where rd.begin_dt IS NOT NULL and rd.begin_dt < rd.prmlgn_dt and (rd.end_dt is NULL or rd.end_dt >= rd.prmlgn_dt) and rd.system_type_cd = 'C' \
and rd.dsblty_decn_type_cd = 'SVCCONNCTED' and (rd.prev_evaltn_ind IS NULL OR rd.prev_evaltn_ind = 'N') \
group by rd.begin_dt, rd.end_dt, rd.prmlgn_dt, rd.dgnstc_txt, rd.dsblty_id, rd.diagnosis_code, rd.hypntd_dgnstc_type_cd, rd.prcnt_nbr, rd.ptcpnt_vet_id, rd.prfil_dt \
order by ptcpnt_vet_id,prfil_dt,dsblty_id,begin_dt,diagnosis_code,prcnt_nbr"
class DecisionPercentage:
def __init__(self,code,percentage):
self.code = code
self.percentage = percentage
class AggregateDecision:
def __init__(self):
self.VET_ID = None
self.PROFILE_DATE = None
self.PROMULGATION_DATE = None
self.RECENT_KNEE_DATE = None
self.CDD = 0
self.KNEE_CDD = None
self.A5164 = 0
self.A5165 = 0
self.A5163 = 0
self.A5162 = 0
self.A5161 = 0
self.A5256 = 0
self.A5258 = 0
self.A5257 = 0
self.A5313 = 0
self.A5314 = 0
self.A5315 = 0
self.A5055 = 0
self.A5261 = 0
self.A5260 = 0
self.A5259 = 0
self.A5262 = 0
self.A5263 = 0
self.A5264 = 0
self.TXT_BILATERAL = 0
self.TXT_LEFT = 0
self.TXT_RIGHT = 0
self.TXT_KNEE = 0
self.TXT_IMPAIRMENT = 0
self.TXT_LIMITATION = 0
self.TXT_AMPUTATION = 0
self.TXT_ANKYLOSES = 0
def __str__(self):
from pprint import pprint
return str(vars(self))
class Decision:
def __init__(self, ptcpnt_vet_id, prfil_dt, begin_dt, end_dt, prmlgn_dt, dgnstc_txt, dsblty_id, diagnosis_code, hypntd_dgnstc_type_cd, prcnt_nbr):
self.ptcpnt_vet_id = ptcpnt_vet_id
self.prfil_dt = prfil_dt
self.begin_dt = begin_dt
self.end_dt = end_dt
self.prmlgn_dt = prmlgn_dt
if dgnstc_txt is None:
self.dgnstc_txt = ''
else:
self.dgnstc_txt = dgnstc_txt
self.dsblty_id = dsblty_id
if diagnosis_code == 'Unknown':
self.diagnosis_code = -99
else:
self.diagnosis_code = int(diagnosis_code)
self.hypntd_dgnstc_type_cd = hypntd_dgnstc_type_cd
if prcnt_nbr is None:
print(ptcpnt_vet_id)
self.prcnt_nbr = 0
else:
self.prcnt_nbr = int(prcnt_nbr)
def __str__(self):
from pprint import pprint
return str(vars(self))
print(str(datetime.datetime.now()))
connection = cx_Oracle.connect('developer/D3vVV0Rd@127.0.0.1:1521/DEV.BCDSS')
cursor = connection.cursor()
cursor.execute(SQL)
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.KNEE_AGGREGATE_DECISION (VET_ID, PROFILE_DATE, PROMULGATION_DATE, RECENT_KNEE_DATE, CDD, KNEE_CDD, A5164, A5165, A5163, A5162, A5161, A5256, A5258, A5257, A5313, A5314, A5315, A5055, A5261, A5260, A5259, A5262, A5263, A5264, TXT_BILATERAL,TXT_LEFT,TXT_RIGHT,TXT_KNEE,TXT_IMPAIRMENT,TXT_LIMITATION,TXT_AMPUTATION,TXT_ANKYLOSES) \
VALUES (:VET_ID, :PROFILE_DATE, :PROMULGATION_DATE, :RECENT_KNEE_DATE, :CDD, :KNEE_CDD, \
:A5164, :A5165, :A5163, :A5162, :A5161, :A5256, :A5258, :A5257, :A5313, :A5314, :A5315, :A5055, :A5261, :A5260, :A5259, :A5262, :A5263, :A5264, \
:TXT_BILATERAL,:TXT_LEFT,:TXT_RIGHT,:TXT_KNEE,:TXT_IMPAIRMENT,:TXT_LIMITATION,:TXT_AMPUTATION,:TXT_ANKYLOSES)')
aggregateDecision = None
currRatingProfile = -1
currParticipant = -1
counter = 0
hasMultipleDisabilityCodes = collections.Counter()
recentKneeBeginDate = collections.Counter()
totalCDD = 1
totalKneeCDD = 1
hasKneeCDD = False
for row in cursor:
if counter == 1000: #Commit every 1000 records. Improvement would be to look into aggregate inserts
connection.commit()
counter=0
    decision = Decision(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]) #Map loose fields into a Decision object. (Decision is a convenience object)
if currParticipant != decision.ptcpnt_vet_id or currRatingProfile != decision.prfil_dt: #Process insert statement and reset aggregation variables when profile date changes
if currRatingProfile != -1: #Skip if first time through
#Calculate the CDD values
for disabilityPercentage in multipleDisabilityCodes.values():
totalCDD *= (1 - (disabilityPercentage.percentage / 100))
if disabilityPercentage.code in kneeDiagnosisCode:
hasKneeCDD = True
totalKneeCDD *= (1 - (disabilityPercentage.percentage / 100))
aggregateDecision.CDD = 100 * (1 - totalCDD)
if hasKneeCDD:
aggregateDecision.KNEE_CDD = 100 * (1 - totalKneeCDD)
if aggregateDecision.KNEE_CDD > 60:
aggregateDecision.KNEE_CDD = 60
else:
aggregateDecision.KNEE_CDD = None
            if recentKneeBeginDate[currRatingProfile] == 0: #The Counter returns 0 when no knee begin date has been recorded for this profile
aggregateDecision.RECENT_KNEE_DATE = None
else:
aggregateDecision.RECENT_KNEE_DATE = recentKneeBeginDate[currRatingProfile]
writeCursor.execute(None, {'VET_ID' :aggregateDecision.VET_ID, 'PROFILE_DATE' :aggregateDecision.PROFILE_DATE, 'PROMULGATION_DATE' :aggregateDecision.PROMULGATION_DATE, 'RECENT_KNEE_DATE' :aggregateDecision.RECENT_KNEE_DATE, 'CDD' :aggregateDecision.CDD, 'KNEE_CDD' :aggregateDecision.KNEE_CDD,
'A5164' :aggregateDecision.A5164, 'A5165' :aggregateDecision.A5165, 'A5163' :aggregateDecision.A5163, 'A5162' :aggregateDecision.A5162, 'A5161' :aggregateDecision.A5161, 'A5256' :aggregateDecision.A5256, 'A5258' :aggregateDecision.A5258, 'A5257' :aggregateDecision.A5257, 'A5313' :aggregateDecision.A5313, 'A5314' :aggregateDecision.A5314, 'A5315' :aggregateDecision.A5315, 'A5055' :aggregateDecision.A5055, 'A5261' :aggregateDecision.A5261, 'A5260' :aggregateDecision.A5260, 'A5259' :aggregateDecision.A5259, 'A5262' :aggregateDecision.A5262, 'A5263' :aggregateDecision.A5263, 'A5264' :aggregateDecision.A5264,
'TXT_BILATERAL' :aggregateDecision.TXT_BILATERAL, 'TXT_LEFT' :aggregateDecision.TXT_LEFT, 'TXT_RIGHT' :aggregateDecision.TXT_RIGHT, 'TXT_KNEE' :aggregateDecision.TXT_KNEE, 'TXT_IMPAIRMENT' :aggregateDecision.TXT_IMPAIRMENT, 'TXT_LIMITATION' :aggregateDecision.TXT_LIMITATION, 'TXT_AMPUTATION' :aggregateDecision.TXT_AMPUTATION, 'TXT_ANKYLOSES' :aggregateDecision.TXT_ANKYLOSES})
counter += 1
#Reset the counters
totalCDD = 1
totalKneeCDD = 1
hasKneeCDD = False
currRatingProfile = decision.prfil_dt
currParticipant = decision.ptcpnt_vet_id
multipleDisabilityCodes = collections.Counter()
recentKneeBeginDate = collections.Counter()
#Capture all rating profile level items that do not change per contention
aggregateDecision = AggregateDecision()
aggregateDecision.VET_ID = decision.ptcpnt_vet_id
aggregateDecision.PROFILE_DATE = currRatingProfile
aggregateDecision.PROMULGATION_DATE = decision.prmlgn_dt
#Since we are ordering by disability id then begin_dt we choose the most recent percent number
multipleDisabilityCodes[decision.dsblty_id] = DecisionPercentage(decision.diagnosis_code, decision.prcnt_nbr)
    if decision.diagnosis_code in kneeDiagnosisCode: #Is the diagnosis a knee condition?
if recentKneeBeginDate[currRatingProfile] == 0 or decision.begin_dt > recentKneeBeginDate[currRatingProfile]: #Is the date container empty, or is the knee decision date more recent?
recentKneeBeginDate[currRatingProfile] = decision.begin_dt #Set it
    #Use regex to look for a hit in the diagnostic text and increment the matching counter for each hit
try:
if re.search("BilaterAL",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_BILATERAL += 1
if re.search("Left",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_LEFT += 1
if re.search("Right",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_RIGHT += 1
if re.search("Knee",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_KNEE += 1
if re.search("Impairment",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_IMPAIRMENT += 1
if re.search("Limitation",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_LIMITATION += 1
if re.search("Amputation",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_AMPUTATION += 1
if re.search("Ankyloses",decision.dgnstc_txt,re.IGNORECASE):
aggregateDecision.TXT_ANKYLOSES += 1
except TypeError:
print(decision)
    #Test each diagnosis code and increment the matching counter
if decision.diagnosis_code == 5164:
aggregateDecision.A5164 += 1
if decision.diagnosis_code == 5165:
aggregateDecision.A5165 += 1
if decision.diagnosis_code == 5163:
aggregateDecision.A5163 += 1
if decision.diagnosis_code == 5162:
aggregateDecision.A5162 += 1
if decision.diagnosis_code == 5161:
aggregateDecision.A5161 += 1
if decision.diagnosis_code == 5256:
aggregateDecision.A5256 += 1
if decision.diagnosis_code == 5258:
aggregateDecision.A5258 += 1
if decision.diagnosis_code == 5257:
aggregateDecision.A5257 += 1
if decision.diagnosis_code == 5313:
aggregateDecision.A5313 += 1
if decision.diagnosis_code == 5314:
aggregateDecision.A5314 += 1
if decision.diagnosis_code == 5315:
aggregateDecision.A5315 += 1
if decision.diagnosis_code == 5055:
aggregateDecision.A5055 += 1
if decision.diagnosis_code == 5261:
aggregateDecision.A5261 += 1
if decision.diagnosis_code == 5260:
aggregateDecision.A5260 += 1
if decision.diagnosis_code == 5259:
aggregateDecision.A5259 += 1
if decision.diagnosis_code == 5262:
aggregateDecision.A5262 += 1
if decision.diagnosis_code == 5263:
aggregateDecision.A5263 += 1
if decision.diagnosis_code == 5264:
aggregateDecision.A5264 += 1
#A bit strange looking, but due to Python's indentation approach this occurs after the for loop in order to capture the last claim.
for disabilityPercentage in multipleDisabilityCodes.values():
#Calculate the CDD values
totalCDD *= (1 - (disabilityPercentage.percentage / 100))
if disabilityPercentage.code in kneeDiagnosisCode:
hasKneeCDD = True
totalKneeCDD *= (1 - (disabilityPercentage.percentage / 100))
aggregateDecision.CDD = 100 * (1 - totalCDD)
if hasKneeCDD:
aggregateDecision.KNEE_CDD = 100 * (1 - totalKneeCDD)
if aggregateDecision.KNEE_CDD > 60:
aggregateDecision.KNEE_CDD = 60
else:
aggregateDecision.KNEE_CDD = None
if recentKneeBeginDate[currRatingProfile] == 0:
aggregateDecision.RECENT_KNEE_DATE = None
else:
aggregateDecision.RECENT_KNEE_DATE = recentKneeBeginDate[currRatingProfile]
writeCursor.execute(None, {'VET_ID' :aggregateDecision.VET_ID, 'PROFILE_DATE' :aggregateDecision.PROFILE_DATE, 'PROMULGATION_DATE' :aggregateDecision.PROMULGATION_DATE, 'RECENT_KNEE_DATE' :aggregateDecision.RECENT_KNEE_DATE, 'CDD' :aggregateDecision.CDD, 'KNEE_CDD' :aggregateDecision.KNEE_CDD,
'A5164' :aggregateDecision.A5164, 'A5165' :aggregateDecision.A5165, 'A5163' :aggregateDecision.A5163, 'A5162' :aggregateDecision.A5162, 'A5161' :aggregateDecision.A5161, 'A5256' :aggregateDecision.A5256, 'A5258' :aggregateDecision.A5258, 'A5257' :aggregateDecision.A5257, 'A5313' :aggregateDecision.A5313, 'A5314' :aggregateDecision.A5314, 'A5315' :aggregateDecision.A5315, 'A5055' :aggregateDecision.A5055, 'A5261' :aggregateDecision.A5261, 'A5260' :aggregateDecision.A5260, 'A5259' :aggregateDecision.A5259, 'A5262' :aggregateDecision.A5262, 'A5263' :aggregateDecision.A5263, 'A5264' :aggregateDecision.A5264,
'TXT_BILATERAL' :aggregateDecision.TXT_BILATERAL, 'TXT_LEFT' :aggregateDecision.TXT_LEFT, 'TXT_RIGHT' :aggregateDecision.TXT_RIGHT, 'TXT_KNEE' :aggregateDecision.TXT_KNEE, 'TXT_IMPAIRMENT' :aggregateDecision.TXT_IMPAIRMENT, 'TXT_LIMITATION' :aggregateDecision.TXT_LIMITATION, 'TXT_AMPUTATION' :aggregateDecision.TXT_AMPUTATION, 'TXT_ANKYLOSES' :aggregateDecision.TXT_ANKYLOSES})
connection.commit()
print(str(datetime.datetime.now()))
writeCursor.close()
cursor.close()
connection.close()
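#A hedged sketch of the "aggregate inserts" improvement hinted at in the commit
#comment inside the main loop above. Rather than executing one INSERT per rating
#profile, bind dictionaries could be buffered and flushed with cx_Oracle's
#executemany(), which sends the whole batch in a single round trip. The names
#flush_batch, pending_rows and BATCH_SIZE are illustrative only and are not used
#by the script above.
def flush_batch(write_cursor, pending_rows, conn):
    #Send any buffered rows with executemany() and commit. Passing None reuses
    #the statement previously prepared on the cursor, exactly like the
    #single-row writeCursor.execute(None, {...}) calls above.
    if pending_rows:
        write_cursor.executemany(None, pending_rows)
        conn.commit()
        del pending_rows[:]
#Inside the loop this would look roughly like:
#    pending_rows.append(bind_dict)              #instead of writeCursor.execute(...)
#    if len(pending_rows) >= BATCH_SIZE:         #e.g. BATCH_SIZE = 1000
#        flush_batch(writeCursor, pending_rows, connection)
#with one final flush_batch(...) after the loop, before closing the cursors.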
|
{
"content_hash": "ebcea0259fdc01e14de909d0a434a745",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 628,
"avg_line_length": 46.951492537313435,
"alnum_prop": 0.7459270444250179,
"repo_name": "pgrazaitis/BCDS",
"id": "c29890e24485c6a2542ff92782ba4d7579d38724",
"size": "12583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Model/scripts/knee/python/aggregateDecision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "Batchfile",
"bytes": "616"
},
{
"name": "CSS",
"bytes": "9525"
},
{
"name": "HTML",
"bytes": "83942"
},
{
"name": "Java",
"bytes": "228518"
},
{
"name": "JavaScript",
"bytes": "118949"
},
{
"name": "Python",
"bytes": "68806"
}
],
"symlink_target": ""
}
|
"""This code example gets all premium rates belonging to a specific rate card.
To create premium rates, run create_premium_rates.py.
Tags: PremiumRateService.getPremiumRatesByStatement
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
RATE_CARD_ID = 'INSERT_RATE_CARD_ID_HERE'
def main(client, rate_card_id):
# Initialize appropriate service.
premium_rate_service = client.GetService('PremiumRateService',
version='v201505')
  # Create statement object to select premium rates for a single rate card by its ID.
values = [{
'key': 'rateCardId',
'value': {
'xsi_type': 'NumberValue',
'value': rate_card_id
}
}]
query = 'WHERE rateCardId = :rateCardId ORDER BY id ASC'
statement = dfp.FilterStatement(query, values)
# Get premium rates by statement.
while True:
response = premium_rate_service.getPremiumRatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for premium_rate in response['results']:
print ('Premium rate with ID \'%s\' of type \'%s\' assigned to '
' rate card with ID \'%s\' was found.\n' % (
premium_rate['id'],
dfp.DfpClassType(premium_rate['premiumFeature']),
premium_rate['rateCardId']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, RATE_CARD_ID)
|
{
"content_hash": "96fb1d8efc95a0a636680ef4c5328107",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 31.14814814814815,
"alnum_prop": 0.6355529131985731,
"repo_name": "ya7lelkom/googleads-python-lib",
"id": "5fc8fcdffd9fd179e93fd6d9ae9abbb0cc06aa10",
"size": "2300",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/dfp/v201505/premium_rate_service/get_premium_rates_for_a_rate_card.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535232"
}
],
"symlink_target": ""
}
|
import os
from flask.ext.script import Manager, Server
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script.commands import ShowUrls, Clean
from web import create_app
from web.models import db, User
# default to dev config because no one should use this in
# production anyway
env = os.environ.get('APPNAME_ENV', 'dev')
app = create_app('web.settings.%sConfig' % env.capitalize())
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("server", Server())
manager.add_command("show-urls", ShowUrls())
manager.add_command("clean", Clean())
manager.add_command("db", MigrateCommand)
@manager.shell
def make_shell_context():
""" Creates a python REPL with several default imports
in the context of the app
"""
return dict(app=app, db=db, User=User)
@manager.command
def createdb():
""" Creates a database with all of the tables defined in
your SQLAlchemy models
"""
db.create_all()
if __name__ == "__main__":
manager.run()
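# Typical invocations for the commands registered above (a usage sketch; the
# command names come from the add_command()/@manager registrations in this file,
# while the surrounding shell environment is assumed):
#
#   APPNAME_ENV=dev python manage.py createdb    # create tables for all models
#   python manage.py server                      # run the development server
#   python manage.py shell                       # REPL preloaded with app, db and User
#   python manage.py db migrate                  # Flask-Migrate / Alembic commands
#   python manage.py show-urls                   # list the registered routes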
|
{
"content_hash": "0d3f8bfbbbb5f3d39765cc690d4801db",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 60,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.7062314540059347,
"repo_name": "emrahayanoglu/Flask-Skeleton",
"id": "45154721625c208048b9f06c42d3137b09c1e30d",
"size": "1034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "15317"
}
],
"symlink_target": ""
}
|
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle_noargs(self, **options):
from social_graph.api import Graph
graph = Graph()
graph.clear_cache()
|
{
"content_hash": "ad88d1554bd7c84365b3762aace734ab",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 53,
"avg_line_length": 27.5,
"alnum_prop": 0.6909090909090909,
"repo_name": "dgvicente/django-social-network",
"id": "8a7daf236487d4ce5ddb3599f356c41c82c62d47",
"size": "235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "social_network/management/commands/clearcache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "64925"
},
{
"name": "JavaScript",
"bytes": "4643"
},
{
"name": "Python",
"bytes": "154919"
}
],
"symlink_target": ""
}
|
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from employ.exceptions import ExecutionError
class Manager(object):
"""
Base Manager class that all Manager plugins must inherit from.
"""
name = "manager"
@classmethod
def from_config(cls, config):
"""
Helper classmethod used to create an instance of :class:`employ.managers.Manager`
from the provided `config`
:param config: the config to get the settings from
:type config: :class:`ConfigParser.RawConfigParser`
:returns: :class:`employ.managers.Manager`
"""
settings = {}
if config.has_section(cls.name):
settings = dict(config.items(cls.name))
return cls(**settings)
def setup_instances(self):
"""
Method called to setup the required instances.
All children must implement this method.
The result of this method should be that all the required
instances are created/connected to.
"""
raise NotImplementedError()
def __enter__(self):
"""
Used to call :func:`setup_instances` when using in a context manager
with manager:
# instances are connected to
"""
self.setup_instances()
def cleanup_instances(self):
"""
Method called to destroy/disconnect from instances.
All children must implement this method.
The result of this method should be that all instances are
        disconnected or destroyed.
"""
raise NotImplementedError()
def __exit__(self, type, value, traceback):
"""
Used to call :func:`cleanup_instances` when using in a context manager
with manager:
# instances are available
# instances are destroyed
"""
self.cleanup_instances()
def setup(self, script):
"""
Execute `script` on all instances.
All children must implement this method.
:param script: filename of a local script to run on each instance
:type script: str
"""
raise NotImplementedError()
def run(self, command):
"""
Execute `command` on all instances.
All children must implement this method.
        This method should execute `command.command()` on all instances
as well as sending the results of all instances to `command.aggregate`
The results will be in the following format::
[(status, stdout, stderr), ...]
:param command: the command to run on the instances
:type command: :class:`employ.commands.Command`
"""
raise NotImplementedError()
def validate_results(self, results, command):
"""
Helper method to validate the results of running commands.
:param results: the (status, stdout, stderr) results from running `command`
:type results: list
:param command: the raw str command that was run
:type command: str
        :raises: :class:`employ.exceptions.ExecutionError`
"""
for status, stdout, stderr in results:
if status != 0:
raise ExecutionError(
"Non-Zero status code from executing command: %s" % command,
command, status, stdout, stderr,
)
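# A hedged sketch of a minimal Manager subclass, illustrating the contract the
# docstrings above describe: setup_instances/cleanup_instances driven by the
# context manager, run() handing (status, stdout, stderr) tuples to
# command.aggregate, and validate_results() for error checking. LocalManager is
# illustrative only and is not part of this package.
class LocalManager(Manager):
    """Toy manager that pretends to run commands on a single local instance."""
    name = "local"

    def setup_instances(self):
        # Nothing to provision locally; a real manager would create or connect
        # to its instances here.
        self.instances = ["localhost"]

    def cleanup_instances(self):
        # A real manager would terminate or disconnect from its instances here.
        self.instances = []

    def setup(self, script):
        # A real manager would copy and execute `script` on every instance.
        pass

    def run(self, command):
        # Pretend every instance returned success with empty output, then hand
        # the (status, stdout, stderr) tuples to the command for aggregation.
        results = [(0, "", "") for _ in self.instances]
        self.validate_results(results, command.command())
        command.aggregate(results)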
|
{
"content_hash": "435b2f803819c60881d4a0379fa10b33",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 89,
"avg_line_length": 29.911504424778762,
"alnum_prop": 0.6032544378698225,
"repo_name": "brettlangdon/employ",
"id": "98c3194a1affb412718689265c3f10b4b7e72778",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "employ/managers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29652"
}
],
"symlink_target": ""
}
|
"""First-order predictor algorithm."""
import copy
from collections.abc import Iterable
from .cram import deplete
from ..results import Results
def predictor(operator, timesteps, power=None, power_density=None,
print_out=True):
r"""Deplete using a first-order predictor algorithm.
Implements the first-order predictor algorithm. This algorithm is
mathematically defined as:
.. math::
y' &= A(y, t) y(t)
A_p &= A(y_n, t_n)
y_{n+1} &= \text{expm}(A_p h) y_n
Parameters
----------
operator : openmc.deplete.TransportOperator
The operator object to simulate on.
timesteps : iterable of float
Array of timesteps in units of [s]. Note that values are not cumulative.
power : float or iterable of float, optional
Power of the reactor in [W]. A single value indicates that the power is
constant over all timesteps. An iterable indicates potentially different
power levels for each timestep. For a 2D problem, the power can be given
in [W/cm] as long as the "volume" assigned to a depletion material is
actually an area in [cm^2]. Either `power` or `power_density` must be
specified.
power_density : float or iterable of float, optional
Power density of the reactor in [W/gHM]. It is multiplied by initial
        heavy metal inventory to get total power if `power` is not specified.
print_out : bool, optional
Whether or not to print out time.
"""
if power is None:
if power_density is None:
raise ValueError(
"Neither power nor power density was specified.")
if not isinstance(power_density, Iterable):
power = power_density*operator.heavy_metal
else:
power = [i*operator.heavy_metal for i in power_density]
if not isinstance(power, Iterable):
power = [power]*len(timesteps)
# Generate initial conditions
with operator as vec:
# Initialize time and starting index
if operator.prev_res is None:
t = 0.0
i_res = 0
else:
t = operator.prev_res[-1].time[-1]
i_res = len(operator.prev_res) - 1
chain = operator.chain
for i, (dt, p) in enumerate(zip(timesteps, power)):
# Get beginning-of-timestep concentrations and reaction rates
# Avoid doing first transport run if already done in previous
# calculation
if i > 0 or operator.prev_res is None:
x = [copy.deepcopy(vec)]
op_results = [operator(x[0], p)]
# Create results, write to disk
Results.save(operator, x, op_results, [t, t + dt], p, i_res + i)
else:
# Get initial concentration
x = [operator.prev_res[-1].data[0]]
# Get rates
op_results = [operator.prev_res[-1]]
op_results[0].rates = op_results[0].rates[0]
# Scale reaction rates by ratio of powers
power_res = operator.prev_res[-1].power
ratio_power = p / power_res
op_results[0].rates *= ratio_power[0]
# Deplete for full timestep
x_end = deplete(chain, x[0], op_results[0].rates, dt, print_out)
# Advance time, update vector
t += dt
vec = copy.deepcopy(x_end)
# Perform one last simulation
x = [copy.deepcopy(vec)]
op_results = [operator(x[0], power[-1])]
# Create results, write to disk
Results.save(operator, x, op_results, [t, t], p, i_res + len(timesteps))
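# A hedged, self-contained sketch of the single predictor step described in the
# docstring above: y_{n+1} = expm(A_p * h) y_n for a constant toy matrix. It
# uses scipy.linalg.expm directly instead of this package's CRAM-based deplete()
# and is illustrative only -- the 2x2 matrix, vector and step size are made up.
def _predictor_step_demo():
    import numpy as np
    from scipy.linalg import expm

    A = np.array([[-1.0, 0.0],
                  [1.0, -0.5]])   # toy depletion-like matrix (decay/production)
    y0 = np.array([1.0, 0.0])     # beginning-of-step nuclide vector
    h = 0.1                       # timestep length [s]

    return expm(A * h) @ y0       # first-order predictor update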
|
{
"content_hash": "7629e955de4ba02cfeb74fb1ff475b79",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 80,
"avg_line_length": 35.94174757281554,
"alnum_prop": 0.5869800108049703,
"repo_name": "wbinventor/openmc",
"id": "f670a125ee2c5e7be2e17dfc270fccd15230b08e",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "openmc/deplete/integrator/predictor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9675"
},
{
"name": "C++",
"bytes": "1289928"
},
{
"name": "CMake",
"bytes": "11264"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Python",
"bytes": "2653785"
},
{
"name": "Shell",
"bytes": "2519"
}
],
"symlink_target": ""
}
|
import urlparse
from .harvestor import Harvestor, API_VERSION, TRELLO_API_NETLOC, TRELLO_API_SCHEME
def test_url_composing():
token = "123"
api_key = "456"
endpoint = "asdqwe"
h = Harvestor(token, api_key=api_key)
url = h.url(endpoint)
split = urlparse.urlsplit(url)
assert split[0] == TRELLO_API_SCHEME
assert split[1] == TRELLO_API_NETLOC
assert split[2] == "/" + API_VERSION + "/" + endpoint
assert urlparse.parse_qs(split[3]) == {"token": [token], "key": [api_key]}
|
{
"content_hash": "40298420569c2ad900d71643db4e483f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 83,
"avg_line_length": 32.0625,
"alnum_prop": 0.6393762183235867,
"repo_name": "TomasTomecek/trello-reporter",
"id": "5be51bfd560ff31d98f829c0cefdcac25931f801",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trello_reporter/harvesting/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "30644"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "129269"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
}
|
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SimplePage(page_module.Page):
def __init__(self, url, page_set, credentials='', name=''):
super(SimplePage, self).__init__(
url, page_set=page_set, name=name,
credentials_path='data/credentials.json')
self.credentials = credentials
def RunPageInteractions(self, action_runner):
pass
class Google(SimplePage):
def __init__(self, page_set):
super(Google, self).__init__(
url='https://www.google.com/#hl=en&q=barack+obama', page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(Google, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Next')
class Gmail(SimplePage):
def __init__(self, page_set):
super(Gmail, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(Gmail, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null')
class GoogleCalendar(SimplePage):
def __init__(self, page_set):
super(GoogleCalendar, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(GoogleCalendar, self).RunNavigateSteps(action_runner)
action_runner.ExecuteJavaScript('''
(function() { var elem = document.createElement("meta");
elem.name="viewport";
elem.content="initial-scale=1";
document.body.appendChild(elem);
})();''')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
class Youtube(SimplePage):
def __init__(self, page_set):
super(Youtube, self).__init__(
url='http://www.youtube.com',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(Youtube, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class Facebook(SimplePage):
def __init__(self, page_set):
super(Facebook, self).__init__(
url='http://www.facebook.com/barackobama',
page_set=page_set,
credentials='facebook',
name='Facebook')
def RunNavigateSteps(self, action_runner):
super(Facebook, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='About')
class Top10PageSet(page_set_module.PageSet):
"""10 Pages chosen from Alexa top sites"""
def __init__(self):
super(Top10PageSet, self).__init__(
archive_data_file='data/top_10.json',
user_agent_type='desktop',
bucket=page_set_module.PARTNER_BUCKET)
# top google property; a google tab is often open
self.AddUserStory(Google(self))
# productivity, top google properties
# TODO(dominikg): fix crbug.com/386152
#self.AddUserStory(Gmail(self))
# productivity, top google properties
self.AddUserStory(GoogleCalendar(self))
# #3 (Alexa global)
self.AddUserStory(Youtube(self))
# top social, Public profile
self.AddUserStory(Facebook(self))
# #6 (Alexa) most visited worldwide,Picked an interesting page
self.AddUserStory(SimplePage('http://en.wikipedia.org/wiki/Wikipedia',
self, name='Wikipedia'))
# #1 world commerce website by visits; #3 commerce in the US by time spent
self.AddUserStory(SimplePage('http://www.amazon.com', self))
# #4 Alexa
self.AddUserStory(SimplePage('http://www.yahoo.com/', self))
# #16 Alexa
self.AddUserStory(SimplePage('http://www.bing.com/', self))
# #20 Alexa
self.AddUserStory(SimplePage('http://www.ask.com/', self))
|
{
"content_hash": "a142db9ee20eabc463b4933d417f1334",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 78,
"avg_line_length": 31.204918032786885,
"alnum_prop": 0.6661413186235882,
"repo_name": "hefen1/chromium",
"id": "e198ded038e5e639964ee14bbb8ed13217ed7e13",
"size": "3969",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/top_10.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "C",
"bytes": "4050888"
},
{
"name": "C++",
"bytes": "227355953"
},
{
"name": "CSS",
"bytes": "970407"
},
{
"name": "HTML",
"bytes": "28896884"
},
{
"name": "Java",
"bytes": "8494381"
},
{
"name": "JavaScript",
"bytes": "19110753"
},
{
"name": "Makefile",
"bytes": "37978"
},
{
"name": "Objective-C",
"bytes": "1276474"
},
{
"name": "Objective-C++",
"bytes": "7755220"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "264470"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "423501"
},
{
"name": "Python",
"bytes": "7622149"
},
{
"name": "Shell",
"bytes": "478642"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import threading
try:
from Queue import Queue
except ImportError:
from queue import Queue
from flexmock import flexmock
from .. import OratorTestCase
from .. import mock
from ..orm.models import User
from orator.query.builder import QueryBuilder
from orator.connections.connection import Connection
class ConnectionTestCase(OratorTestCase):
def test_table_returns_query_builder(self):
connection = Connection(None, "database")
builder = connection.table("users")
self.assertIsInstance(builder, QueryBuilder)
self.assertEqual("users", builder.from__)
self.assertEqual(connection.get_query_grammar(), builder.get_grammar())
def test_transaction(self):
connection = Connection(None, "database")
connection.begin_transaction = mock.MagicMock(unsafe=True)
connection.commit = mock.MagicMock(unsafe=True)
connection.rollback = mock.MagicMock(unsafe=True)
connection.insert = mock.MagicMock(return_value=1)
with connection.transaction():
connection.table("users").insert({"name": "foo"})
connection.begin_transaction.assert_called_once()
connection.commit.assert_called_once()
self.assertFalse(connection.rollback.called)
connection.begin_transaction.reset_mock()
connection.commit.reset_mock()
connection.rollback.reset_mock()
try:
with connection.transaction():
connection.table("users").insert({"name": "foo"})
raise Exception("foo")
except Exception as e:
self.assertEqual("foo", str(e))
connection.begin_transaction.assert_called_once()
connection.rollback.assert_called_once()
self.assertFalse(connection.commit.called)
def test_try_again_if_caused_by_lost_connection_is_called(self):
connection = flexmock(Connection(None, "database"))
cursor = flexmock()
connection.should_receive("_try_again_if_caused_by_lost_connection").once()
connection.should_receive("_get_cursor_for_select").and_return(cursor)
connection.should_receive("reconnect")
cursor.should_receive("execute").and_raise(Exception("error"))
connection.select('SELECT * FROM "users"')
def test_lost_connection_returns_true_with_capitalized_error(self):
connection = Connection(None, "database")
self.assertTrue(connection._caused_by_lost_connection("Lost Connection"))
def test_prefix_set_to_none(self):
connection = Connection(None, "database", None)
self.assertIsNotNone(connection.get_table_prefix())
self.assertEqual("", connection.get_table_prefix())
class ConnectionThreadLocalTest(OratorTestCase):
threads = 4
def test_create_thread_local(self):
self.init_database()
def create_user_thread(low, hi):
            for i in range(low, hi):
                User.create(name="u%d" % i)
User.get_connection_resolver().disconnect()
threads = []
for i in range(self.threads):
threads.append(
threading.Thread(target=create_user_thread, args=(i * 10, i * 10 + 10))
)
[t.start() for t in threads]
[t.join() for t in threads]
self.assertEqual(User.select().count(), self.threads * 10)
def test_read_thread_local(self):
self.init_database()
data_queue = Queue()
def reader_thread(q, num):
for _ in range(num):
                q.put(User.select().count())
threads = []
for i in range(self.threads):
threads.append(
threading.Thread(target=reader_thread, args=(data_queue, 20))
)
[t.start() for t in threads]
[t.join() for t in threads]
self.assertEqual(data_queue.qsize(), self.threads * 20)
|
{
"content_hash": "0a30b5e2852db07c42f7962bc94d9490",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 87,
"avg_line_length": 32.73109243697479,
"alnum_prop": 0.637997432605905,
"repo_name": "sdispater/orator",
"id": "870ef8fe97bbe82104b6776b30d6154013d15938",
"size": "3920",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.9",
"path": "tests/connections/test_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2170"
},
{
"name": "Python",
"bytes": "1013569"
}
],
"symlink_target": ""
}
|
"""
This is (almost) a direct C++ to Python transliteration of
<VTK-root>/Examples/DataManipulation/Cxx/Cube.cxx from the VTK
source distribution, which "shows how to manually create vtkPolyData".
A convenience function, mkVtkIdList(), has been added, and the mapper
input is set with SetInputData() so the example works with VTK 6 or later.
If your VTK version is 5.x, remove the line ``colors = vtk.vtkNamedColors()``
and replace the SetBackground parameters with (1.0, 0.9688, 0.8594).
"""
import vtk
def mkVtkIdList(it):
"""
Makes a vtkIdList from a Python iterable. I'm kinda surprised that
this is necessary, since I assumed that this kind of thing would
have been built into the wrapper and happen transparently, but it
seems not.
:param it: A python iterable.
:return: A vtkIdList
"""
vil = vtk.vtkIdList()
for i in it:
vil.InsertNextId(int(i))
return vil
def main():
colors = vtk.vtkNamedColors()
# x = array of 8 3-tuples of float representing the vertices of a cube:
x = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0),
(0.0, 0.0, 1.0), (1.0, 0.0, 1.0), (1.0, 1.0, 1.0), (0.0, 1.0, 1.0)]
# pts = array of 6 4-tuples of vtkIdType (int) representing the faces
# of the cube in terms of the above vertices
pts = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 1, 5, 4),
(1, 2, 6, 5), (2, 3, 7, 6), (3, 0, 4, 7)]
# We'll create the building blocks of polydata including data attributes.
cube = vtk.vtkPolyData()
points = vtk.vtkPoints()
polys = vtk.vtkCellArray()
scalars = vtk.vtkFloatArray()
# Load the point, cell, and data attributes.
for i, xi in enumerate(x):
points.InsertPoint(i, xi)
for pt in pts:
polys.InsertNextCell(mkVtkIdList(pt))
for i, _ in enumerate(x):
scalars.InsertTuple1(i, i)
# We now assign the pieces to the vtkPolyData.
cube.SetPoints(points)
cube.SetPolys(polys)
cube.GetPointData().SetScalars(scalars)
# Now we'll look at it.
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputData(cube)
cubeMapper.SetScalarRange(cube.GetScalarRange())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
# The usual rendering stuff.
camera = vtk.vtkCamera()
camera.SetPosition(1, 1, 1)
camera.SetFocalPoint(0, 0, 0)
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renderer.AddActor(cubeActor)
renderer.SetActiveCamera(camera)
renderer.ResetCamera()
renderer.SetBackground(colors.GetColor3d("Cornsilk"))
# renderer.SetBackground(1.0, 0.9688, 0.8594)
renWin.SetSize(600, 600)
# interact with data
renWin.Render()
iren.Start()
if __name__ == "__main__":
main()
|
{
"content_hash": "62d46078a9ecc5b358a53632b924b907",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 30.020833333333332,
"alnum_prop": 0.6495489243580846,
"repo_name": "lorensen/VTKExamples",
"id": "2b4310d168e4b9a610b89247fa9e9fc713889afc",
"size": "2905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/GeometricObjects/Cube.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
"""
Agent Models
============
Models and interface classes related to the agent.
"""
import re
import sys
from textwrap import dedent
import netaddr
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import validates
from netaddr import AddrFormatError, IPAddress
from pyfarm.core.enums import AgentState, STRING_TYPES, PY3
from pyfarm.core.config import read_env_number, read_env_int, read_env_bool
from pyfarm.master.application import db, app
from pyfarm.models.core.functions import repr_ip
from pyfarm.models.core.mixins import (
ValidatePriorityMixin, UtilityMixins, ReprMixin)
from pyfarm.models.core.types import (
id_column, IPv4Address, IDTypeAgent, IDTypeTag, UseAgentAddressEnum,
AgentStateEnum)
from pyfarm.models.core.cfg import (
TABLE_AGENT, TABLE_SOFTWARE, TABLE_TAG, TABLE_AGENT_TAG_ASSOC,
MAX_HOSTNAME_LENGTH, MAX_TAG_LENGTH, TABLE_AGENT_SOFTWARE_ASSOC,
TABLE_PROJECT_AGENTS, TABLE_PROJECT)
PYFARM_REQUIRE_PRIVATE_IP = read_env_bool("PYFARM_REQUIRE_PRIVATE_IP", False)
REGEX_HOSTNAME = re.compile(r"^(?!-)[A-Z\d-]{1,63}(?<!-)"
                            r"(\.(?!-)[A-Z\d-]{1,63}(?<!-))*\.?$",
                            re.IGNORECASE)
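# e.g. (illustrative) "web-01.example.com" matches REGEX_HOSTNAME, while
# "-badhost" is rejected because a label may not start with a hyphen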
AgentSoftwareAssociation = db.Table(
TABLE_AGENT_SOFTWARE_ASSOC, db.metadata,
db.Column("agent_id", IDTypeAgent,
db.ForeignKey("%s.id" % TABLE_AGENT), primary_key=True),
db.Column("software_id", db.Integer,
db.ForeignKey("%s.id" % TABLE_SOFTWARE), primary_key=True))
AgentTagAssociation = db.Table(
TABLE_AGENT_TAG_ASSOC, db.metadata,
db.Column("agent_id", IDTypeAgent,
db.ForeignKey("%s.id" % TABLE_AGENT), primary_key=True),
db.Column("tag_id", db.Integer,
db.ForeignKey("%s.id" % TABLE_TAG), primary_key=True))
AgentProjects = db.Table(
TABLE_PROJECT_AGENTS, db.metadata,
db.Column("agent_id", IDTypeAgent,
db.ForeignKey("%s.id" % TABLE_AGENT), primary_key=True),
db.Column("project_id", db.Integer,
db.ForeignKey("%s.id" % TABLE_PROJECT), primary_key=True))
class AgentTaggingMixin(object):
"""
    Mixin which provides some common structures to
:class:`.AgentTag` and :class:`.AgentSoftware`
"""
if not PY3:
NUMERIC_TYPES = (int, long)
else:
NUMERIC_TYPES = int
@validates("tag", "software")
def validate_string_column(self, key, value):
"""
Ensures `value` is a string or something that can be converted
to a string.
"""
if isinstance(value, self.NUMERIC_TYPES):
value = str(value)
elif not isinstance(value, STRING_TYPES):
raise ValueError("expected a string for `%s`" % key)
return value
class Agent(db.Model, ValidatePriorityMixin, UtilityMixins, ReprMixin):
"""
    Stores information about an agent, including its network address,
state, allocation configuration, etc.
.. note::
This table enforces two forms of uniqueness. The :attr:`id` column
must be unique and the combination of these columns must also be
unique to limit the frequency of duplicate data:
* :attr:`hostname`
* :attr:`ip`
* :attr:`port`
"""
__tablename__ = TABLE_AGENT
__table_args__ = (UniqueConstraint("hostname", "ip", "port"), )
STATE_DEFAULT = "online"
REPR_COLUMNS = (
"id", "hostname", "state", "ip", "remote_ip", "port", "cpus",
"ram", "free_ram")
REPR_CONVERT_COLUMN = {
"ip": repr_ip,
"remote_ip": repr_ip,
"state": repr}
MIN_PORT = read_env_int("PYFARM_AGENT_MIN_PORT", 1024)
MAX_PORT = read_env_int("PYFARM_AGENT_MAX_PORT", 65535)
MIN_CPUS = read_env_int("PYFARM_AGENT_MIN_CPUS", 1)
MAX_CPUS = read_env_int("PYFARM_AGENT_MAX_CPUS", 256)
MIN_RAM = read_env_int("PYFARM_AGENT_MIN_RAM", 16)
MAX_RAM = read_env_int("PYFARM_AGENT_MAX_RAM", 262144)
# quick check of the configured data
assert MIN_PORT >= 1, "$PYFARM_AGENT_MIN_PORT must be > 0"
assert MAX_PORT >= 1, "$PYFARM_AGENT_MAX_PORT must be > 0"
assert MAX_PORT >= MIN_PORT, "MIN_PORT must be <= MAX_PORT"
assert MIN_CPUS >= 1, "$PYFARM_AGENT_MIN_CPUS must be > 0"
assert MAX_CPUS >= 1, "$PYFARM_AGENT_MAX_CPUS must be > 0"
assert MAX_CPUS >= MIN_CPUS, "MIN_CPUS must be <= MAX_CPUS"
assert MIN_RAM >= 1, "$PYFARM_AGENT_MIN_RAM must be > 0"
assert MAX_RAM >= 1, "$PYFARM_AGENT_MAX_RAM must be > 0"
assert MAX_RAM >= MIN_RAM, "MIN_RAM must be <= MAX_RAM"
id = id_column(IDTypeAgent)
# basic host attribute information
hostname = db.Column(db.String(MAX_HOSTNAME_LENGTH), nullable=False,
doc=dedent("""
The hostname we should use to talk to this host.
Preferably this value will be the fully qualified
name instead of the base hostname alone."""))
ip = db.Column(IPv4Address, nullable=True,
doc="The IPv4 network address this host resides on")
remote_ip = db.Column(IPv4Address, nullable=True,
doc="the remote address which came in with the "
"request")
use_address = db.Column(UseAgentAddressEnum, nullable=False,
default="remote",
doc="The address we should use when communicating "
"with the agent")
ram = db.Column(db.Integer, nullable=False,
doc="The amount of ram installed on the agent in megabytes")
free_ram = db.Column(db.Integer, nullable=False,
doc="The amount of ram which was last considered free")
cpus = db.Column(db.Integer, nullable=False,
doc="The number of cpus installed on the agent")
port = db.Column(db.Integer, nullable=False,
doc="The port the agent is currently running on")
time_offset = db.Column(db.Integer, nullable=False, default=0,
doc="the offset in seconds the agent is from "
"an official time server")
# host state
state = db.Column(AgentStateEnum, default=AgentState.ONLINE,
nullable=False,
doc=dedent("""
Stores the current state of the host. This value can be
changed either by a master telling the host to do
something with a task or from the host via REST api."""))
# Max allocation of the two primary resources which `1.0` is 100%
# allocation. For `cpu_allocation` 100% allocation typically means
# one task per cpu.
ram_allocation = db.Column(db.Float,
default=read_env_number(
"PYFARM_AGENT_RAM_ALLOCATION", .8),
doc=dedent("""
The amount of ram the agent is allowed to
allocate towards work. A value of 1.0 would
mean to let the agent use all of the memory
installed on the system when assigning work."""))
cpu_allocation = db.Column(db.Float,
default=read_env_number(
"PYFARM_AGENT_CPU_ALLOCATION", 1.0),
doc=dedent("""
The total amount of cpu space an agent is
allowed to process work in. A value of 1.0
would mean an agent can handle as much work
as the system could handle given the
requirements of a task. For example if an agent
has 8 cpus, cpu_allocation is .5, and a task
requires 4 cpus then only that task will run
on the system."""))
# relationships
tasks = db.relationship("Task", backref="agent", lazy="dynamic",
doc=dedent("""
Relationship between an :class:`Agent`
and any :class:`pyfarm.models.Task`
objects"""))
tags = db.relationship("Tag", secondary=AgentTagAssociation,
backref=db.backref("agents", lazy="dynamic"),
lazy="dynamic",
doc="Tags associated with this agent")
software = db.relationship("Software",
secondary=AgentSoftwareAssociation,
backref=db.backref("agents", lazy="dynamic"),
lazy="dynamic",
doc="software this agent has installed or is "
"configured for")
projects = db.relationship("Project",
secondary=AgentProjects,
backref=db.backref("agents", lazy="dynamic"),
lazy="dynamic",
doc="The project or projects this agent is "
"associated with. By default an agent "
"which is not associated with any projects "
"will be a member of all projects.")
@classmethod
def validate_hostname(cls, key, value):
"""
Ensures that the hostname provided by `value` matches a regular
expression that expresses what a valid hostname is.
"""
# ensure hostname does not contain characters we can't use
if not REGEX_HOSTNAME.match(value):
raise ValueError("%s is not valid for %s" % (value, key))
return value
@classmethod
def validate_resource(cls, key, value):
"""
Ensure the `value` provided for `key` is within an expected range as
specified in `agent.yml`
"""
min_value = getattr(cls, "MIN_%s" % key.upper())
max_value = getattr(cls, "MAX_%s" % key.upper())
# check the provided input
if min_value > value or value > max_value:
msg = "value for `%s` must be between " % key
msg += "%s and %s" % (min_value, max_value)
raise ValueError(msg)
return value
@classmethod
def validate_ip_address(cls, key, value):
"""
Ensures the :attr:`ip` address is valid. This checks to ensure
that the value provided is:
* not a hostmask
* not link local (:rfc:`3927`)
* not used for multicast (:rfc:`1112`)
* not a netmask (:rfc:`4632`)
* not reserved (:rfc:`6052`)
* a private address (:rfc:`1918`)
"""
if not value:
return
try:
ip = netaddr.IPAddress(value)
except (AddrFormatError, ValueError) as e:
raise ValueError(
"%s is not a valid address format: %s" % (value, e))
if not app.config.get("DEV_ALLOW_ANY_AGENT_ADDRESS", False):
if PYFARM_REQUIRE_PRIVATE_IP and not ip.is_private():
raise ValueError("%s is not a private ip address" % value)
if not app.config.get("DEV_ALLOW_ANY_AGENT_ADDRESS", False) and \
not all([
not ip.is_hostmask(), not ip.is_link_local(),
not ip.is_loopback(), not ip.is_multicast(),
not ip.is_netmask(), not ip.is_reserved()
]):
raise ValueError("%s is not a usable ip address" % value)
return value
@validates("ip")
def validate_address_column(self, key, value):
"""validates the ip column"""
return self.validate_ip_address(key, value)
@validates("hostname")
def validate_hostname_column(self, key, value):
"""validates the hostname column"""
return self.validate_hostname(key, value)
@validates("ram", "cpus", "port")
def validate_resource_column(self, key, value):
"""validates the ram, cpus, and port columns"""
return self.validate_resource(key, value)
def serialize_column(self, column):
"""serializes a single column, typically used by a dictionary mixin"""
if isinstance(column, IPAddress):
return str(column)
return column
|
{
"content_hash": "c2848bd403bc839e8b8708a51979483f",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 80,
"avg_line_length": 41.903333333333336,
"alnum_prop": 0.5590645135629624,
"repo_name": "opalmer/deprecated-pyfarm-models",
"id": "6753ba3a64b54fa72aa5835ae187091962c8b92c",
"size": "13205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfarm/models/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128130"
}
],
"symlink_target": ""
}
|
import os
import sys
import stat
import pwd
import shutil
import logging
from sandstone import settings
import grp
import subprocess
from sandstone.lib.filesystem.schemas import FilesystemObject
from sandstone.lib.filesystem.schemas import VolumeObject
from sandstone.lib.filesystem.schemas import FileObject
class PosixFS:
"""
Interface for a Posix filesystem.
"""
def _format_volume_paths(self):
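        # settings.VOLUMES holds path patterns that may reference environment
        # variables, e.g. (illustrative) ['$HOME', '/scratch/$USER']; they are
        # expanded for the current environment below.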
volume_patterns = settings.VOLUMES
formatted_patterns = []
for patt in volume_patterns:
fmt = os.path.expandvars(patt)
formatted_patterns.append(fmt)
return formatted_patterns
def get_filesystem_details(self):
details = {
'type': 'filesystem'
}
# get volume details
volumes = []
for volume_path in self._format_volume_paths():
vd = {
'type': 'volume',
'filepath': volume_path
}
p = subprocess.Popen(['df', '-h', volume_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
size, used, avail, used_pct = out.split()[-5:-1]
vd.update({
'used': used,
'available': avail,
'used_pct': float(used_pct.strip('%')),
'size': size
})
vol = VolumeObject(**vd)
volumes.append(vol)
details.update({
'volumes': volumes
})
# get groups
groups = self.get_groups()
details.update({
'groups': groups
})
fs = FilesystemObject(**details)
return fs
def get_groups(self):
groups = subprocess.check_output(["id", "--name", "-G"]).strip().split()
return groups
def _parse_ls_line(self, out):
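        # Parses a single line of ``ls -lsah`` output, e.g. (illustrative):
        #   4.0K drwxr-xr-x 2 alice staff 4.0K Jan  1 12:00 mydir
        # -> type='directory', owner='alice', group='staff', size='4.0K',
        #    name='mydir'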
contents = out.split()
perms = contents[1]
t = 'file'
if perms[0] == 'd':
t = 'directory'
if perms[-1] == '.':
perms = perms[:-1]
perms = perms[1:]
owner = contents[3]
group = contents[4]
size = contents[5]
if not size[-1].isalpha():
size += 'b'
name_cmps = contents[9:]
name = ' '.join(name_cmps)
details = {
'type': t,
'permissions': perms,
'owner': owner,
'group': group,
'size': size,
'name': name
}
return details
def get_file_details(self, filepath):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
details = {
'filepath': filepath
}
p = subprocess.Popen(['ls', '-lsahd', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
lines = out.split('\n')
if lines[0] == '':
out = lines[1]
ls_det = self._parse_ls_line(out)
details.update(ls_det)
file_details = FileObject(**details)
return file_details
def get_directory_details(self, filepath, contents=True, dir_sizes=False):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
details = {
'filepath': filepath
}
if not contents:
p = subprocess.Popen(['ls', '-lsahd', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
ls_det = self._parse_ls_line(out)
details.update(ls_det)
dir_details = FileObject(**details)
return dir_details
else:
# Otherwise, get dir contents
p = subprocess.Popen(['ls','--group-directories-first','-lsah', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
lines = out.split('\n')[1:]
# the directory details
ls_det = self._parse_ls_line(lines[0])
details.update(ls_det)
# directory contents
contents = []
for line in lines[2:-1]:
line_details = self._parse_ls_line(line)
name = line_details['name']
fp = os.path.join(filepath,name)
line_details.update({
'filepath': fp
})
if dir_sizes and line_details['type'] == 'directory':
line_details['size'] = self.get_size(fp)
file_details = FileObject(**line_details)
contents.append(file_details)
details['contents'] = contents
dir_details = FileObject(**details)
return dir_details
def exists(self, filepath):
filepath = os.path.abspath(filepath)
exists = os.path.exists(filepath)
return exists
def get_type_from_path(self, filepath):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
if os.path.isdir(filepath):
return 'directory'
return 'file'
def get_size(self, filepath):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
p = subprocess.Popen(['du','-hs',filepath],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out, err = p.communicate()
out = out.strip()
size, fp = out.split('\t')
if not size[-1].isalpha():
size += 'b'
return size
def create_file(self, filepath):
filepath = os.path.abspath(filepath)
if os.path.exists(filepath):
logging.info('File {} already exists, not creating'.format(filepath))
return filepath
fd = open(filepath,'w')
fd.close()
def read_file(self, filepath):
filepath = os.path.abspath(filepath)
with open(filepath, 'r') as f:
content = f.read()
return content
def write_file(self, filepath, content):
filepath = os.path.abspath(filepath)
if not os.path.exists(filepath):
raise IOError
with open(filepath, 'w') as local_file:
for line in content:
local_file.write(line.encode('utf8'))
def delete(self, filepath):
filepath = os.path.abspath(filepath)
if os.path.isdir(filepath):
shutil.rmtree(filepath)
else:
os.remove(filepath)
def create_directory(self, filepath):
filepath = os.path.abspath(filepath)
os.makedirs(filepath)
def move(self, origpath, newpath):
origpath = os.path.abspath(origpath)
newpath = os.path.abspath(newpath)
shutil.move(origpath,newpath)
def copy(self, origpath, newpath):
origpath = os.path.abspath(origpath)
newpath = os.path.abspath(newpath)
if os.path.isdir(origpath):
shutil.copytree(origpath,newpath)
else:
shutil.copy2(origpath,newpath)
def rename(self, origpath, newname):
origpath = os.path.abspath(origpath)
dirname, name = os.path.split(origpath)
newpath = os.path.join(dirname,newname)
os.rename(origpath,newpath)
return newpath
def _permissions_to_octal(self, perm_string):
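        """
        Convert a symbolic permission string as printed by ``ls`` (e.g.
        'rwxr-xr-x', optionally prefixed with a file-type character) to an
        octal string such as '755'. Strings that are already octal are
        returned unchanged.
        """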
try:
            int(perm_string, 8)
            # Already an octal string, return it unchanged
            return perm_string
except ValueError:
pass
perms = []
count = 0
p = 0
if len(perm_string) == 10:
perm_string = perm_string[1:]
for i in perm_string:
if i == 'r':
p += 4
elif i == 'w':
p += 2
elif i == 'x':
p += 1
if count == 2:
perms.append('%d' % p)
count = 0
p = 0
else:
count += 1
return ''.join(perms)
def update_permissions(self, filepath, perm_string):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
perm_octal = self._permissions_to_octal(perm_string)
os.chmod(filepath, int(perm_octal, 8))
def update_group(self, filepath, group_name):
filepath = os.path.abspath(filepath)
if not self.exists(filepath):
raise OSError('File not found')
# Get uid
uid = os.stat(filepath).st_uid
# Get GID of new group
gid = grp.getgrnam(group_name).gr_gid
# change group
os.chown(filepath, uid, gid)
|
{
"content_hash": "b3142172148219c2d70716b40ec99072",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 134,
"avg_line_length": 31.901459854014597,
"alnum_prop": 0.5367806887083858,
"repo_name": "SandstoneHPC/OIDE",
"id": "69c7959b482b4ad6858626333e36a599d8af34ff",
"size": "8741",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sandstone/lib/filesystem/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9804"
},
{
"name": "HTML",
"bytes": "37057"
},
{
"name": "JavaScript",
"bytes": "159011"
},
{
"name": "Python",
"bytes": "93507"
},
{
"name": "Smarty",
"bytes": "531"
}
],
"symlink_target": ""
}
|
def centroid(func, step=0.1):
"""
Determine the center of gravity (centroid) of a function's curve
:param func: Function object
:param step: Distance between domain's values
:return: Numeric value representing the domain's value which approximately corresponds to the centroid
"""
points = func.points(step)
num, den = 0, 0
for x, y in points:
num += x * y
den += y
return num / den
def bisecter(func, step=0.1):
"""
    Determine the center of area (bisecter) of a function, i.e. the point that splits the curve into two pieces of equal area
:param func: Function object
:param step: Distance between domain's values
:return: Numeric value representing the domain's value which approximately corresponds to the bisecter
"""
points = list(func.points(step))
area = sum(map(lambda p: p[1], points))
current = 0.
for x, y in points:
current += y
if current >= area / 2:
return x
def mean_max(func, step=0.1):
"""
    Determine the point which corresponds to the mean of the function's maximums
:param func: Function object
:param step: Distance between domain's values
:return: Numeric value representing the domain's value which approximately corresponds to the mean of maximums
"""
points = func.points(step)
maximums = []
k = 0
for x, y in points:
if abs(y - k) < 1e-6: # y == k
maximums.append(x)
elif y > k:
k = y
maximums.clear()
maximums.append(x)
return sum(maximums) / len(maximums)
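# Illustrative usage (not part of the original module): a minimal mock object
# exposing the ``points(step)`` interface the defuzzifiers above assume, i.e.
# an iterable of (x, y) pairs over the function's domain.
if __name__ == '__main__':
    class _Triangle(object):
        """Triangular membership function peaking at x = 1 on [0, 2]."""
        def points(self, step):
            x = 0.0
            while x <= 2.0:
                yield x, 1.0 - abs(x - 1.0)
                x += step
    tri = _Triangle()
    print(centroid(tri))   # ~= 1.0 (curve is symmetric about x = 1)
    print(bisecter(tri))   # ~= 1.0
    print(mean_max(tri))   # ~= 1.0 (single maximum at x = 1)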
|
{
"content_hash": "c6b9d3cb92f5f981733e395bce6c838a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 114,
"avg_line_length": 29.796296296296298,
"alnum_prop": 0.6239900559353636,
"repo_name": "ealmuina/fuzzy-logic-evaluator",
"id": "17e8de9a7035acd134013cb5bea5d54db38275c8",
"size": "1609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzy/defuzzifiers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "1555"
},
{
"name": "Python",
"bytes": "52445"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'XForm.uuid'
db.add_column('odk_logger_xform', 'uuid', self.gf('django.db.models.fields.CharField')(default=u'', max_length=32), keep_default=False)
def backwards(self, orm):
# Deleting field 'XForm.uuid'
db.delete_column('odk_logger_xform', 'uuid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['odk_logger']
|
{
"content_hash": "4540fd576e51f3b6629db454759b868c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 182,
"avg_line_length": 71.00990099009901,
"alnum_prop": 0.5493586168432795,
"repo_name": "ultimateprogramer/formhub",
"id": "7e6f4fd4ebb0680ad7dd23b7e511099efbe54064",
"size": "7190",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "odk_logger/migrations/0010_auto__add_field_xform_uuid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "286133"
},
{
"name": "HTML",
"bytes": "1669852"
},
{
"name": "JavaScript",
"bytes": "2294844"
},
{
"name": "Makefile",
"bytes": "8446"
},
{
"name": "Python",
"bytes": "1543287"
},
{
"name": "Shell",
"bytes": "11919"
}
],
"symlink_target": ""
}
|
"""
Params Module: Compute characteristic scales of a plume model
==============================================================
Use the ``TAMOC`` module `params` to compute the characteristic length and
velocity scales of a plume simulation. These empirical scales are taken
from Socolofsky and Adams (2002 and 2005).
This simulation uses the ambient data stored in the file
`./test/output/test_BM54.nc`. This dataset is created by the test files in the
`./test` directory. Please be sure that all of the tests pass using ``py.test
-v`` at the command prompt before trying to run this simulation.
"""
# S. Socolofsky, February 2014, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import stratified_plume_model
from tamoc import params
import numpy as np
if __name__ == '__main__':
# Get the ambient CTD profile data
nc = '../../test/output/test_BM54.nc'
try:
# Open the lake dataset as a Profile object if it exists
ctd = ambient.Profile(nc, chem_names='all')
except RuntimeError:
# Tell the user to create the dataset
print('CTD data not available; run test cases in ./test first.')
# Create the stratified plume model object
spm = stratified_plume_model.Model(ctd)
# Set the release conditions
T0 = 273.15 + 35. # Release temperature in K
R = 0.15 # Radius of leak source in m
# Create the gas phase particles
composition = ['methane', 'ethane', 'propane', 'oxygen']
yk = np.array([0.93, 0.05, 0.02, 0.0])
gas = dbm.FluidParticle(composition)
z0 = 1000.
disp_phases = []
# Larger free gas bubbles
mb0 = 8. # total mass flux in kg/s
de = 0.025 # bubble diameter in m
lambda_1 = 0.85
disp_phases.append(stratified_plume_model.particle_from_mb0(ctd, z0, gas,
yk, mb0, de, lambda_1, T0))
# Smaller free gas bubbles (note, it is not necessary to have more than
# one bubble size)
mb0 = 2. # total mass flux in kg/s
de = 0.0075 # bubble diameter in m
lambda_1 = 0.9
disp_phases.append(stratified_plume_model.particle_from_mb0(ctd, z0, gas,
yk, mb0, de, lambda_1, T0))
# Liquid hydrocarbon. This could either be a dissolving phase (mixture
# of liquid phases) or an inert phase. We demonstrate here the simple
# case of an inert oil phase
oil = dbm.InsolubleParticle(True, True, rho_p=890., gamma=30.,
beta=0.0007, co=2.90075e-9)
mb0 = 10. # total mass flux in kg/s
de = 0.004 # bubble diameter in m
lambda_1 = 0.9
disp_phases.append(stratified_plume_model.particle_from_mb0(ctd, z0, oil,
np.array([1.]), mb0, de, lambda_1, T0))
# Compute the governing scales
case = params.Scales(ctd, disp_phases)
(B, N, u_slip, u_inf) = case.get_variables(z0, 0.15)
print('Plume parameters:')
print(' z = %f (m)' % z0)
print(' B = %f (m^4/s^3)' % B)
print(' N = %f (s^(-1))' % N)
print(' u_s = %f (m/s)' % u_slip)
print(' u_a = %f (m/s)\n' % u_inf)
print('Plume empirical scales:')
print(' h_T = %f (m)' % case.h_T(z0))
print(' h_P = %f (m)' % case.h_P(z0))
print(' h_S = %f (m)' % case.h_S(z0, 0.15))
print(' lambda_1 = %f (--)\n' % case.lambda_1(z0, 0))
print('Critical cross-flow velocity:')
print(' ua_crit = %f (m/s)' % case.u_inf_crit(z0))
|
{
"content_hash": "119fc51f806e4033a773a87b25436dc5",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 78,
"avg_line_length": 37.50515463917526,
"alnum_prop": 0.5909840571742716,
"repo_name": "socolofs/tamoc",
"id": "941fb416c0b965ac3ad15167351530d6b72e1070",
"size": "3638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/params/scales.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "70820"
},
{
"name": "Jupyter Notebook",
"bytes": "11853"
},
{
"name": "Python",
"bytes": "1346822"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.forms.util import ErrorList
from django.forms.formsets import formset_factory
from kitabu.search.available import ExclusivelyAvailableSubjects
from kitabu.exceptions import (
ReservationError,
InvalidPeriod,
OverlappingReservations,
SizeExceeded,
TooManyReservations,
)
from spa.forms import RequiredFormSet
from spa.settings import MAX_LANE_RESERVATIONS_NR
from forms import LaneReservationForm, AvailableLanesSearchForm, LaneReservationsNrForm
from models import Lane, LaneReservationGroup
@login_required
def reserve(request, lane_id):
try:
forms_nr = int(request.GET.get('forms_nr', 1))
except ValueError:
forms_nr = 1
lane_reservations_nr_form = LaneReservationsNrForm({'forms_nr': forms_nr})
forms_nr = max(1, forms_nr)
lane = get_object_or_404(Lane, pk=lane_id)
success_msg = ""
ReservationFormset = formset_factory(LaneReservationForm, extra=forms_nr, max_num=MAX_LANE_RESERVATIONS_NR,
formset=RequiredFormSet)
if request.POST:
formset = ReservationFormset(request.POST)
if formset.is_valid():
arguments = []
for form in formset:
arguments.append((lane, form.cleaned_data))
try:
LaneReservationGroup.reserve(*arguments, owner=request.user)
except ReservationError as e:
message = (
"Size exceeded. There aren't %s places available." % e.requested_size
if isinstance(e, SizeExceeded) else
"There are other reservations that overlap with selected period."
if isinstance(e, OverlappingReservations) else
e.message
if isinstance(e, InvalidPeriod) else
"You have reached limit of reservation for you account."
if isinstance(e, TooManyReservations) else
"Disallowed reservation parameters (%s)." % e.message
)
if "__all__" not in form._errors:
form._errors["__all__"] = ErrorList()
form.errors['__all__'].append(message)
else:
return redirect('reserve-lane', lane_id)
else:
formset = ReservationFormset()
return render(
request,
'lanes/reserve.html',
{
'lane': lane,
'pool': lane.cluster,
'formset': formset,
'success_msg': success_msg,
'lane_reservations_nr_form': lane_reservations_nr_form
}
)
available_lane_searcher = ExclusivelyAvailableSubjects(Lane)
def search(request):
form = AvailableLanesSearchForm(request.GET or None)
results = (
available_lane_searcher.search(**form.cleaned_data)
if form.is_valid()
else []
)
return render(request, 'lanes/search.html', {'form': form, 'results': results})
|
{
"content_hash": "d82a57f370111efcf1fb29a932fc7211",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 111,
"avg_line_length": 33.77173913043478,
"alnum_prop": 0.6157064692629546,
"repo_name": "mbad/kitabu",
"id": "310911127c546d11a40a820b05c36e294181b584",
"size": "3130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/lanes/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19960"
},
{
"name": "JavaScript",
"bytes": "91"
},
{
"name": "Python",
"bytes": "178099"
},
{
"name": "Shell",
"bytes": "885"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
# examples of filled contour plots on map projections.
# read in data on lat/lon grid.
hgt = np.loadtxt('500hgtdata.gz')
lons = np.loadtxt('500hgtlons.gz')
lats = np.loadtxt('500hgtlats.gz')
lons, lats = np.meshgrid(lons, lats)
# create new figure
fig=plt.figure()
# setup of sinusoidal basemap
m = Basemap(resolution='c',projection='sinu',lon_0=0)
# make a filled contour plot.
# create contour lines
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
# fill between contour lines.
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Sinusoidal Filled Contour Demo')
sys.stdout.write('plotting with sinusoidal basemap ...\n')
# create new figure
fig=plt.figure()
# setup of mollweide basemap
m = Basemap(resolution='c',projection='moll',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Mollweide Filled Contour Demo')
sys.stdout.write('plotting with mollweide basemap ...\n')
# create new figure
fig=plt.figure()
# set up Robinson map projection.
m = Basemap(resolution='c',projection='robin',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 = m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,60.)
m.drawmeridians(meridians,labels=[0,0,0,1])
plt.title('Robinson Filled Contour Demo')
sys.stdout.write('plotting with robinson basemap ...\n')
# create new figure
fig=plt.figure()
# set up map projection (azimuthal equidistant).
m = Basemap(projection='npaeqd',lon_0=-90,boundinglat=15.,resolution='c')
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS2.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2,pad='12%') # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[0,0,1,1])
meridians = np.arange(10.,360.,20.)
m.drawmeridians(meridians,labels=[1,1,1,1])
plt.title('Azimuthal Equidistant Filled Contour Demo',y=1.075)
sys.stdout.write('plotting with azimuthal equidistant basemap ...\n')
# create new figure
fig=plt.figure()
# setup of orthographic basemap
m = Basemap(resolution='c',projection='ortho',\
lat_0=45.,lon_0=-120.)
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS1.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.fillcontinents()
m.drawmapboundary()
# draw parallels and meridians.
parallels = np.arange(-80.,90,20.)
m.drawparallels(parallels)
meridians = np.arange(-360.,360.,20.)
m.drawmeridians(meridians)
plt.title('Orthographic Filled Contour Demo')
sys.stdout.write('plotting with orthographic basemap ..\n')
plt.show()
|
{
"content_hash": "7eecad2c2bcead7b5e0cace6d395ca2f",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 84,
"avg_line_length": 34.18852459016394,
"alnum_prop": 0.742747542555742,
"repo_name": "matplotlib/basemap",
"id": "b2fbba4fb36a2bbf76bf743719b5b29f2e6b3a27",
"size": "4171",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "examples/contour_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "14661"
},
{
"name": "Python",
"bytes": "444967"
},
{
"name": "Shell",
"bytes": "1161"
}
],
"symlink_target": ""
}
|
import os
import re
def collect_files(seeds, extensions=None):
'''
Collect files from given seeds: files or folders to scan through recursively.
@param seeds list of root folders or files
@param extensions optional list of file extensions (lowercase) to filter files
'''
files = set()
# Local function to check extensions (or accept everything)
if extensions is not None:
check_extension = lambda path: os.path.splitext(path)[1].lower() in extensions
else:
check_extension = lambda path: True
for seed in seeds:
if os.path.isfile(seed) and check_extension(seed):
files.add(seed)
elif os.path.isdir(seed):
for (dirpath, dirnames, filenames) in os.walk(seed):
for filename in filenames:
path = os.path.join(dirpath, filename)
if check_extension(path):
files.add(path)
return files
def file_get_contents(file_name):
with open(file_name) as f:
contents = f.read()
return contents
def get_occuring_words(words, content):
'''
Return the subset of given words that occur in content.
'''
found = set()
for word in words:
if re.search(r'\b%s\b' % word, content):
found.add(word)
return found
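# Illustrative usage (not part of the original module): collect the Python
# files below the current directory and report which of two sample words
# occur in each of them.
if __name__ == '__main__':
    for path in sorted(collect_files(['.'], extensions=['.py'])):
        content = file_get_contents(path)
        print(path, get_occuring_words({'import', 'class'}, content))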
|
{
"content_hash": "0acb814f42ec288c276e1430ab22f3aa",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 86,
"avg_line_length": 26.215686274509803,
"alnum_prop": 0.6110695587135377,
"repo_name": "soxofaan/CssDeadwood",
"id": "b991523732f5af3cb4d27190c5e5041a8f7c4251",
"size": "1338",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cssdeadwood/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25044"
}
],
"symlink_target": ""
}
|
''' Installer for the OpenLambda python bindings '''
from setuptools import setup
setup(
name='open_lambda',
version='0.1.0',
py_modules=['open_lambda'],
install_requires=["requests"]
)
|
{
"content_hash": "acf4eda9ef7416cc324c79563a5e8057",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 20.4,
"alnum_prop": 0.6617647058823529,
"repo_name": "open-lambda/open-lambda",
"id": "caa307b06d93fa1748ce242a92bc6083890efb87",
"size": "204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "31159"
},
{
"name": "Dockerfile",
"bytes": "1220"
},
{
"name": "Go",
"bytes": "205702"
},
{
"name": "Makefile",
"bytes": "3474"
},
{
"name": "Python",
"bytes": "63477"
},
{
"name": "Rust",
"bytes": "45352"
},
{
"name": "Shell",
"bytes": "1182"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.services.disk_types import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import DEFAULT_CLIENT_INFO, DiskTypesTransport
from .transports.rest import DiskTypesRestTransport
class DiskTypesClientMeta(type):
"""Metaclass for the DiskTypes client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[DiskTypesTransport]]
_transport_registry["rest"] = DiskTypesRestTransport
def get_transport_class(
cls,
label: Optional[str] = None,
) -> Type[DiskTypesTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DiskTypesClient(metaclass=DiskTypesClientMeta):
"""The DiskTypes API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DiskTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DiskTypesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DiskTypesTransport:
"""Returns the transport used by the client instance.
Returns:
DiskTypesTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, DiskTypesTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the disk types client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DiskTypesTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
NOTE: "rest" transport functionality is currently in a
beta state (preview). We welcome your feedback via an
issue in this library's source repository.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DiskTypesTransport):
# transport is a DiskTypesTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
def aggregated_list(
self,
request: Optional[Union[compute.AggregatedListDiskTypesRequest, dict]] = None,
*,
project: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.AggregatedListPager:
r"""Retrieves an aggregated list of disk types.
Args:
request (Union[google.cloud.compute_v1.types.AggregatedListDiskTypesRequest, dict]):
The request object. A request message for
DiskTypes.AggregatedList. See the method description for
details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.disk_types.pagers.AggregatedListPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
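        Example:
            A minimal sketch of the flattened-argument form described above
            (assuming ``client`` is a ``DiskTypesClient`` and
            ``"my-project"`` is a placeholder project id)::

                for zone, scoped_list in client.aggregated_list(project="my-project"):
                    print(zone, scoped_list.disk_types)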
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.AggregatedListDiskTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.AggregatedListDiskTypesRequest):
request = compute.AggregatedListDiskTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.aggregated_list]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("project", request.project),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.AggregatedListPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get(
self,
request: Optional[Union[compute.GetDiskTypeRequest, dict]] = None,
*,
project: Optional[str] = None,
zone: Optional[str] = None,
disk_type: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.DiskType:
r"""Returns the specified disk type. Gets a list of
available disk types by making a list() request.
Args:
request (Union[google.cloud.compute_v1.types.GetDiskTypeRequest, dict]):
The request object. A request message for DiskTypes.Get.
See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
The name of the zone for this
request.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
disk_type (str):
Name of the disk type to return.
This corresponds to the ``disk_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.DiskType:
Represents a Disk Type resource. Google Compute Engine
has two Disk Type resources: \*
[Regional](/compute/docs/reference/rest/v1/regionDiskTypes)
\* [Zonal](/compute/docs/reference/rest/v1/diskTypes)
You can choose from a variety of disk types based on
your needs. For more information, read Storage options.
The diskTypes resource represents disk types for a zonal
persistent disk. For more information, read Zonal
persistent disks. The regionDiskTypes resource
represents disk types for a regional persistent disk.
For more information, read Regional persistent disks.
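        Example:
            A minimal sketch (assuming ``client`` is a ``DiskTypesClient``;
            the project, zone and disk type names are placeholders)::

                disk_type = client.get(
                    project="my-project", zone="us-central1-a", disk_type="pd-ssd"
                )
                print(disk_type.description)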
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, zone, disk_type])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetDiskTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetDiskTypeRequest):
request = compute.GetDiskTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if zone is not None:
request.zone = zone
if disk_type is not None:
request.disk_type = disk_type
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project", request.project),
("zone", request.zone),
("disk_type", request.disk_type),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list(
self,
request: Optional[Union[compute.ListDiskTypesRequest, dict]] = None,
*,
project: Optional[str] = None,
zone: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Retrieves a list of disk types available to the
specified project.
Args:
request (Union[google.cloud.compute_v1.types.ListDiskTypesRequest, dict]):
The request object. A request message for
DiskTypes.List. See the method description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
zone (str):
The name of the zone for this
request.
This corresponds to the ``zone`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.disk_types.pagers.ListPager:
Contains a list of disk types.
Iterating over this object will yield
results and resolve additional pages
automatically.
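        Example:
            A minimal sketch (assuming ``client`` is a ``DiskTypesClient``;
            the project and zone are placeholders). The pager resolves
            additional pages transparently::

                for disk_type in client.list(project="my-project", zone="us-central1-a"):
                    print(disk_type.name)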
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, zone])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListDiskTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListDiskTypesRequest):
request = compute.ListDiskTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if zone is not None:
request.zone = zone
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project", request.project),
("zone", request.zone),
)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-compute",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DiskTypesClient",)
|
{
"content_hash": "ef05869344139cf5d915300303935411",
"timestamp": "",
"source": "github",
"line_count": 723,
"max_line_length": 120,
"avg_line_length": 40.15767634854772,
"alnum_prop": 0.6032926913274093,
"repo_name": "googleapis/python-compute",
"id": "58691eb5d85bb78a3abd1a7d746ac1c34fa9b9e8",
"size": "29634",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/disk_types/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.conch.checkers}.
"""
try:
import crypt
except ImportError:
cryptSkip = 'cannot run without crypt module'
else:
cryptSkip = None
import os, base64
from twisted.python import util
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase, ShadowDatabase
from twisted.test.test_process import MockOS
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
dependencySkip = "can't run without Crypto and PyASN1"
else:
dependencySkip = None
from twisted.conch.ssh import keys
from twisted.conch import checkers
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
if getattr(os, 'geteuid', None) is None:
euidSkip = "Cannot run without effective UIDs (questionable)"
else:
euidSkip = None
class HelperTests(TestCase):
"""
Tests for helper functions L{verifyCryptedPassword}, L{_pwdGetByName} and
L{_shadowGetByName}.
"""
skip = cryptSkip or dependencySkip
def setUp(self):
self.mockos = MockOS()
def test_verifyCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{True} if the plaintext password
passed to it matches the encrypted password passed to it.
"""
password = 'secret string'
salt = 'salty'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %r' % (
crypted, password))
def test_verifyCryptedPasswordMD5(self):
"""
L{verifyCryptedPassword} returns True if the provided cleartext password
matches the provided MD5 password hash.
"""
password = 'password'
salt = '$1$salt'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %s' % (
crypted, password))
def test_refuteCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{False} if the plaintext password
passed to it does not match the encrypted password passed to it.
"""
password = 'string secret'
wrong = 'secret string'
crypted = crypt.crypt(password, password)
self.assertFalse(
checkers.verifyCryptedPassword(crypted, wrong),
'%r not supposed to be valid encrypted password for %s' % (
crypted, wrong))
def test_pwdGetByName(self):
"""
L{_pwdGetByName} returns a tuple of items from the UNIX /etc/passwd
database if the L{pwd} module is present.
"""
userdb = UserDatabase()
userdb.addUser(
'alice', 'secrit', 1, 2, 'first last', '/foo', '/bin/sh')
self.patch(checkers, 'pwd', userdb)
self.assertEqual(
checkers._pwdGetByName('alice'), userdb.getpwnam('alice'))
def test_pwdGetByNameWithoutPwd(self):
"""
If the C{pwd} module isn't present, L{_pwdGetByName} returns C{None}.
"""
self.patch(checkers, 'pwd', None)
self.assertIs(checkers._pwdGetByName('alice'), None)
def test_shadowGetByName(self):
"""
L{_shadowGetByName} returns a tuple of items from the UNIX /etc/shadow
        database if the L{spwd} module is present.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', userdb)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(util, 'os', self.mockos)
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutSpwd(self):
"""
L{_shadowGetByName} uses the C{shadow} module to return a tuple of items
from the UNIX /etc/shadow database if the C{spwd} module is not present
and the C{shadow} module is.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', None)
self.patch(checkers, 'shadow', userdb)
self.patch(util, 'os', self.mockos)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutEither(self):
"""
L{_shadowGetByName} returns C{None} if neither C{spwd} nor C{shadow} is
present.
"""
self.patch(checkers, 'spwd', None)
self.patch(checkers, 'shadow', None)
self.assertIs(checkers._shadowGetByName('bob'), None)
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
class SSHPublicKeyDatabaseTestCase(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
skip = euidSkip or dependencySkip
def setUp(self):
self.checker = checkers.SSHPublicKeyDatabase()
self.key1 = base64.encodestring("foobar")
self.key2 = base64.encodestring("eggspam")
self.content = "t1 %s foo\nt2 %s egg\n" % (self.key1, self.key2)
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.patch(util, 'os', self.mockos)
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser(
'user', 'password', 1, 2, 'first last',
self.mockos.path.path, '/bin/shell')
self.checker._userdb = userdb
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = "eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = "notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = self.mockos.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(self.mockos, "seteuid", seteuid)
self.patch(util, 'os', self.mockos)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEqual(self.mockos.seteuidCalls, [0, 1, 0, 2345])
self.assertEqual(self.mockos.setegidCalls, [2, 1234])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, 'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEqual(avatarId, 'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
        d = self.checker.requestAvatarId(None)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
        L{SSHPublicKeyDatabase.requestAvatarId} to return an
        L{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, 'foo',
keys.Key.fromString(keydata.privateDSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', None, 'blob', 'sigData', 'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTestCase(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
skip = dependencySkip
def test_registerChecker(self):
"""
        L{SSHProtocolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(), )
self.assertEqual(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
checkers.SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
        If a specific interface is passed into
        L{SSHProtocolChecker.registerChecker}, that interface should be
        registered instead of what the checker specifies in
        credentialInterfaces.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(),
IUsernamePassword)
self.assertEqual(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
checkers.SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
        L{SSHProtocolChecker.requestAvatarId} should defer to one of its
registered checkers to authenticate a user.
"""
checker = checkers.SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
def _callback(avatarId):
self.assertEqual(avatarId, 'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from _areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = checkers.SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = checkers.SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
        The default L{SSHProtocolChecker.areDone} should simply return True.
"""
self.assertEqual(checkers.SSHProtocolChecker().areDone(None), True)
class UNIXPasswordDatabaseTests(TestCase):
"""
Tests for L{UNIXPasswordDatabase}.
"""
skip = cryptSkip or dependencySkip
def assertLoggedIn(self, d, username):
"""
Assert that the L{Deferred} passed in is called back with the value
'username'. This represents a valid login for this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{Deferred}
"""
result = []
d.addBoth(result.append)
self.assertEqual(len(result), 1, "login incomplete")
if isinstance(result[0], Failure):
result[0].raiseException()
self.assertEqual(result[0], username)
def test_defaultCheckers(self):
"""
        L{UNIXPasswordDatabase} with no arguments checks the C{pwd} database
and then the C{spwd} database.
"""
checker = checkers.UNIXPasswordDatabase()
def crypted(username, password):
salt = crypt.crypt(password, username)
crypted = crypt.crypt(password, '$1$' + salt)
return crypted
pwd = UserDatabase()
pwd.addUser('alice', crypted('alice', 'password'),
1, 2, 'foo', '/foo', '/bin/sh')
# x and * are convention for "look elsewhere for the password"
pwd.addUser('bob', 'x', 1, 2, 'bar', '/bar', '/bin/sh')
spwd = ShadowDatabase()
spwd.addUser('alice', 'wrong', 1, 2, 3, 4, 5, 6, 7)
spwd.addUser('bob', crypted('bob', 'password'),
8, 9, 10, 11, 12, 13, 14)
self.patch(checkers, 'pwd', pwd)
self.patch(checkers, 'spwd', spwd)
mockos = MockOS()
self.patch(util, 'os', mockos)
mockos.euid = 2345
mockos.egid = 1234
cred = UsernamePassword("alice", "password")
self.assertLoggedIn(checker.requestAvatarId(cred), 'alice')
self.assertEqual(mockos.seteuidCalls, [])
self.assertEqual(mockos.setegidCalls, [])
cred.username = "bob"
self.assertLoggedIn(checker.requestAvatarId(cred), 'bob')
self.assertEqual(mockos.seteuidCalls, [0, 2345])
self.assertEqual(mockos.setegidCalls, [0, 1234])
def assertUnauthorizedLogin(self, d):
"""
Asserts that the L{Deferred} passed in is erred back with an
        L{UnauthorizedLogin} L{Failure}. This represents an invalid login for
this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{None}
"""
self.assertRaises(
checkers.UnauthorizedLogin, self.assertLoggedIn, d, 'bogus value')
def test_passInCheckers(self):
"""
L{UNIXPasswordDatabase} takes a list of functions to check for UNIX
user information.
"""
password = crypt.crypt('secret', 'secret')
userdb = UserDatabase()
userdb.addUser('anybody', password, 1, 2, 'foo', '/bar', '/bin/sh')
checker = checkers.UNIXPasswordDatabase([userdb.getpwnam])
self.assertLoggedIn(
checker.requestAvatarId(UsernamePassword('anybody', 'secret')),
'anybody')
def test_verifyPassword(self):
"""
If the encrypted password provided by the getpwnam function is valid
(verified by the L{verifyCryptedPassword} function), we callback the
C{requestAvatarId} L{Deferred} with the username.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertLoggedIn(checker.requestAvatarId(credential), 'username')
def test_failOnKeyError(self):
"""
If the getpwnam function raises a KeyError, the login fails with an
L{UnauthorizedLogin} exception.
"""
def getpwnam(username):
raise KeyError(username)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_failOnBadPassword(self):
"""
If the verifyCryptedPassword function doesn't verify the password, the
login fails with an L{UnauthorizedLogin} exception.
"""
def verifyCryptedPassword(crypted, pw):
return False
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_loopThroughFunctions(self):
"""
UNIXPasswordDatabase.requestAvatarId loops through each getpwnam
function associated with it and returns a L{Deferred} which fires with
the result of the first one which returns a value other than None.
        Later functions are tried when earlier ones do not verify the
        password.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam1(username):
return [username, 'not the password']
def getpwnam2(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam1, getpwnam2])
credential = UsernamePassword('username', 'username')
self.assertLoggedIn(checker.requestAvatarId(credential), 'username')
def test_failOnSpecial(self):
"""
If the password returned by any function is C{""}, C{"x"}, or C{"*"} it
is not compared against the supplied password. Instead it is skipped.
"""
pwd = UserDatabase()
pwd.addUser('alice', '', 1, 2, '', 'foo', 'bar')
pwd.addUser('bob', 'x', 1, 2, '', 'foo', 'bar')
pwd.addUser('carol', '*', 1, 2, '', 'foo', 'bar')
self.patch(checkers, 'pwd', pwd)
checker = checkers.UNIXPasswordDatabase([checkers._pwdGetByName])
cred = UsernamePassword('alice', '')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword('bob', 'x')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword('carol', '*')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
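# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): how the checker exercised
# by L{UNIXPasswordDatabaseTests.test_passInCheckers} might be assembled in
# application code. The user name and password are placeholders, and the
# C{crypt} module is assumed to be importable.
if __name__ == '__main__':
    exampleUserdb = UserDatabase()
    exampleUserdb.addUser(
        'demo', crypt.crypt('secret', 'aa'), 1000, 1000, 'Demo User',
        '/home/demo', '/bin/sh')
    exampleChecker = checkers.UNIXPasswordDatabase([exampleUserdb.getpwnam])
    exampleDeferred = exampleChecker.requestAvatarId(
        UsernamePassword('demo', 'secret'))
    def _printAvatar(avatarId):
        print 'Authenticated as %s' % (avatarId,)
    exampleDeferred.addCallback(_printAvatar)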
|
{
"content_hash": "a6596d9d601b752977b9c785d2f34030",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 80,
"avg_line_length": 36.68666666666667,
"alnum_prop": 0.6388787933854261,
"repo_name": "candy7393/VTK",
"id": "98e4e6f876fb55098fa979d006a7a41aa9cd3df4",
"size": "22085",
"binary": false,
"copies": "27",
"ref": "refs/heads/master",
"path": "ThirdParty/Twisted/twisted/conch/test/test_checkers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "45726018"
},
{
"name": "C++",
"bytes": "69182935"
},
{
"name": "CMake",
"bytes": "1676815"
},
{
"name": "CSS",
"bytes": "50625"
},
{
"name": "Cuda",
"bytes": "29062"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "GLSL",
"bytes": "205024"
},
{
"name": "HTML",
"bytes": "292104"
},
{
"name": "Java",
"bytes": "147449"
},
{
"name": "JavaScript",
"bytes": "1130278"
},
{
"name": "Lex",
"bytes": "45258"
},
{
"name": "M4",
"bytes": "121356"
},
{
"name": "Makefile",
"bytes": "253851"
},
{
"name": "Objective-C",
"bytes": "23327"
},
{
"name": "Objective-C++",
"bytes": "191806"
},
{
"name": "Perl",
"bytes": "173168"
},
{
"name": "Python",
"bytes": "15675703"
},
{
"name": "Roff",
"bytes": "65394"
},
{
"name": "Shell",
"bytes": "72670"
},
{
"name": "Slash",
"bytes": "1476"
},
{
"name": "Smarty",
"bytes": "1325"
},
{
"name": "Tcl",
"bytes": "1406798"
},
{
"name": "Yacc",
"bytes": "174481"
}
],
"symlink_target": ""
}
|
import httpretty
import logging
import requests
import json
class _NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger(__name__)
logger.addHandler(_NullHandler())
class ZabbixAPIException(Exception):
""" generic zabbix api exception
code list:
-32602 - Invalid params (eg already exists)
-32500 - no permissions
"""
pass
class ZabbixAPI(object):
def __init__(self,
server='http://localhost/zabbix',
session=None,
use_authenticate=False,
timeout=None):
"""
Parameters:
server: Base URI for zabbix web interface (omitting /api_jsonrpc.php)
session: optional pre-configured requests.Session instance
use_authenticate: Use old (Zabbix 1.8) style authentication
timeout: optional connect and read timeout in seconds, default: None (if you're using Requests >= 2.4 you can set it as tuple: "(connect, read)" which is used to set individual connect and read timeouts.)
"""
if session:
self.session = session
else:
self.session = requests.Session()
# Default headers for all requests
self.session.headers.update({
'Content-Type': 'application/json-rpc',
'User-Agent': 'python/pyzabbix'
})
self.use_authenticate = use_authenticate
self.auth = ''
self.id = 0
self.timeout = timeout
self.url = server + '/api_jsonrpc.php'
logger.info("JSON-RPC Server Endpoint: %s", self.url)
def login(self, user='', password=''):
"""Convenience method for calling user.authenticate and storing the resulting auth token
for further commands.
If use_authenticate is set, it uses the older (Zabbix 1.8) authentication command"""
# If we have an invalid auth token, we are not allowed to send a login
# request. Clear it before trying.
self.auth = ''
if self.use_authenticate:
self.auth = self.user.authenticate(user=user, password=password)
else:
self.auth = self.user.login(user=user, password=password)
def confimport(self, format='', source='', rules=''):
"""Alias for configuration.import because it clashes with
Python's import reserved keyword"""
return self.do_request(
method="configuration.import",
params={"format": format, "source": source, "rules": rules}
)['result']
def api_version(self):
return self.apiinfo.version()
def do_request(self, method, params=None):
request_json = {
'jsonrpc': '2.0',
'method': method,
'params': params or {},
'id': self.id,
}
# We don't have to pass the auth token if asking for the apiinfo.version
if self.auth and method != 'apiinfo.version':
request_json['auth'] = self.auth
logger.debug("Sending: %s", json.dumps(request_json,
indent=4,
separators=(',', ': ')))
response = self.session.post(
self.url,
data=json.dumps(request_json),
timeout=self.timeout
)
logger.debug("Response Code: %s", str(response.status_code))
# NOTE: Getting a 412 response code means the headers are not in the
# list of allowed headers.
response.raise_for_status()
if not len(response.text):
raise ZabbixAPIException("Received empty response")
try:
response_json = json.loads(response.text)
except ValueError:
raise ZabbixAPIException(
"Unable to parse json: %s" % response.text
)
logger.debug("Response Body: %s", json.dumps(response_json,
indent=4,
separators=(',', ': ')))
self.id += 1
if 'error' in response_json: # some exception
msg = "Error {code}: {message}, {data} while sending {json}".format(
code=response_json['error']['code'],
message=response_json['error']['message'],
data=response_json['error']['data'],
json=str(request_json)
)
raise ZabbixAPIException(msg, response_json['error']['code'])
return response_json
def __getattr__(self, attr):
"""Dynamically create an object class (ie: host)"""
return ZabbixAPIObjectClass(attr, self)
class ZabbixAPIObjectClass(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __getattr__(self, attr):
"""Dynamically create a method (ie: get)"""
def fn(*args, **kwargs):
if args and kwargs:
raise TypeError("Found both args and kwargs")
return self.parent.do_request(
'{0}.{1}'.format(self.name, attr),
args or kwargs
)['result']
return fn
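# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library). The server URL and
# credentials are placeholders; ``zapi.host.get`` is resolved at runtime by
# ZabbixAPIObjectClass into a "host.get" JSON-RPC call, as described above.
if __name__ == "__main__":
    zapi = ZabbixAPI(server="http://localhost/zabbix", timeout=5)
    zapi.login(user="Admin", password="zabbix")
    # Equivalent to zapi.do_request("host.get", {"output": "extend"})["result"]
    for host in zapi.host.get(output="extend"):
        print(host["host"])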
|
{
"content_hash": "6ec6b4d01abce7432fd20a67d5d6681f",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 216,
"avg_line_length": 34.21518987341772,
"alnum_prop": 0.5392156862745098,
"repo_name": "ThinkboxSoftware/Deadline",
"id": "45dcfa8a0b2260d53a1683dc4897de56f52159ff",
"size": "5406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Custom/events/Zabbix/API/pyzabbix/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "698"
},
{
"name": "Dockerfile",
"bytes": "3978"
},
{
"name": "Python",
"bytes": "1011982"
},
{
"name": "Ruby",
"bytes": "6570"
},
{
"name": "Shell",
"bytes": "7174"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from tqdm import tqdm
useful_columns = ['genres', 'keywords', 'vote_average']
Movies = pd.read_csv("../Datasets/Transformed.csv", usecols=useful_columns)
#Constant definition
#Cost added if the first genre is similar between two films
first_genre = 5
#Cost added if the secondary genre is similar between two films
second_genre = 1
#Cost added by similar keyword identical between two films
keyword_cost = 1
def get_genres(film):
genres = str(film['genres'])
if genres == 'nan':
return[]
else:
genres = genres.split(",")
return genres
def get_keywords(film):
kw = str(film['keywords'])
if kw == 'nan':
return[]
else:
kw = kw.split(",")
return kw
"""Define the cost between the film given in index and the others one."""
costs = np.zeros([Movies.shape[0],Movies.shape[0]])
Movies = Movies.loc[Movies['vote_average'] > 0]
for i in tqdm(range(0,Movies.shape[0])):
current_film = Movies.iloc[i]
genres_current = get_genres(current_film)
kw_current = get_keywords(current_film)
vote_current = current_film['vote_average']
for j in range(i,Movies.shape[0]):
cost = 0
b_film = Movies.iloc[j]
genres_b = get_genres(b_film)
vote_b = b_film['vote_average']
        #First we only compare the first genre to determine similarity because it is more important than the other genres.
        if len(genres_current) > 0 and len(genres_b) > 0:
            if genres_current[0] == genres_b[0]:
                cost += first_genre
            #This gives us the number of similar secondary genres. We pop the first one because we already compared it above.
            genres_b.pop(0)
            cost += np.sum(np.in1d(genres_current, genres_b, assume_unique=True)) * second_genre
kw_b = get_keywords(b_film)
        #This gives us the number of similar keywords.
        cost += np.sum(np.in1d(kw_current, kw_b, assume_unique=True)) * keyword_cost
        #impossible here because we would ignore too much popularity
#cost = (cost * popularity_b/100) / (popularity_current/100)
if vote_current == 0:
costs[i,j] = cost
else:
costs[i,j] = cost + vote_b / vote_current
if vote_b == 0:
costs[j,i] = cost
else:
costs[j,i] = cost + vote_current / vote_b
np.savez_compressed("../Datasets/costs_2.npz", costs, costs = costs)
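# ---------------------------------------------------------------------------
# Worked example (illustration only, independent of the loop above): with the
# constants defined at the top, two hypothetical films sharing their primary
# genre, one secondary genre and one keyword accumulate 5 + 1 + 1 = 7, plus
# the vote-average ratio 3.5 / 7.0 = 0.5, giving a cost of 7.5.
_film_a = {'genres': 'Action,Thriller', 'keywords': 'heist,bank', 'vote_average': 7.0}
_film_b = {'genres': 'Action,Thriller,Crime', 'keywords': 'heist', 'vote_average': 3.5}
_genres_a, _genres_b = get_genres(_film_a), get_genres(_film_b)
_example_cost = first_genre if _genres_a[0] == _genres_b[0] else 0
_genres_b.pop(0)
_example_cost += np.sum(np.in1d(_genres_a, _genres_b, assume_unique=True)) * second_genre
_example_cost += np.sum(np.in1d(get_keywords(_film_a), get_keywords(_film_b),
                                assume_unique=True)) * keyword_cost
_example_cost += _film_b['vote_average'] / _film_a['vote_average']
print("example cost: %s" % _example_cost)  # -> 7.5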
|
{
"content_hash": "0be606ce9011df50d9190abd8759fac7",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 123,
"avg_line_length": 33.013333333333335,
"alnum_prop": 0.6118739903069467,
"repo_name": "mdeff/ntds_2017",
"id": "53e3df2a9da9e5a00b2d500a292b36c6cd1b90fa",
"size": "2476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/reports/movie_network/python/costs_function.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6481"
},
{
"name": "HTML",
"bytes": "25493"
},
{
"name": "JavaScript",
"bytes": "30452"
},
{
"name": "Jupyter Notebook",
"bytes": "196798661"
},
{
"name": "Makefile",
"bytes": "947"
},
{
"name": "Python",
"bytes": "447355"
},
{
"name": "TeX",
"bytes": "22767"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Credit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.FloatField()),
('datecreated', models.DateField()),
('particulars', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Debit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.FloatField()),
('datecreated', models.DateField()),
('particulars', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='ExpenditureCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Expenses',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amountSpent', models.FloatField()),
('dateSpent', models.DateField()),
('category', models.CharField(max_length=50)),
('detailSpent', models.TextField(blank=True, null=True)),
('monthSpent', models.IntegerField()),
('yearSpent', models.IntegerField()),
('issuerSpent', models.CharField(default=b'Null', max_length=50)),
],
),
migrations.CreateModel(
name='IncomeCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category_name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Records',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.CharField(max_length=50)),
('datecreated', models.DateField()),
('amount', models.FloatField()),
('paid', models.FloatField()),
('unitprice', models.FloatField()),
('quantity', models.IntegerField()),
('balance', models.FloatField()),
('description', models.TextField(blank=True, null=True)),
('customertype', models.CharField(choices=[(b'Individual', b'Individual'), (b'Group', b'Group'), (b'Company', b'Company')], max_length=40)),
('customername', models.CharField(max_length=50)),
('customeraddress', models.CharField(blank=True, max_length=50, null=True)),
('customerphone', models.IntegerField(blank=True, null=True)),
('customeremail', models.EmailField(blank=True, max_length=254, null=True)),
('month', models.IntegerField()),
('year', models.IntegerField()),
('issuer', models.CharField(default=b'Null', max_length=50)),
],
),
]
|
{
"content_hash": "2ae1a15d530edf4ff9854a38753e8bfd",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 156,
"avg_line_length": 43.98765432098765,
"alnum_prop": 0.5335391523996632,
"repo_name": "Ashaba/rms",
"id": "9a04171d0d3f3ea509a91adfeed98848f18d8eb7",
"size": "3635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bmanager/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "650487"
},
{
"name": "HTML",
"bytes": "2109946"
},
{
"name": "JavaScript",
"bytes": "3041523"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "10446887"
},
{
"name": "Shell",
"bytes": "3332"
}
],
"symlink_target": ""
}
|
"""TFX Evaluator component definition."""
from typing import List, Optional, Union
from absl import logging
import tensorflow_model_analysis as tfma
from tfx import types
from tfx.components.evaluator import executor
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_beam_component
from tfx.dsl.components.base import executor_spec
from tfx.orchestration import data_types
from tfx.proto import evaluator_pb2
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import json_utils
class Evaluator(base_beam_component.BaseBeamComponent):
"""A TFX component to evaluate models trained by a TFX Trainer component.
Component `outputs` contains:
  - `evaluation`: Channel of type `standard_artifacts.ModelEvaluation` to
    store the evaluation results.
  - `blessing`: Channel of type `standard_artifacts.ModelBlessing` that
    contains the blessing result.
See [the Evaluator guide](https://www.tensorflow.org/tfx/guide/evaluator) for
more details.
"""
SPEC_CLASS = standard_component_specs.EvaluatorSpec
EXECUTOR_SPEC = executor_spec.BeamExecutorSpec(executor.Executor)
def __init__(
self,
examples: types.BaseChannel,
model: Optional[types.BaseChannel] = None,
baseline_model: Optional[types.BaseChannel] = None,
# TODO(b/148618405): deprecate feature_slicing_spec.
feature_slicing_spec: Optional[Union[evaluator_pb2.FeatureSlicingSpec,
data_types.RuntimeParameter]] = None,
fairness_indicator_thresholds: Optional[Union[
List[float], data_types.RuntimeParameter]] = None,
example_splits: Optional[List[str]] = None,
eval_config: Optional[tfma.EvalConfig] = None,
schema: Optional[types.BaseChannel] = None,
module_file: Optional[str] = None,
module_path: Optional[str] = None):
"""Construct an Evaluator component.
Args:
examples: A BaseChannel of type `standard_artifacts.Examples`, usually
produced by an ExampleGen component. _required_
model: A BaseChannel of type `standard_artifacts.Model`, usually produced
by a Trainer component.
baseline_model: An optional channel of type 'standard_artifacts.Model' as
the baseline model for model diff and model validation purpose.
feature_slicing_spec: Deprecated, please use eval_config instead. Only
support estimator.
[evaluator_pb2.FeatureSlicingSpec](https://github.com/tensorflow/tfx/blob/master/tfx/proto/evaluator.proto)
instance that describes how Evaluator should slice the data.
fairness_indicator_thresholds: Optional list of float (or
RuntimeParameter) threshold values for use with TFMA fairness
indicators. Experimental functionality: this interface and
functionality may change at any time. TODO(b/142653905): add a link
to additional documentation for TFMA fairness indicators here.
example_splits: Names of splits on which the metrics are computed.
Default behavior (when example_splits is set to None or Empty) is using
the 'eval' split.
      eval_config: Instance of tfma.EvalConfig containing configuration settings
for running the evaluation. This config has options for both estimator
and Keras.
schema: A `Schema` channel to use for TFXIO.
module_file: A path to python module file containing UDFs for Evaluator
customization. This functionality is experimental and may change at any
time. The module_file can implement following functions at its top
level.
def custom_eval_shared_model(
eval_saved_model_path, model_name, eval_config, **kwargs,
) -> tfma.EvalSharedModel:
def custom_extractors(
eval_shared_model, eval_config, tensor_adapter_config,
) -> List[tfma.extractors.Extractor]:
module_path: A python path to the custom module that contains the UDFs.
See 'module_file' for the required signature of UDFs. This functionality
is experimental and this API may change at any time. Note this can not
be set together with module_file.
"""
if bool(module_file) and bool(module_path):
raise ValueError(
'Python module path can not be set together with module file path.')
if eval_config is not None and feature_slicing_spec is not None:
raise ValueError("Exactly one of 'eval_config' or 'feature_slicing_spec' "
'must be supplied.')
if eval_config is None and feature_slicing_spec is None:
feature_slicing_spec = evaluator_pb2.FeatureSlicingSpec()
logging.info('Neither eval_config nor feature_slicing_spec is passed, '
'the model is treated as estimator.')
if feature_slicing_spec:
logging.warning('feature_slicing_spec is deprecated, please use '
'eval_config instead.')
blessing = types.Channel(type=standard_artifacts.ModelBlessing)
evaluation = types.Channel(type=standard_artifacts.ModelEvaluation)
spec = standard_component_specs.EvaluatorSpec(
examples=examples,
model=model,
baseline_model=baseline_model,
feature_slicing_spec=feature_slicing_spec,
fairness_indicator_thresholds=(
fairness_indicator_thresholds if isinstance(
fairness_indicator_thresholds, data_types.RuntimeParameter) else
json_utils.dumps(fairness_indicator_thresholds)),
example_splits=json_utils.dumps(example_splits),
evaluation=evaluation,
eval_config=eval_config,
blessing=blessing,
schema=schema,
module_file=module_file,
module_path=module_path)
super().__init__(spec=spec)
if udf_utils.should_package_user_modules():
# In this case, the `MODULE_PATH_KEY` execution property will be injected
# as a reference to the given user module file after packaging, at which
# point the `MODULE_FILE_KEY` execution property will be removed.
udf_utils.add_user_module_dependency(
self, standard_component_specs.MODULE_FILE_KEY,
standard_component_specs.MODULE_PATH_KEY)
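# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the component definition): a minimal
# Keras-style tfma.EvalConfig as described in the docstring above. The
# ``example_gen`` and ``trainer`` components referenced in the commented
# lines are hypothetical upstream components of a pipeline.
if __name__ == '__main__':
  example_eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='label')],
      slicing_specs=[tfma.SlicingSpec()],
      metrics_specs=[
          tfma.MetricsSpec(
              metrics=[tfma.MetricConfig(class_name='ExampleCount')])
      ])
  # evaluator = Evaluator(
  #     examples=example_gen.outputs['examples'],
  #     model=trainer.outputs['model'],
  #     eval_config=example_eval_config)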
|
{
"content_hash": "70a800525d84dcd279d9d95c0dffd77c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 115,
"avg_line_length": 47.37593984962406,
"alnum_prop": 0.6984605618155848,
"repo_name": "tensorflow/tfx",
"id": "191ce7ac27419840c71b7f457574d83de20ca248",
"size": "6897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/components/evaluator/component.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
}
|
from impacket import smbserver, smb
import ntpath
from threading import RLock
import json
from quickcrack import try_to_crack_hash
"""
This script acts as an SMB server and gathers credentials from connecting users.
Developed by Brian Wallace @botnet_hutner
"""
sessions = {}
output_file_lock = RLock()
def report_authentication_attempt(connId, auth_details):
global output_file_lock
sessions[connId] = {"authentication": auth_details, "shares": []}
with output_file_lock:
with open("credentials.txt", "a") as f:
f.write(json.dumps(auth_details) + "\n")
if "UnicodePwd" in auth_details and auth_details['UnicodePwd'] != "":
print "{0}: {1}".format(auth_details['client_ip'], auth_details['UnicodePwd'])
password = try_to_crack_hash(auth_details['UnicodePwd'])
if password is not None:
print "{0}: {1}::{2} has password '{3}'".format(auth_details['client_ip'], auth_details["PrimaryDomain"], auth_details['Account'], password)
if "AnsiPwd" in auth_details and auth_details['AnsiPwd'] != "":
print "{0}: {1}".format(auth_details['client_ip'], auth_details['AnsiPwd'])
password = try_to_crack_hash(auth_details['AnsiPwd'])
if password is not None:
print "{0}: {1}::{2} has password '{3}'".format(auth_details['client_ip'], auth_details["PrimaryDomain"], auth_details['Account'], password)
def report_tree_connect_attempt(connId, connect_details):
session = sessions[connId]
if "client_ip" in session:
print "{2}: {0} accessed {1}".format(session['client_ip'], connect_details['Path'], connId)
session['shares'].append(connect_details)
sessions[connId] = session
def smbCommandHook_SMB_COM_SESSION_SETUP_ANDX(connId, smbServer, SMBCommand, recvPacket):
# Accept any authentication except for empty authentication
supplied_creds = False
# The following is impacket code modified to extract credentials
connData = smbServer.getConnectionData(connId, checkStatus=False)
respSMBCommand = smb.SMBCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX)
# Process Standard Security
respParameters = smb.SMBSessionSetupAndXResponse_Parameters()
respData = smb.SMBSessionSetupAndXResponse_Data()
sessionSetupParameters = smb.SMBSessionSetupAndX_Parameters(SMBCommand['Parameters'])
sessionSetupData = smb.SMBSessionSetupAndX_Data(flags=recvPacket['Flags2'])
sessionSetupData['AnsiPwdLength'] = sessionSetupParameters['AnsiPwdLength']
sessionSetupData['UnicodePwdLength'] = sessionSetupParameters['UnicodePwdLength']
sessionSetupData.fromString(SMBCommand['Data'])
connData['Capabilities'] = sessionSetupParameters['Capabilities']
# Let's get those credentials
to_extract_from_session_setup_data = [
"Account",
"AnsiPwd",
"NativeLanMan",
"UnicodePwd",
"NativeOS",
"PrimaryDomain",
]
extracted_data = {}
for key in (i for i in to_extract_from_session_setup_data if i in sessionSetupData.__dict__['fields']):
extracted_data[key] = sessionSetupData[key]
if 'AnsiPwd' in extracted_data:
if len([i for i in extracted_data['AnsiPwd'] if i != "\x00"]) == 0:
# It's null, we should just remove it
extracted_data['AnsiPwd'] = ""
elif len(extracted_data['AnsiPwd']) == 24:
if 'UnicodePwd' in extracted_data and extracted_data['AnsiPwd'] == extracted_data['UnicodePwd']:
# Hash has been duplicated across fields, likely NTLM, not LM
extracted_data['AnsiPwd'] = ""
else:
extracted_data['AnsiPwd'] = extracted_data['AnsiPwd'].encode("hex") # long live Python 2.7
extracted_data['AnsiPwd'] = "{1}:$NETLM$1122334455667788${0}".format(extracted_data['AnsiPwd'], extracted_data['Account'] if 'Account' in extracted_data else "")
supplied_creds = True
else:
# its plaintext? lol
supplied_creds = True
pass
if 'UnicodePwd' in extracted_data:
if len(extracted_data['UnicodePwd']) >= 56:
# NTLMv2
hmac = extracted_data['UnicodePwd'][0:16].encode("hex")
rest = extracted_data['UnicodePwd'][16:].encode("hex")
extracted_data['UnicodePwd'] = "{0}::{1}:1122334455667788:{2}:{3}".format(extracted_data['Account'] if 'Account' in extracted_data else "", extracted_data['PrimaryDomain'] if 'PrimaryDomain' in extracted_data else "", hmac, rest)
supplied_creds = True
elif len(extracted_data['UnicodePwd']) == 24:
# NTLMv1?
extracted_data['UnicodePwd'] = extracted_data['UnicodePwd'].encode("hex")
extracted_data['UnicodePwd'] = "{1}:$NETNTLM$1122334455667788${0}".format(extracted_data['UnicodePwd'], extracted_data['Account'] if 'Account' in extracted_data else "")
supplied_creds = True
conn_data = smbServer.getConnectionData(connId, False)
extracted_data['client_ip'] = conn_data['ClientIP']
report_authentication_attempt(connId, extracted_data)
errorCode = smbserver.STATUS_SUCCESS if supplied_creds else smbserver.STATUS_LOGON_FAILURE
connData['Uid'] = 10
respParameters['Action'] = 0
smbServer.log('User %s\\%s authenticated successfully (basic)' % (sessionSetupData['PrimaryDomain'], sessionSetupData['Account']))
respData['NativeOS'] = smbserver.encodeSMBString(recvPacket['Flags2'], smbServer.getServerOS())
respData['NativeLanMan'] = smbserver.encodeSMBString(recvPacket['Flags2'], smbServer.getServerOS())
respSMBCommand['Parameters'] = respParameters
respSMBCommand['Data'] = respData
connData['Authenticated'] = supplied_creds
smbServer.setConnectionData(connId, connData)
return [respSMBCommand], None, errorCode
def smbCommandHook_SMB_COM_NEGOTIATE(connId, smbServer, SMBCommand, recvPacket):
if recvPacket['Flags2'] & smb.SMB.FLAGS2_EXTENDED_SECURITY:
recvPacket['Flags2'] -= smb.SMB.FLAGS2_EXTENDED_SECURITY
return smbserver.SMBCommands.smbComNegotiate(smbserver.SMBCommands(), connId, smbServer, SMBCommand, recvPacket)
def smbCommandHook_SMB_COM_TREE_CONNECT_ANDX(connId, smbServer, SMBCommand, recvPacket):
treeConnectAndXParameters = smb.SMBTreeConnectAndX_Parameters(SMBCommand['Parameters'])
treeConnectAndXData = smb.SMBTreeConnectAndX_Data(flags=recvPacket['Flags2'])
treeConnectAndXData['_PasswordLength'] = treeConnectAndXParameters['PasswordLength']
treeConnectAndXData.fromString(SMBCommand['Data'])
path = smbserver.decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Path'])
local_path = ntpath.basename(path)
service = smbserver.decodeSMBString(recvPacket['Flags2'], treeConnectAndXData['Service'])
report_tree_connect_attempt(connId, {"Path": path, "local_path": local_path, "Service": service})
return smbserver.SMBCommands.smbComTreeConnectAndX(smbserver.SMBCommands(), connId, smbServer, SMBCommand, recvPacket)
# Overriding this allows us to claim we have no shares, so we still get ANDX data, but don't need to share anything
def override_searchShare(connId, share, smbServer):
return None
smbserver.searchShare = override_searchShare
if __name__ == "__main__":
server = smbserver.SMBSERVER(('0.0.0.0', 445))
# todo Look into manually setting configuration to not rely on a configuration file
server.processConfigFile("smb.conf")
server.registerNamedPipe('srvsvc', ('0.0.0.0', 4344))
# Auth and information gathering hooks
# Hook session setup to grab the credentials and deny any empty authentication requests
server.hookSmbCommand(smb.SMB.SMB_COM_SESSION_SETUP_ANDX, smbCommandHook_SMB_COM_SESSION_SETUP_ANDX)
# Hook the negotiate call to disable SPNEGO
server.hookSmbCommand(smb.SMB.SMB_COM_NEGOTIATE, smbCommandHook_SMB_COM_NEGOTIATE)
# Hook tree connect
server.hookSmbCommand(smb.SMB.SMB_COM_TREE_CONNECT_ANDX, smbCommandHook_SMB_COM_TREE_CONNECT_ANDX)
server.serve_forever()
|
{
"content_hash": "136b3c3bc7d9179cf420113781852d51",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 241,
"avg_line_length": 48.55421686746988,
"alnum_prop": 0.6931761786600497,
"repo_name": "jorik041/SMBTrap",
"id": "ceabc910e288d09d3670b09e5fcaf39568e7c601",
"size": "8060",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "smbtrap/smbtrap2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14131"
}
],
"symlink_target": ""
}
|
"""CGI test 3 (persistent data)."""
import cgitb; cgitb.enable()
from wiki import main
if __name__ == "__main__":
main()
|
{
"content_hash": "12bca6ad7a46e1602ab5582e95873b37",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 35,
"avg_line_length": 15.333333333333334,
"alnum_prop": 0.5507246376811594,
"repo_name": "MattDevo/edk2",
"id": "224fc83fefe790b3ca3a00dcaa28f475b664e239",
"size": "161",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "AppPkg/Applications/Python/Python-2.7.2/Demo/cgi/cgi3.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "4545237"
},
{
"name": "Batchfile",
"bytes": "93042"
},
{
"name": "C",
"bytes": "94289702"
},
{
"name": "C++",
"bytes": "20170310"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "GAP",
"bytes": "698245"
},
{
"name": "GDB",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "472114"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "231845"
},
{
"name": "NSIS",
"bytes": "2229"
},
{
"name": "Objective-C",
"bytes": "4147834"
},
{
"name": "PHP",
"bytes": "674"
},
{
"name": "PLSQL",
"bytes": "24782"
},
{
"name": "Perl",
"bytes": "6218"
},
{
"name": "Python",
"bytes": "27130096"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Roff",
"bytes": "28192"
},
{
"name": "Shell",
"bytes": "104362"
},
{
"name": "SourcePawn",
"bytes": "29427"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
"""\
This modules provides all non-visualization tools for advanced gene ranking and exploration of genes
"""
from typing import Optional, Collection
import pandas as pd
from anndata import AnnData
from sklearn import metrics
from scipy.sparse import issparse
from .. import logging as logg
from .._utils import select_groups
from .._compat import Literal
def correlation_matrix(
adata: AnnData,
name_list: Optional[Collection[str]] = None,
groupby: Optional[str] = None,
group: Optional[int] = None,
n_genes: int = 20,
data: Literal['Complete', 'Group', 'Rest'] = 'Complete',
method: Literal['pearson', 'kendall', 'spearman'] = 'pearson',
annotation_key: Optional[str] = None,
) -> None:
"""\
Calculate correlation matrix.
Calculate a correlation matrix for genes stored in sample annotation
using :func:`~scanpy.tl.rank_genes_groups`.
Parameters
----------
adata
Annotated data matrix.
name_list
Takes a list of genes for which to calculate the correlation matrix
groupby
If no name list is passed, genes are selected from the
results of rank_gene_groups. Then this is the key of the sample grouping to consider.
Note that in this case a group index also has to be specified.
group
Group index for which the correlation matrix for top_ranked genes should be calculated.
Currently only int is supported; this will change soon.
n_genes
For how many genes to calculate correlation matrix? If specified, cuts the name list
(in whatever order it is passed).
data
At the moment, this is only relevant for the case that name_list is drawn from rank_gene_groups results.
If specified, collects mask for the called group and then takes only those cells specified.
If 'Complete', calculate correlation using full data
If 'Group', calculate correlation within the selected group.
If 'Rest', calculate correlation for everything except the group
method
Which kind of correlation coefficient to use
pearson
standard correlation coefficient
kendall
Kendall Tau correlation coefficient
spearman
Spearman rank correlation
annotation_key
Allows defining the name of the anndata entry where results are stored.
"""
# TODO: At the moment, only works for int identifiers
# If no genes are passed, selects ranked genes from sample annotation.
# At the moment, only calculate one table (Think about what comes next)
if name_list is None:
name_list = list()
for j, k in enumerate(adata.uns['rank_genes_groups_gene_names']):
if j >= n_genes:
break
name_list.append(adata.uns['rank_genes_groups_gene_names'][j][group])
else:
if len(name_list) > n_genes:
name_list = name_list[0:n_genes]
# If a special method is used (later), truncate
adata_relevant = adata[:, name_list]
# This line just makes group_mask access easier. Nothing other than 'all' will be used here.
groups = 'all'
if data == 'Complete' or groupby is None:
if issparse(adata_relevant.X):
Data_array = adata_relevant.X.todense()
else:
Data_array = adata_relevant.X
else:
# get group_mask
groups_order, groups_masks = select_groups(adata, groups, groupby)
if data == 'Group':
if issparse(adata_relevant.X):
Data_array = adata_relevant.X[groups_masks[group], :].todense()
else:
Data_array = adata_relevant.X[groups_masks[group], :]
elif data == 'Rest':
if issparse(adata_relevant.X):
Data_array = adata_relevant.X[~groups_masks[group], :].todense()
else:
Data_array = adata_relevant.X[~groups_masks[group], :]
else:
logg.error('data argument should be either <Complete> or <Group> or <Rest>')
# Distinguish between sparse and non-sparse data
DF_array = pd.DataFrame(Data_array, columns=name_list)
cor_table = DF_array.corr(method=method)
if annotation_key is None:
if groupby is None:
adata.uns['Correlation_matrix'] = cor_table
else:
adata.uns['Correlation_matrix' + groupby + str(group)] = cor_table
else:
adata.uns[annotation_key] = cor_table
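# --- Illustrative usage (not part of the original module) ---------------------
# A minimal sketch of how correlation_matrix might be called, assuming
# rank_genes_groups has already been run with groupby='louvain'; the group
# index 0 and the key 'my_corr' are hypothetical choices for this example.
#
#     correlation_matrix(adata, groupby='louvain', group=0, n_genes=10,
#                        data='Group', method='spearman',
#                        annotation_key='my_corr')
#     adata.uns['my_corr']  # pandas DataFrame of pairwise gene correlations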
def ROC_AUC_analysis(
adata: AnnData,
groupby: str,
group: Optional[str] = None,
n_genes: int = 100,
):
"""\
Calculate ROC curves and AUC values.
Calculate ROC/AUC for the top-ranked genes stored in sample annotation
Parameters
----------
adata
Annotated data matrix.
groupby
The key of the sample grouping to consider.
group
Group name or index for which the correlation matrix for top ranked
genes should be calculated.
If no parameter is passed, ROC/AUC is calculated for all groups
n_genes
For how many genes to calculate ROC and AUC. If no parameter is passed,
calculation is done for all stored top ranked genes.
"""
if group is None:
pass
# TODO: Loop over all groups instead of just taking one.
# Assume group takes an int value for one group for the moment.
name_list = list()
for j, k in enumerate(adata.uns['rank_genes_groups_gene_names']):
if j >= n_genes:
break
name_list.append(adata.uns['rank_genes_groups_gene_names'][j][group])
# TODO: For the moment, see that everything works for comparison against the rest. Resolve issues later.
groups = 'all'
groups_order, groups_masks = select_groups(adata, groups, groupby)
# Use usual convention, better for looping later.
mask = groups_masks[group]
# TODO: Allow for sample weighting requires better mask access... later
# We store the calculated data in dicts keyed by gene name; check if this is the best approach.
fpr = {}
tpr = {}
thresholds = {}
roc_auc = {}
y_true = mask
for i, j in enumerate(name_list):
vec = adata[:, [j]].X
if issparse(vec):
y_score = vec.todense()
else:
y_score = vec
(
fpr[name_list[i]],
tpr[name_list[i]],
thresholds[name_list[i]],
) = metrics.roc_curve(
y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=False
)
roc_auc[name_list[i]] = metrics.auc(fpr[name_list[i]], tpr[name_list[i]])
adata.uns['ROCfpr' + groupby + str(group)] = fpr
adata.uns['ROCtpr' + groupby + str(group)] = tpr
adata.uns['ROCthresholds' + groupby + str(group)] = thresholds
adata.uns['ROC_AUC' + groupby + str(group)] = roc_auc
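# --- Illustrative usage (not part of the original module) ---------------------
# A hypothetical call, assuming rank_genes_groups has populated
# adata.uns['rank_genes_groups_gene_names'] and that group is the integer
# index of the group of interest (see the TODO above on group handling).
#
#     ROC_AUC_analysis(adata, groupby='louvain', group=0, n_genes=50)
#     adata.uns['ROC_AUClouvain0']  # dict mapping gene name -> AUC value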
def subsampled_estimates(mask, mask_rest=None, precision=0.01, probability=0.99):
# Simple method that can be called by rank_gene_group. It uses masks that have been passed to the function and
# calculates how much has to be subsampled in order to reach a certain precision with a certain probability
# Then it subsamples for mask, mask rest
# Since convergence speed varies, we take the slower one, i.e. the variance. This might have future speed-up
# potential
if mask_rest is None:
mask_rest = ~mask
# TODO: DO precision calculation for mean variance shared
# TODO: Subsample
def dominated_ROC_elimination(adata, grouby):
# This tool has the purpose to take a set of genes (possibly already pre-selected) and analyze AUC.
# Only those genes that are completely dominated are eliminated
# TODO: Potentially (But not till tomorrow), this can be adapted to only consider the AUC in the given
# TODO: optimization frame
pass
def _gene_preselection(adata, mask, thresholds):
# This tool serves to pre-select genes that pass the given thresholds.
# It is not intended to be called directly but rather via rank_genes_groups, ROC analysis or comparable tools
# TODO: Pass back a truncated adata object containing only those genes that fulfill the thresholding criteria
# This function should be accessible by both rank_genes_groups and ROC_curve analysis
pass
|
{
"content_hash": "2f745d2b192d2029fd283b3fb36f49a2",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 114,
"avg_line_length": 37.903225806451616,
"alnum_prop": 0.6510638297872341,
"repo_name": "theislab/scanpy",
"id": "28a9390590bf0d30f15ecfc77bd57878dddd1821",
"size": "8248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scanpy/tools/_top_genes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1255713"
},
{
"name": "R",
"bytes": "2315"
}
],
"symlink_target": ""
}
|
'''
extract_fastqc = [
"Conventional base calls",
"Encoding",
"Total Sequences",
"Filtered Sequences",
"Sequence length",
"%GC"]
extract_alignment = [
"Number of left reads",
"Number of right reads",
"Paired reads with only one aligned pair",
"Paired reads with unique alignments",
"Paired reads with multiple alignments",
"Paired reads with only one unaligned pair",
"Paired reads which could not be aligned"]
extract_rnaseqc = [
"Sample",
"Note",
"End 2 Mapping Rate",
"Chimeric Pairs",
"Intragenic Rate",
"Num. Gaps",
"Mapping Rate",
"Exonic Rate",
"5' Norm",
"Genes Detected",
"Unique Rate of Mapped",
"Read Length",
"Mean Per Base Cov.",
"End 1 Mismatch Rate",
"Fragment Length StdDev",
"Estimated Library Size",
"Mapped",
"Intergenic Rate",
"rRNA",
"Total Purity Filtered Reads Sequenced",
"Failed Vendor QC Check",
"Mean CV",
"Transcripts Detected",
"Mapped Pairs",
"Cumul. Gap Length",
"Gap %",
"Unpaired Reads",
"Intronic Rate",
"Mapped Unique Rate of Total",
"Expression Profiling Efficiency",
"Mapped Unique",
"End 2 Mismatch Rate",
"End 2 Antisense",
"Alternative Aligments",
"End 2 Sense",
"Fragment Length Mean",
"End 1 Antisense",
"Base Mismatch Rate",
"End 1 Sense",
"End 1 % Sense",
"rRNA rate",
"End 1 Mapping Rate",
"No. Covered 5'",
"Duplication Rate of Mapped",
"End 2 % Sense"]
'''
from sys import argv
from sys import exit
from glob import glob
import os.path
import re
script, fastqc_dir, fastqc_post_trim_dir, alignment_stats_dir, rnaseqc_dir, \
qc_summary_file, paired_end = argv
extract_fastqc = ["Total Sequences"]
if paired_end == "paired":
extract_alignment = [
"Number of left reads",
"Number of right reads",
"Paired reads with unique alignments",
"Paired reads with multiple alignments",
"Paired reads which could not be aligned"]
else:
extract_alignment = [
"Number of reads",
"Reads with unique alignments",
"Reads with multiple alignments",
"Reads which could not be aligned"]
extract_rnaseqc = [
"Mapped",
"Mapping Rate",
"Mapped Pairs",
"Unpaired Reads",
"Intragenic Rate",
"Exonic Rate",
"Mapped Unique",
"Unique Rate of Mapped",
"Mapped Unique Rate of Total",
"Duplication Rate of Mapped"]
CSS = """<html>
<head><title>RNA-Seq QC Summary</title>
<style type="text/css">
table {
border-width: 1px;
border-spacing: 2px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
}
table td {
border-width: 2px;
padding: 4px;
border-style: solid;
border-color: gray;
}
</style>
</head>
"""
def parse_fastqc(filename):
file = open(filename)
dict = {}
for i in file.read().split("\n>>")[1:-1]:
if i != "END_MODULE":
lines = i.split("\n")
module_name, status = lines[0].split("\t")
dict[module_name] = lines
file.close()
return dict
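# Illustrative note (not part of the original script): parse_fastqc assumes the
# FastQC report layout, where each module starts with a ">>Module Name<TAB>status"
# header and ends with an ">>END_MODULE" line, with tab-separated "key<TAB>value"
# lines in between. dict["Basic Statistics"] therefore holds the raw lines of that
# module, which extract_info() later splits on tabs.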
def parse_alignment_stats(filename):
file = open(filename)
dict = {}
for line in file.read().strip().split("\n"):
metric = line.split("\t")
dict[metric[1]] = metric[0]
file.close()
return dict
def parse_rna_file(filename):
file = open(filename)
dict = {}
lines = file.read().split("\n")
metrics = lines[0].split("\t")
values = lines[1].split("\t")
for i in range(0,len(metrics)):
dict[metrics[i]] = values[i]
return dict
def extract_info(module, extract):
dict = {}
list = []
for i in module:
dict[i.split("\t")[0]] = i.split("\t")[1]
for i in extract:
try:
list.append(dict[i])
except:
list.append("-")
return list
def extract_alignment_stats(dict, extract):
list = []
for i in extract:
try:
list.append(dict[i])
except:
list.append("-")
return list
def table(output, basic_statistics, list):
output.write("<table>\n")
output.write('<tr>\n<td colspan=2></td>\n<td colspan="%d" ' \
'align="center">FastQC Pre-Trim</td>\n<td colspan="%d" ' \
'align="center">FastQC Post-Trim</td>\n<td colspan="%d" ' \
'align="center">TopHat Alignment Stats</td>\n<td ' \
'colspan="%d" align="center">RNA-SeQC</td>\n</tr>' % \
(len(extract_fastqc), len(extract_fastqc),
len(extract_alignment), len(extract_rnaseqc)))
output.write("<tr><td>Sample</td>\n<td>File</td>\n" + \
td(extract_fastqc, "left") * 2 + \
td(extract_alignment, "left") + \
td(extract_rnaseqc, "left") + "</tr>\n")
for i in range(0,len(list)):
row_span = len(list[i][1][0])
output.write("<tr>\n")
# Sample
output.write(td_rowspan([list[i][0]], row_span, "left"))
count = 0
for file in list[i][1][0]:
if count == 0:
# Files
output.write(td([file], "left"))
# FastQC stats
output.write(td(basic_statistics[file], "right"))
# Alignment and RNA-SeQC stats
output.write(td_rowspan(list[i][1][1], row_span, "right"))
else:
output.write("<tr>\n")
# Files
output.write(td([file], "left"))
# FastQC stats
output.write(td(basic_statistics[file], "right"))
count += 1
output.write("</tr>\n")
output.write("</table>")
def td(list, align):
string = ""
for i in list:
string += '<td align="%s">%s</td>\n' % (align, parse_number(i))
return string
def td_colour(list, align):
string = ""
for i in list:
if i == "pass":
colour = "#C5D8A2"
elif i == "warn":
colour = "#FFFFE7"
elif i == "fail":
colour = "#FCD8D4"
else:
colour = "#FFFFFF"
string += '<td align="%s" bgcolor="%s">%s</td>\n' % \
(align, colour, i.upper())
return string
def td_rowspan(list, row_span, align):
string = ""
if row_span == None:
return string
for i in range(0,len(list)):
try:
string += '<td rowspan=%d align="%s">%s</td>\n' % \
(row_span, align, parse_number(list[i]))
except:
string += '<td rowspan=%d align="%s">%s</td>\n' % \
(row_span, align, "-")
return string
def parse_number(number):
try:
int(number)
return format(int(number), ",d")
except:
return number
def main():
try:
pre_trim_files = glob(os.path.join(fastqc_dir, "*/fastqc_data.txt"))
post_trim_files = glob(os.path.join(fastqc_post_trim_dir,
"*/fastqc_data.txt"))
alignment_stats_files = glob(os.path.join(alignment_stats_dir,
"*.alignmentStats.txt"))
rnaqc_files = glob(os.path.join(rnaseqc_dir, "*/metrics.tsv"))
except:
print "ERROR"
exit()
#-------------------------------------------------------
# FastQC stats for pre-trimming and post-trimming
#-------------------------------------------------------
# parse files
pre_trim_samples = {}
for filename in pre_trim_files:
pre_trim_samples[filename] = parse_fastqc(filename)
post_trim_samples = {}
for filename in post_trim_files:
post_trim_samples[filename] = parse_fastqc(filename)
# extract info
basic_statistics_results = {}
for filename in pre_trim_samples:
sample_name = filename.split("/")[-2][:-7]
basic_statistics_results[sample_name] = extract_info(
pre_trim_samples[filename]["Basic Statistics"], extract_fastqc)
information_post_trim = {}
for filename in post_trim_samples:
sample_name = filename.split("/")[-2][:-22]
try:
basic_statistics_results[sample_name] = \
basic_statistics_results[sample_name] + \
extract_info(post_trim_samples[filename]\
["Basic Statistics"], extract_fastqc)
except:
pass
# if no post-trim file, fill in empty cells with "-"
for sample_name in basic_statistics_results:
if len(basic_statistics_results[sample_name]) != \
2 * len(extract_fastqc):
basic_statistics_results[sample_name] = \
basic_statistics_results[sample_name] + \
["-"] * len(extract_fastqc)
#-------------------------------------------------------
# Alignment stats from TopHat
#-------------------------------------------------------
# parse files
alignment_samples = {}
for filename in alignment_stats_files:
sample_name = filename.split("/")[-1][:-19]
alignment_samples[sample_name] = parse_alignment_stats(filename)
# extract info
alignment_stats_results = {}
for sample_name in alignment_samples:
alignment_stats_results[sample_name] = \
extract_alignment_stats(alignment_samples[sample_name],
extract_alignment)
#-------------------------------------------------------
# RNA-Seq stats from RNA-SeQC
#-------------------------------------------------------
# parse RNA-SeQC files
rna_samples = {}
for filename in rnaqc_files:
sample_name = filename.split("/")[-2]
rna_samples[sample_name] = parse_rna_file(filename)
# extract info
for sample_name in rna_samples:
alignment_stats_results[sample_name] = \
alignment_stats_results[sample_name] + \
extract_alignment_stats(rna_samples[sample_name],
extract_rnaseqc)
#-------------------------------------------------------
# Output to HTML table
#-------------------------------------------------------
# join dictionaries
statistics = {}
for sample_name in alignment_stats_results:
individual_files = []
for file in basic_statistics_results:
match = re.search('(SM_[A-Za-z0-9-.]+_RP_[A-Za-z0-9-.]+)_.*',
os.path.basename(file)).group(1)
if sample_name == match:
individual_files.append(file)
individual_files.sort()
statistics[sample_name] = [individual_files,
alignment_stats_results[sample_name]]
statistics_sorted = statistics.items()
statistics_sorted.sort()
try:
output = open(qc_summary_file,'w')
output.write(CSS)
output.write("<body>\n<h1>QC Metrics</h1>\n")
table(output, basic_statistics_results, statistics_sorted)
output.write("</body>\n</html>")
output.close()
except:
print "ERROR. Could not create file %s." % qc_summary_file
if __name__ == "__main__":
main()
|
{
"content_hash": "1bcd40d0b535866852af5061721cff82",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 79,
"avg_line_length": 30.64343163538874,
"alnum_prop": 0.5184601924759406,
"repo_name": "jessicachung/rna_seq_pipeline",
"id": "09ad3aaa5480ca6898395b7de78cc85fea3185dc",
"size": "11473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/qc_parse.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111756"
},
{
"name": "R",
"bytes": "22398"
},
{
"name": "Shell",
"bytes": "5306"
}
],
"symlink_target": ""
}
|
"""
Support for RFXtrx components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/rfxtrx/
"""
import logging
from collections import OrderedDict
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
ATTR_ENTITY_ID, TEMP_CELSIUS,
CONF_DEVICE_CLASS, CONF_COMMAND_ON, CONF_COMMAND_OFF
)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyRFXtrx==0.19.0']
DOMAIN = 'rfxtrx'
DEFAULT_SIGNAL_REPETITIONS = 1
ATTR_AUTOMATIC_ADD = 'automatic_add'
ATTR_DEVICE = 'device'
ATTR_DEBUG = 'debug'
ATTR_STATE = 'state'
ATTR_NAME = 'name'
ATTR_FIREEVENT = 'fire_event'
ATTR_DATA_TYPE = 'data_type'
ATTR_DATA_BITS = 'data_bits'
ATTR_DUMMY = 'dummy'
ATTR_OFF_DELAY = 'off_delay'
CONF_SIGNAL_REPETITIONS = 'signal_repetitions'
CONF_DEVICES = 'devices'
EVENT_BUTTON_PRESSED = 'button_pressed'
DATA_TYPES = OrderedDict([
('Temperature', TEMP_CELSIUS),
('Temperature2', TEMP_CELSIUS),
('Humidity', '%'),
('Barometer', ''),
('Wind direction', ''),
('Rain rate', ''),
('Energy usage', 'W'),
('Total usage', 'W'),
('Sound', ''),
('Sensor Status', ''),
('Counter value', ''),
('UV', 'uv')])
RECEIVED_EVT_SUBSCRIBERS = []
RFX_DEVICES = {}
_LOGGER = logging.getLogger(__name__)
RFXOBJECT = None
def _valid_device(value, device_type):
"""Validate a dictionary of devices definitions."""
config = OrderedDict()
for key, device in value.items():
# Still accept old configuration
if 'packetid' in device.keys():
msg = 'You are using an outdated configuration of the rfxtrx ' +\
'device, {}.'.format(key) +\
' Your new config should be:\n {}: \n name: {}'\
.format(device.get('packetid'),
device.get(ATTR_NAME, 'device_name'))
_LOGGER.warning(msg)
key = device.get('packetid')
device.pop('packetid')
key = str(key)
if not len(key) % 2 == 0:
key = '0' + key
if get_rfx_object(key) is None:
raise vol.Invalid('Rfxtrx device {} is invalid: '
'Invalid device id for {}'.format(key, value))
if device_type == 'sensor':
config[key] = DEVICE_SCHEMA_SENSOR(device)
elif device_type == 'binary_sensor':
config[key] = DEVICE_SCHEMA_BINARYSENSOR(device)
elif device_type == 'light_switch':
config[key] = DEVICE_SCHEMA(device)
else:
raise vol.Invalid('Rfxtrx device is invalid')
if not config[key][ATTR_NAME]:
config[key][ATTR_NAME] = key
return config
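# Illustrative note (not part of the original module): _valid_device expects a
# mapping of hex packet ids to per-device options, e.g. (placeholder id and
# name, for illustration only):
#
#     _valid_device({'<hex packet id>': {'name': 'Outdoor temp'}}, 'sensor')
#
# Old-style entries that still carry a 'packetid' key are migrated in place,
# and ids with an odd number of hex digits are left-padded with a '0'.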
def valid_sensor(value):
"""Validate sensor configuration."""
return _valid_device(value, "sensor")
def valid_binary_sensor(value):
"""Validate binary sensor configuration."""
return _valid_device(value, "binary_sensor")
def _valid_light_switch(value):
return _valid_device(value, "light_switch")
DEVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
})
DEVICE_SCHEMA_SENSOR = vol.Schema({
vol.Optional(ATTR_NAME, default=None): cv.string,
vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
vol.Optional(ATTR_DATA_TYPE, default=[]):
vol.All(cv.ensure_list, [vol.In(DATA_TYPES.keys())]),
})
DEVICE_SCHEMA_BINARYSENSOR = vol.Schema({
vol.Optional(ATTR_NAME, default=None): cv.string,
vol.Optional(CONF_DEVICE_CLASS, default=None): cv.string,
vol.Optional(ATTR_FIREEVENT, default=False): cv.boolean,
vol.Optional(ATTR_OFF_DELAY, default=None):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(ATTR_DATA_BITS, default=None): cv.positive_int,
vol.Optional(CONF_COMMAND_ON, default=None): cv.byte,
vol.Optional(CONF_COMMAND_OFF, default=None): cv.byte
})
DEFAULT_SCHEMA = vol.Schema({
vol.Required("platform"): DOMAIN,
vol.Optional(CONF_DEVICES, default={}): vol.All(dict, _valid_light_switch),
vol.Optional(ATTR_AUTOMATIC_ADD, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS, default=DEFAULT_SIGNAL_REPETITIONS):
vol.Coerce(int),
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(ATTR_DEVICE): cv.string,
vol.Optional(ATTR_DEBUG, default=False): cv.boolean,
vol.Optional(ATTR_DUMMY, default=False): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
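# Illustrative note (not part of the original module): a minimal configuration
# accepted by CONFIG_SCHEMA would look roughly like this in configuration.yaml
# (the serial device path is a placeholder):
#
#     rfxtrx:
#       device: /dev/ttyUSB0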
def setup(hass, config):
"""Set up the RFXtrx component."""
# Declare the Handle event
def handle_receive(event):
"""Handle revieved messgaes from RFXtrx gateway."""
# Log RFXCOM event
if not event.device.id_string:
return
_LOGGER.debug("Receive RFXCOM event from "
"(Device_id: %s Class: %s Sub: %s, Pkt_id: %s)",
slugify(event.device.id_string.lower()),
event.device.__class__.__name__,
event.device.subtype,
"".join("{0:02x}".format(x) for x in event.data))
# Callback to HA registered components.
for subscriber in RECEIVED_EVT_SUBSCRIBERS:
subscriber(event)
# Try to load the RFXtrx module.
import RFXtrx as rfxtrxmod
# Init the rfxtrx module.
global RFXOBJECT
device = config[DOMAIN][ATTR_DEVICE]
debug = config[DOMAIN][ATTR_DEBUG]
dummy_connection = config[DOMAIN][ATTR_DUMMY]
if dummy_connection:
RFXOBJECT =\
rfxtrxmod.Connect(device, handle_receive, debug=debug,
transport_protocol=rfxtrxmod.DummyTransport2)
else:
RFXOBJECT = rfxtrxmod.Connect(device, handle_receive, debug=debug)
def _shutdown_rfxtrx(event):
"""Close connection with RFXtrx."""
RFXOBJECT.close_connection()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown_rfxtrx)
return True
def get_rfx_object(packetid):
"""Return the RFXObject with the packetid."""
import RFXtrx as rfxtrxmod
try:
binarypacket = bytearray.fromhex(packetid)
except ValueError:
return None
pkt = rfxtrxmod.lowlevel.parse(binarypacket)
if pkt is None:
return None
if isinstance(pkt, rfxtrxmod.lowlevel.SensorPacket):
obj = rfxtrxmod.SensorEvent(pkt)
elif isinstance(pkt, rfxtrxmod.lowlevel.Status):
obj = rfxtrxmod.StatusEvent(pkt)
else:
obj = rfxtrxmod.ControlEvent(pkt)
return obj
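# Illustrative usage (not part of the original module): get_rfx_object accepts
# any hex packet-id string and returns None when it cannot be parsed, e.g.
# (hypothetical packet id):
#
#     event = get_rfx_object('0b11000102ef9f56010f70')
#     if event is not None:
#         print(event.device.id_string)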
def get_pt2262_deviceid(device_id, nb_data_bits):
"""Extract and return the address bits from a Lighting4/PT2262 packet."""
import binascii
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ~((1 << nb_data_bits) - 1)
data[len(data)-1] &= mask
return binascii.hexlify(data)
def get_pt2262_cmd(device_id, data_bits):
"""Extract and return the data bits from a Lighting4/PT2262 packet."""
try:
data = bytearray.fromhex(device_id)
except ValueError:
return None
mask = 0xFF & ((1 << data_bits) - 1)
return hex(data[-1] & mask)
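# Illustrative worked example (not part of the original module), using a
# made-up Lighting4 packet id '931f72' and 4 data bits: the low nibble of the
# last byte carries the command and the remaining bits form the address.
#
#     get_pt2262_deviceid('931f72', 4)   # -> b'931f70' (address bits)
#     get_pt2262_cmd('931f72', 4)        # -> '0x2'     (command bits)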
# pylint: disable=unused-variable
def get_pt2262_device(device_id):
"""Look for the device which id matches the given device_id parameter."""
for dev_id, device in RFX_DEVICES.items():
try:
if device.masked_id == get_pt2262_deviceid(device_id,
device.data_bits):
_LOGGER.info("rfxtrx: found matching device %s for %s",
device_id,
get_pt2262_deviceid(device_id, device.data_bits))
return device
except AttributeError:
continue
return None
# pylint: disable=unused-variable
def find_possible_pt2262_device(device_id):
"""Look for the device which id matches the given device_id parameter."""
for dev_id, device in RFX_DEVICES.items():
if len(dev_id) == len(device_id):
size = None
for i in range(0, len(dev_id)):
if dev_id[i] != device_id[i]:
break
size = i
if size is not None:
size = len(dev_id) - size - 1
_LOGGER.info("rfxtrx: found possible device %s for %s "
"with the following configuration:\n"
"data_bits=%d\n"
"command_on=0x%s\n"
"command_off=0x%s\n",
device_id,
dev_id,
size * 4,
dev_id[-size:], device_id[-size:])
return device
return None
def get_devices_from_config(config, device, hass):
"""Read rfxtrx configuration."""
signal_repetitions = config[CONF_SIGNAL_REPETITIONS]
devices = []
for packet_id, entity_info in config[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
device_id = slugify(event.device.id_string.lower())
if device_id in RFX_DEVICES:
continue
_LOGGER.info("Add %s rfxtrx", entity_info[ATTR_NAME])
# Check if the device must fire an event
fire_event = entity_info[ATTR_FIREEVENT]
datas = {ATTR_STATE: False, ATTR_FIREEVENT: fire_event}
new_device = device(entity_info[ATTR_NAME], event, datas,
signal_repetitions)
new_device.hass = hass
RFX_DEVICES[device_id] = new_device
devices.append(new_device)
return devices
def get_new_device(event, config, device, hass):
"""Add entity if not exist and the automatic_add is True."""
device_id = slugify(event.device.id_string.lower())
if device_id in RFX_DEVICES:
return
if not config[ATTR_AUTOMATIC_ADD]:
return
pkt_id = "".join("{0:02x}".format(x) for x in event.data)
_LOGGER.info(
"Automatic add %s rfxtrx device (Class: %s Sub: %s Packet_id: %s)",
device_id,
event.device.__class__.__name__,
event.device.subtype,
pkt_id
)
datas = {ATTR_STATE: False, ATTR_FIREEVENT: False}
signal_repetitions = config[CONF_SIGNAL_REPETITIONS]
new_device = device(pkt_id, event, datas,
signal_repetitions)
new_device.hass = hass
RFX_DEVICES[device_id] = new_device
return new_device
def apply_received_command(event):
"""Apply command from rfxtrx."""
device_id = slugify(event.device.id_string.lower())
# Check if entity exists or previously added automatically
if device_id not in RFX_DEVICES:
return
_LOGGER.debug(
"Device_id: %s device_update. Command: %s",
device_id,
event.values['Command']
)
if event.values['Command'] == 'On'\
or event.values['Command'] == 'Off':
# Update the rfxtrx device state
is_on = event.values['Command'] == 'On'
RFX_DEVICES[device_id].update_state(is_on)
elif hasattr(RFX_DEVICES[device_id], 'brightness')\
and event.values['Command'] == 'Set level':
_brightness = (event.values['Dim level'] * 255 // 100)
# Update the rfxtrx device state
is_on = _brightness > 0
RFX_DEVICES[device_id].update_state(is_on, _brightness)
# Fire event
if RFX_DEVICES[device_id].should_fire_event:
RFX_DEVICES[device_id].hass.bus.fire(
EVENT_BUTTON_PRESSED, {
ATTR_ENTITY_ID:
RFX_DEVICES[device_id].entity_id,
ATTR_STATE: event.values['Command'].lower()
}
)
_LOGGER.info(
"Rfxtrx fired event: (event_type: %s, %s: %s, %s: %s)",
EVENT_BUTTON_PRESSED,
ATTR_ENTITY_ID,
RFX_DEVICES[device_id].entity_id,
ATTR_STATE,
event.values['Command'].lower()
)
class RfxtrxDevice(Entity):
"""Represents a Rfxtrx device.
Contains the common logic for Rfxtrx lights and switches.
"""
def __init__(self, name, event, datas, signal_repetitions):
"""Initialize the device."""
self.signal_repetitions = signal_repetitions
self._name = name
self._event = event
self._state = datas[ATTR_STATE]
self._should_fire_event = datas[ATTR_FIREEVENT]
self._brightness = 0
@property
def should_poll(self):
"""No polling needed for a RFXtrx switch."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def should_fire_event(self):
"""Return is the device must fire event."""
return self._should_fire_event
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
def turn_off(self, **kwargs):
"""Turn the device off."""
self._send_command("turn_off")
def update_state(self, state, brightness=0):
"""Update det state of the device."""
self._state = state
self._brightness = brightness
self.schedule_update_ha_state()
def _send_command(self, command, brightness=0):
if not self._event:
return
if command == "turn_on":
for _ in range(self.signal_repetitions):
self._event.device.send_on(RFXOBJECT.transport)
self._state = True
elif command == "dim":
for _ in range(self.signal_repetitions):
self._event.device.send_dim(RFXOBJECT.transport,
brightness)
self._state = True
elif command == 'turn_off':
for _ in range(self.signal_repetitions):
self._event.device.send_off(RFXOBJECT.transport)
self._state = False
self._brightness = 0
elif command == "roll_up":
for _ in range(self.signal_repetitions):
self._event.device.send_open(RFXOBJECT.transport)
elif command == "roll_down":
for _ in range(self.signal_repetitions):
self._event.device.send_close(RFXOBJECT.transport)
elif command == "stop_roll":
for _ in range(self.signal_repetitions):
self._event.device.send_stop(RFXOBJECT.transport)
self.schedule_update_ha_state()
|
{
"content_hash": "1c027dab73ead0f4efef61389c5b6690",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 79,
"avg_line_length": 31.662393162393162,
"alnum_prop": 0.5907004993926306,
"repo_name": "MungoRae/home-assistant",
"id": "e3ffc2f24a87d678125274f9eac61494848f5cf0",
"size": "14818",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rfxtrx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1686638"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7266062"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15114"
}
],
"symlink_target": ""
}
|
import os.path
import urwid
import netlib.utils
from . import pathedit, signals, common
from .. import utils
class ActionBar(urwid.WidgetWrap):
def __init__(self):
urwid.WidgetWrap.__init__(self, None)
self.clear()
signals.status_message.connect(self.sig_message)
signals.status_prompt.connect(self.sig_prompt)
signals.status_prompt_path.connect(self.sig_path_prompt)
signals.status_prompt_onekey.connect(self.sig_prompt_onekey)
self.last_path = ""
self.prompting = False
self.onekey = False
self.pathprompt = False
def sig_message(self, sender, message, expire=None):
w = urwid.Text(message)
self._w = w
if expire:
def cb(*args):
if w == self._w:
self.clear()
signals.call_in.send(seconds=expire, callback=cb)
def prep_prompt(self, p):
return p.strip() + ": "
def sig_prompt(self, sender, prompt, text, callback, args=()):
signals.focus.send(self, section="footer")
self._w = urwid.Edit(self.prep_prompt(prompt), text or "")
self.prompting = (callback, args)
def sig_path_prompt(self, sender, prompt, callback, args=()):
signals.focus.send(self, section="footer")
self._w = pathedit.PathEdit(
self.prep_prompt(prompt),
os.path.dirname(self.last_path)
)
self.pathprompt = True
self.prompting = (callback, args)
def sig_prompt_onekey(self, sender, prompt, keys, callback, args=()):
"""
Keys are a set of (word, key) tuples. The appropriate key in the
word is highlighted.
"""
signals.focus.send(self, section="footer")
prompt = [prompt, " ("]
mkup = []
for i, e in enumerate(keys):
mkup.extend(common.highlight_key(e[0], e[1]))
if i < len(keys) - 1:
mkup.append(",")
prompt.extend(mkup)
prompt.append(")? ")
self.onekey = set(i[1] for i in keys)
self._w = urwid.Edit(prompt, "")
self.prompting = (callback, args)
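# Illustrative only (not part of the original class): a caller emits the
# one-key prompt through the blinker signal, e.g. with a hypothetical
# callback cb:
#
#     signals.status_prompt_onekey.send(
#         self, prompt="Quit", keys=(("yes", "y"), ("no", "n")), callback=cb
#     )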
def selectable(self):
return True
def keypress(self, size, k):
if self.prompting:
if k == "esc":
self.prompt_done()
elif self.onekey:
if k == "enter":
self.prompt_done()
elif k in self.onekey:
self.prompt_execute(k)
elif k == "enter":
self.prompt_execute(self._w.get_edit_text())
else:
if common.is_keypress(k):
self._w.keypress(size, k)
else:
return k
def clear(self):
self._w = urwid.Text("")
def prompt_done(self):
self.prompting = False
self.onekey = False
self.pathprompt = False
signals.status_message.send(message="")
signals.focus.send(self, section="body")
def prompt_execute(self, txt):
if self.pathprompt:
self.last_path = txt
p, args = self.prompting
self.prompt_done()
msg = p(txt, *args)
if msg:
signals.status_message.send(message=msg, expire=1)
class StatusBar(urwid.WidgetWrap):
def __init__(self, master, helptext):
self.master, self.helptext = master, helptext
self.ab = ActionBar()
self.ib = urwid.WidgetWrap(urwid.Text(""))
self._w = urwid.Pile([self.ib, self.ab])
signals.update_settings.connect(self.sig_update_settings)
signals.flowlist_change.connect(self.sig_update_settings)
self.redraw()
def sig_update_settings(self, sender):
self.redraw()
def keypress(self, *args, **kwargs):
return self.ab.keypress(*args, **kwargs)
def get_status(self):
r = []
if self.master.setheaders.count():
r.append("[")
r.append(("heading_key", "H"))
r.append("eaders]")
if self.master.replacehooks.count():
r.append("[")
r.append(("heading_key", "R"))
r.append("eplacing]")
if self.master.client_playback:
r.append("[")
r.append(("heading_key", "cplayback"))
r.append(":%s to go]" % self.master.client_playback.count())
if self.master.server_playback:
r.append("[")
r.append(("heading_key", "splayback"))
if self.master.nopop:
r.append(":%s in file]" % self.master.server_playback.count())
else:
r.append(":%s to go]" % self.master.server_playback.count())
if self.master.get_ignore_filter():
r.append("[")
r.append(("heading_key", "I"))
r.append("gnore:%d]" % len(self.master.get_ignore_filter()))
if self.master.get_tcp_filter():
r.append("[")
r.append(("heading_key", "T"))
r.append("CP:%d]" % len(self.master.get_tcp_filter()))
if self.master.state.intercept_txt:
r.append("[")
r.append(("heading_key", "i"))
r.append(":%s]" % self.master.state.intercept_txt)
if self.master.state.limit_txt:
r.append("[")
r.append(("heading_key", "l"))
r.append(":%s]" % self.master.state.limit_txt)
if self.master.stickycookie_txt:
r.append("[")
r.append(("heading_key", "t"))
r.append(":%s]" % self.master.stickycookie_txt)
if self.master.stickyauth_txt:
r.append("[")
r.append(("heading_key", "u"))
r.append(":%s]" % self.master.stickyauth_txt)
if self.master.state.default_body_view.name != "Auto":
r.append("[")
r.append(("heading_key", "M"))
r.append(":%s]" % self.master.state.default_body_view.name)
opts = []
if self.master.anticache:
opts.append("anticache")
if self.master.anticomp:
opts.append("anticomp")
if self.master.showhost:
opts.append("showhost")
if not self.master.refresh_server_playback:
opts.append("norefresh")
if self.master.killextra:
opts.append("killextra")
if self.master.server.config.no_upstream_cert:
opts.append("no-upstream-cert")
if self.master.state.follow_focus:
opts.append("following")
if self.master.stream_large_bodies:
opts.append(
"stream:%s" % netlib.utils.pretty_size(
self.master.stream_large_bodies.max_size
)
)
if opts:
r.append("[%s]" % (":".join(opts)))
if self.master.server.config.mode in ["reverse", "upstream"]:
dst = self.master.server.config.mode.dst
scheme = "https" if dst[0] else "http"
if dst[1] != dst[0]:
scheme += "2https" if dst[1] else "http"
r.append("[dest:%s]" % utils.unparse_url(scheme, *dst[2:]))
if self.master.scripts:
r.append("[")
r.append(("heading_key", "s"))
r.append("cripts:%s]" % len(self.master.scripts))
# r.append("[lt:%0.3f]"%self.master.looptime)
if self.master.stream:
r.append("[W:%s]" % self.master.stream_path)
return r
def redraw(self):
fc = self.master.state.flow_count()
if self.master.state.focus is None:
offset = 0
else:
offset = min(self.master.state.focus + 1, fc)
t = [
('heading', ("[%s/%s]" % (offset, fc)).ljust(9))
]
if self.master.server.bound:
host = self.master.server.address.host
if host == "0.0.0.0":
host = "*"
boundaddr = "[%s:%s]" % (host, self.master.server.address.port)
else:
boundaddr = ""
t.extend(self.get_status())
status = urwid.AttrWrap(urwid.Columns([
urwid.Text(t),
urwid.Text(
[
self.helptext,
boundaddr
],
align="right"
),
]), "heading")
self.ib._w = status
def update(self, text):
self.helptext = text
self.redraw()
self.master.loop.draw_screen()
def selectable(self):
return True
|
{
"content_hash": "d2adeba8eb896d6ea4fa79b676eb066e",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 78,
"avg_line_length": 33.610236220472444,
"alnum_prop": 0.5197376127445238,
"repo_name": "byt3bl33d3r/mitmproxy",
"id": "7eb2131bea13d4bfc992547c62514dccd47f326e",
"size": "8537",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "libmproxy/console/statusbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "411"
},
{
"name": "CSS",
"bytes": "340350"
},
{
"name": "HTML",
"bytes": "98093"
},
{
"name": "JavaScript",
"bytes": "1728505"
},
{
"name": "Python",
"bytes": "703684"
},
{
"name": "Shell",
"bytes": "5081"
}
],
"symlink_target": ""
}
|
"""Google Cloud Text-To-Speech API sample application .
Example usage:
python quickstart.py
"""
def run_quickstart():
# [START tts_quickstart]
"""Synthesizes speech from the input string of text or ssml.
Make sure to be working in a virtual environment.
Note: ssml must be well-formed according to:
https://www.w3.org/TR/speech-synthesis/
"""
from google.cloud import texttospeech
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Set the text input to be synthesized
synthesis_input = texttospeech.SynthesisInput(text="Hello, World!")
# Build the voice request, select the language code ("en-US") and the ssml
# voice gender ("neutral")
voice = texttospeech.VoiceSelectionParams(
language_code="en-US", ssml_gender=texttospeech.SsmlVoiceGender.NEUTRAL
)
# Select the type of audio file you want returned
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(
input=synthesis_input, voice=voice, audio_config=audio_config
)
# The response's audio_content is binary.
with open("output.mp3", "wb") as out:
# Write the response to the output file.
out.write(response.audio_content)
print('Audio content written to file "output.mp3"')
# [END tts_quickstart]
if __name__ == "__main__":
run_quickstart()
|
{
"content_hash": "1a40a9e415cfba5eea62762afb8e440c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 31.44,
"alnum_prop": 0.6857506361323156,
"repo_name": "googleapis/python-texttospeech",
"id": "2c2434df75ec314a74018ce03db74d3ab188d6ed",
"size": "2193",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/quickstart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "368339"
},
{
"name": "Shell",
"bytes": "30678"
}
],
"symlink_target": ""
}
|
extensions = [
'sphinx.ext.mathjax',
'breathe',
]
breathe_projects = { "immer": "_doxygen/xml" }
breathe_default_project = "immer"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
source_parsers = { '.md': CommonMarkParser, }
source_suffix = ['.rst', '.md']
def setup(app):
app.add_config_value('recommonmark_config', {
'enable_eval_rst': True,
}, True)
app.add_transform(AutoStructify)
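# Illustrative note (not part of the original configuration): with
# AutoStructify and 'enable_eval_rst' switched on, Markdown sources can embed
# raw reStructuredText inside an eval_rst fenced block, for example:
#
#     ```eval_rst
#     .. doxygenclass:: immer::vector
#     ```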
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'immer'
copyright = u'2016, 2017 Juan Pedro Bolivar Puente'
author = u'Juan Pedro Bolivar Puente'
raw_enabled = True
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.0'
# The full version, including alpha/beta/rc tags.
release = u'0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__),
'../tools/sinusoidal-sphinx-theme'))
import sinusoidal_sphinx_theme
html_theme_path = sinusoidal_sphinx_theme.html_theme_path()
html_theme = 'sinusoidal_sphinx_theme'
extensions.append("sinusoidal_sphinx_theme")
html_theme_options = {
"project_nav_name": "immer",
"github_link" : "https://github.com/arximboldi/immer",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'immer v0.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '_static/logo-black.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'immerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'immer.tex', u'immer Documentation',
u'Juan Pedro Bolivar Puente', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'immer', u'immer Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'immer', u'immer Documentation',
author, 'immer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
{
"content_hash": "0fb106748ca65dbaf76e52244fdd5c64",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 80,
"avg_line_length": 27.69833729216152,
"alnum_prop": 0.6911071091673099,
"repo_name": "graetzer/arangodb",
"id": "3c32cc505ba392b7d03d59a0beba635c628a2232",
"size": "12710",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "3rdParty/immer/v0.6.2/doc/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "63025"
},
{
"name": "C",
"bytes": "7952921"
},
{
"name": "C#",
"bytes": "96431"
},
{
"name": "C++",
"bytes": "274543069"
},
{
"name": "CMake",
"bytes": "646773"
},
{
"name": "CSS",
"bytes": "1054160"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "131"
},
{
"name": "HTML",
"bytes": "2215528"
},
{
"name": "Java",
"bytes": "922156"
},
{
"name": "JavaScript",
"bytes": "53300241"
},
{
"name": "LLVM",
"bytes": "24129"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "17899"
},
{
"name": "M4",
"bytes": "575204"
},
{
"name": "Makefile",
"bytes": "492694"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "28404"
},
{
"name": "Objective-C",
"bytes": "18435"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "107274"
},
{
"name": "Pascal",
"bytes": "150599"
},
{
"name": "Perl",
"bytes": "564374"
},
{
"name": "Perl6",
"bytes": "9918"
},
{
"name": "Python",
"bytes": "4527647"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1007604"
},
{
"name": "Ruby",
"bytes": "929950"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "424800"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "551977"
},
{
"name": "Yacc",
"bytes": "53072"
}
],
"symlink_target": ""
}
|
"""
Django settings for BlogApp project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jz!dvjy(htp9%-a=woderiy0)&gcbgf&d0w3t60l(sfrxf=ya9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'BlogApp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'BlogApp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Redirect URL after login
LOGIN_REDIRECT_URL = '/blog/'
|
{
"content_hash": "49af6ee2683609bd96a496f823f76c37",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 25.99056603773585,
"alnum_prop": 0.6929219600725953,
"repo_name": "disrupticons/django-blog",
"id": "c6862197feca4b5d3df86a27d85d5a8aaf9a714d",
"size": "2755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BlogApp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41"
},
{
"name": "HTML",
"bytes": "8290"
},
{
"name": "JavaScript",
"bytes": "429"
},
{
"name": "Python",
"bytes": "12649"
}
],
"symlink_target": ""
}
|
"""Tests for zmake multiproc."""
import threading
import zmake.multiproc
def test_single_function_executor_success():
"""Test single function success."""
executor = zmake.multiproc.Executor()
executor.append(lambda: 0)
assert executor.wait() == 0
def test_single_function_executor_fail():
"""Test single function fail."""
executor = zmake.multiproc.Executor()
executor.append(lambda: -2)
assert executor.wait() == -2
def test_single_function_executor_raise():
"""Test single function raising an exception."""
executor = zmake.multiproc.Executor()
executor.append(lambda: 1 / 0)
assert executor.wait() != 0
def _lock_step(cond, predicate, step, return_value=0):
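"""Block until the shared step counter equals `predicate`, then increment it and return `return_value`."""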
with cond:
cond.wait_for(predicate=lambda: step[0] == predicate)
step[0] += 1
cond.notify_all()
return return_value
def test_two_function_executor_wait_for_both():
"""Test two functions in executor."""
cond = threading.Condition()
step = [0]
executor = zmake.multiproc.Executor()
executor.append(lambda: _lock_step(cond=cond, predicate=0, step=step))
executor.append(lambda: _lock_step(cond=cond, predicate=1, step=step))
assert executor.wait() == 0
assert step[0] == 2
def test_two_function_executor_one_fails():
"""Test two functions in executor, when one fails."""
cond = threading.Condition()
step = [0]
executor = zmake.multiproc.Executor()
executor.append(
lambda: _lock_step(cond=cond, predicate=0, step=step, return_value=-1)
)
executor.append(lambda: _lock_step(cond=cond, predicate=1, step=step))
assert executor.wait() == -1
assert step[0] == 2
|
{
"content_hash": "028e5600d9a540e25045442a82a80dcd",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 29.086206896551722,
"alnum_prop": 0.6621221102548903,
"repo_name": "coreboot/chrome-ec",
"id": "ff443e2f4b1c9ec2c3cfbdd063eedd353bc8bf57",
"size": "1830",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zephyr/zmake/tests/test_multiproc_executor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "153372"
},
{
"name": "C",
"bytes": "25514204"
},
{
"name": "C++",
"bytes": "617015"
},
{
"name": "CMake",
"bytes": "114317"
},
{
"name": "Emacs Lisp",
"bytes": "136"
},
{
"name": "Go",
"bytes": "40545"
},
{
"name": "HTML",
"bytes": "602017"
},
{
"name": "Makefile",
"bytes": "247601"
},
{
"name": "Pawn",
"bytes": "3004"
},
{
"name": "Python",
"bytes": "1006209"
},
{
"name": "Shell",
"bytes": "138354"
},
{
"name": "SourcePawn",
"bytes": "3051"
},
{
"name": "Tcl",
"bytes": "5238"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
from subprocess import Popen, PIPE
import tarfile
import nltk
import nlpnet
from textblob import TextBlob
from compare import Compare
"""
This class will test to see whether the dependencies for the given parser
are installed. In the event that they are not already installed, the program
will ask the user whether to install them. If the user agrees, it will
download and install the dependencies.
Class information:
- name: run_dependency_tests
- version: 1.4.6
- author: Vale Tolpegin
"""
class RunDependencyTests:
def __init__(self, *args, **kwargs):
"""
Test for dependencies.
"""
self.test()
def test(self):
"""
Testing each dependency
"""
        # Testing for textblob corpora which are used in most parsers
self.test_for_textblob()
# Testing for parser corpora for each parser
self.test_for_nltk()
self.test_for_nlpnet()
def test_for_textblob(self):
"""
        Install textblob dependencies. The download command
        automatically skips corpora that are already
        installed, so no explicit check is needed here.
"""
# Installing data
os.system("python -m textblob.download_corpora")
def test_for_nltk(self):
"""
Downloading all required NLTK dependencies.
"""
from nltk.data import find
from nltk import download
# Download data if needed
try:
find('stopwords.zip')
except LookupError:
download('stopwords')
try:
find('maxent_ne_chunker')
except LookupError:
download('maxent_ne_chunker')
try:
find('words')
except LookupError:
download('words')
def test_for_nlpnet(self):
"""
Attempting to use nlpnet. This will cause an
error if the required dependencies are not
downloaded.
"""
try:
# Creating a new compare object
compare_nlpnet = Compare()
            # Comparing using the nlpnet parser
compare_nlpnet.compare_strings(text=["what time is it here?", "This is the cat's hat"], pattern_detection=False, parser="nlpnet")
            # If that was successful, getting information
sentence_information = compare_nlpnet.get_pattern_information()
for sentence in sentence_information:
my_pattern = "[ Pattern ] : " + sentence.pattern
my_subject = "[ Subject ] : " + sentence.subject
my_verb = "[ Verb ] : " + sentence.verb
my_object = "[ Object ] : " + sentence.object[0]
my_preps = "[ Prep Phrases ] : " + str(sentence.prepositional_phrases)
my_reliability_score = "[ Reliability Score ]: " + str(sentence.reliability_score)
except:
# Getting nltk data path
running = Popen(['python -c "import nltk;print nltk.data.path"'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
            stdout, stderr = running.communicate()
            # Setting the path that the nlpnet dependency will be downloaded from
            path = re.sub(r"\'", "", re.sub(r"\[", '', str(stdout.split('\n')[0].split(',')[0])))
path = path.split(r"/")
path = '/'.join(path[0 : len(path) - 1]) + '/nlpnet_dependency/'
# Download the dependencies & extract
current_directory = os.getcwd()
os.mkdir(path)
os.chdir(path)
os.system("wget http://nilc.icmc.usp.br/nlpnet/data/dependency-en.tgz")
tar = tarfile.open(path + 'dependency-en.tgz', 'r:gz')
tar.extractall(path)
os.remove(path + 'dependency-en.tgz')
os.chdir(current_directory)
|
{
"content_hash": "fee5c3eba4a578a02eb11fcc7652b419",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 141,
"avg_line_length": 31.495934959349594,
"alnum_prop": 0.57769747031492,
"repo_name": "DarkmatterVale/regex4dummies",
"id": "95bfac409771eaca093eb7d5f9cf6637346415b9",
"size": "3874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regex4dummies/test_dependencies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118642"
}
],
"symlink_target": ""
}
|
import sys
import mox
from neutronclient.neutron.v2_0.lb import healthmonitor
from neutronclient.tests.unit import test_cli20
class CLITestV20LbHealthmonitorJSON(test_cli20.CLITestV20Base):
def test_create_healthmonitor_with_mandatory_params(self):
"""lb-healthmonitor-create with mandatory params only."""
resource = 'health_monitor'
cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
admin_state_up = False
delay = '60'
max_retries = '2'
timeout = '10'
type = 'TCP'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--admin-state-down',
'--delay', delay,
'--max-retries', max_retries,
'--timeout', timeout,
'--type', type,
'--tenant-id', tenant_id]
position_names = ['admin_state_up', 'delay', 'max_retries', 'timeout',
'type', 'tenant_id']
position_values = [admin_state_up, delay, max_retries, timeout, type,
tenant_id]
self._test_create_resource(resource, cmd, '', my_id, args,
position_names, position_values)
def test_create_healthmonitor_with_all_params(self):
"""lb-healthmonitor-create with all params set."""
resource = 'health_monitor'
cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
admin_state_up = False
delay = '60'
expected_codes = '200-202,204'
http_method = 'HEAD'
max_retries = '2'
timeout = '10'
type = 'TCP'
tenant_id = 'my-tenant'
url_path = '/health'
my_id = 'my-id'
args = ['--admin-state-down',
'--delay', delay,
'--expected-codes', expected_codes,
'--http-method', http_method,
'--max-retries', max_retries,
'--timeout', timeout,
'--type', type,
'--tenant-id', tenant_id,
'--url-path', url_path]
position_names = ['admin_state_up', 'delay',
'expected_codes', 'http_method',
'max_retries', 'timeout',
'type', 'tenant_id', 'url_path']
position_values = [admin_state_up, delay,
expected_codes, http_method,
max_retries, timeout,
type, tenant_id, url_path]
self._test_create_resource(resource, cmd, '', my_id, args,
position_names, position_values)
def test_list_healthmonitors(self):
"""lb-healthmonitor-list."""
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
def test_list_healthmonitors_pagination(self):
"""lb-healthmonitor-list."""
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_healthmonitors_sort(self):
"""lb-healthmonitor-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_healthmonitors_limit(self):
"""lb-healthmonitor-list -P."""
resources = "health_monitors"
cmd = healthmonitor.ListHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_healthmonitor_id(self):
"""lb-healthmonitor-show test_id."""
resource = 'health_monitor'
cmd = healthmonitor.ShowHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_healthmonitor_id_name(self):
"""lb-healthmonitor-show."""
resource = 'health_monitor'
cmd = healthmonitor.ShowHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_health_monitor(self):
"""lb-healthmonitor-update myid --name myname --tags a b."""
resource = 'health_monitor'
cmd = healthmonitor.UpdateHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--timeout', '5'],
{'timeout': '5', })
def test_delete_healthmonitor(self):
"""lb-healthmonitor-delete my-id."""
resource = 'health_monitor'
cmd = healthmonitor.DeleteHealthMonitor(test_cli20.MyApp(sys.stdout),
None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_associate_healthmonitor(self):
cmd = healthmonitor.AssociateHealthMonitor(
test_cli20.MyApp(sys.stdout),
None)
resource = 'health_monitor'
health_monitor_id = 'hm-id'
pool_id = 'p_id'
args = [health_monitor_id, pool_id]
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = {resource: {'id': health_monitor_id}}
result = {resource: {'id': health_monitor_id}, }
result_str = self.client.serialize(result)
path = getattr(self.client,
"associate_pool_health_monitors_path") % pool_id
return_tup = (test_cli20.MyResp(200), result_str)
self.client.httpclient.request(
test_cli20.end_url(path), 'POST',
body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(return_tup)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser('test_' + resource)
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_disassociate_healthmonitor(self):
cmd = healthmonitor.DisassociateHealthMonitor(
test_cli20.MyApp(sys.stdout),
None)
resource = 'health_monitor'
health_monitor_id = 'hm-id'
pool_id = 'p_id'
args = [health_monitor_id, pool_id]
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
path = (getattr(self.client,
"disassociate_pool_health_monitors_path") %
{'pool': pool_id, 'health_monitor': health_monitor_id})
return_tup = (test_cli20.MyResp(204), None)
self.client.httpclient.request(
test_cli20.end_url(path), 'DELETE',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(return_tup)
self.mox.ReplayAll()
cmd_parser = cmd.get_parser('test_' + resource)
parsed_args = cmd_parser.parse_args(args)
cmd.run(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
class CLITestV20LbHealthmonitorXML(CLITestV20LbHealthmonitorJSON):
format = 'xml'
|
{
"content_hash": "bdd3859456c0aa49326b4ce9836ec0d6",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 78,
"avg_line_length": 41.955,
"alnum_prop": 0.5365272315576213,
"repo_name": "asadoughi/python-neutronclient",
"id": "959bd98bf88bc536c2a50ee20897ec921157e779",
"size": "9106",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/lb/test_cli20_healthmonitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "624057"
},
{
"name": "Shell",
"bytes": "5278"
}
],
"symlink_target": ""
}
|
from tempest_lib import exceptions as lib_exc
from tempest.api.compute.security_groups import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
def not_existing_id():
if CONF.service_available.neutron:
return data_utils.rand_uuid()
else:
return data_utils.rand_int_id(start=999)
class SecurityGroupRulesNegativeTestJSON(base.BaseSecurityGroupsTest):
@classmethod
def setup_clients(cls):
super(SecurityGroupRulesNegativeTestJSON, cls).setup_clients()
cls.client = cls.security_groups_client
cls.rules_client = cls.security_group_rules_client
@test.attr(type=['negative'])
@test.idempotent_id('1d507e98-7951-469b-82c3-23f1e6b8c254')
@test.services('network')
def test_create_security_group_rule_with_non_existent_id(self):
# Negative test: Creation of Security Group rule should FAIL
# with non existent Parent group id
# Adding rules to the non existent Security Group id
parent_group_id = not_existing_id()
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(lib_exc.NotFound,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('2244d7e4-adb7-4ecb-9930-2d77e123ce4f')
@test.services('network')
def test_create_security_group_rule_with_invalid_id(self):
# Negative test: Creation of Security Group rule should FAIL
# with Parent group id which is not integer
# Adding rules to the non int Security Group id
parent_group_id = data_utils.rand_name('non_int_id')
ip_protocol = 'tcp'
from_port = 22
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('8bd56d02-3ffa-4d67-9933-b6b9a01d6089')
@test.services('network')
def test_create_security_group_rule_duplicate(self):
# Negative test: Create Security Group rule duplicate should fail
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 22
rule = self.rules_client.create_security_group_rule(
parent_group_id=parent_group_id, ip_protocol=ip_protocol,
from_port=from_port, to_port=to_port)
self.addCleanup(self.rules_client.delete_security_group_rule,
rule['id'])
# Add the same rule to the group should fail
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('84c81249-9f6e-439c-9bbf-cbb0d2cddbdf')
@test.services('network')
def test_create_security_group_rule_with_invalid_ip_protocol(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid ip_protocol
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = data_utils.rand_name('999')
from_port = 22
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('12bbc875-1045-4f7a-be46-751277baedb9')
@test.services('network')
def test_create_security_group_rule_with_invalid_from_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid from_port
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = data_utils.rand_int_id(start=65536)
to_port = 22
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('ff88804d-144f-45d1-bf59-dd155838a43a')
@test.services('network')
def test_create_security_group_rule_with_invalid_to_port(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid to_port
# Creating a Security Group to add rule to it
sg = self.create_security_group()
# Adding rules to the created Security Group
parent_group_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = data_utils.rand_int_id(start=65536)
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=parent_group_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('00296fa9-0576-496a-ae15-fbab843189e0')
@test.services('network')
def test_create_security_group_rule_with_invalid_port_range(self):
# Negative test: Creation of Security Group rule should FAIL
# with invalid port range.
# Creating a Security Group to add rule to it.
sg = self.create_security_group()
# Adding a rule to the created Security Group
secgroup_id = sg['id']
ip_protocol = 'tcp'
from_port = 22
to_port = 21
self.assertRaises(lib_exc.BadRequest,
self.rules_client.create_security_group_rule,
parent_group_id=secgroup_id,
ip_protocol=ip_protocol, from_port=from_port,
to_port=to_port)
@test.attr(type=['negative'])
@test.idempotent_id('56fddcca-dbb8-4494-a0db-96e9f869527c')
@test.services('network')
def test_delete_security_group_rule_with_non_existent_id(self):
        # Negative test: Deletion of Security Group rule should FAIL
# with non existent id
non_existent_rule_id = not_existing_id()
self.assertRaises(lib_exc.NotFound,
self.rules_client.delete_security_group_rule,
non_existent_rule_id)
|
{
"content_hash": "4f8783305c2c51620e3999b1a2fb5cd4",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 73,
"avg_line_length": 43.21052631578947,
"alnum_prop": 0.6057653268371904,
"repo_name": "nunogt/tempest",
"id": "d12306a43fbbc2cc12097fc394ad9e4ddd7645d0",
"size": "8033",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/api/compute/security_groups/test_security_group_rules_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2725691"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
from .base_plotter import IPlotter
# from .export import VirtualBrowser
from .c3_plotter import C3Plotter
from .plotly_plotter import PlotlyPlotter
from .chartjs_plotter import ChartJSPlotter
from .chartist_plotter import ChartistPlotter
from .google_plotter import GCPlotter
__version__ = '0.4.3'
|
{
"content_hash": "05ea9daf3cc5ee829fe398870696cbe8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 37.25,
"alnum_prop": 0.8154362416107382,
"repo_name": "WaylonWalker/pyDataVizDay",
"id": "4cd17cadc58bf1d6b1b80cc58d1918a5c3fe93e2",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/iplotter/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547492"
},
{
"name": "HTML",
"bytes": "1393902"
},
{
"name": "JavaScript",
"bytes": "288852"
},
{
"name": "Jupyter Notebook",
"bytes": "555759"
},
{
"name": "Python",
"bytes": "44993"
},
{
"name": "Shell",
"bytes": "44"
}
],
"symlink_target": ""
}
|
"""
Speedbar module
This provides performance metrics, details of operations performed, and Chrome SpeedTracer integration
for page loads.
Information is provided by a set of modules, which are responsible for recording and reporting data.
The collected data is then collected and made available via template tags, headers, and a HAR file.
On startup each module is given a chance to initialize itself, typically this consists of monkey
patching a set of built in django functionality. A per request module object is created in response
to the start of each request. Over the course of the request modules record data, using thread local
storage to associate correctly with the right request. A middleware then writes out summary data,
and the headers required to fetch more detailed information from the server. Finally the request_finished
signal handler stores detailed information to memcache which can then be retrieved.
"""
import re
try:
# for Django >= 1.10
from django.utils.deprecation import MiddlewareMixin
except ImportError:
# for Django < 1.10
MiddlewareMixin = object
from django.conf import settings
from django.core.signals import request_started, request_finished
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode, smart_str
from speedbar.signals import setup_request_tracing, store_request_trace
from speedbar.utils import init_modules
from speedbar.modules.base import RequestTrace
if getattr(settings, 'SPEEDBAR_ENABLE', True):
# We hook everything up in the middleware file as loading the middleware is one of the first things performed
# by the django WSGI implementation.
init_modules()
request_started.connect(setup_request_tracing, dispatch_uid='request_started_speedbar_setup_request_tracing')
request_finished.connect(store_request_trace, dispatch_uid='request_started_speedbar_store_request_trace')
HTML_TYPES = ('text/html', 'application/xhtml+xml')
METRIC_PLACEHOLDER_RE = re.compile('<span data-module="(?P<module>[^"]+)" data-metric="(?P<metric>[^"]+)"></span>')
class SpeedbarMiddleware(MiddlewareMixin):
"""
    Middleware module to add speedbar related headers to responses and replace any
speedbar template tags with their correct values.
For most accurate results place this as near to the top of your middleware stack
as possible.
If you wish to customize how features are enabled and disabled on a per-request
basis you can do so by inheriting from this class and overriding some or all of the
should_ methods.
"""
def process_request(self, request):
if getattr(settings, 'SPEEDBAR_ENABLE', True):
request_trace = RequestTrace.instance()
request_trace.stacktracer.root.label = '%s %s' % (request.method, request.path)
request_trace.request = request
def process_response(self, request, response):
if not getattr(settings, 'SPEEDBAR_ENABLE', True):
return response
request_trace = RequestTrace.instance()
# TODO: Do we also need to stash this on in case of exception?
request_trace.response = response
metrics = dict((key, module.get_metrics()) for key, module in request_trace.modules.items())
if self.should_return_response_headers(request):
self.add_response_headers(response, metrics)
if self.should_return_trace_header(request):
response['X-TraceUrl'] = reverse('speedbar_trace', args=[request_trace.id])
request_trace.persist_log = True
if self.should_replace_template_tags(request):
if 'gzip' not in response.get('Content-Encoding', '') and response.get('Content-Type', '').split(';')[0] in HTML_TYPES:
# Force render of response (from lazy TemplateResponses) before speedbar is injected
if hasattr(response, 'render'):
response.render()
content = smart_unicode(response.content)
content = self.replace_templatetag_placeholders(content, metrics)
                # Note: The URLs returned here do not exist at this point. The relevant data is added to the cache by a signal handler
                # once all page processing is finally done. This means it is possible that the summary values displayed and the
                # detailed breakdown won't quite correspond.
if getattr(settings, 'SPEEDBAR_PANEL', True):
panel_url = reverse('speedbar_panel', args=[request_trace.id])
panel_placeholder_url = reverse('speedbar_details_for_this_request')
content = content.replace(panel_placeholder_url, panel_url)
request_trace.persist_details = True
response.content = smart_str(content)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response
def should_return_response_headers(self, request):
return getattr(settings, 'SPEEDBAR_RESPONSE_HEADERS', False)
def should_return_trace_header(self, request):
return hasattr(request, 'user') and request.user.is_staff and getattr(settings, 'SPEEDBAR_TRACE', True)
def should_replace_template_tags(self, request):
return hasattr(request, 'user') and request.user.is_staff
def add_response_headers(self, response, metrics):
"""
Adds all summary metrics to the response headers, so they can be stored in nginx logs if desired.
"""
def sanitize(string):
return string.title().replace(' ', '-')
for module, module_values in metrics.items():
for key, value in module_values.items():
response['X-Speedbar-%s-%s' % (sanitize(module), sanitize(key))] = value
def replace_templatetag_placeholders(self, content, metrics):
"""
The templatetags defined in this module add placeholder values which we replace with true values here. They
cannot just insert the values directly as not all processing may have happened by that point.
"""
def replace_placeholder(match):
module = match.group('module')
metric = match.group('metric')
return unicode(metrics[module][metric])
return METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)
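# Hypothetical subclass sketch (not part of the original module): the class
# docstring above suggests overriding the should_ methods to customise
# per-request behaviour, e.g. exposing the trace header and template-tag
# replacement to superusers only instead of all staff.
class SuperuserOnlySpeedbarMiddleware(SpeedbarMiddleware):
    def should_return_trace_header(self, request):
        return hasattr(request, 'user') and request.user.is_superuser
    def should_replace_template_tags(self, request):
        return hasattr(request, 'user') and request.user.is_superuser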
|
{
"content_hash": "a31ed5c9b10b686edb4b9b1549cf0fb1",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 134,
"avg_line_length": 46.81021897810219,
"alnum_prop": 0.6909402775612038,
"repo_name": "mixcloud/django-speedbar",
"id": "614e8055420a0cec3d2382774fee379defa758b9",
"size": "6413",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "speedbar/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1215"
},
{
"name": "HTML",
"bytes": "1934"
},
{
"name": "JavaScript",
"bytes": "675"
},
{
"name": "Python",
"bytes": "39840"
}
],
"symlink_target": ""
}
|
import sys
import string
class IssueFormatter(string.Formatter):
"""
Custom string formatter, adds
u : uppercase
l : lowercase
c : capitalize
to string formatting, e.g.
issue_formatter.format("{foo!c}", foo="hello world!")
returns "HELLO WORLD!"
Note that autonumbering, i.e. use of {}, is not possible.
"""
# http://stackoverflow.com/q/17848202/1007047
# http://stackoverflow.com/q/21664318/1007047
def __init__(self):
super(IssueFormatter, self).__init__()
def convert_field(self, value, conversion):
if conversion == 'c':
return value.capitalize()
elif conversion == 'u':
return value.upper()
elif conversion == 'l':
return value.lower()
return super(IssueFormatter, self).convert_field(value, conversion)
class IssueTracker(object):
"""
A pretty generic issue tracker that can sort, format, and report back issues.
An issue is a dict with information, guaranteed to have keys, 'type' and 'reason',
where value of 'type' is either 'error' or 'warning', and the value of 'reason'
is (should be) a human readable string explaining the issue.
"""
issue_types = ['error', 'warning']
def __init__(self, allow_duplicates=False):
"""
        If allow_duplicates is True, issues identical to an already tracked issue
        will be added; the default is to discard duplicates.
"""
super(IssueTracker, self).__init__()
self.allow_duplicates = allow_duplicates
self._issues = []
self._err_count = 0
self._warn_count = 0
self._default_format = "{type!c}: {reason}"
def _add_issue(self, issue_type, reason, info):
if issue_type not in self.issue_types:
raise Exception("issue_type should be one of {}".format(str(self.issue_types)))
issue = {
'type': issue_type,
'reason': reason,
}
if type(info) is dict:
issue.update(info)
elif hasattr(info, 'debug_info'):
issue.update(info.debug_info or {})
if self.allow_duplicates or issue not in self._issues:
self._issues.append(issue)
if issue['type'] == 'error':
self._err_count += 1
else:
self._warn_count += 1
def add_error(self, reason, info=None):
"""
Add an error.
Mandatory argument reason should be a human readable string.
Optional argument info is a dictionary with information like line number etc.
"""
self._add_issue('error', reason, info)
def add_warning(self, reason, info=None):
"""
        Add a warning.
Mandatory argument reason should be a human readable string.
Optional argument info is a dictionary with information like line number etc.
"""
self._add_issue('warning', reason, info)
@property
def error_count(self):
"""Number of errors tracked"""
return self._err_count
@property
def warning_count(self):
"""Number of warnings tracked"""
return self._warn_count
@property
def issue_count(self):
"""Total number of issues tracked"""
return self._warn_count + self._err_count
def issues(self, issue_type=None, sort_key=None):
"""
Return a list of issues.
If issue_type is given, only return issues of that type.
If sort_key is given, use that to retrieve a value from each issue
used to sort the result. If sort_key is not present in issue, sort
order is undefined.
"""
if issue_type:
issues = [i for i in self._issues if i['type'] == issue_type]
else:
issues = self._issues[:]
if sort_key:
# Sort in place since we have a copy
# If sort_key not in issue(s) order is undefined
# Note: Won't work on python3
issues.sort(key=lambda k: k.get(sort_key))
return issues
def errors(self, sort_key=None):
"""
Return a list of errors.
If sort_key is given, use that to retrieve a value from each error
used to sort the result. If sort_key is not present in error, sort
order is undefined.
"""
return self.issues(issue_type='error', sort_key=sort_key)
# return [e for e in self._issues if e['type'] == 'error']
def warnings(self, sort_key=None):
"""
Return a list of warnings.
If sort_key is given, use that to retrieve a value from each warning
used to sort the result. If sort_key is not present in warning, sort
order is undefined.
"""
return self.issues(issue_type='warning', sort_key=sort_key)
# return [e for e in self._issues if e['type'] == 'warning']
def _format_items(self, items, item_format, **kwargs):
if not item_format:
item_format = self._default_format
did_warn = False
fmt = IssueFormatter()
result = []
for item in items:
combined = {}
combined.update(kwargs)
combined.update(item)
try:
x = fmt.format(item_format, **combined)
except:
# This should not fail
if not did_warn:
sys.stderr.write("Bad format string '{}', using default.\n".format(str(item_format)))
sys.stderr.write("Available keys: {}\n".format(str(combined.keys())))
did_warn = True
x = fmt.format(self._default_format, **combined)
result.append(x)
return result
def formatted_issues(self, issue_type=None, sort_key=None, custom_format=None, **kwargs):
"""
Return a list of issues formatted as strings.
Optional parameters issue_type and sort_key behaves as for issues().
If custom_format is given it should follow string.format() rules, with the
additional string conversion options 'u', 'l', and 'c' for upper, lower,
and capitalize, respectively, e.g. "{foo!u}" for uppercasing. The default format is
"{type!c}: {reason}", which is also the fallback format in case of any errors.
Any additional key-value arguments will be available to the formatter, e.g.
custom_format="{type!c}: {reason} {filename}:{line}", filename="foo/bar.calvin".
Values from the actual issue will take precedence over additional values for the same key.
Note that autonumbering, i.e. use of {}, is not allowed in the format strings, and while
indexed references, i.e. {0} {1}, are allowed they make no sense in this context.
"""
return self._format_items(self.issues(sort_key=sort_key), custom_format, **kwargs)
def formatted_errors(self, sort_key=None, custom_format=None, **kwargs):
"""
Return a list of errors formatted as strings.
Optional parameter sort_key behaves as for issues(). Custom_format and extra
key-value arguments are explained in formatted_issues().
"""
return self._format_items(self.errors(sort_key=sort_key), custom_format, **kwargs)
def formatted_warnings(self, sort_key=None, custom_format=None, **kwargs):
"""
Return a list of warnings formatted as strings.
Optional parameter sort_key behaves as for issues(). Custom_format and extra
key-value arguments are explained in formatted_issues().
"""
return self._format_items(self.warnings(sort_key=sort_key), custom_format, **kwargs)
if __name__ == '__main__':
myfmt = IssueFormatter()
print myfmt.format("{foo!u} {foo!c}", foo="hello")
print myfmt.format("{0!u} {0!c}", "hello")
t = IssueTracker()
t.add_error("Foo")
t.add_warning("Bar")
print t.issues()
print t.error_count, t.errors()
print t.warning_count, t.warnings()
print t.issue_count, t.issues()
t.add_warning("Bar")
assert t.warning_count == 1
t.allow_duplicates = True
t.add_warning("Bar")
assert t.warning_count == 2
for f in t.formatted_issues():
print f
for f in t.formatted_warnings():
print f
for f in t.formatted_errors():
print f
for f in t.formatted_issues(custom_format="{reason}"):
print f
for f in t.formatted_warnings(custom_format="{type!u} - {reason}"):
print f
for f in t.formatted_errors(custom_format="{type} - {reason!u}"):
print f
for f in t.formatted_issues(custom_format="{no_reason}"):
print f
t.add_error("Line", {'line':0, 'col':0, 'extra':42})
for f in t.formatted_issues(custom_format="{no_reason}"):
print f
t.add_error("Apa")
t.add_error("Ara")
print "--- Sorted (type) ---"
for f in t.formatted_issues(custom_format="{no_reason}", sort_key="type"):
print f
print "--- Sorted (reason) ---"
for f in t.formatted_issues(custom_format="{no_reason}", sort_key="reason"):
print f
print "--- Sorted (BAD: no_reason) ---"
for f in t.formatted_issues(sort_key="no_reason"):
print f
print "--- Sorted (line) ---"
for f in t.formatted_errors(custom_format="{type!c}: {reason} {line}:{col}", sort_key="line"):
print f
print "--- Sorted (extras) ---"
for f in t.formatted_errors(custom_format="{type!c}: {reason} {filename}, line: {line}", sort_key="line", filename="baz.calvin", line="bogus"):
print f
|
{
"content_hash": "5be92a72b1a0e4182f7ce71cff4ec9a8",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 147,
"avg_line_length": 34.03533568904594,
"alnum_prop": 0.5945805647840532,
"repo_name": "EricssonResearch/calvin-base",
"id": "e2abce137b97364b4ce40c6e894d1047bb815459",
"size": "9632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/utilities/issuetracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
}
|
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class DeletionRecoveryLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Reflects the deletion recovery level currently in effect for secrets in the current vault. If
it contains 'Purgeable', the secret can be permanently deleted by a privileged user; otherwise,
only the system can purge the secret, at the end of the retention interval.
"""
#: Denotes a vault state in which deletion is an irreversible operation, without the possibility
#: for recovery. This level corresponds to no protection being available against a Delete
#: operation; the data is irretrievably lost upon accepting a Delete operation at the entity level
#: or higher (vault, resource group, subscription etc.)
PURGEABLE = "Purgeable"
#: Denotes a vault state in which deletion is recoverable, and which also permits immediate and
#: permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted entity
#: during the retention interval (90 days), unless a Purge operation is requested, or the
    #: subscription is cancelled. System will permanently delete it after 90 days, if not recovered
RECOVERABLE_PURGEABLE = "Recoverable+Purgeable"
#: Denotes a vault state in which deletion is recoverable without the possibility for immediate
#: and permanent deletion (i.e. purge). This level guarantees the recoverability of the deleted
#: entity during the retention interval(90 days) and while the subscription is still available.
    #: System will permanently delete it after 90 days, if not recovered
RECOVERABLE = "Recoverable"
#: Denotes a vault and subscription state in which deletion is recoverable within retention
#: interval (90 days), immediate and permanent deletion (i.e. purge) is not permitted, and in
    #: which the subscription itself cannot be permanently canceled. System will permanently delete it
#: after 90 days, if not recovered
RECOVERABLE_PROTECTED_SUBSCRIPTION = "Recoverable+ProtectedSubscription"
#: Denotes a vault state in which deletion is recoverable, and which also permits immediate and
#: permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90). This level guarantees
#: the recoverability of the deleted entity during the retention interval, unless a Purge
#: operation is requested, or the subscription is cancelled.
CUSTOMIZED_RECOVERABLE_PURGEABLE = "CustomizedRecoverable+Purgeable"
#: Denotes a vault state in which deletion is recoverable without the possibility for immediate
#: and permanent deletion (i.e. purge when 7<= SoftDeleteRetentionInDays < 90).This level
#: guarantees the recoverability of the deleted entity during the retention interval and while the
#: subscription is still available.
CUSTOMIZED_RECOVERABLE = "CustomizedRecoverable"
#: Denotes a vault and subscription state in which deletion is recoverable, immediate and
#: permanent deletion (i.e. purge) is not permitted, and in which the subscription itself cannot
#: be permanently canceled when 7<= SoftDeleteRetentionInDays < 90. This level guarantees the
#: recoverability of the deleted entity during the retention interval, and also reflects the fact
#: that the subscription itself cannot be cancelled.
CUSTOMIZED_RECOVERABLE_PROTECTED_SUBSCRIPTION = "CustomizedRecoverable+ProtectedSubscription"
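    # Illustrative note (an addition, not part of the generated client): because
    # the enum also derives from str, members compare equal to the raw strings
    # returned by the service, e.g.
    #
    #     DeletionRecoveryLevel.RECOVERABLE_PURGEABLE == "Recoverable+Purgeable"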
|
{
"content_hash": "e4650b00a00ddb36a422855a89cbaa15",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 102,
"avg_line_length": 75.23913043478261,
"alnum_prop": 0.7665414620052008,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ded059738c1e5a20f5c02bafb93451ebc0cdd86e",
"size": "3929",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_3/models/_key_vault_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
'''This program just reads some meta-data from the label of
an ISIS cube file and presents the text in a way that might help
someone get started on writing a figure caption.'''
# Copyright 2019, Ross A. Beyer (rbeyer@seti.org)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This program was motivated by this Issue from Laszlo Kestay:
# https://github.com/USGS-Astrogeology/ISIS3/issues/1588
# He says:
# It is a royal pain to find all the data to put in the caption for
# a figure for a paper. It would be lovely if ISIS would make
# publication-ready figures. One part of this would be to pull the
# key data out of the labels and present the user with a simple pile
# of text that they can cut-paste (or put in a text file) for the
# caption. Would be good to be able to select what information you
# wanted in the caption too.
#
# Optional things to add: local time of day, season, type of SPICE used. If
# not map projected, give information on the viewing geometry.
#
# You will need to install the pvl library at the very least, and can install
# the kalasiris library (and ISIS) to get some additional information.
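# A hypothetical invocation (the cube file name is illustrative only):
#
#     python caption_helper.py my_observation.cub
#
# which prints the gathered label elements followed by the suggested caption text.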
import argparse
import datetime
import math
import os
import subprocess
import sys
import pvl
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('cube', help='Cube file(s) to read.')
args = parser.parse_args()
# Gather data elements into elem:
elem = dict()
label = pvl.load(args.cube)['IsisCube']
elem.update(get_instrument(label.get('Instrument')))
elem['productid'] = None
if label.get('Archive') is not None:
elem['productid'] = label['Archive'].get('ProductId')
elem.update(get_mapping(label.get('Mapping')))
elem.update(get_campt(args.cube))
# Print out results.
for k in elem.keys():
print('{}: {}'.format(k, elem.get(k)))
print('')
print(' '.join(get_sentences(elem)))
return
def get_instrument(label: dict) -> dict:
d = dict(scname=None, instrument=None, time=None)
if label is None:
return d
d['scname'] = label.get('SpacecraftName')
d['instrument'] = label.get('InstrumentId')
t = label.get('StartTime')
if t is not None:
if isinstance(t, datetime.datetime):
d['time'] = str(t)
else:
d['time'] = str(t[0])
return d
def get_mapping(label: dict) -> dict:
d = dict(pixelres=None, projection=None)
if label is None:
return d
pr = label.get('PixelResolution')
if pr is not None:
d['pixelres'] = '{} {}'.format(*pr)
d['projection'] = label.get('ProjectionName')
return d
def get_campt(path: os.PathLike) -> dict:
d = dict(northaz=None, subsolargroundaz=None, abovehoriz=None)
try:
import kalasiris as isis
cpvl = pvl.loads(isis.campt(path).stdout)['GroundPoint']
d['northaz'] = cpvl.get('NorthAzimuth')
d['subsolargroundaz'] = cpvl.get('SubSolarGroundAzimuth')
incid = cpvl.get('Incidence')
if incid is not None:
d['abovehoriz'] = pvl.Units(str(90 - float(incid[0])), incid[1])
except subprocess.CalledProcessError:
# Couln't get any data from campt, maybe it was a level 2 image?
pass
except ModuleNotFoundError:
# To get some additional functionality,
# install the kalasiris library.
pass
return d
def inst_sentence(scname=None, instrument=None, time=None, productid=None) -> str:
if all(x is None for x in (scname, instrument, time, productid)):
raise ValueError('All passed elements were None, '
'at least one of them should have a string.')
    sent = 'This image contains data acquired'
if scname is not None or instrument is not None:
sent += ' by'
if instrument is not None:
sent += f' {instrument}'
if scname is not None:
sent += f' onboard {scname}'
else:
sent += f' {scname}'
if time is not None:
sent += f' on {time}'
if productid is not None:
sent += f' with the Product ID, {productid}'
sent += '.'
return sent
def mapping_sentence(projection=None, pixelres=None) -> str:
if all(x is None for x in (projection, pixelres)):
raise ValueError('All passed elements were None, '
'at least one of them should have a string.')
sent = 'The image'
prep = 'has'
if projection is not None:
sent += f' is a {projection} projection'
prep = 'with'
if pixelres is not None:
sent += f' {prep} a ground sample distance of {pixelres}'
sent += '.'
return sent
def f_pair(pair: list) -> tuple:
units = pair[1]
if units.casefold().startswith('degree'):
units = 'degrees'
return (float(pair[0]), units)
def campt_sentence(northaz=None, subsolargroundaz=None, abovehoriz=None) -> str:
# Assumes the arguments are either None or pvl.Units objects.
if all(x is None for x in (northaz, subsolargroundaz, abovehoriz)):
raise ValueError('All passed elements were None, '
'at least one of them should have a string.')
sents = list()
if northaz is not None:
s = 'North'
if math.floor(float(northaz[0])) == 270:
s += ' is up.'
        else:
            s += ' azimuth is {:.2f} {} from the +x direction.'.format(*f_pair(northaz))
sents.append(s)
if subsolargroundaz is not None or abovehoriz is not None:
s = 'The Sun is'
if abovehoriz is not None:
s += ' {:.2f} {} above the horizon'.format(*f_pair(abovehoriz))
if subsolargroundaz is not None:
s += ' at an azimuth of {:.2f} {} from North'.format(*f_pair(subsolargroundaz))
s += '.'
sents.append(s)
return ' '.join(sents)
def get_sentences(elem: dict) -> list:
s = list()
try:
s.append(inst_sentence(elem.get('scname'),
elem.get('instrument'),
elem.get('time'),
elem.get('productid')))
except ValueError:
pass
try:
s.append(mapping_sentence(elem.get('projection'),
elem.get('pixelres')))
except ValueError:
pass
try:
s.append(campt_sentence(elem.get('northaz'),
elem.get('subsolargroundaz'),
elem.get('abovehoriz')))
except ValueError:
pass
return s
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "a5fb077d6120c7d43393e283199a63dd",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 91,
"avg_line_length": 30.670886075949365,
"alnum_prop": 0.6115008942082818,
"repo_name": "rbeyer/scriptorium",
"id": "6407acea1aaa29fbc9caf77af782cac6a002b289",
"size": "7291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caption_helper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "49301"
},
{
"name": "Vim script",
"bytes": "855"
}
],
"symlink_target": ""
}
|
import json
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.urls import reverse
from filebrowser.fields import FileBrowseField
from taggit.managers import TaggableManager
from notaro.managers import GenManager
from notaro.models import Note
from tags.models import CustomTagThrough
from .tasks import render_map
def cleanname(name):
"""Replace umlauts (ä by ae, etc.) and then remove all non-ASCII letters
from string."""
for umlaut, expansion in [('Ä', 'Ae'), ('Ö', 'Oe'), ('Ü', 'Ue'),
('ä', 'ae'), ('ö', 'oe'), ('ü', 'ue'),
('ß', 'ss'), ]:
name = name.replace(umlaut, expansion)
return u''.join([c for c in name
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' +
'abcdefghijklmnopqrstuvwxyz'])
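# For example (illustrative, not from the original source):
#     cleanname('Köln (Nordrhein-Westfalen)') == 'KoelnNordrheinWestfalen'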
class Url(models.Model):
title = models.CharField(max_length=200, blank=True, verbose_name="Titel")
link = models.CharField(max_length=200, verbose_name="Link")
def __str__(self):
return self.title or self.link[:50]
def related_label(self):
return '<a href="%s">%s</a>' %\
(self.link, self.title or self.link[:50], )
class PlaceUrl(models.Model):
url = models.ForeignKey(
Url,
verbose_name="URL", on_delete=models.CASCADE)
place = models.ForeignKey(
'Place',
verbose_name="Ort", on_delete=models.CASCADE)
position = models.PositiveIntegerField(default=1)
class Meta:
ordering = ('position', )
verbose_name = 'URL zum Ort'
verbose_name_plural = 'URLs zum Ort'
class PlaceNote(models.Model):
place = models.ForeignKey(
'Place',
verbose_name="Ort", on_delete=models.CASCADE)
note = models.ForeignKey(
Note,
verbose_name="Text", on_delete=models.CASCADE)
position = models.IntegerField(default=1)
class Meta:
verbose_name = 'Text zu Ort'
verbose_name_plural = 'Texte zu Ort'
class Place(models.Model):
title = models.CharField(max_length=200, blank=True, verbose_name="Titel")
slug = models.SlugField(blank=True)
handle = models.CharField(max_length=50, unique=True)
urls = models.ManyToManyField(
Url,
through=PlaceUrl,
blank=True,
verbose_name="URLs")
location = models.PointField(
blank=True,
null=True,
verbose_name="Geo-Koordinaten")
notes = models.ManyToManyField(
Note,
blank=True,
through=PlaceNote,
verbose_name="Texte")
@property
def latitude(self):
if self.location:
# pylint: disable=no-member
return self.location.y
return None
@property
def longitude(self):
if self.location:
# pylint: disable=no-member
return self.location.x
return None
def reset_handle(self):
"""Recompute handle for a Place object which already has an id."""
self.handle = 'L_'
if self.title:
self.handle += cleanname(self.title)[:20]
if self.location:
# pylint: disable=no-member
self.handle += str(self.location.x)[:10] + '_'
self.handle += str(self.location.y)[:10]
self.handle += '-' + str(self.id)
self.handle = self.handle[:49]
# pylint: disable=no-member
self.save()
def as_tag(self):
return ("Ort: %s" % self.title, "Ort: %s" % self.title)
def __str__(self):
return self.title
def related_label(self):
return self.__str__()
@staticmethod
def autocomplete_search_fields():
return ("title__startswith",)
def get_absolute_url(self):
"""Return URL where this object can be viewed."""
return reverse('place-detail',
kwargs={'pk': self.id, })
class Meta:
ordering = ('title', )
verbose_name = 'Ort'
verbose_name_plural = 'Orte'
class CustomMapMarker(models.Model):
custommap = models.ForeignKey(
'CustomMap',
verbose_name="Karte", on_delete=models.CASCADE)
place = models.ForeignKey(
Place,
verbose_name="Ort", on_delete=models.CASCADE)
label = models.CharField(max_length=30, blank=True)
description = models.CharField(max_length=200, blank="True",
verbose_name="Beschreibung")
label_offset_x = models.FloatField(
default=0,
verbose_name="Positionskorrektur Label X")
label_offset_y = models.FloatField(
default=0,
verbose_name="Positionskorrektur Label Y")
style = models.CharField(
max_length=400,
blank=True,
null=True,
verbose_name="Stil")
position = models.IntegerField(default=1)
def get_label_html(self):
if len(self.label) == 1 and self.label in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
return '<img width="18" src="%spng/%s.png">'\
% (settings.STATIC_URL, self.label)
else:
return '<b>(%s)</b>' % self.label
def get_label_tex(self):
if len(self.label) == 1 and self.label in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
return r'\includegraphics[width=0.4cm]{%s}' % self.label
else:
return r'\textbf{(%s)}' % self.label
def get_description(self):
if self.description == '-':
return None
if self.description:
return self.description
return self.place.title
class Meta:
ordering = ('position', )
class CustomMap(models.Model):
# possible values of render_status
NOTRENDERED = "NOTRENDERED" # no rendering task has been started yet
RENDERED = "RENDERED" # rendered map stored in rendered field
# if rendering task currently running, then store celery task id
# in this field
title = models.CharField(max_length=200, blank=True, verbose_name="Titel")
description = models.TextField(blank=True, verbose_name="Beschreibung")
bbox = models.PolygonField(verbose_name="Begrenzung")
markers = models.ManyToManyField(
Place,
blank=True,
through=CustomMapMarker,
verbose_name="Markierungen")
map_style = models.CharField(
max_length=50, blank=True, null=True,
verbose_name="Kartenstil")
render_status = models.CharField(default=NOTRENDERED, max_length=800)
# Use refresh field to allow user in admin to explicitly trigger
# a new rendering task
refresh = models.BooleanField(
default=False,
verbose_name="Gerenderte Karte aktualisieren")
rendered = FileBrowseField("Bilddatei", max_length=200, directory="maps/",
extensions=[".png", ],
blank=True, null=True,
help_text="Gerenderte Karte im png-Format")
sites = models.ManyToManyField(Site)
all_objects = GenManager()
objects = CurrentSiteManager()
tags = TaggableManager(
through=CustomTagThrough,
blank=True, help_text="")
def save(self, *args, **kwargs):
# always save self.refresh as False, so that it is by default
# set to False in admin
refresh = self.refresh
self.refresh = False
# pylint: disable=no-member
super(CustomMap, self).save(*args, **kwargs)
# Now trigger rendering process
# (do this after saving so that the rendering task does not
# save the model with outdated values)
if refresh:
render_map.delay(self.id)
def as_html_in_list(self):
"""
HTML representing this map (typically to be used to show a list of
objects with a certain tag, e.g. in a detail view of a Person, ...
"""
# pylint: disable=no-member
return self.title
def get_render_status(self):
if self.render_status == CustomMap.RENDERED:
return 'Aktuelle Karte ist gerendert.'
if self.render_status == CustomMap.NOTRENDERED:
return 'Aktuelle Karte ist noch nicht gerendert.'
return 'Aktuelle Karte wird gerade gerendert (id %s).'\
% self.render_status
def geojson(self):
gj = {
"type": "FeatureCollection",
"features":
[
{
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
m.place.location.x + m.label_offset_x,
m.place.location.y + m.label_offset_y, ]
},
"properties": {
"label": m.label,
"scale": 1.0,
# the following settings have no effect so far;
# need to wait for Mapnik 3.0
"font": "Open Sans Bold",
"font-size": 12.0,
"image": "circle_black.svg"
}
}
for m in self.custommapmarker_set.all()]
}
return json.dumps(gj)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('custommap-detail',
kwargs={'pk': self.id, })
class Meta:
ordering = ('-id', )
verbose_name = 'Eigene Landkarte'
verbose_name_plural = 'Eigene Landkarten'
|
{
"content_hash": "4b7d0e3815784cac9951351c3f5b51d1",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 79,
"avg_line_length": 32.08709677419355,
"alnum_prop": 0.5574545088971549,
"repo_name": "ugoertz/django-familio",
"id": "6e7a8aa119889e3876fde13af841c1f068d0ec1b",
"size": "9979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maps/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61023"
},
{
"name": "HTML",
"bytes": "632961"
},
{
"name": "JavaScript",
"bytes": "1352913"
},
{
"name": "Makefile",
"bytes": "1735"
},
{
"name": "Python",
"bytes": "532976"
},
{
"name": "Shell",
"bytes": "352"
},
{
"name": "TeX",
"bytes": "16522"
}
],
"symlink_target": ""
}
|
"""cell.bin.base"""
import optparse
import os
import sys
from cell import __version__
__all__ = ['Option', 'Command']
Option = optparse.make_option
class Command(object):
Parser = optparse.OptionParser
args = ''
version = __version__
option_list = ()
prog_name = None
def run(self, *args, **options):
raise NotImplementedError('subclass responsibility')
def execute_from_commandline(self, argv=None):
"""Execute application from command line.
:keyword argv: The list of command line arguments.
Defaults to ``sys.argv``.
"""
if argv is None:
argv = list(sys.argv)
self.prog_name = os.path.basename(argv[0])
return self.handle_argv(self.prog_name, argv[1:])
def usage(self):
"""Returns the command-line usage string for this app."""
return '%%prog [options] %s' % (self.args, )
def get_options(self):
"""Get supported command line options."""
return self.option_list
def handle_argv(self, prog_name, argv):
"""Parses command line arguments from ``argv`` and dispatches
to :meth:`run`.
:param prog_name: The program name (``argv[0]``).
:param argv: Command arguments.
"""
options, args = self.parse_options(prog_name, argv)
return self.run(*args, **vars(options))
def exit(self, v=0):
sys.exit(v)
def exit_status(self, msg, status=0, fh=sys.stderr):
fh.write('%s\n' % (msg, ))
self.exit(status)
def exit_usage(self, msg):
sys.stderr.write('ERROR: %s\n\n' % (msg, ))
self.exit_status('Usage: %s' % (
self.usage().replace('%prog', self.prog_name), ))
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
def create_parser(self, prog_name):
return self.Parser(prog=prog_name,
usage=self.usage(),
version=self.version,
option_list=self.get_options())
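# Minimal subclass sketch (hypothetical, not part of this module): a command
# declares its options via `option_list` and implements `run`; a console entry
# point would then call `HelloCommand().execute_from_commandline()`.
class HelloCommand(Command):
    args = '[name]'
    option_list = (
        Option('--shout', action='store_true', default=False,
               help='print the greeting in upper case'),
    )
    def run(self, name='world', shout=False, **kwargs):
        greeting = 'Hello, %s!' % (name, )
        print(greeting.upper() if shout else greeting)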
|
{
"content_hash": "665f6e8ca14fe5881403593f3cda3fab",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 69,
"avg_line_length": 29.91358024691358,
"alnum_prop": 0.577383408997111,
"repo_name": "celery/cell",
"id": "de245073bab13f88d96efc09fbeb7d8253794a36",
"size": "2423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cell/bin/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1495"
},
{
"name": "Python",
"bytes": "158962"
},
{
"name": "Shell",
"bytes": "2154"
}
],
"symlink_target": ""
}
|
from unittest import TestCase as PythonTestCase
class TestCase(PythonTestCase):
pass
|
{
"content_hash": "1d3a25e73c16378bf777d7c140f9d625",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 22.5,
"alnum_prop": 0.8111111111111111,
"repo_name": "fish-bundles/fb",
"id": "4f3d581f3aca633293ede19a5ab79bfd2349beef",
"size": "352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17468"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='cloudify-riemann-controller-plugin',
version='3.2',
author='Gigaspaces',
author_email='cosmo-admin@gigaspaces.com',
packages=['riemann_controller',
'riemann_controller.resources'],
package_data={'riemann_controller.resources': [
'manager.config',
'deployment.config.template'
]},
license='LICENSE',
description='Plugin for creating riemann configuration'
' based on blueprint policies and starting '
                'a riemann core with generated configuration',
install_requires=[
'cloudify-plugins-common==3.2',
'jinja2==2.7.2'
],
)
|
{
"content_hash": "5c42e7e7e343471ebd8e2783bf1f5347",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 63,
"avg_line_length": 29.82608695652174,
"alnum_prop": 0.6282798833819242,
"repo_name": "konradxyz/cloudify-manager",
"id": "37e6b40a1e1980c5cf60de8ca06c9d5a2263a96c",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/riemann-controller/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "3344"
},
{
"name": "Python",
"bytes": "822901"
},
{
"name": "Shell",
"bytes": "16706"
}
],
"symlink_target": ""
}
|
from google.appengine.ext import ndb
class User(ndb.Model):
username = ndb.StringProperty()
interests = ndb.StringProperty(repeated=True)
email = ndb.StringProperty()
common = ndb.IntegerProperty()
favorites = ndb.KeyProperty(repeated=True)
# image = ndb.StringProperty()
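# A minimal sketch of creating and querying this model (hypothetical values;
# ndb calls only work inside an App Engine context or a testbed stub).
def create_and_find():
    user = User(username='alice', email='alice@example.com',
                interests=['jazz', 'hiking'], common=0, favorites=[])
    key = user.put()  # writes the entity and returns its ndb.Key
    found = User.query(User.username == 'alice').get()  # fetch by indexed property
    return key, found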
|
{
"content_hash": "4d3e00697af564640b0c4c8b37f08304",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 49,
"avg_line_length": 33,
"alnum_prop": 0.7205387205387206,
"repo_name": "CSSIMusicApp/Apollu",
"id": "547784d2103e9231f755db991884683c9b61ccc4",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cssimusicapp/Models/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10525"
},
{
"name": "HTML",
"bytes": "15969"
},
{
"name": "JavaScript",
"bytes": "885"
},
{
"name": "Python",
"bytes": "22244"
}
],
"symlink_target": ""
}
|
"""
S.H.I.V.A. - Social network History & Information Vault & Analyser
Application entry point
"""
import jinja2
import cherrypy
import platform
from pymongo import MongoClient
from pymongo import ReadPreference
from engine.tools import IgnoreRequestFilter
from engine.tools import secureheaders
cherrypy.tools.secureheaders = cherrypy.Tool(
"before_finalize", secureheaders, priority=60)
from engine.tools import HazelcastSession
cherrypy.lib.sessions.HazelcastSession = HazelcastSession
from engine.modules.auth import Auth
cherrypy.tools.check_login = cherrypy.Tool("before_handler", Auth.check_login)
from engine.modules.heartbeat import Heartbeat
from engine.modules.notes import Notes
from engine.modules.vk import VK
class Application(object):
""" Main application class """
def __init__(self, template_engine, modules):
self.template_engine = template_engine
self.module_list = list()
for module in modules:
setattr(self, module, modules[module])
if modules[module].MODULE_NAME is not None:
item = dict()
item["path"] = module
item["name"] = modules[module].MODULE_NAME
item["instance"] = modules[module]
self.module_list.append(item)
@cherrypy.expose
@cherrypy.tools.check_login()
def index(self):
""" Index """
return self.template_engine.get_template(
"index.html"
).render(
user=cherrypy.session.get("login", None),
generator=platform.node(),
modules=self.module_list
)
def main():
""" Main (entry point) """
template_engine = jinja2.Environment(loader=jinja2.FileSystemLoader(
"/usr/src/app/template"))
mongo = MongoClient(
["mongo1", "mongo2", "mongo3"],
replicaSet="rs0",
read_preference=ReadPreference.PRIMARY_PREFERRED,
readConcernLevel="majority",
w=2, wtimeout=3000, j=True
)
modules = {
"heartbeat": Heartbeat(),
"auth": Auth(template_engine, mongo),
"notes": Notes(template_engine, mongo),
"vk": VK(template_engine, mongo)
}
config = "S.H.I.V.A..conf"
cherrypy.config.update(config)
application = cherrypy.tree.mount(
Application(template_engine, modules),
"/",
config
)
application.log.access_log.addFilter(
IgnoreRequestFilter("GET /heartbeat/index"))
cherrypy.engine.signals.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == "__main__":
main()
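# The Application constructor above registers any module object that defines
# a MODULE_NAME attribute.  A minimal sketch of an extra module following
# that convention (hypothetical; it could be wired in by adding
# modules["ping"] = Ping() inside main()).
import cherrypy
class Ping(object):
    MODULE_NAME = "Ping"
    @cherrypy.expose
    def index(self):
        return "pong"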
|
{
"content_hash": "90349103a8b60576907b37d051858c79",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.6417910447761194,
"repo_name": "LifeDJIK/S.H.I.V.A.",
"id": "cda6dc09f3fb95f68c6b0e7dcebeb48ed5cadf5d",
"size": "2694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "containers/shiva/S.H.I.V.A..py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4401"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "Python",
"bytes": "19571"
}
],
"symlink_target": ""
}
|
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
def unicode_join(seq):
"""Simple args to unicode conversion and concatenation."""
return concat(imap(text_type, seq))
def new_context(environment, template_name, blocks, vars=None,
shared=None, globals=None, locals=None):
"""Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in iteritems(locals):
if key[:2] == 'l_' and value is not missing:
parent[key[2:]] = value
return Context(environment, parent, template_name, blocks)
class TemplateReference(object):
"""The `self` in templates."""
def __init__(self, context):
self.__context = context
def __getitem__(self, name):
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.__context.name
)
class Context(object):
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
automatically at various stages of the template evaluation and should not
be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`contextfunction`\s get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
__slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
'name', 'blocks', '__weakref__')
def __init__(self, environment, parent, name, blocks):
self.parent = parent
self.vars = {}
self.environment = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars = set()
self.name = name
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
def super(self, name, current):
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]
except LookupError:
return self.environment.undefined('there is no parent block '
'called %r.' % name,
name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
"""Returns an item from the template context, if it doesn't exist
`default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key):
"""Looks up a variable like `__getitem__` or `get` but returns an
:class:`Undefined` object with the name of the variable looked up.
"""
if key in self.vars:
return self.vars[key]
if key in self.parent:
return self.parent[key]
return self.environment.undefined(name=key)
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def get_all(self):
"""Return a copy of the complete context as dict including the
exported variables.
"""
return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
:func:`environmentfunction`.
"""
if __debug__:
__traceback_hide__ = True
# Allow callable classes to take a context
fn = __obj.__call__
for fn_type in ('contextfunction',
'evalcontextfunction',
'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
if isinstance(__obj, _context_function_types):
if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined('value was undefined because '
'a callable raised a '
'StopIteration exception')
def derived(self, locals=None):
"""Internal helper function to create a derived context."""
context = new_context(self.environment, self.name, {},
self.parent, True, None, locals)
context.vars.update(self.vars)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
def _all(meth):
proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
keys = _all('keys')
values = _all('values')
items = _all('items')
# not available on python 3
if PY2:
iterkeys = _all('iterkeys')
itervalues = _all('itervalues')
iteritems = _all('iteritems')
del _all
def __contains__(self, name):
return name in self.vars or name in self.parent
def __getitem__(self, key):
"""Lookup a variable or raise `KeyError` if the variable is
undefined.
"""
item = self.resolve(key)
if isinstance(item, Undefined):
raise KeyError(key)
return item
def __repr__(self):
return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
self.name
)
# register the context as mapping if possible
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
class BlockReference(object):
"""One block on a template reference."""
def __init__(self, name, context, stack, depth):
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1)
@internalcode
def __call__(self):
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
class LoopContext(object):
"""A loop context for dynamic iteration."""
def __init__(self, iterable, recurse=None, depth0=0):
self._iterator = iter(iterable)
self._recurse = recurse
self._after = self._safe_next()
self.index0 = -1
self.depth0 = depth0
# try to get the length of the iterable early. This must be done
# here because there are some broken iterators around whose
# __len__ is the number of iterations left (I'm looking at your
# listreverseiterator!).
try:
self._length = len(iterable)
except (TypeError, AttributeError):
self._length = None
def cycle(self, *args):
"""Cycles among the arguments with the current loop index."""
if not args:
raise TypeError('no items for cycling given')
return args[self.index0 % len(args)]
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
depth = property(lambda x: x.depth0 + 1)
def __len__(self):
return self.length
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@internalcode
def loop(self, iterable):
if self._recurse is None:
raise TypeError('Tried to call non recursive loop. Maybe you '
"forgot the 'recursive' modifier.")
return self._recurse(iterable, self._recurse, self.depth0 + 1)
# a nifty trick to enhance the error message if someone tried to call
# the loop without or with too many arguments.
__call__ = loop
del loop
@property
def length(self):
if self._length is None:
# if it was not possible to get the length of the iterator when
# the loop context was created (ie: iterating over a generator)
# we have to convert the iterable into a sequence and use the
# length of that.
iterable = tuple(self._iterator)
self._iterator = iter(iterable)
self._length = len(iterable) + self.index0 + 1
return self._length
def __repr__(self):
return '<%s %r/%r>' % (
self.__class__.__name__,
self.index,
self.length
)
@implements_iterator
class LoopContextIterator(object):
"""The iterator for a loop context."""
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __iter__(self):
return self
def __next__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopIteration()
next_elem = ctx._after
ctx._after = ctx._safe_next()
return next_elem, ctx
class Macro(object):
"""Wraps a macro function."""
def __init__(self, environment, func, name, arguments, defaults,
catch_kwargs, catch_varargs, caller):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.defaults = defaults
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
@internalcode
def __call__(self, *args, **kwargs):
# try to consume the positional arguments
arguments = list(args[:self._argument_count])
off = len(arguments)
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
try:
value = self.defaults[idx - self._argument_count + off]
except IndexError:
value = self._environment.undefined(
'parameter %r was not provided' % name, name=name)
arguments.append(value)
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller:
caller = kwargs.pop('caller', None)
if caller is None:
caller = self._environment.undefined('No caller defined',
name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
raise TypeError('macro %r takes no keyword argument %r' %
(self.name, next(iter(kwargs))))
if self.catch_varargs:
arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
raise TypeError('macro %r takes no more than %d argument(s)' %
(self.name, len(self.arguments)))
return self._func(*arguments)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name is None and 'anonymous' or repr(self.name)
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
'_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, string_types):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = \
_fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(type(self))
def __str__(self):
return u''
def __len__(self):
return 0
def __iter__(self):
if 0:
yield None
def __nonzero__(self):
return False
def __repr__(self):
return 'Undefined'
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
return u'{{ %s }}' % self._undefined_name
return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = \
Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
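# A quick sketch of how the undefined types defined above are selected in
# practice: they are handed to the Environment through its "undefined"
# argument, and only StrictUndefined raises on a plain render of a missing
# variable.
from jinja2 import Environment
from jinja2.exceptions import UndefinedError
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
print(Environment(undefined=Undefined).from_string('[{{ missing }}]').render())       # []
print(Environment(undefined=DebugUndefined).from_string('[{{ missing }}]').render())  # [{{ missing }}]
try:
    Environment(undefined=StrictUndefined).from_string('[{{ missing }}]').render()
except UndefinedError as exc:
    print(exc)  # 'missing' is undefined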
|
{
"content_hash": "f31284d2b9c07441ede85161dddbd8e4",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 79,
"avg_line_length": 33.66896551724138,
"alnum_prop": 0.570565342072921,
"repo_name": "atupal/ffbird",
"id": "efd67e1a2b916e4352ffc36ebea510d8e5641d0d",
"size": "19552",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "jinja2/runtime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7178"
},
{
"name": "JavaScript",
"bytes": "61036"
},
{
"name": "Python",
"bytes": "555889"
}
],
"symlink_target": ""
}
|