text stringlengths 0 1.05M | meta dict |
|---|---|
# Public API of this module: the slicing filters exposed to ParaView.
__all__ = [
    'ManySlicesAlongPoints',
    'ManySlicesAlongAxis',
    'SlideSliceAlongPoints',
    'SliceThroughTime',
]
# Category name shown for this module in the plugin menus/docs.
__displayname__ = 'Slicing'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import _helpers
from ..base import FilterBase
class _SliceBase(FilterBase):
    """A helper base class for making slicing filters.

    Note:
        * Make sure the input data source is slice-able.
        * The SciPy module is required for filters built on this base.
    """

    __displayname__ = 'Base Slicing Filter'
    __category__ = 'filter'

    def __init__(
        self,
        n_slices=5,
        nInputPorts=1,
        inputType='vtkDataSet',
        nOutputPorts=1,
        outputType='vtkUnstructuredGrid',
    ):
        FilterBase.__init__(
            self,
            nInputPorts=nInputPorts,
            inputType=inputType,
            nOutputPorts=nOutputPorts,
            outputType=outputType,
        )
        # Parameters
        self.__n_slices = n_slices

    @staticmethod
    def _generate_plane(origin, normal):
        """Internal helper to build a ``vtkPlane`` for the cutter.

        Args:
            origin (iterable): XYZ coordinates of a point on the plane.
                Needs to be inside of the input grid.
            normal (iterable): XYZ components of the plane's normal vector.

        Return:
            vtk.vtkPlane: an implicit plane usable by ``vtkCutter``.
        """
        plane = vtk.vtkPlane()  # Construct the plane object
        # Set the origin... needs to be inside of the grid
        plane.SetOrigin(origin[0], origin[1], origin[2])
        # Set the normal of that plane to control the slice orientation
        plane.SetNormal(normal)
        return plane

    @staticmethod
    def _slice(pdi, pdo, plane):
        """Slice input ``pdi`` on ``plane`` and shallow-copy into ``pdo``.

        Return:
            The output data object ``pdo`` holding the cut geometry.
        """
        cutter = vtk.vtkCutter()  # Construct the cutter object
        cutter.SetInputData(pdi)  # Use the grid as the data we desire to cut
        cutter.SetCutFunction(plane)  # Set the cutter to use the plane we made
        cutter.Update()  # Perform the cut
        pdo.ShallowCopy(cutter.GetOutput())  # Grab the output
        return pdo

    def get_number_of_slices(self):
        """Return the number of slices generated by this algorithm."""
        return self.__n_slices

    def set_number_of_slices(self, num):
        """Set the number of slices generated by this algorithm."""
        if self.__n_slices != num:
            self.__n_slices = num
            self.Modified()  # Flag the VTK pipeline so output is recomputed
###############################################################################
class ManySlicesAlongPoints(_SliceBase):
    """Takes a series of points and a data source to be sliced. The points are
    used to construct a path through the data source and a slice is added at
    intervals of that path along the vector of that path at that point. This
    constructs many slices through the input dataset as a merged
    ``vtkMultiBlockDataSet``.

    Note:
        * Make sure the input data source is slice-able.
        * The SciPy module is required for this filter.
    """

    __displayname__ = 'Many Slices Along Points'
    __category__ = 'filter'

    def __init__(self, n_slices=5, nearest_nbr=True, outputType='vtkMultiBlockDataSet'):
        _SliceBase.__init__(
            self,
            n_slices=n_slices,
            nInputPorts=2,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType=outputType,
        )
        self.__useNearestNbr = nearest_nbr

    # CRITICAL for multiple input ports
    def FillInputPortInformation(self, port, info):
        """This simply makes sure the user selects the correct inputs.

        Port 0 must be ``vtkPolyData`` (the path points); port 1 may be any
        ``vtkDataSet`` (the data source to slice).
        """
        typ = 'vtkDataSet'
        if port == 0:
            typ = 'vtkPolyData'  # Make sure points are poly data
        info.Set(self.INPUT_REQUIRED_DATA_TYPE(), typ)
        return 1

    def _get_planes(self, pts):
        """Internal helper to generate the slicing planes along the path.

        Args:
            pts (vtkPolyData): the points defining the slicing path.

        Return:
            list: one ``vtkPlane`` per sampled path segment. The plane origin
            is the segment start and the normal is the vector to the next
            point on the (optionally nearest-neighbor sorted) path.
        """
        try:
            # sklearn's KDTree is faster: use it if available
            from sklearn.neighbors import KDTree as Tree
        except ImportError:
            from scipy.spatial import cKDTree as Tree
        if self.get_number_of_slices() == 0:
            return []
        # Get the points over the NumPy interface
        wpdi = dsa.WrapDataObject(pts)  # NumPy wrapped points
        # New NumPy array of points so we don't destroy the input
        points = np.array(wpdi.Points)
        numPoints = pts.GetNumberOfPoints()
        if self.__useNearestNbr:
            # Order the points into a connected path via nearest neighbors
            tree = Tree(points)
            ptsi = tree.query([points[0]], k=numPoints)[1].ravel()
        else:
            ptsi = [i for i in range(numPoints)]
        # BUG FIX: the stride must be at least 1 — with fewer points than
        # requested slices the integer division yields 0 and ``range``
        # would raise ``ValueError: range() arg 3 must not be zero``.
        step = max(1, numPoints // self.get_number_of_slices())
        # Iterate over points in order (skips last point):
        planes = []
        for i in range(0, numPoints - 1, step):
            # Use the vector to the next point on the path as the normal
            pts1 = points[ptsi[i]]
            pts2 = points[ptsi[i + 1]]
            x1, y1, z1 = pts1[0], pts1[1], pts1[2]
            x2, y2, z2 = pts2[0], pts2[1], pts2[2]
            normal = [x2 - x1, y2 - y1, z2 - z1]
            # create plane
            planes.append(self._generate_plane([x1, y1, z1], normal))
        return planes

    def _get_slice(self, pts, data, planes, output):
        """Internal helper to perform the slicing for every plane."""
        # Size the output by the number of planes actually generated; this
        # can differ slightly from the requested slice count because the
        # path is stepped with an integer stride.
        output.SetNumberOfBlocks(len(planes))
        for i, plane in enumerate(planes):
            temp = vtk.vtkPolyData()
            self._slice(data, temp, plane)
            output.SetBlock(i, temp)
            output.GetMetaData(i).Set(
                vtk.vtkCompositeDataSet.NAME(), 'Slice%.2d' % i
            )
        return output

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pts = self.GetInputData(inInfo, 0, 0)  # Port 0: points
        data = self.GetInputData(inInfo, 1, 0)  # Port 1: sliceable data
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        # Perform task
        planes = self._get_planes(pts)
        self._get_slice(pts, data, planes, output)
        return 1

    #### Getters / Setters ####

    def set_use_nearest_nbr(self, flag):
        """Set a flag on whether to use a nearest neighbor approximation
        when generating the slicing path (sklearn's or SciPy's KDTree).
        """
        if self.__useNearestNbr != flag:
            self.__useNearestNbr = flag
            self.Modified()

    def apply(self, points, data):
        """Run the algorithm along some points for the given input data.

        Return:
            The wrapped (PyVista) multi-block output of the filter.
        """
        self.SetInputDataObject(0, points)
        self.SetInputDataObject(1, data)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class SlideSliceAlongPoints(ManySlicesAlongPoints):
    """Takes a series of points and a data source to be sliced. The points are
    used to construct a path through the data source and a slice is added at
    specified locations along that path along the vector of that path at that
    point. This constructs one slice through the input dataset which the user
    can translate via a slider bar in ParaView.

    Note:
        * Make sure the input data source is slice-able.
        * The SciPy module is required for this filter.
    """

    __displayname__ = 'Slide Slice Along Points'
    __category__ = 'filter'

    def __init__(self, n_slices=5, nearest_nbr=True):
        # BUG FIX: forward the constructor arguments to the parent class;
        # previously both ``n_slices`` and ``nearest_nbr`` were silently
        # dropped and the parent defaults were always used.
        ManySlicesAlongPoints.__init__(
            self, n_slices=n_slices, nearest_nbr=nearest_nbr,
            outputType='vtkPolyData',
        )
        self.__planes = None  # Cached planes along the path
        self.__loc = 50  # Percent (halfway)

    def _get_slice(self, pts, data, planes, output):
        """Internal helper to perform the filter on a single plane."""
        if not isinstance(planes, vtk.vtkPlane):
            raise _helpers.PVGeoError('``_get_slice`` can only handle one plane.')
        self._slice(data, output, planes)
        return output

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pts = self.GetInputData(inInfo, 0, 0)  # Port 0: points
        data = self.GetInputData(inInfo, 1, 0)  # Port 1: sliceable data
        output = vtk.vtkPolyData.GetData(outInfo, 0)
        # Perform task
        if self.__planes is None or len(self.__planes) < 1:
            self.set_number_of_slices(pts.GetNumberOfPoints())
            self.__planes = self._get_planes(pts)
        # Map the percent location onto a plane index. BUG FIX: clamp the
        # index — there is one fewer plane than path points, so locations
        # near 100% could previously index past the last plane.
        idx = int(np.floor(pts.GetNumberOfPoints() * self.__loc / 100.0))
        idx = min(idx, len(self.__planes) - 1)
        self._get_slice(pts, data, self.__planes[idx], output)
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to prepare output: pre-compute the planes."""
        pts = self.GetInputData(inInfo, 0, 0)  # Port 0: points
        self.set_number_of_slices(pts.GetNumberOfPoints())
        self.__planes = self._get_planes(pts)
        return 1

    def set_location(self, loc):
        """Set the location along the input line for the slice location as a
        percent (0, 99)."""
        if loc > 99 or loc < 0:
            raise _helpers.PVGeoError(
                'Location must be given as a percentage along input path.'
            )
        if self.__loc != loc:
            self.__loc = loc
            self.Modified()

    def get_location(self):
        """Return the current location along the input line for the slice"""
        return self.__loc
###############################################################################
class ManySlicesAlongAxis(_SliceBase):
    """Slices a ``vtkDataSet`` along a given axis many times.

    This produces a specified number of slices at once each with a normal vector
    oriented along the axis of choice and spaced uniformly through the range of
    the dataset on the chosen axis.

    Args:
        n_slices (int): the number of slices to generate
        axis (int): the axial index to slice along (0, 1, 2) = (x, y, z)
        rng (iterable): axis coordinates at which to slice; recomputed from
            the input bounds on every request
        pad (float): Padding as a percentage (0.0, 1.0)
    """

    __displayname__ = 'Many Slices Along Axis'
    __category__ = 'filter'

    def __init__(
        self, n_slices=5, axis=0, rng=None, pad=0.01, outputType='vtkMultiBlockDataSet'
    ):
        _SliceBase.__init__(
            self,
            n_slices=n_slices,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType=outputType,
        )
        # Parameters
        self.__axis = axis
        self.__rng = rng  # Overwritten by ``_set_axial_range`` in RequestData
        self.__pad = pad

    def _get_origin(self, pdi, idx):
        """Internal helper to get plane origin: the dataset center shifted
        along the slicing axis to the ``idx``-th coordinate of the range."""
        og = list(self.get_input_center(pdi))
        og[self.__axis] = self.__rng[idx]
        return og

    def get_input_bounds(self, pdi):
        """Gets the bounds of the input data set on the set slicing axis."""
        # GetBounds packs (xmin, xmax, ymin, ymax, zmin, zmax)
        bounds = pdi.GetBounds()
        return bounds[self.__axis * 2], bounds[self.__axis * 2 + 1]

    @staticmethod
    def get_input_center(pdi):
        """Gets the center of the input data set

        Return:
            tuple: the XYZ coordinates of the center of the data set.
        """
        bounds = pdi.GetBounds()
        x = (bounds[1] + bounds[0]) / 2
        y = (bounds[3] + bounds[2]) / 2
        z = (bounds[5] + bounds[4]) / 2
        return (x, y, z)

    def get_normal(self):
        """Get the normal of the slicing plane (unit vector on the set axis)"""
        norm = [0, 0, 0]
        norm[self.__axis] = 1
        return norm

    def _set_axial_range(self, pdi):
        """Internal helper to set the slicing range along the set axis"""
        bounds = self.get_input_bounds(pdi)
        padding = (bounds[1] - bounds[0]) * self.__pad  # get percent padding
        # Uniformly spaced slice coordinates, inset by the padding
        self.__rng = np.linspace(
            bounds[0] + padding, bounds[1] - padding, num=self.get_number_of_slices()
        )

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        # Get output:
        # output = self.GetOutputData(outInfo, 0)
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        self._set_axial_range(pdi)
        normal = self.get_normal()
        # Perform task
        # Set number of blocks based on user choice in the selection
        output.SetNumberOfBlocks(self.get_number_of_slices())
        blk = 0
        for i in range(self.get_number_of_slices()):
            temp = vtk.vtkPolyData()
            origin = self._get_origin(pdi, i)
            plane = self._generate_plane(origin, normal)
            # Perform slice for that index
            self._slice(pdi, temp, plane)
            output.SetBlock(blk, temp)
            output.GetMetaData(blk).Set(vtk.vtkCompositeDataSet.NAME(), 'Slice%.2d' % i)
            blk += 1
        return 1

    #### Getters / Setters ####

    def set_axis(self, axis):
        """Set the axis on which to slice

        Args:
            axis (int): the axial index (0, 1, 2) = (x, y, z)

        Raises:
            PVGeoError: if ``axis`` is not 0, 1, or 2.
        """
        if axis not in (0, 1, 2):
            raise _helpers.PVGeoError('Axis choice must be 0, 1, or 2 (x, y, or z)')
        if self.__axis != axis:
            self.__axis = axis
            self.Modified()

    def get_range(self):
        """Get the slicing range for the set axis"""
        return self.__rng

    def get_axis(self):
        """Get the set axis to slice upon as int index (0,1,2)"""
        return self.__axis

    def set_padding(self, pad):
        """Set the percent padding for the slices on the edges"""
        if self.__pad != pad:
            self.__pad = pad
            self.Modified()
###############################################################################
class SliceThroughTime(ManySlicesAlongAxis):
    """Takes a sliceable ``vtkDataSet`` and progresses a slice of it along a
    given axis. The macro requires that the clip already exist in the pipeline.
    This is especially useful if you have many clips linked together as all will
    move through the scene as a result of this macro.
    """

    __displayname__ = 'Slice Through Time'
    __category__ = 'filter'

    def __init__(
        self,
        n_slices=5,
        dt=1.0,
        axis=0,
        rng=None,
    ):
        ManySlicesAlongAxis.__init__(
            self, n_slices=n_slices, axis=axis, rng=rng, outputType='vtkPolyData'
        )
        # Parameters
        self.__dt = dt  # Time step interval in seconds
        self.__timesteps = None  # Registered by ``_update_time_steps``

    def _update_time_steps(self):
        """For internal use only: re-register one timestep per slice"""
        self.__timesteps = _helpers.update_time_steps(
            self, self.get_number_of_slices(), self.__dt
        )

    #### Algorithm Methods ####

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        self._set_axial_range(pdi)
        # The requested timestep index selects which slice to produce
        i = _helpers.get_requested_time(self, outInfo)
        # Perform task
        normal = self.get_normal()
        origin = self._get_origin(pdi, i)
        plane = self._generate_plane(origin, normal)
        self._slice(pdi, pdo, plane)
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to set the time information"""
        # register time:
        self._update_time_steps()
        return 1

    #### Public Getters / Setters ####

    def set_number_of_slices(self, num):
        """Set the number of slices/timesteps to generate"""
        ManySlicesAlongAxis.set_number_of_slices(self, num)
        self._update_time_steps()
        self.Modified()

    def set_time_delta(self, dt):
        """
        Set the time step interval in seconds
        """
        if self.__dt != dt:
            self.__dt = dt
            self._update_time_steps()
            self.Modified()

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps"""
        # ``None`` until ``_update_time_steps`` has been called
        return self.__timesteps.tolist() if self.__timesteps is not None else None
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/filters/slicing.py",
"copies": "1",
"size": "16114",
"license": "bsd-3-clause",
"hash": 5952381445426068000,
"line_mean": 33.285106383,
"line_max": 88,
"alpha_frac": 0.5752761574,
"autogenerated": false,
"ratio": 3.9621342512908777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5037410408690878,
"avg_score": null,
"num_lines": null
} |
# Names re-exported as the public ``mdformat.renderer`` API.
__all__ = (
    "MDRenderer",
    "LOGGER",
    "RenderTreeNode",
    "DEFAULT_RENDERERS",
    "RenderContext",
    "WRAP_POINT",
)
import logging
from types import MappingProxyType
from typing import Any, Dict, Mapping, MutableMapping, Sequence, Tuple
from markdown_it.common.normalize_url import unescape_string
from markdown_it.token import Token
from mdformat.renderer._context import DEFAULT_RENDERERS, WRAP_POINT, RenderContext
from mdformat.renderer._tree import RenderTreeNode
from mdformat.renderer.typing import Postprocess
# Module-level logger shared by the renderer machinery (and re-exported).
LOGGER = logging.getLogger(__name__)
class MDRenderer:
    """Markdown renderer.

    A renderer class that outputs formatted Markdown. Compatible with
    `markdown_it.MarkdownIt`.
    """

    __output__ = "md"

    def __init__(self, parser: Any = None):
        """__init__ must have `parser` parameter for markdown-it-py
        compatibility."""

    def render(
        self,
        tokens: Sequence[Token],
        options: Mapping[str, Any],
        env: MutableMapping,
        *,
        finalize: bool = True,
    ) -> str:
        """Takes token stream and generates Markdown.

        Args:
            tokens: A sequence of block tokens to render
            options: Params of parser instance
            env: Additional data from parsed input
            finalize: write references and add trailing newline
        """
        return self.render_tree(
            RenderTreeNode(tokens), options, env, finalize=finalize
        )

    def render_tree(
        self,
        tree: "RenderTreeNode",
        options: Mapping[str, Any],
        env: MutableMapping,
        *,
        finalize: bool = True,
    ) -> str:
        """Render a syntax tree as formatted Markdown text."""
        self._prepare_env(env)

        # Let plugins extend/override the default renderer map and register
        # postprocessors for individual syntax names.
        plugin_renderers: Dict[str, Any] = {}
        plugin_postprocessors: Dict[str, Tuple[Postprocess, ...]] = {}
        for extension in options.get("parser_extension", []):
            for syntax, renderer_func in extension.RENDERERS.items():
                if syntax in plugin_renderers:
                    # The first plugin to claim a syntax wins; later
                    # conflicting renderers are ignored with a warning.
                    LOGGER.warning(
                        "Plugin conflict. More than one plugin defined a renderer"
                        f' for "{syntax}" syntax.'
                    )
                else:
                    plugin_renderers[syntax] = renderer_func
            hooks = getattr(extension, "POSTPROCESSORS", {})
            for syntax, postprocessor in hooks.items():
                # Unlike renderers, postprocessors stack: all run in order.
                plugin_postprocessors[syntax] = plugin_postprocessors.get(
                    syntax, ()
                ) + (postprocessor,)

        context = RenderContext(
            MappingProxyType({**DEFAULT_RENDERERS, **plugin_renderers}),
            MappingProxyType(plugin_postprocessors),
            options,
            env,
        )
        text = tree.render(context)
        if finalize:
            if env["used_refs"]:
                text += "\n\n" + self._write_references(env)
            if text:
                text += "\n"

        assert "\x00" not in text, "null bytes should be removed by now"

        return text

    @staticmethod
    def _write_references(env: MutableMapping) -> str:
        """Build the reference link definition block, sorted by label."""
        entries = []
        for label in sorted(env["used_refs"]):
            ref = env["references"][label]
            target = unescape_string(ref["href"] if ref["href"] else "<>")
            entry = f"[{label.lower()}]: {target}"
            title = ref["title"]
            if title:
                entry += ' "{}"'.format(title.replace('"', '\\"'))
            entries.append(entry)
        return "\n".join(entries)

    def _prepare_env(self, env: MutableMapping) -> None:
        """Initialize the env fields that rendering relies on."""
        env["indent_width"] = 0
        env["used_refs"] = set()
| {
"repo_name": "executablebooks/mdformat",
"path": "mdformat/renderer/__init__.py",
"copies": "1",
"size": "3886",
"license": "mit",
"hash": -662770030172442800,
"line_mean": 32.5,
"line_max": 85,
"alpha_frac": 0.5748841997,
"autogenerated": false,
"ratio": 4.436073059360731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5510957259060731,
"avg_score": null,
"num_lines": null
} |
# Public API: the Measurement record plus factories bound to a metry.
__all__ = [
    'Measurement',
    'make_counter',
    'make_rater',
    'make_timer',
]
import collections
import functools
import time
# A single recorded data point: wall-clock timestamp (``when``), the
# measured ``value``, and the elapsed ``duration`` in seconds
# (``None`` for plain counts).
Measurement = collections.namedtuple('Measurement', 'when value duration')


def count(measure, name, value=1):
    """Record *value* (default 1) under *name* at the current time."""
    measure(name, Measurement(time.time(), value, None))


def rate(measure, name, value):
    """Return a context manager that records *value* with elapsed time."""
    return MeasureContext(measure, name, value)


def make_counter(metry, name):
    """Bind :func:`count` to *metry*'s measure function and *name*."""
    return functools.partial(count, metry.measure, name)


def make_rater(metry, name):
    """Bind :func:`rate` to *metry*'s measure function and *name*."""
    return functools.partial(rate, metry.measure, name)


def make_timer(metry, name):
    """Create a :class:`Timer` bound to *metry*'s measure function."""
    return Timer(metry.measure, name)


class Timer:
    """Times code either as a decorator or via the ``time()`` context."""

    def __init__(self, measure, name):
        self.measure = measure
        self.name = name

    def time(self):
        """Return a fresh one-shot timing context (no associated value)."""
        return MeasureContext(self.measure, self.name, None)

    def __call__(self, func):
        """Decorate *func* so that every call is timed and reported."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with self.time():
                return func(*args, **kwargs)
        return wrapper


class MeasureContext:
    """Context manager that reports one Measurement when it exits."""

    def __init__(self, measure, measure_name, value):
        self.measure = measure
        self.measure_name = measure_name
        self.value = value
        self._time = None   # wall-clock time captured by start()
        self._start = None  # perf_counter at start(); None => inactive

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *_):
        self.stop()

    def start(self):
        """Begin (or restart) timing."""
        self._time = time.time()
        self._start = time.perf_counter()

    def stop(self):
        """Stop timing and report; a no-op unless start() was called."""
        if self._start is None:
            return
        elapsed = time.perf_counter() - self._start
        self.measure(
            self.measure_name, Measurement(self._time, self.value, elapsed))
        self._start = None  # Disable context.
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/metry/measures.py",
"copies": "1",
"size": "1769",
"license": "mit",
"hash": 5084450176349412000,
"line_mean": 21.3924050633,
"line_max": 74,
"alpha_frac": 0.6082532504,
"autogenerated": false,
"ratio": 3.8456521739130434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4953905424313043,
"avg_score": null,
"num_lines": null
} |
# Public names of this package; extended below with everything the
# ``constants`` and ``errors`` modules export.
__all__ = [
    'MessageBuffer',
    'Message',
    'Socket',
    'BindEndpoint',
    'ConnectEndpoint',
    'device',
    'terminate',
    # Extend with constants and errors.
]
import ctypes
import enum
from collections import OrderedDict
from functools import partial
from . import _nanomsg as _nn
from . import constants
from . import errors
from .constants import *
from .errors import *
# Re-export constants and errors so callers only import this module.
__all__.extend(constants.__all__)
__all__.extend(errors.__all__)
errors.asserts(
    len(set(__all__)) == len(__all__),
    'expect no variable name conflict: %r', __all__,
)
# Buffer-access flags for PyMemoryView_FromMemory; values match
# CPython's PyBUF_READ/PyBUF_WRITE.
_PyBUF_READ = 0x100
_PyBUF_WRITE = 0x200
# ctypes binding of the CPython C-API call that wraps raw memory in a
# memoryview without copying.
_PyMemoryView_FromMemory = ctypes.pythonapi.PyMemoryView_FromMemory
_PyMemoryView_FromMemory.argtypes = [
    ctypes.c_void_p,
    ctypes.c_ssize_t,
    ctypes.c_int,
]
_PyMemoryView_FromMemory.restype = ctypes.py_object
class MessageBuffer:
    """Abstraction on top of nn_allocmsg, etc.

    Owns a nanomsg-allocated buffer until ``disown`` or ``free`` is
    called; the buffer is released on ``free``, ``__exit__``, or
    garbage collection.
    """

    def __init__(self, size, allocation_type=0, *, buffer=None):
        # Either adopt an already-allocated nanomsg buffer (``buffer``) or
        # allocate a fresh one of ``size`` bytes via nn_allocmsg.
        if buffer is not None:
            self.buffer, self.size = buffer, size
        else:
            # Assign None/0 first so __del__ -> free() is safe even if the
            # allocation below fails.
            self.buffer, self.size = None, 0
            self.buffer = _nn.nn_allocmsg(size, allocation_type)
            if self.buffer is None:
                raise NanomsgError.make(_nn.nn_errno())
            self.size = size

    def __repr__(self):
        if self.buffer is None:
            ptr, size = 0, 0
        else:
            ptr, size = self.buffer.value, self.size
        return ('<%s addr 0x%016x, size 0x%x>' %
                (self.__class__.__name__, ptr, size))

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.free()

    def __del__(self):
        # Don't call super's __del__ since `object` doesn't have one.
        self.free()

    def as_memoryview(self):
        """Expose the raw buffer as a writable memoryview (zero-copy)."""
        errors.asserts(self.buffer is not None, 'expect non-None buffer')
        return _PyMemoryView_FromMemory(
            self.buffer,
            self.size,
            _PyBUF_READ | _PyBUF_WRITE,
        )

    def resize(self, size):
        """Re-allocate the buffer to ``size`` bytes via nn_reallocmsg."""
        errors.asserts(self.buffer is not None, 'expect non-None buffer')
        buffer = _nn.nn_reallocmsg(self.buffer, size)
        if buffer is None:
            raise NanomsgError.make(_nn.nn_errno())
        self.buffer, self.size = buffer, size

    def disown(self):
        """Give up ownership and return the raw buffer pointer."""
        buffer, self.buffer, self.size = self.buffer, None, 0
        return buffer

    def free(self):
        """Release the buffer if owned; safe to call more than once."""
        if self.buffer is None:
            return
        errors.check(_nn.nn_freemsg(self.buffer))
        # It disowns the buffer only after nn_freemsg succeeds, but
        # honestly, if it can't free the buffer, I am not sure what's
        # the purpose to keep owning it (maybe for debugging?).
        self.disown()
class Message:
    """Abstraction on top of nn_msghdr.

    It manages two nanomsg structures: control and message; each
    structure is in one of three states:

    * NULL: This is the initial state. One special thing of this state
      is that if the structure is passed to nn_recvmsg, nn_recvmsg will
      allocate memory space for the structure (thus changing it to OWNER
      state).
    * OWNER: The space of the structure is allocated by nn_allocmsg, and
      is owned and will be released by Message.
    * BORROWER: The structure points at caller-owned memory that this
      Message must not free.
    """

    class ResourceState(enum.Enum):
        # See the class docstring for the meaning of each state.
        NULL = 'NULL'
        OWNER = 'OWNER'
        BORROWER = 'BORROWER'

    def __init__(self):
        # Zero-fill the header so unused fields are well-defined.
        self._message_header = _nn.nn_msghdr()
        ctypes.memset(
            ctypes.addressof(self._message_header),
            0x0,
            ctypes.sizeof(self._message_header),
        )
        # Single-element I/O vector carrying the message payload.
        self._io_vector = _nn.nn_iovec()
        self._message_header.msg_iov = ctypes.pointer(self._io_vector)
        self._message_header.msg_iovlen = 1
        # NULL control pointer; NN_MSG length asks nanomsg to allocate.
        self._control = ctypes.c_void_p()
        self._message_header.msg_control = ctypes.addressof(self._control)
        self._message_header.msg_controllen = NN_MSG
        # NULL message pointer; NN_MSG length asks nanomsg to allocate.
        self._message = ctypes.c_void_p()
        self.size = 0
        self._io_vector.iov_base = ctypes.addressof(self._message)
        self._io_vector.iov_len = NN_MSG

    @property
    def _control_state(self):
        # None marks BORROWER; a non-NULL pointer marks OWNER.
        if self._control is None:
            return Message.ResourceState.BORROWER
        elif self._control:
            return Message.ResourceState.OWNER
        else:
            return Message.ResourceState.NULL

    @property
    def _message_state(self):
        # None marks BORROWER; a non-NULL pointer marks OWNER.
        if self._message is None:
            return Message.ResourceState.BORROWER
        elif self._message:
            return Message.ResourceState.OWNER
        else:
            return Message.ResourceState.NULL

    def __repr__(self):
        cstate = self._control_state
        if cstate is Message.ResourceState.BORROWER:
            cptr = self._message_header.msg_control or 0
        else:
            cptr = self._control.value or 0
        mstate = self._message_state
        if mstate is Message.ResourceState.BORROWER:
            mptr = self._io_vector.iov_base or 0
        else:
            mptr = self._message.value or 0
        return '<%s control %s 0x%016x, message %s 0x%016x, size 0x%x>' % (
            self.__class__.__name__,
            cstate.name, cptr,
            mstate.name, mptr, self.size,
        )

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.free()

    def __del__(self):
        # Don't call super's __del__ since `object` doesn't have one.
        self.free()

    @property
    def nn_msghdr(self):
        # The raw header to pass to nn_sendmsg/nn_recvmsg.
        return self._message_header

    def as_memoryview(self):
        """Expose the message payload as a writable memoryview (zero-copy)."""
        if self._message_state is Message.ResourceState.BORROWER:
            mptr = self._io_vector.iov_base
        else:
            mptr = self._message
        return _PyMemoryView_FromMemory(
            mptr,
            self.size,
            _PyBUF_READ | _PyBUF_WRITE,
        )

    @staticmethod
    def _addr_to_ptr(addr):
        # This is idempotent (so it is safe even if addr is c_void_p).
        return ctypes.cast(addr, ctypes.c_void_p)

    @staticmethod
    def _bytes_to_ptr(bytes_):
        # Borrow the bytes object's internal buffer; caller keeps it alive.
        return ctypes.cast(ctypes.c_char_p(bytes_), ctypes.c_void_p)

    def adopt_control(self, control, owner):
        """Take the control structure, as OWNER or BORROWER per ``owner``."""
        self._free_control()
        if owner:
            self._control = self._addr_to_ptr(control)
            self._message_header.msg_control = ctypes.addressof(self._control)
            self._message_header.msg_controllen = NN_MSG
        else:
            self._control = None
            self._message_header.msg_control = self._bytes_to_ptr(control)
            self._message_header.msg_controllen = len(control)

    def adopt_message(self, message, size, owner):
        """Take the message payload, as OWNER or BORROWER per ``owner``."""
        self._free_message()
        if owner:
            self._message = self._addr_to_ptr(message)
            self.size = size
            self._io_vector.iov_base = ctypes.addressof(self._message)
            self._io_vector.iov_len = NN_MSG
        else:
            self._message = None
            self.size = size
            self._io_vector.iov_base = self._bytes_to_ptr(message)
            self._io_vector.iov_len = size

    def _free_control(self):
        # Free only in OWNER state; always reset back to NULL state.
        rc = 0
        if self._control_state is Message.ResourceState.OWNER:
            rc = _nn.nn_freemsg(self._control)
        self.disown_control()
        errors.check(rc)

    def _free_message(self):
        # Free only in OWNER state; always reset back to NULL state.
        rc = 0
        if self._message_state is Message.ResourceState.OWNER:
            rc = _nn.nn_freemsg(self._message)
        self.disown_message()
        errors.check(rc)

    def disown_control(self):
        """Reset control to NULL state; return (pointer, was_owner)."""
        state = self._control_state
        control, self._control = self._control, ctypes.c_void_p()
        self._message_header.msg_control = ctypes.addressof(self._control)
        self._message_header.msg_controllen = NN_MSG
        return control, state is Message.ResourceState.OWNER

    def disown_message(self):
        """Reset message to NULL state; return (pointer, size, was_owner)."""
        state = self._message_state
        message, self._message = self._message, ctypes.c_void_p()
        size, self.size = self.size, 0
        self._io_vector.iov_base = ctypes.addressof(self._message)
        self._io_vector.iov_len = NN_MSG
        return message, size, state is Message.ResourceState.OWNER

    def disown(self):
        """Disown both structures; see disown_control/disown_message."""
        return self.disown_control(), self.disown_message()

    def free(self):
        """Release both structures; safe to call more than once."""
        try:
            self._free_control()
        finally:
            self._free_message()
class SocketBase:
    """Common nanomsg socket machinery: life cycle, options, endpoints,
    and the private send/recv primitives used by subclasses."""

    def __init__(self, *, domain=AF_SP, protocol=None, socket_fd=None):
        # Set fd to None as a safety measure in case subclass's __init__
        # raises exception since __del__ need at least self.fd.
        self.fd = None
        errors.asserts(
            (protocol is None) != (socket_fd is None),
            'expect either protocol or socket_fd is set: %r, %r',
            protocol, socket_fd,
        )
        if protocol is not None:
            self.fd = errors.check(_nn.nn_socket(domain, protocol))
        else:
            assert socket_fd is not None
            self.fd = socket_fd
        # Keep a strong reference to endpoint objects to prevent them
        # from being released because users are not expected to keep a
        # reference to these endpoint objects, i.e., users usually treat
        # bind() and connect() as a void function.
        self.endpoints = OrderedDict()
        # Make a separate namespace for some of the options (don't
        # clutter up this namespace).
        self.options = OptionsProxy(self)

    def __repr__(self):
        binds = []
        connects = []
        for endpoint in self.endpoints.values():
            if isinstance(endpoint, BindEndpoint):
                binds.append(endpoint.address)
            elif isinstance(endpoint, ConnectEndpoint):
                connects.append(endpoint.address)
            else:
                raise AssertionError
        return ('<%s fd %r, listen on %r, connect to %r>' %
                (self.__class__.__name__, self.fd, binds, connects))

    #
    # Manage socket life cycle.
    #

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.close()

    def __del__(self):
        # Don't call super's __del__ since `object` doesn't have one.
        self.close()

    def close(self):
        """Close the socket; safe to call more than once."""
        if self.fd is None:
            return
        try:
            errors.check(_nn.nn_close(self.fd))
        except EBADF:
            # Suppress EBADF because socket might have been closed
            # through some other means, like nn_term.
            # NOTE(review): on EBADF, fd and endpoints are left in place,
            # so a later close() will retry — confirm this is intended.
            pass
        else:
            self.fd = None
            self.endpoints.clear()

    #
    # Socket options.
    #

    def getsockopt(self, level, option, option_size=64):
        """Read a socket option; decode per the option's declared type.

        ``option_size`` bounds the buffer used for string-typed options.
        """
        errors.asserts(self.fd is not None, 'expect socket.fd')
        option_type = option.value.type
        if option_type is OptionType.NN_TYPE_INT:
            optval = ctypes.byref(ctypes.c_int())
            optvallen = ctypes.sizeof(ctypes.c_int)
        elif option_type is OptionType.NN_TYPE_STR:
            optval = ctypes.create_string_buffer(option_size)
            optvallen = len(optval)
        else:
            raise AssertionError
        optvallen = ctypes.byref(ctypes.c_size_t(optvallen))
        errors.check(_nn.nn_getsockopt(
            self.fd, level, option.value, optval, optvallen))
        if option_type is OptionType.NN_TYPE_INT:
            value = optval._obj.value
        elif option_type is OptionType.NN_TYPE_STR:
            # Trim to the length nanomsg actually wrote.
            size = optvallen._obj.value
            value = optval.raw[:size].decode('ascii')
        else:
            raise AssertionError
        if option.value.unit is OptionUnit.NN_UNIT_BOOLEAN:
            value = (False, True)[value]
        return value

    def setsockopt(self, level, option, value):
        """Write a socket option; encode per the Python type of ``value``.

        Raises:
            ValueError: if the value's type does not match the option's
                declared type (int/bool vs str/bytes) or is unsupported.
        """
        errors.asserts(self.fd is not None, 'expect socket.fd')
        option_type = option.value.type
        if isinstance(value, bool):
            if option_type is not OptionType.NN_TYPE_INT:
                raise ValueError('option %s is not int-typed' % option.name)
            optval = ctypes.byref(ctypes.c_int(int(value)))
            optvallen = ctypes.sizeof(ctypes.c_int)
        elif isinstance(value, int):
            if option_type is not OptionType.NN_TYPE_INT:
                raise ValueError('option %s is not int-typed' % option.name)
            optval = ctypes.byref(ctypes.c_int(value))
            optvallen = ctypes.sizeof(ctypes.c_int)
        elif isinstance(value, str):
            if option_type is not OptionType.NN_TYPE_STR:
                raise ValueError('option %s is not str-typed' % option.name)
            optval = value.encode('ascii')
            optvallen = len(optval)
        elif isinstance(value, bytes):
            if option_type is not OptionType.NN_TYPE_STR:
                raise ValueError('option %s is not str-typed' % option.name)
            optval = value
            optvallen = len(optval)
        else:
            raise ValueError('unsupported type: {!r}'.format(value))
        errors.check(_nn.nn_setsockopt(
            self.fd, level, option.value, optval, optvallen))

    #
    # Endpoints.
    #

    def bind(self, address):
        """Bind to ``address``; return the endpoint (also retained here)."""
        errors.asserts(self.fd is not None, 'expect socket.fd')
        return self.__make_endpoint(address, BindEndpoint, _nn.nn_bind)

    def connect(self, address):
        """Connect to ``address``; return the endpoint (also retained here)."""
        errors.asserts(self.fd is not None, 'expect socket.fd')
        return self.__make_endpoint(address, ConnectEndpoint, _nn.nn_connect)

    def __make_endpoint(self, address, ep_class, ep_make):
        # Shared implementation of bind()/connect().
        if isinstance(address, str):
            address_bytes = address.encode('ascii')
        else:
            address_bytes = address
        endpoint_id = errors.check(ep_make(self.fd, address_bytes))
        endpoint = ep_class(self, endpoint_id, address)
        self.endpoints[endpoint_id] = endpoint
        return endpoint

    #
    # Private data transmission methods that sub-classes may call.
    #

    def _send(self, message, size, flags):
        """Send bytes-like data or a MessageBuffer (which gets disowned)."""
        errors.asserts(self.fd is not None, 'expect socket.fd')
        errors.asserts(
            not isinstance(message, Message),
            'send does not accept Message',
        )
        if isinstance(message, MessageBuffer):
            errors.asserts(size is None, 'expect size is None')
            # NN_MSG tells nanomsg this is a zero-copy allocated buffer.
            size = NN_MSG
            nbytes = errors.check(
                _nn.nn_send(self.fd, message.buffer, size, flags))
            # nanomsg took ownership of the buffer on success.
            message.disown()
        else:
            if size is None:
                size = len(message)
            nbytes = errors.check(
                _nn.nn_send(self.fd, message, size, flags))
        errors.asserts(
            size in (NN_MSG, nbytes),
            'expect sending %d bytes, not %d' % (size, nbytes),
        )
        return nbytes

    def _recv(self, message, size, flags):
        """Receive into ``message`` or, when None, a new MessageBuffer."""
        errors.asserts(self.fd is not None, 'expect socket.fd')
        errors.asserts(
            not isinstance(message, Message),
            'recv does not accept Message',
        )
        if message is None:
            # Let nanomsg allocate the buffer (NN_MSG) and wrap it.
            buffer = ctypes.c_void_p()
            nbytes = errors.check(
                _nn.nn_recv(self.fd, ctypes.byref(buffer), NN_MSG, flags))
            return MessageBuffer(buffer=buffer, size=nbytes)
        else:
            errors.asserts(size is not None, 'expect size')
            return errors.check(
                _nn.nn_recv(self.fd, message, size, flags))

    def _sendmsg(self, message, flags):
        """Send a Message via nn_sendmsg; the Message gets disowned."""
        errors.asserts(self.fd is not None, 'expect socket.fd')
        errors.asserts(
            isinstance(message, Message),
            'sendmsg only accepts Message',
        )
        nbytes = errors.check(_nn.nn_sendmsg(
            self.fd,
            ctypes.byref(message.nn_msghdr),
            flags,
        ))
        size = message.size  # Copy size before disown clears it.
        message.disown()
        errors.asserts(
            size == nbytes,
            'expect sending %d bytes, not %d' % (size, nbytes),
        )
        return nbytes

    def _recvmsg(self, message, flags):
        """Receive into ``message`` or, when None, a new Message.

        NOTE(review): NN_DONTWAIT is always OR-ed into ``flags`` here,
        making this call non-blocking regardless of the caller's flags —
        presumably for an async wrapper; confirm this is intended.
        """
        errors.asserts(self.fd is not None, 'expect socket.fd')
        errors.asserts(
            message is None or isinstance(message, Message),
            'recvmsg only accepts Message',
        )
        if message is None:
            message = Message()
            return_message = True
        else:
            return_message = False
        nbytes = errors.check(_nn.nn_recvmsg(
            self.fd,
            ctypes.byref(message.nn_msghdr),
            flags | NN_DONTWAIT,
        ))
        message.size = nbytes
        if return_message:
            return message
        else:
            return nbytes
class Socket(SocketBase):
    """Synchronous nanomsg socket: thin public wrappers over the private
    transmission primitives of :class:`SocketBase`."""

    def send(self, message, size=None, flags=0):
        """Send bytes-like data or a MessageBuffer; return bytes sent."""
        return self._send(message, size, flags)

    def recv(self, message=None, size=None, flags=0):
        """Receive into ``message`` (or a new MessageBuffer when None)."""
        return self._recv(message, size, flags)

    def sendmsg(self, message, flags=0):
        """Send a :class:`Message`; return bytes sent."""
        return self._sendmsg(message, flags)

    def recvmsg(self, message=None, flags=0):
        """Receive into a :class:`Message` (or a new one when None)."""
        return self._recvmsg(message, flags)
class EndpointBase:
    """A socket endpoint (result of bind/connect).

    Tracks the owning socket, the nanomsg endpoint id, and the address
    string.  Usable as a (a)synchronous context manager; exiting shuts
    the endpoint down.  shutdown() is idempotent.
    """

    def __init__(self, socket, endpoint_id, address):
        self.socket = socket
        self.endpoint_id = endpoint_id
        self.address = address

    def __repr__(self):
        return ('<%s socket %r, id %d, address %r>' %
                (self.__class__.__name__,
                 self.socket.fd, self.endpoint_id, self.address))

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.shutdown()

    async def __aenter__(self):
        return self.__enter__()

    async def __aexit__(self, *exc_info):
        return self.__exit__(*exc_info)  # XXX: Would this block?

    def shutdown(self):
        """Shut down this endpoint; safe to call more than once."""
        # A closed socket has implicitly shut down all endpoints.
        if self.socket.fd is None:
            self.endpoint_id = None
            return
        eid = self.endpoint_id
        if eid is None:
            return  # Already shut down.
        self.endpoint_id = None
        errors.check(_nn.nn_shutdown(self.socket.fd, eid))
        self.socket.endpoints.pop(eid)
class BindEndpoint(EndpointBase):
    """Endpoint created by Socket.bind (nn_bind)."""
    pass
class ConnectEndpoint(EndpointBase):
    """Endpoint created by Socket.connect (nn_connect)."""
    pass
class OptionsProxy:
    """Expose nanomsg socket options as Python properties.

    One property per (level, option) pair is generated at class-creation
    time by the `_make` class-body helper below; property names are the
    lower-cased option names.
    """

    def __init__(self, socket):
        self.socket = socket

    def _getopt(self, level, option):
        return self.socket.getsockopt(level, option)

    def _setopt(self, value, level, option):
        self.socket.setsockopt(level, option, value)

    def _make(getter, setter, varz):
        # Because partialmethod doesn't work with property...
        level_option_pairs = [
            (NN_SUB, NN_SUB_SUBSCRIBE),
            (NN_SUB, NN_SUB_UNSUBSCRIBE),
            (NN_REQ, NN_REQ_RESEND_IVL),
            (NN_SURVEYOR, NN_SURVEYOR_DEADLINE),
            (NN_TCP, NN_TCP_NODELAY),
            (NN_WS, NN_WS_MSG_TYPE),
        ]
        level_option_pairs.extend(
            (NN_SOL_SOCKET, option)
            for option in SocketOption
        )
        # Options that may only be read, never written.
        readonly = {
            NN_DOMAIN,
            NN_PROTOCOL,
            NN_SNDFD,
            NN_RCVFD,
        }
        for level, option in level_option_pairs:
            name = option.name.lower()
            prop = property(partial(getter, level=level, option=option))
            if option not in readonly:
                prop = prop.setter(partial(setter, level=level, option=option))
            # varz is the class body's locals(): this injects the
            # property directly into the class namespace.
            varz[name] = prop

    _make(_getopt, _setopt, locals())
    # _make is only a class-body helper; remove it from the class.
    del _make
def device(sock1, sock2=None):
    """Run a nanomsg device forwarding between two sockets.

    With one socket, runs a loopback device.  This call blocks until
    the library is terminated.
    """
    other_fd = -1 if sock2 is None else sock2.fd
    errors.check(_nn.nn_device(sock1.fd, other_fd))
def terminate():
    """Globally shut down the nanomsg library (nn_term)."""
    _nn.nn_term()
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/__init__.py",
"copies": "1",
"size": "19665",
"license": "mit",
"hash": 4961633630013615000,
"line_mean": 29.4411764706,
"line_max": 79,
"alpha_frac": 0.5745741165,
"autogenerated": false,
"ratio": 3.843072112565957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9917646229065957,
"avg_score": 0,
"num_lines": 646
} |
__all__ = [
'MessageBuilder',
'MessageReader',
'DynamicEnum',
'DynamicList',
'DynamicStruct',
'AnyPointer',
]
import collections
import collections.abc
import enum
import os
from . import bases
from . import io
from . import native
from .schemas import Schema
from .schemas import Type
class MessageBase:
    """Common life-cycle management for message readers/builders.

    Lazily creates a context manager via `make_context` on first enter
    and exposes the produced resource; supports both the with-statement
    protocol and explicit open()/close().
    """

    def __init__(self, make_context, owned):
        """Construct a message.

        `owned` is anything that you must retain through out the entire
        message object life cycle to prevent it from being garbage
        collected.

        Basically, the life cycle should be: owned > context > resource.
        """
        self._owned = owned
        self._make_context = make_context
        self._context = None
        self._resource = None

    def __enter__(self):
        assert self._make_context is not None
        assert self._resource is None
        self._context = self._make_context()
        self._make_context = None  # _make_context is one-time use only.
        self._resource = self._context.__enter__()
        return self

    def __exit__(self, *args):
        self._resource = None
        # Clear self._context first but keep a local reference so we
        # can still delegate the exit to it.
        self._context, context = None, self._context
        ret = context.__exit__(*args)
        # You may release owned object after context is released.
        self._owned = None
        return ret

    def open(self):
        """Non-with-statement alias of __enter__."""
        self.__enter__()

    def close(self):
        """Non-with-statement alias of __exit__."""
        self.__exit__(None, None, None)

    @property
    def canonical(self):
        # True when the underlying message is in canonical form.
        assert self._resource is not None
        return self._resource.isCanonical()
class MessageReader(MessageBase):
    """Read-only Cap'n Proto message from (packed) bytes or files."""

    @classmethod
    def from_bytes(cls, blob):
        # `blob` is passed as the owned object: the reader borrows its
        # memory, so it must outlive the message.
        return cls(lambda: io.make_bytes_reader(blob), blob)

    @classmethod
    def from_packed_bytes(cls, blob):
        return cls(lambda: io.make_packed_bytes_reader(blob), blob)

    @classmethod
    def from_file(cls, path):
        return cls(lambda: io.make_file_reader(path), None)

    @classmethod
    def from_packed_file(cls, path):
        return cls(lambda: io.make_packed_file_reader(path), None)

    def get_root(self, schema):
        """Return a DynamicStruct reader for the message root."""
        assert self._resource is not None
        assert schema.kind is Schema.Kind.STRUCT
        return DynamicStruct(schema, self._resource.getRoot(schema._schema))
class MessageBuilder(MessageBase):
    """Writable Cap'n Proto message, serializable to bytes or files."""

    def __init__(self):
        super().__init__(io.make_bytes_builder, None)

    def init_root(self, schema):
        """Allocate and return the root struct builder for `schema`."""
        assert self._resource is not None
        assert schema.kind is Schema.Kind.STRUCT
        message = self._resource.initRoot(schema._schema)
        return DynamicStruct.Builder(schema, message)

    def get_root(self, schema):
        """Return the existing root struct builder for `schema`."""
        assert self._resource is not None
        assert schema.kind is Schema.Kind.STRUCT
        message = self._resource.getRoot(schema._schema)
        return DynamicStruct.Builder(schema, message)

    def to_bytes(self):
        """Serialize to the flat (unpacked) wire format."""
        assert self._resource is not None
        with io.make_bytes_writer() as writer:
            native.writeMessage(writer, self._resource)
            return writer.getArray()

    def to_packed_bytes(self):
        """Serialize to the packed wire format."""
        assert self._resource is not None
        with io.make_bytes_writer() as writer:
            native.writePackedMessage(writer, self._resource)
            return writer.getArray()

    def to_file(self, path, mode=0o664):
        """Write the flat wire format to `path` (created if missing)."""
        assert self._resource is not None
        with io.open_fd(path, os.O_WRONLY | os.O_CREAT, mode) as fd:
            native.writeMessageToFd(fd, self._resource)

    def to_packed_file(self, path, mode=0o664):
        """Write the packed wire format to `path` (created if missing)."""
        assert self._resource is not None
        with io.open_fd(path, os.O_WRONLY | os.O_CREAT, mode) as fd:
            native.writePackedMessageToFd(fd, self._resource)

    @staticmethod
    def _get_fd(file_like):
        """Return the OS file descriptor of `file_like`, or None.

        Returns None both when fileno() raises (e.g. io.BytesIO raises
        an OSError subclass) and when the object has no fileno() at all
        (AttributeError); previously the latter crashed write_to().
        """
        try:
            return file_like.fileno()
        except (AttributeError, OSError):
            return None

    def write_to(self, output):
        """Write the flat format to a file-like `output`.

        Uses the file descriptor directly when available; falls back to
        output.write(bytes) otherwise.
        """
        assert self._resource is not None
        fd = self._get_fd(output)
        if fd is None:
            output.write(self.to_bytes())
        else:
            native.writeMessageToFd(fd, self._resource)

    def write_packed_to(self, output):
        """Write the packed format to a file-like `output`."""
        assert self._resource is not None
        fd = self._get_fd(output)
        if fd is None:
            output.write(self.to_packed_bytes())
        else:
            native.writePackedMessageToFd(fd, self._resource)
class DynamicEnum:
    """Wrap a native DynamicEnum value together with its schema."""

    @classmethod
    def from_member(cls, schema, member):
        """Build a DynamicEnum from a Python enum member, another
        DynamicEnum of the same schema, or a raw ordinal int.

        Raises ValueError when the ordinal is unknown to the schema.
        """
        assert schema.kind is Schema.Kind.ENUM
        if isinstance(member, enum.Enum):
            enumerant = schema.get_enumerant_from_ordinal(member.value)
        elif isinstance(member, DynamicEnum):
            assert member.schema is schema
            enumerant = member.enumerant
        else:
            assert isinstance(member, int)
            enumerant = schema.get_enumerant_from_ordinal(member)
        if enumerant is None:
            raise ValueError('%r is not a member of %s' % (member, schema))
        return cls(schema, native.DynamicEnum(enumerant._enumerant))

    def __init__(self, schema, enum_):
        assert schema.kind is Schema.Kind.ENUM
        assert schema.id == bases.get_schema_id(enum_.getSchema())
        self.schema = schema
        self._enum = enum_
        # enumerant is None when the raw value has no name in this
        # schema (e.g. data written by a newer schema version).
        enumerant = self._enum.getEnumerant()
        if enumerant is None:
            self.enumerant = None
        else:
            self.enumerant = self.schema[enumerant.getProto().getName()]

    def get(self):
        """Return the raw ordinal value."""
        return self._enum.getRaw()

    def __str__(self):
        # Prefer the symbolic name; fall back to the raw ordinal.
        raw = self.get()
        if raw == self.enumerant.ordinal:
            return self.enumerant.name
        else:
            return str(raw)

    __repr__ = bases.repr_object

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (
            self.schema == other.schema and
            self.enumerant.ordinal == other.enumerant.ordinal
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.enumerant.ordinal)
class DynamicList(collections.abc.Sequence):
    """Read-only, lazily-converted view of a Cap'n Proto list."""

    # NOTE: Since Cap'n Proto doesn't seem to allow List(AnyPointer), we
    # don't have to handle that in DynamicList.

    class Builder(collections.abc.MutableSequence):
        """Mutable (builder) view of a fixed-size Cap'n Proto list."""

        def __init__(self, schema, list_):
            assert schema.kind is Schema.Kind.LIST
            assert schema.id == bases.get_schema_id(list_.getSchema())
            self.schema = schema
            self._list = list_

        def copy_from(self, list_):
            """Element-wise deep copy from `list_` (same schema and
            length required)."""
            assert list_.schema is self.schema
            assert len(list_) == len(self)
            if self.schema.element_type.kind is Type.Kind.LIST:
                for i in range(len(self)):
                    value = list_[i]
                    self.init(i, len(value)).copy_from(value)
            elif self.schema.element_type.kind is Type.Kind.STRUCT:
                for i in range(len(self)):
                    self.init(i).copy_from(list_[i])
            else:
                # Scalar elements: plain assignment.
                for i in range(len(self)):
                    self[i] = list_[i]

        def as_reader(self):
            """Return a read-only DynamicList view of this builder."""
            return DynamicList(self.schema, self._list.asReader())

        def __len__(self):
            return self._list.size()

        def _ensure_index(self, index):
            # Reject non-int indexes (no slice support) and range check.
            if not isinstance(index, int):
                raise TypeError('non-integer index: %s' % index)
            if not 0 <= index < self._list.size():
                raise IndexError(
                    'not 0 <= %d < %d' % (index, self._list.size()))

        def __getitem__(self, index):
            self._ensure_index(index)
            return _dynamic_value_builder_to_python(
                self.schema.element_type,
                self._list[index],
            )

        def init(self, index, size=None):
            """Initialize the list/struct element at `index` and return
            its builder (list elements require `size`)."""
            self._ensure_index(index)
            if self.schema.element_type.kind is Type.Kind.LIST:
                assert size is not None
                return DynamicList.Builder(
                    self.schema.element_type.schema,
                    self._list.init(index, size).asList(),
                )
            else:
                assert self.schema.element_type.kind is Type.Kind.STRUCT
                assert size is None
                return DynamicStruct.Builder(
                    self.schema.element_type.schema,
                    self._list[index].asStruct(),
                )

        def __setitem__(self, index, value):
            self._ensure_index(index)
            _set_scalar(self._list, index, self.schema.element_type, value)

        def __delitem__(self, index):
            # Cap'n Proto lists are fixed-size.
            raise IndexError('do not support __delitem__')

        def insert(self, index, value):
            # Cap'n Proto lists are fixed-size.
            raise IndexError('do not support insert')

        def __str__(self):
            return '[%s]' % ', '.join(map(bases.str_value, self))

        __repr__ = bases.repr_object

        def __eq__(self, other):
            if not isinstance(other, self.__class__):
                return False
            return (
                self.schema == other.schema and
                len(self) == len(other) and
                all(p == q for p, q in zip(self, other))
            )

        def __ne__(self, other):
            return not self.__eq__(other)

        # Builder is not hashable.

    def __init__(self, schema, list_):
        assert schema.kind is Schema.Kind.LIST
        assert schema.id == bases.get_schema_id(list_.getSchema())
        self.schema = schema
        self._list = list_
        self._values_cache = None

    @property
    def _values(self):
        # Convert all elements to Python values once, lazily; the tuple
        # also makes the reader hashable.
        if self._values_cache is None:
            self._values_cache = tuple(
                _dynamic_value_reader_to_python(
                    self.schema.element_type,
                    self._list[i],
                )
                for i in range(self._list.size())
            )
        return self._values_cache

    def __len__(self):
        return self._list.size()

    def __iter__(self):
        yield from self._values

    def __getitem__(self, index):
        return self._values[index]

    def __str__(self):
        return '[%s]' % ', '.join(map(bases.str_value, self._values))

    __repr__ = bases.repr_object

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (
            self.schema == other.schema and
            len(self) == len(other) and
            all(p == q for p, q in zip(self, other))
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        assert isinstance(self._values, tuple)
        return hash(self._values)
class DynamicStruct(collections.abc.Mapping):
    """Read-only mapping view of a Cap'n Proto struct (field name ->
    Python value); only fields that are set are visible."""

    class Builder(collections.abc.MutableMapping):
        """Mutable mapping view of a Cap'n Proto struct."""

        def __init__(self, schema, struct):
            assert schema.kind is Schema.Kind.STRUCT
            assert schema.id == bases.get_schema_id(struct.getSchema())
            self.schema = schema
            self._struct = struct

        def copy_from(self, struct):
            """Deep-copy all set fields of `struct` into this builder;
            unset non-union fields are cleared."""
            assert struct.schema is self.schema
            if self.schema.union_fields:
                # Can you mix union and non-union fields in one struct?
                assert not self.schema.non_union_fields
                # Copy only the one union member that is set.
                for field in self.schema.union_fields:
                    if struct._struct.has(field._field):
                        self._copy_field(field, struct)
                        break
                else:
                    raise ValueError(
                        'none of union member is set: %s' % struct)
                return
            for field in self.schema.fields:
                if struct._struct.has(field._field):
                    self._copy_field(field, struct)
                else:
                    self._struct.clear(field._field)

        def _copy_field(self, field, struct):
            # Lists and structs need recursive copies; scalars assign.
            if field.type.kind is Type.Kind.LIST:
                list_ = struct[field.name]
                self.init(field.name, len(list_)).copy_from(list_)
            elif field.type.kind is Type.Kind.STRUCT:
                self.init(field.name).copy_from(struct[field.name])
            else:
                self[field.name] = struct[field.name]

        @property
        def total_size(self):
            # (word count, capability count) of the message subtree.
            msg_size = self._struct.totalSize()
            return (msg_size.wordCount, msg_size.capCount)

        def as_reader(self):
            """Return a read-only DynamicStruct view of this builder."""
            return DynamicStruct(self.schema, self._struct.asReader())

        def __len__(self):
            # Number of fields that are currently set.
            count = 0
            for field in self.schema.fields:
                if self._struct.has(field._field):
                    count += 1
            return count

        def __contains__(self, name):
            # NOTE(review): returns the falsy field object (not False)
            # when the name is unknown -- truthiness is still correct.
            field = self.schema.get(name)
            return field and self._struct.has(field._field)

        def __iter__(self):
            for field in self.schema.fields:
                if self._struct.has(field._field):
                    yield field.name

        def __getitem__(self, name):
            return self._get(name, True, None)

        def get(self, name, default=None):
            return self._get(name, False, default)

        def _get(self, name, raise_on_missing, default):
            # Shared lookup for __getitem__/get.
            field = self.schema.get(name)
            if field and self._struct.has(field._field):
                return _dynamic_value_builder_to_python(
                    field.type,
                    self._struct.get(field._field),
                )
            if raise_on_missing:
                raise KeyError(name)
            else:
                return default

        def init(self, name, size=None):
            """Initialize a list/struct/any-pointer field and return its
            builder (lists require a positive `size`)."""
            field = self.schema[name]  # This may raise KeyError.
            if field.type.kind is Type.Kind.LIST:
                assert isinstance(size, int) and size > 0
                return DynamicList.Builder(
                    field.type.schema,
                    self._struct.init(field._field, size).asList(),
                )
            elif field.type.kind is Type.Kind.STRUCT:
                assert size is None
                return DynamicStruct.Builder(
                    field.type.schema,
                    self._struct.init(field._field).asStruct(),
                )
            elif field.type.kind is Type.Kind.ANY_POINTER:
                assert size is None
                return self._get_any_pointer(field)
            else:
                raise AssertionError(
                    'cannot init non-list, non-struct field: %s' % field)

        def __setitem__(self, name, value):
            field = self.schema[name]  # This may raise KeyError.
            if field.type.kind is Type.Kind.ANY_POINTER:
                self._get_any_pointer(field).set(value)
            else:
                _set_scalar(self._struct, field._field, field.type, value)

        def _get_any_pointer(self, field):
            return AnyPointer(self._struct.get(field._field).asAnyPointer())

        def __delitem__(self, name):
            # "Deleting" a field clears it back to its default.
            field = self.schema[name]  # This may raise KeyError.
            if not self._struct.has(field._field):
                raise KeyError(name)
            self._struct.clear(field._field)

        def __str__(self):
            return '(%s)' % ', '.join(
                '%s = %s' % (name, bases.str_value(value))
                for name, value in self.items()
            )

        __repr__ = bases.repr_object

        def __eq__(self, other):
            if not isinstance(other, self.__class__):
                return False
            if self.schema != other.schema:
                return False
            if len(self) != len(other):
                return False
            for name in self:
                if name not in other:
                    return False
                if self[name] != other[name]:
                    return False
            return True

        def __ne__(self, other):
            return not self.__eq__(other)

        # Builder is not hashable.

    def __init__(self, schema, struct):
        assert schema.kind is Schema.Kind.STRUCT
        assert schema.id == bases.get_schema_id(struct.getSchema())
        self.schema = schema
        self._struct = struct
        self._dict_cache = None

    @property
    def _dict(self):
        # Lazily convert all set fields once, preserving schema order.
        if self._dict_cache is None:
            self._dict_cache = collections.OrderedDict(
                (
                    field.name,
                    _dynamic_value_reader_to_python(
                        field.type,
                        self._struct.get(field._field),
                    ),
                )
                for field in self.schema.fields
                if self._struct.has(field._field)
            )
        return self._dict_cache

    @property
    def total_size(self):
        # (word count, capability count) of the message subtree.
        msg_size = self._struct.totalSize()
        return (msg_size.wordCount, msg_size.capCount)

    def __len__(self):
        return len(self._dict)

    def __contains__(self, name):
        return name in self._dict

    def __iter__(self):
        yield from self._dict

    def __getitem__(self, name):
        return self._dict[name]

    def get(self, name, default=None):
        return self._dict.get(name, default)

    def __str__(self):
        return '(%s)' % ', '.join(
            '%s = %s' % (name, bases.str_value(value))
            for name, value in self._dict.items()
        )

    __repr__ = bases.repr_object

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        if self.schema != other.schema:
            return False
        return self._dict == other._dict

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # self._dict is ordered, and so we could hash with iterating
        # through it.
        assert isinstance(self._dict, collections.OrderedDict)
        return hash(tuple(self[name] for name in self))
def _set_scalar(builder, key, type_, python_value):
    """Store a Python scalar into `builder` at `key` as a DynamicValue.

    `builder` is a struct or list builder; `key` is a field or an index
    accordingly.  Raises TypeError for non-scalar types.
    """
    if type_.kind is Type.Kind.VOID:
        assert python_value is None
    elif not type_.kind.is_scalar:
        raise TypeError('not scalar type: %s' % type_)
    elif type_.kind is Type.Kind.ENUM:
        # Normalize enum members / raw ints into DynamicEnum first.
        python_value = DynamicEnum.from_member(type_.schema, python_value)
    python_type, maker, _ = _DYNAMIC_VALUE_READER_TABLE[type_.kind]
    assert isinstance(python_value, python_type)
    if python_type is DynamicEnum:
        # The maker wants the wrapped native enum, not the wrapper.
        value = maker(python_value._enum)
    else:
        value = maker(python_value)
    builder.set(key, value)
class AnyPointer:
    """Wrap a capnp::AnyPointer::Reader/Builder object.

    This is defined in capnp/any.h; don't confuse it with
    capnp::schema::Type::AnyPointer::Reader/Builder.
    """

    class Kind(enum.Enum):
        # Mirrors native.PointerType; the 1-tuple values keep members
        # distinct for enum's alias detection.
        NULL = (native.PointerType.NULL,)
        STRUCT = (native.PointerType.STRUCT,)
        LIST = (native.PointerType.LIST,)
        CAPABILITY = (native.PointerType.CAPABILITY,)
        def __init__(self, pointer_type):
            self.pointer_type = pointer_type

    # native pointer type -> Kind member.
    _KIND_LOOKUP = {kind.pointer_type: kind for kind in Kind}

    def __init__(self, any_pointer):
        self._any_pointer = any_pointer
        # Reader wrappers reject all mutating operations below.
        self._is_reader = isinstance(any_pointer, native.AnyPointer.Reader)

    def __str__(self):
        return '<opaque pointer>'

    __repr__ = bases.repr_object

    @property
    def kind(self):
        return self._KIND_LOOKUP[self._any_pointer.getPointerType()]

    def init(self, schema, size=None):
        """Initialize the pointee as a list (requires `size` > 0) or a
        struct, returning the matching builder."""
        assert not self._is_reader
        if schema.kind is Schema.Kind.LIST:
            assert isinstance(size, int) and size > 0
            builder = DynamicList.Builder(
                schema,
                self._any_pointer.initAsList(schema._schema, size),
            )
        else:
            assert schema.kind is Schema.Kind.STRUCT
            assert size is None
            builder = DynamicStruct.Builder(
                schema,
                self._any_pointer.initAsStruct(schema._schema)
            )
        return builder

    def get(self, schema):
        """Read the pointee; `schema` may be str, bytes, or a Schema.

        Returns None for a null pointer; otherwise text, data, or a
        (reader or builder) list/struct wrapper.
        """
        kind = self.kind
        if kind is AnyPointer.Kind.NULL:
            return None
        elif schema is str:
            # Text and data are stored as list pointers.
            assert kind is AnyPointer.Kind.LIST
            return self._any_pointer.getAsText()
        elif schema is bytes:
            assert kind is AnyPointer.Kind.LIST
            return self._any_pointer.getAsData()
        elif schema.kind is Schema.Kind.LIST:
            assert kind is AnyPointer.Kind.LIST
            cls = DynamicList if self._is_reader else DynamicList.Builder
            return cls(schema, self._any_pointer.getAsList(schema._schema))
        else:
            assert schema.kind is Schema.Kind.STRUCT
            assert kind is AnyPointer.Kind.STRUCT
            cls = DynamicStruct if self._is_reader else DynamicStruct.Builder
            return cls(schema, self._any_pointer.getAsStruct(schema._schema))

    def set(self, blob):
        """Write None (clear), str (text), or bytes (data)."""
        assert not self._is_reader
        if blob is None:
            self._any_pointer.clear()
        elif isinstance(blob, str):
            self._any_pointer.setAsText(blob)
        else:
            assert isinstance(blob, bytes)
            self._any_pointer.setAsData(blob)

    def as_reader(self):
        """Return a read-only AnyPointer view of this builder."""
        assert not self._is_reader
        return AnyPointer(self._any_pointer.asReader())
# type_kind -> python_type, maker, converter
# - python_type: the Python-side type values of this kind map to
# - maker: builds a native DynamicValue.Reader from a Python value
# - converter: extracts the Python value from a DynamicValue.Reader
_DYNAMIC_VALUE_READER_TABLE = {
    Type.Kind.VOID: (
        type(None),
        native.DynamicValue.Reader.fromVoid,
        native.DynamicValue.Reader.asVoid,
    ),
    Type.Kind.BOOL: (
        bool,
        native.DynamicValue.Reader.fromBool,
        native.DynamicValue.Reader.asBool,
    ),
    Type.Kind.INT8: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asInt,
    ),
    Type.Kind.INT16: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asInt,
    ),
    Type.Kind.INT32: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asInt,
    ),
    Type.Kind.INT64: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asInt,
    ),
    Type.Kind.UINT8: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asUInt,
    ),
    Type.Kind.UINT16: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asUInt,
    ),
    Type.Kind.UINT32: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asUInt,
    ),
    Type.Kind.UINT64: (
        int,
        native.DynamicValue.Reader.fromInt,
        native.DynamicValue.Reader.asUInt,
    ),
    Type.Kind.FLOAT32: (
        float,
        native.DynamicValue.Reader.fromFloat,
        native.DynamicValue.Reader.asFloat,
    ),
    Type.Kind.FLOAT64: (
        float,
        native.DynamicValue.Reader.fromFloat,
        native.DynamicValue.Reader.asFloat,
    ),
    Type.Kind.TEXT: (
        str,
        native.DynamicValue.Reader.fromStr,
        native.DynamicValue.Reader.asText,
    ),
    Type.Kind.DATA: (
        bytes,
        native.DynamicValue.Reader.fromBytes,
        native.DynamicValue.Reader.asData,
    ),
    Type.Kind.LIST: (
        DynamicList,
        native.DynamicValue.Reader.fromList,
        native.DynamicValue.Reader.asList,
    ),
    Type.Kind.ENUM: (
        DynamicEnum,
        native.DynamicValue.Reader.fromEnum,
        native.DynamicValue.Reader.asEnum,
    ),
    Type.Kind.STRUCT: (
        DynamicStruct,
        native.DynamicValue.Reader.fromStruct,
        native.DynamicValue.Reader.asStruct,
    ),
    Type.Kind.ANY_POINTER: (
        AnyPointer,
        native.DynamicValue.Reader.fromAnyPointer,
        native.DynamicValue.Reader.asAnyPointer,
    )
}
# Wrapper types that must be constructed with (schema, native_value);
# see _dynamic_value_reader_to_python below.
_DYNAMIC_READER_TYPES = frozenset((
    DynamicList,
    DynamicEnum,
    DynamicStruct,
))
def _dynamic_value_reader_to_python(type_, value):
    """Convert a native DynamicValue.Reader into a Python value,
    wrapping list/enum/struct/any-pointer values appropriately."""
    assert isinstance(value, native.DynamicValue.Reader)
    python_type, _, converter = _DYNAMIC_VALUE_READER_TABLE[type_.kind]
    python_value = converter(value)
    if python_type in _DYNAMIC_READER_TYPES:
        # These wrappers need the schema attached.
        assert type_.schema is not None
        python_value = python_type(type_.schema, python_value)
    elif python_type is AnyPointer:
        python_value = AnyPointer(python_value)
    assert isinstance(python_value, python_type), (python_value, python_type)
    return python_value
# type_kind -> python_type, converter
# Builder-side analogue of _DYNAMIC_VALUE_READER_TABLE (no maker:
# writes go through _set_scalar / AnyPointer.set instead).
_DYNAMIC_VALUE_BUILDER_TABLE = {
    Type.Kind.VOID: (type(None), lambda _: None),
    Type.Kind.BOOL: (bool, native.DynamicValue.Builder.asBool),
    Type.Kind.INT8: (int, native.DynamicValue.Builder.asInt),
    Type.Kind.INT16: (int, native.DynamicValue.Builder.asInt),
    Type.Kind.INT32: (int, native.DynamicValue.Builder.asInt),
    Type.Kind.INT64: (int, native.DynamicValue.Builder.asInt),
    Type.Kind.UINT8: (int, native.DynamicValue.Builder.asUInt),
    Type.Kind.UINT16: (int, native.DynamicValue.Builder.asUInt),
    Type.Kind.UINT32: (int, native.DynamicValue.Builder.asUInt),
    Type.Kind.UINT64: (int, native.DynamicValue.Builder.asUInt),
    Type.Kind.FLOAT32: (float, native.DynamicValue.Builder.asFloat),
    Type.Kind.FLOAT64: (float, native.DynamicValue.Builder.asFloat),
    Type.Kind.TEXT: (str, native.DynamicValue.Builder.asText),
    Type.Kind.DATA: (bytes, native.DynamicValue.Builder.asData),
    Type.Kind.LIST: (DynamicList.Builder, native.DynamicValue.Builder.asList),
    Type.Kind.ENUM: (DynamicEnum, native.DynamicValue.Builder.asEnum),
    Type.Kind.STRUCT: (
        DynamicStruct.Builder,
        native.DynamicValue.Builder.asStruct,
    ),
    Type.Kind.ANY_POINTER: (
        AnyPointer,
        native.DynamicValue.Builder.asAnyPointer,
    ),
}
# Wrapper types that must be constructed with (schema, native_value);
# see _dynamic_value_builder_to_python below.
_DYNAMIC_BUILDER_TYPES = frozenset((
    DynamicList.Builder,
    DynamicEnum,
    DynamicStruct.Builder,
))
def _dynamic_value_builder_to_python(type_, value):
    """Convert a native DynamicValue.Builder into a Python value,
    wrapping list/enum/struct/any-pointer values appropriately."""
    assert isinstance(value, native.DynamicValue.Builder)
    python_type, converter = _DYNAMIC_VALUE_BUILDER_TABLE[type_.kind]
    python_value = converter(value)
    if python_type in _DYNAMIC_BUILDER_TYPES:
        # These wrappers need the schema attached.
        assert type_.schema is not None
        python_value = python_type(type_.schema, python_value)
    elif python_type is AnyPointer:
        python_value = AnyPointer(python_value)
    assert isinstance(python_value, python_type), (python_value, python_type)
    return python_value
| {
"repo_name": "clchiou/garage",
"path": "py/capnp/capnp/dynamics.py",
"copies": "1",
"size": "26626",
"license": "mit",
"hash": 7664514991665679000,
"line_mean": 29.8171296296,
"line_max": 78,
"alpha_frac": 0.5724104259,
"autogenerated": false,
"ratio": 4.075616102862391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.514802652876239,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'MessageReader',
'MessageBuilder',
]
from g1.bases.assertions import ASSERT
from . import _capnp
# pylint: disable=c-extension-no-member
from . import bases
from . import dynamics
from . import schemas
class _Array(bases.BaseResource):
    """Own a native array returned by message serialization."""

    _raw_type = (_capnp._Array_byte, _capnp._Array_word)

    @property
    def memory_view(self):
        # Bytes view of the underlying array.
        return self._raw.asBytes()
class MessageReader(bases.BaseResource):
    """Read a Cap'n Proto message from flat or packed bytes."""

    _raw_type = (_capnp.FlatArrayMessageReader, _capnp.PackedMessageReader)

    @classmethod
    def from_message_bytes(cls, message_bytes):
        return cls(_capnp.FlatArrayMessageReader(message_bytes), message_bytes)

    @classmethod
    def from_packed_message_bytes(cls, packed_message_bytes):
        return cls(
            _capnp.makePackedMessageReader(packed_message_bytes),
            packed_message_bytes,
        )

    def __init__(self, raw, message_bytes):
        super().__init__(raw)
        # Own ``message_bytes`` because ``FlatArrayMessageReader`` does
        # not own it.
        self._message_bytes = message_bytes

    def __exit__(self, *args):
        try:
            return super().__exit__(*args)
        finally:
            # Release the bytes only after the raw reader is released.
            self._message_bytes = None

    def get_root(self, struct_schema):
        """Return a DynamicStructReader for the message root."""
        ASSERT.isinstance(struct_schema, schemas.StructSchema)
        return dynamics.DynamicStructReader(
            self,
            struct_schema,
            self._raw.getRoot(struct_schema._raw),
        )

    is_canonical = bases.def_f0(_capnp.MessageReader.isCanonical)
class MessageBuilder(bases.BaseResource):
    """Build a Cap'n Proto message; serialize to flat or packed bytes."""

    _raw_type = _capnp.MallocMessageBuilder

    @classmethod
    def from_message_bytes(cls, message_bytes):
        # Copies the flat message into the builder.
        builder = cls()
        _capnp.initMessageBuilderFromFlatArrayCopy(message_bytes, builder._raw)
        return builder

    @classmethod
    def from_packed_message_bytes(cls, packed_message_bytes):
        # Copies the packed message into the builder.
        builder = cls()
        _capnp.initMessageBuilderFromPackedArrayCopy(
            packed_message_bytes,
            builder._raw,
        )
        return builder

    def __init__(self):
        super().__init__(self._raw_type())

    # Serialize to a native _Array resource (flat / packed form).
    to_message = bases.def_f0(_Array, _capnp.messageToFlatArray)
    to_packed_message = bases.def_f0(_Array, _capnp.messageToPackedArray)

    def to_message_bytes(self):
        """Return the flat wire format as bytes."""
        with self.to_message() as array:
            return bytes(array.memory_view)

    def to_packed_message_bytes(self):
        """Return the packed wire format as bytes."""
        with self.to_packed_message() as array:
            return bytes(array.memory_view)

    def set_root(self, struct):
        """Deep-copy `struct` (a reader) into the message root."""
        ASSERT.isinstance(struct, dynamics.DynamicStructReader)
        self._raw.setRoot(struct._raw)

    def get_root(self, struct_schema):
        """Return the existing root as a DynamicStructBuilder."""
        ASSERT.isinstance(struct_schema, schemas.StructSchema)
        return dynamics.DynamicStructBuilder(
            self,
            struct_schema,
            self._raw.getRoot(struct_schema._raw),
        )

    def init_root(self, struct_schema):
        """Allocate the root and return a DynamicStructBuilder."""
        ASSERT.isinstance(struct_schema, schemas.StructSchema)
        return dynamics.DynamicStructBuilder(
            self,
            struct_schema,
            self._raw.initRoot(struct_schema._raw),
        )

    is_canonical = bases.def_f0(_raw_type.isCanonical)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/capnp/capnp/messages.py",
"copies": "1",
"size": "3245",
"license": "mit",
"hash": -8060624536851185000,
"line_mean": 26.974137931,
"line_max": 79,
"alpha_frac": 0.6326656394,
"autogenerated": false,
"ratio": 3.8356973995271866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9968363038927186,
"avg_score": 0,
"num_lines": 116
} |
__all__ = [
'MockSession',
'MockResponse',
'fake_sleep',
]
import requests
class MockSession:
    """Mock of requests.Session."""

    def __init__(self, req_to_rep):
        # (method, url) -> response spec: a (status, content) tuple, a
        # list of such tuples consumed in order, or an Exception.
        self._req_to_rep = req_to_rep
        self._logs = []

    def head(self, url, **kwargs):
        return self._request('HEAD', url, kwargs)

    def get(self, url, **kwargs):
        return self._request('GET', url, kwargs)

    def post(self, url, **kwargs):
        return self._request('POST', url, kwargs)

    def put(self, url, **kwargs):
        return self._request('PUT', url, kwargs)

    def _request(self, method, url, kwargs):
        # All verb helpers funnel into send() with a prepared request.
        return self.send(requests.Request(method, url, **kwargs).prepare())

    def send(self, request):
        assert isinstance(request, requests.PreparedRequest)
        self._logs.append(request)
        rep = self._req_to_rep[(request.method, request.url)]
        if isinstance(rep, Exception):
            raise rep
        if isinstance(rep, list):
            return MockResponse(*rep.pop(0))
        return MockResponse(*rep)
class MockResponse:
    """Mock of requests.Response."""

    encoding = 'ascii'

    def __init__(self, status_code, content):
        self.status_code = status_code
        self.content = content

    def raise_for_status(self):
        """Raise requests.HTTPError for 4xx/5xx status codes."""
        if not 400 <= self.status_code < 600:
            return
        raise requests.HTTPError('http error', response=self)
def fake_sleep(_seconds):
    """Use this in place of time.sleep in tests; does nothing."""
    return None
| {
"repo_name": "clchiou/garage",
"path": "py/garage/tests/http/mocks.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": 9166253557696931000,
"line_mean": 25.1929824561,
"line_max": 75,
"alpha_frac": 0.5941058272,
"autogenerated": false,
"ratio": 3.847938144329897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4942043971529897,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'NanomsgError',
# Extend in _create_errors().
]
from . import _nanomsg as _nn
from .constants import Error
_ERRORS = {}
class NanomsgError(Exception):
    """Base exception class."""

    @staticmethod
    def make(error):
        """Return the specific registered subclass for `error`, or a
        generic NanomsgError built from nn_strerror."""
        exc_class = _ERRORS.get(error)
        if exc_class is not None:
            return exc_class()
        return NanomsgError(error, _nn.nn_strerror(error).decode('ascii'))

    def __init__(self, error, message):
        super().__init__(message)
        self.error = error
def _create_errors(global_vars, exposed_names):
    """Generate one NanomsgError subclass per Error enum member.

    Each class is stored into `global_vars` (module globals), its name
    appended to `exposed_names` (__all__), and registered in _ERRORS so
    NanomsgError.make can look it up by error code.
    """
    def make_init(error):
        # Don't use super() - its magic doesn't work here.
        # The message is resolved once, at class-creation time.
        message = _nn.nn_strerror(error).decode('ascii')
        def __init__(self):
            NanomsgError.__init__(self, error, message)
        return __init__
    for error in Error:
        exposed_names.append(error.name)
        global_vars[error.name] = _ERRORS[error] = type(
            error.name,
            (NanomsgError,),
            {'__init__': make_init(error)},
        )
def check(ret):
    """Pass `ret` through, raising the current nanomsg error on -1."""
    if ret != -1:
        return ret
    raise NanomsgError.make(_nn.nn_errno())
def asserts(cond, message, *args):
    """Raise AssertionError when `cond` is falsy.

    Unlike the bare ``assert`` statement this is not stripped under
    ``python -O``.  The message is %-formatted lazily and only when
    format arguments are actually given, so a message containing a
    literal '%' (with no args) no longer crashes the failure path.
    """
    if not cond:
        if args:
            message = message % args
        raise AssertionError(message)
_create_errors(globals(), __all__)
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/errors.py",
"copies": "1",
"size": "1309",
"license": "mit",
"hash": -8686524670731796000,
"line_mean": 21.5689655172,
"line_max": 78,
"alpha_frac": 0.5691367456,
"autogenerated": false,
"ratio": 3.5961538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4665290591753846,
"avg_score": null,
"num_lines": null
} |
__all__ = []
class _MetaAnnotations:
annotations = list()
def annotation_iterator(self):
return self.annotations.iterator() # TBD
def clear_annotations(self):
self.annotations = list()
def add_annotation(self, annotation):
self.annotations.append(annotation)
def remove_annotation(self, annotation): # Can be int or obj
if type(annotation) is int:
self.annotations.remove(self.annotations[annotation])
else:
self.annotations.remove(annotation) # TBC
class _MetaInfons:
infons = dict()
def put_infon(self, key, val):
self.infons[key] = val
def remove_infon(self, key):
del(self.infons[key])
def clear_infons(self):
self.infons = dict()
class _MetaOffset:
    # Default BioC offset.  Note it is stored as the STRING '-1', not an
    # int -- presumably to match the XML serialization; TODO confirm.
    offset = '-1'
class _MetaRelations:
relations = list()
def relation_iterator(self):
return self.relations.iterator() # TBD
def clear_relations(self):
self.relations = list()
def add_relation(self, relation):
self.relations.append(relation)
def remove_relation(self, relation): # Can be int or obj
if type(relation) is int:
self.relations.remove(self.relations[relation])
else:
self.relations.remove(relation) # TBC
class _MetaText:
    # Default (empty) BioC text payload.
    text = ''
class _MetaId:
    # Default (empty) BioC element id.
    id = ''
| {
"repo_name": "telukir/PubMed2Go",
"path": "BioC_export/bioc/meta/_bioc_meta.py",
"copies": "2",
"size": "1358",
"license": "isc",
"hash": 3285053936477041000,
"line_mean": 22.4137931034,
"line_max": 65,
"alpha_frac": 0.6200294551,
"autogenerated": false,
"ratio": 3.8361581920903953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008506275278389328,
"num_lines": 58
} |
__all__ = []
def export(value):
    """Decorator: add `value`'s name to __all__ and return it unchanged."""
    __all__.append(value.__name__)
    return value
import sys

# Handle to this module object (not used within this chunk; presumably
# consumed by the tag-generation code further down the file -- TODO
# confirm).
module = sys.modules[__name__]
class Context(object):
    """Indentation-aware line writer usable as a context manager.

    Entering bumps the indent level and returns the ``<<`` writer;
    exiting restores the previous level.
    """

    level = 0
    indent = ' '

    def __enter__(self):
        self.level += 1
        return self.__lshift__

    def __exit__(self, type_, value, btrace):
        self.level -= 1

    def __lshift__(self, data):
        line = self.indent * self.level + str(data)
        print(line)
        return self
class StrContext(Context):
    """Context that accumulates output in a string instead of printing."""

    # Accumulated output.  Class-level default; the first `+=` creates a
    # per-instance attribute, so instances do not share output.
    data = ''

    def __lshift__(self, data):
        self.data += self.indent * self.level + str(data) + "\n"
        return self

    def __str__(self):
        return self.data
# Module-wide output context shared by all tag classes below.
context = StrContext()
__all__.append('context')
def build_attrs(attrs):
    """Render a dict as HTML attribute text, with a leading space.

    Key underscores become dashes (surrounding dashes stripped); callable
    values are invoked; iterable values are space-joined; None values are
    skipped.  Returns '' for an empty/None mapping.  (Python 2 only:
    relies on dict.iteritems.)
    """
    if not attrs:
        return ''
    parts = []
    for name, value in attrs.iteritems():
        if value is None:
            continue
        if hasattr(value, '__call__'):
            value = value()
        if hasattr(value, '__iter__'):
            value = ' '.join(value)
        parts.append('%s="%s"' % (name.replace("_", "-").strip('-'), value))
    return ' ' + ' '.join(parts)
@export
def css(**attrs):
    """Render keyword arguments as a CSS declaration string.

    Underscores in property names become dashes.  (Python 2 only:
    relies on dict.iteritems.)
    """
    declarations = ("%s: %s;" % (name.replace('_', '-'), value)
                    for (name, value) in attrs.iteritems())
    return ' '.join(declarations)
class TAG(object):
    """Base class for paired HTML tags usable as context managers.

    Works both on the class itself (``with DIV: ...``) and on instances
    (``with DIV(id='x'): ...``).  Attribute/item access appends CSS class
    names.  Output goes to the module-level ``context``.
    """
    name = ''
    attrs = {}

    class __metaclass__(type):  # Python 2 metaclass: class-level sugar
        def __enter__(cls):
            # ``with DIV:`` — delegate to a fresh instance.
            return cls().__enter__()

        def __exit__(cls, type_, value, btrace):
            context.level -= 1
            context << '</%s>' % cls.name

        def __getattr__(cls, name):
            # ``DIV.some_class`` is shorthand for ``DIV['some-class']``.
            return cls[name.replace('_', '-')]

        def __getitem__(cls, name):
            inst = cls()
            inst.attrs['class'] = [name]
            return inst

    def __enter__(self):
        context << '<%s%s>' % (self.name, build_attrs(self.attrs))
        context.level += 1
        return context

    def __exit__(self, type_, value, btrace):
        context.level -= 1
        context << '</%s>' % self.name

    def __call__(self, _content=None, **attrs):
        if attrs:
            self.attrs.update(attrs)
        if _content is not None:
            # Inline form: emit open tag, content and close tag at once.
            context << '<%s%s>%s</%s>' % (self.name, build_attrs(self.attrs), _content, self.name)
        return self

    def __init__(self, _content=None, **attrs):
        # Copy class-level attrs so instances never share the dict.
        self.attrs = self.attrs.copy()
        self(_content, **attrs)

    def __getattr__(self, name):
        return self[name.replace('_', '-')]

    def __getitem__(self, name):
        self.attrs.setdefault('class', []).append(name)
        return self
class EMPTYTAG(object):
    """Void HTML tag (<br />, <hr />, ...): emitted directly from __new__."""
    name = ''
    attrs = {}

    def __new__(cls, **attrs):
        # Merge call-site attrs over class defaults without mutating them.
        if attrs:
            merged = cls.attrs.copy()
            merged.update(attrs)
        else:
            merged = cls.attrs
        context << '<%s%s />' % (cls.name, build_attrs(merged))
        # Intentionally returns None: the tag is written, not held.
@export
class COMMENT(TAG):
    """HTML comment block: ``with COMMENT: ...`` wraps output in <!-- -->."""

    def __enter__(self):
        # No indent-level change, mirroring __exit__ below.
        context << '<!-- '
        return context

    def __exit__(self, type_, value, traceback):
        context << ' -->'
@export
class HTML(TAG):
    """Root <html> element; emits a doctype line before the open tag."""
    name = 'html'
    doctype = ''

    def __enter__(self):
        context << '<!DOCTYPE %s>' % (self.doctype)
        return super(HTML, self).__enter__()
@export
class HTML5(HTML):
    """HTML root element emitting the HTML5 doctype (<!DOCTYPE HTML>)."""
    doctype = 'HTML'
# Names of paired tags; a TAG subclass is generated for each of them below.
simple_tags = (
    'head body title '                                       # Main elements
    'div p blockquote '                                      # Blocks
    'h1 h2 h3 h4 h5 h6 '                                     # Headers
    'u b i s a em strong span font '                         # Inline markup
    'del ins '                                               # Annotation
    'ul ol li dd dt dl '                                     # Lists
    'article section nav aside '                             # HTML5
    'audio video object embed param '                        # Media
    'fieldset legend button textarea label select option '   # Forms
    'table thead tbody tr th td caption '                    # Tables
)
# Void tags (no closing tag); EMPTYTAG subclasses are generated below.
empty_tags = 'meta link br hr input'
# Generate and export one class per tag name: TAG subclasses for paired
# tags, EMPTYTAG subclasses for void tags.  Class names are uppercased.
for names, base in ((simple_tags, TAG), (empty_tags, EMPTYTAG)):
    for tag in names.split():
        name = tag.upper()
        __all__.append(name)
        setattr(module, name, type(name, (base,), {'name': tag}))
@export
class SCRIPT(TAG):
    """<script> element defaulting to type="text/javascript"."""
    name = 'script'
    attrs = {'type': 'text/javascript'}
@export
class CSS(LINK):
    """Stylesheet link shortcut: ``CSS('style.css')`` emits the <link />."""
    attrs = {'type': 'text/css', 'rel': 'stylesheet'}

    def __new__(cls, href):
        # Delegates to EMPTYTAG.__new__ via LINK; emits and returns None.
        super(CSS, cls).__new__(cls, href=href)
@export
class JS(SCRIPT):
    """Script include shortcut: ``JS('app.js')`` emits the <script> pair."""

    def __init__(self, src):
        # Empty content forces the inline open/close form.
        super(JS, self).__init__('', src=src)
@export
class FORM(TAG):
    """<form> element, POSTing by default; ``FORM('/url')`` sets action."""
    name = 'form'
    attrs = {'method': 'POST'}

    def __init__(self, action='', **attrs):
        super(FORM, self).__init__(action=action, **attrs)
| {
"repo_name": "kostyll/usb-flash-network-monitor",
"path": "server/html.py",
"copies": "2",
"size": "4657",
"license": "mit",
"hash": 6395835275988908000,
"line_mean": 24.3097826087,
"line_max": 98,
"alpha_frac": 0.5086965858,
"autogenerated": false,
"ratio": 3.6669291338582677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5175625719658268,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = (
'NetElement',
'NamedNetElement',
)
from .errors import NetError
class NetElement(object):
    """Base class for objects that belong to (at most) one Net."""

    def __init__(self, *, net=None):
        self._net = net

    def bind(self, net):
        """Attach this element to *net*.

        Re-binding to the same net is a no-op; binding to a different net
        raises NetError.  (Removed a stray ``print("*** bind:", ...)``
        debug statement left over from development.)
        """
        if self._net is not None and self._net is not net:
            raise NetError("{!r}: cannot bind to net {!r}: already bound".format(self, net))
        self._net = net

    @property
    def net(self):
        """The Net this element is bound to, or None."""
        return self._net
class NamedNetElement(NetElement):
    """Net element carrying a name; names default to ``ClassName_N``."""

    # Per-class counters used to generate default names.
    __index__ = {}

    def __init__(self, name=None, *, net=None):
        super().__init__(net=net)
        self._name = self._default_name() if name is None else name

    @classmethod
    def _default_name(cls):
        """Produce the next ``ClassName_N`` name and advance the counter."""
        cls.__index__.setdefault(cls, 0)
        name = "{}_{}".format(cls.__name__, cls.__index__[cls])
        cls.__index__[cls] += 1
        return name

    @property
    def name(self):
        return self._name

    def __repr__(self):
        return "{}({!r})".format(self.__class__.__name__, self._name)
| {
"repo_name": "simone-campagna/petra",
"path": "petra/net_element.py",
"copies": "1",
"size": "1069",
"license": "apache-2.0",
"hash": -368822919237587600,
"line_mean": 23.2954545455,
"line_max": 92,
"alpha_frac": 0.5285313377,
"autogenerated": false,
"ratio": 3.7118055555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9736747572629945,
"avg_score": 0.0007178641251221897,
"num_lines": 44
} |
# Public API of this module.
__all__ = (
'Net',
)
import builtins
import collections
import itertools
from .errors import InternalError, NodeError
from .marking import Marking
from .net_element import NamedNetElement
from .node import Node
from .place import Place
from .transition import Transition
class Net(NamedNetElement):
    """A Petri net: named places and transitions joined by annotated arcs.

    Nodes are stored by name in insertion order; engines attached to the
    net receive change/fire notifications.
    """

    __dict_factory__ = collections.OrderedDict
    __defaultdict_factory__ = collections.defaultdict
    # Default globals for evaluating guard/annotation code: all builtins.
    __globals__ = {attr_name: getattr(builtins, attr_name) for attr_name in dir(builtins)}

    def __init__(self, name=None, globals_d=None):
        super().__init__(name=name, net=self)
        self._places = self.__dict_factory__()
        self._transitions = self.__dict_factory__()
        self._engines = []
        if globals_d is None:
            globals_d = self.__globals__.copy()
        self._globals_d = globals_d

    ### declare names:
    def declare(self, name, value):
        """Expose *value* as *name* to guard/annotation code."""
        self._globals_d[name] = value

    @property
    def globals_d(self):
        return self._globals_d

    ### add engine:
    def add_engine(self, engine):
        self._engines.append(engine)

    def engines(self):
        yield from self._engines

    ### add nodes:
    def _check_node(self, node_type, node, **kwargs):
        """Coerce *node* (name or instance) into a node bound to this net."""
        if isinstance(node, str):
            node = node_type(net=self, name=node, **kwargs)
        elif isinstance(node, node_type):
            if node.net is not None and node.net is not self:
                raise NodeError("cannot add {!r}: already bound".format(node))
            if kwargs:
                node.update(**kwargs)
        else:
            raise NodeError("cannot add {!r}: not a {}".format(node, node_type.__name__))
        # Names must be unique across *both* node kinds.
        if node.name in self._places or node.name in self._transitions:
            raise NodeError("cannot add {!r}: a node with the same name already exists".format(node))
        node.bind(net=self)
        return node

    def add_place(self, place, tokens=None, **kwargs):
        """Add a Place (or create one from a name) and return it."""
        self.notify_net_changed()
        place = self._check_node(Place, place, tokens=tokens, **kwargs)
        self._places[place.name] = place
        return place

    def add_transition(self, transition, guard=None, **kwargs):
        """Add a Transition (or create one from a name) and return it."""
        self.notify_net_changed()
        transition = self._check_node(Transition, transition, guard=guard, **kwargs)
        self._transitions[transition.name] = transition
        return transition

    def add_node(self, node):
        """Dispatch to add_place/add_transition based on the node's type."""
        if isinstance(node, Place):
            return self.add_place(node)
        if isinstance(node, Transition):
            return self.add_transition(node)
        raise NodeError("cannot add {!r}: not a valid node".format(node))

    ### get nodes:
    def place(self, name):
        return self._places[name]

    def places(self):
        yield from self._places.values()

    def transition(self, name):
        return self._transitions[name]

    def transitions(self):
        yield from self._transitions.values()

    def node(self, name):
        """Look up a node of either kind by name (KeyError if absent)."""
        if name in self._places:
            return self._places[name]
        if name in self._transitions:
            return self._transitions[name]
        raise KeyError(name)

    def nodes(self):
        yield from self.places()
        yield from self.transitions()

    ### add arcs:
    def add_input(self, place, transition, annotation):
        """Create an input arc place -> transition (names are resolved)."""
        self.notify_net_changed()
        if isinstance(place, str):
            place = self.place(place)
        if isinstance(transition, str):
            transition = self.transition(transition)
        return transition.add_input(place, annotation=annotation)

    def add_output(self, place, transition, annotation):
        """Create an output arc transition -> place (names are resolved)."""
        self.notify_net_changed()
        if isinstance(place, str):
            place = self.place(place)
        if isinstance(transition, str):
            transition = self.transition(transition)
        return transition.add_output(place, annotation=annotation)

    def _get_node(self, node):
        """Resolve *node* (name or instance) and verify it belongs here."""
        if isinstance(node, Node):
            if node.net is not self:
                raise NodeError("{!r}: node {!r} not bound to this net".format(self, node))
        else:
            node = self.node(node)
        return node

    def input_arcs(self, node):
        return self._get_node(node).input_arcs()

    def inputs(self, node):
        return self._get_node(node).inputs()

    def output_arcs(self, node):
        return self._get_node(node).output_arcs()

    def outputs(self, node):
        return self._get_node(node).outputs()

    ### root nodes:
    def root_places(self):
        """Yield places with no incoming arcs."""
        for place in self.places():
            if len(place.inputs()) == 0:
                yield place

    def root_transitions(self):
        """Yield transitions with no incoming arcs."""
        for transition in self.transitions():
            if len(transition.inputs()) == 0:
                yield transition

    def root_nodes(self):
        yield from self.root_places()
        yield from self.root_transitions()

    ### dict interface:
    def __getitem__(self, name):
        return self.node(name)

    def __iter__(self):
        yield from self.nodes()

    def __len__(self):
        return len(self._places) + len(self._transitions)

    ### walk:
    def walk(self, *, depth_first=False, first_only=True):
        """Yield nodes reachable from the roots, breadth- or depth-first.

        With first_only=True every node is yielded at most once.
        """
        if first_only:
            seen = set()

            def not_seen(node):
                unseen = node not in seen
                if unseen:
                    seen.add(node)
                return unseen
        else:
            not_seen = lambda node: True
        nodes = itertools.chain(self.root_nodes())
        while True:
            if depth_first:
                try:
                    node = next(nodes)
                except StopIteration:
                    break
                yield node
                # Push this node's unseen successors ahead of the rest.
                next_nodes = itertools.chain(filter(not_seen, self.outputs(node)), nodes)
                nodes = iter(next_nodes)
            else:
                # Breadth-first: yield the whole frontier, then advance.
                next_nodes = []
                for node in nodes:
                    yield node
                    new_nodes = filter(not_seen, self.outputs(node))
                    next_nodes.extend(new_nodes)
                if not next_nodes:
                    break
                nodes = iter(next_nodes)

    ### marking:
    def get_marking(self):
        """Snapshot the tokens of every non-empty place."""
        marking = Marking()
        for place in self._places.values():
            if place.tokens:
                marking[place.name] = place.tokens.copy()
        return marking

    def set_marking(self, marking):
        """Replace every place's tokens with those recorded in *marking*."""
        for place in self._places.values():
            tokens = marking.get(place.name)
            place.tokens.clear()
            if tokens:
                place.tokens.extend(tokens)

    ### notifications:
    def notify_transition_fired(self, transition):
        for engine in self._engines:
            engine.notify_transition_fired(transition)

    def notify_net_changed(self):
        for engine in self._engines:
            engine.notify_net_changed()
| {
"repo_name": "simone-campagna/petra",
"path": "petra/net.py",
"copies": "1",
"size": "7080",
"license": "apache-2.0",
"hash": 3576607404941713400,
"line_mean": 29.6493506494,
"line_max": 101,
"alpha_frac": 0.5638418079,
"autogenerated": false,
"ratio": 4.257366205652436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5321208013552435,
"avg_score": null,
"num_lines": null
} |
# Initial public API; extended below with the contents of the C-level
# modules via extend_all().
__all__ = [
'newaxis', 'ufunc',
'asarray', 'asanyarray', 'base_repr',
'array_repr', 'array_str', 'set_string_function',
'array_equal', 'outer', 'vdot', 'identity', 'little_endian',
'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
'seterr',
]
import sys
import multiarray
from multiarray import *
del set_string_function
del typeinfo
import umath
from umath import *
import numerictypes
from numerictypes import *
def extend_all(module):
    """Append *module*'s public names to this module's ``__all__``.

    Names already present in ``__all__`` (at call time) are skipped.
    Modules without an ``__all__`` contribute every non-underscore name.
    """
    known = {a: 1 for a in __all__}
    try:
        mall = getattr(module, '__all__')
    except AttributeError:
        mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
    for a in mall:
        if a not in known:
            __all__.append(a)
# Fold the C-level modules' public names into this module's __all__.
extend_all(multiarray)
__all__.remove('typeinfo')
extend_all(umath)
extend_all(numerictypes)
# Canonical aliases re-exported at module level.
newaxis = None
ufunc = type(sin)
# XXX this file to be reviewed
def seterr(**args):
    """Stub for numpy.seterr: accept the settings and echo them back.

    No error-state machinery exists here, so this is a no-op that simply
    returns the keyword arguments it was given.
    """
    return args
def asanyarray(a, dtype=None, order=None):
    """Convert the input to an ndarray, passing ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Anything convertible to an array: scalars, (nested) lists and
        tuples, or existing ndarrays.
    dtype : data-type, optional
        Inferred from the input by default.
    order : {'C', 'F'}, optional
        Row-major ('C', default) or column-major ('F') memory layout.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of *a*.  If *a* is already an ndarray or a
        subclass of ndarray it is returned as-is with no copy.

    See Also
    --------
    asarray : Similar function which always returns base-class ndarrays.

    Examples
    --------
    >>> np.asanyarray([1, 2])
    array([1, 2])
    >>> a = np.matrix([1, 2])
    >>> np.asanyarray(a) is a
    True
    """
    return array(a, dtype, copy=False, order=order, subok=True)
def base_repr(number, base=2, padding=0):
    """Return a string representation of *number* in the given base.

    Parameters
    ----------
    number : int
        The value to convert; negatives are prefixed with '-'.
    base : int, optional
        Target number system, 2..36 (default 2).
    padding : int, optional
        Number of zeros padded on the left (default 0).

    Returns
    -------
    out : str
        String representation of *number* in *base*.

    Examples
    --------
    >>> np.base_repr(5)
    '101'
    >>> np.base_repr(7, base=5, padding=3)
    '00012'
    >>> np.base_repr(10, base=16)
    'A'
    """
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base > len(digits):
        raise ValueError("Bases greater than 36 not handled in base_repr.")
    num = abs(number)
    pieces = []
    while num:
        num, rem = divmod(num, base)
        pieces.append(digits[rem])
    if padding:
        pieces.append('0' * padding)
    if number < 0:
        pieces.append('-')
    # pieces is least-significant-first; '0' covers the number == 0 case.
    return ''.join(reversed(pieces or '0'))
#Use numarray's printing function
from arrayprint import array2string
# dtypes whose repr may omit the ", dtype=..." suffix (see array_repr).
_typelessdata = [int_, float_]#, complex_]
# XXX
#if issubclass(intc, int):
# _typelessdata.append(intc)
#if issubclass(longlong, int):
# _typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """Return the ``repr``-style string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        Maximum number of columns the string should span.
    precision : int, optional
        Floating point precision (defaults to the current print setting).
    suppress_small : bool, optional
        Represent very small numbers as zero (default False).

    Returns
    -------
    string : str
        The string representation of the array, e.g. ``'array([1, 2])'``.

    See Also
    --------
    array_str, array2string, set_printoptions
    """
    if arr.size > 0 or arr.shape == (0,):
        body = array2string(arr, max_line_width, precision, suppress_small,
                            ', ', "array(")
    else:
        # Zero-size with a shape other than (0,): show the shape explicitly.
        body = "[], shape=%s" % (repr(arr.shape),)

    if arr.__class__ is not ndarray:
        class_name = arr.__class__.__name__
    else:
        class_name = "array"

    # The dtype suffix can be skipped for plain int/float data.
    skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0

    # XXX pypy lacks support — NA-mask handling deliberately disabled ('if 0').
    if 0 and arr.flags.maskna:
        whichna = isna(arr)
        # If nothing is NA, explicitly signal the NA-mask
        if not any(whichna):
            body += ", maskna=True"
        # If everything is NA, can't skip the dtype
        if skipdtype and all(whichna):
            skipdtype = False

    if skipdtype:
        return "%s(%s)" % (class_name, body)

    typename = arr.dtype.name
    # Quote the typename in the output if it is not a plain identifier
    # (e.g. "complex").
    if typename and not (typename[0].isalpha() and typename.isalnum()):
        typename = "'%s'" % typename
    lf = ''
    if issubclass(arr.dtype.type, flexible):
        if arr.dtype.names:
            typename = "%s" % str(arr.dtype)
        else:
            typename = "'%s'" % str(arr.dtype)
        # Structured/flexible dtypes go on their own, aligned line.
        lf = '\n' + ' ' * len("array(")
    return class_name + "(%s, %sdtype=%s)" % (body, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """Return a ``str``-style rendering of the data in an array.

    Unlike `array_repr`, only the data is shown — not the array class or
    its dtype.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Insert newlines if text is longer than this (default, indirectly, 75).
    precision : int, optional
        Floating point precision (defaults to the current print setting).
    suppress_small : bool, optional
        Represent numbers very close to zero as zero (default False).

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
    """Install a Python function used when pretty-printing arrays.

    Parameters
    ----------
    f : function or None
        Function taking a single array argument and returning its string
        representation.  None resets the default NumPy printer.
    repr : bool, optional
        True (default) installs the ``__repr__`` printer, False the
        ``__str__`` printer.

    See Also
    --------
    set_printoptions, get_printoptions

    Examples
    --------
    >>> np.set_string_function(lambda a: 'HA!')
    >>> np.arange(3)
    HA!
    >>> np.set_string_function(None)  # reset to the default
    """
    if f is None:
        # Reset the requested slot to the module's default printer.
        if repr:
            return multiarray.set_string_function(array_repr, 1)
        return multiarray.set_string_function(array_str, 0)
    return multiarray.set_string_function(f, repr)
# Install the default printers into the C-level module.
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
# True on little-endian machines; part of the public API.
little_endian = (sys.byteorder == 'little')
def array_equal(a1, a2):
    """True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    b : bool
        True if the arrays are equal.

    See Also
    --------
    allclose: element-wise equality within a tolerance.
    array_equiv: shape-consistent element equality.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; a failed conversion means "not equal".
        return False
    if a1.shape != a2.shape:
        return False
    return bool((a1 == a2).all())
def asarray(a, dtype=None, order=None):
    """Convert the input to a base-class ndarray.

    Parameters
    ----------
    a : array_like
        Anything convertible to an array: (nested) lists and tuples, or
        existing ndarrays.
    dtype : data-type, optional
        Inferred from the input by default.
    order : {'C', 'F'}, optional
        Row-major ('C', default) or column-major ('F') memory layout.

    Returns
    -------
    out : ndarray
        Array interpretation of *a*.  No copy is performed when the input
        is already a matching ndarray; subclasses are converted to the
        base ndarray class (contrast with `asanyarray`).

    See Also
    --------
    asanyarray : Similar function which passes subclasses through.
    ascontiguousarray, asfortranarray, asarray_chkfinite

    Examples
    --------
    >>> a = np.array([1, 2])
    >>> np.asarray(a) is a
    True
    >>> a = np.matrix([[1, 2]])
    >>> np.asarray(a) is a
    False
    """
    return array(a, dtype, copy=False, order=order)
def outer(a, b):
    """Compute the outer product of two vectors.

    Given ``a = [a0, ..., aM]`` and ``b = [b0, ..., bN]`` the result is
    the (M, N) array with ``out[i, j] = a[i] * b[j]``.  Inputs are
    flattened if they are not already 1-dimensional.

    Parameters
    ----------
    a, b : array_like, shape (M,), (N,)
        First and second input vectors.

    Returns
    -------
    out : ndarray, shape (M, N)

    See also
    --------
    inner, einsum

    Examples
    --------
    >>> np.outer(np.array(['a', 'b'], dtype=object), [1, 2])
    array([[a, aa],
           [b, bb]], dtype=object)
    """
    col = asarray(a).ravel()
    row = asarray(b).ravel()
    # Broadcasting a column against a row yields the full product grid.
    return col[:, newaxis] * row[newaxis, :]
def vdot(a, b):
    """Return the dot product of two vectors.

    Unlike ``dot``, the complex conjugate of the first argument is used,
    and multidimensional inputs are flattened to 1-D first — so this
    should only be used for vectors.

    Parameters
    ----------
    a : array_like
        Conjugated before the computation if complex.
    b : array_like
        Second argument to the dot product.

    Returns
    -------
    output : ndarray
        Dot product of *a* and *b*; int, float or complex depending on
        the input types.

    See Also
    --------
    dot : Dot product without conjugation (matrix product for 2-D).

    Examples
    --------
    >>> np.vdot(np.array([1+2j, 3+4j]), np.array([5+6j, 7+8j]))
    (70-8j)
    """
    flat_a = asarray(a).ravel()
    flat_b = asarray(b).ravel()
    return dot(flat_a.conj(), flat_b)
def identity(n, dtype=None):
    """Return the n-by-n identity array.

    Parameters
    ----------
    n : int
        Number of rows (and columns) of the output.
    dtype : data-type, optional
        Data-type of the output; defaults to ``float``.

    Returns
    -------
    out : ndarray
        Square array with ones on the main diagonal, zeros elsewhere.

    Examples
    --------
    >>> np.identity(2)
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    # Local import: this module is a numpy shim and may be loaded before
    # the full numpy package.
    from numpy import eye
    return eye(n, dtype=dtype)
# IEEE special values and canonical bool singletons, re-exported under
# the historical alias names.
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
# Pull in the functional API last so extend_all sees the final __all__.
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| {
"repo_name": "bussiere/pypyjs",
"path": "website/demo/home/rfk/repos/pypy/lib_pypy/numpypy/core/numeric.py",
"copies": "2",
"size": "17709",
"license": "mit",
"hash": -6345664618002090000,
"line_mean": 27.6090468498,
"line_max": 83,
"alpha_frac": 0.5619741374,
"autogenerated": false,
"ratio": 3.5581675708257987,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020340014155720058,
"num_lines": 619
} |
# Public factory functions and data-structure interfaces of this package.
__all__ = [
'new_deque',
'Deque',
'new_queue',
'Queue',
'new_stack',
'Stack',
'new_heap',
'Heap',
'new_max_heap',
'new_min_heap',
'new_priority_queue',
'PriorityQueue',
'new_suffix_array',
'new_tree_node',
'TreeNode',
]
from typing import Any
from typing import Callable
from typing import List
from typing import TypeVar
from .deque import Deque
from .doubly_linked_list_deque import DoublyLinkedListDeque
from .heap import Heap
from .max_heap import MaxHeap
from .min_heap import MinHeap
from .priority_queue import PriorityQueue
from .queue import Queue
from .stack import Stack
from .suffix_array import SuffixArray
from .tree_node import TreeNode
T = TypeVar('T')
def new_deque(collection: List[Any] = ()) -> Deque:
    """Build a generic deque backed by a doubly linked list.

    :param collection: optional initial values
    :return: Deque interface
    """
    return DoublyLinkedListDeque(collection)
def new_queue(collection: List[T] = ()) -> Queue[T]:
    """Build a generic FIFO queue (Deque-backed underneath).

    :param collection: optional initial values
    :return: Queue interface
    """
    return Queue[T](collection)
def new_stack(collection: List[T] = ()) -> Stack[T]:
    """Build a generic LIFO stack (Deque-backed underneath).

    :param collection: optional initial values
    :return: Stack interface
    """
    return Stack[T](collection)
def new_heap(comparator_f2: Callable[[Any, Any], bool], xs: List[Any] = ()) -> Heap:
    """Build a generic Fibonacci heap ordered by *comparator_f2*.

    :param comparator_f2: morphism used to compare heap entries
    :param xs: optional isomorphic initial values
    :return: pointer to the Heap interface

    Example of a generic max heap:

    >>> max_heap = new_heap(lambda x, y: (x > y) - (x < y) == 1)
    >>> max_heap.push('Kelly', 1)
    >>> max_heap.push('Ryan', 7)
    >>> max_heap.next_key #=> 'Ryan'
    >>> max_heap.pop() #=> 7
    """
    return Heap(comparator_f2, xs)
def new_max_heap(xs: List[Any] = ()) -> Heap:
    """Build a MAX heap (Fibonacci heap engine).

    :param xs: optional collection of initial values
    :return: an interface to Heap
    """
    return MaxHeap(xs)
def new_min_heap(xs: List[Any] = ()) -> Heap:
    """Build a MIN heap (Fibonacci heap engine).

    Fixed docstring: the original said "MAX Heap" (copy-paste from
    new_max_heap) although this factory returns a MinHeap.

    :param xs: optional collection of initial values
    :return: an interface to Heap
    """
    return MinHeap(xs)
def new_priority_queue(queue_vector_f2: Callable[[Any, Any], bool]) -> PriorityQueue:
    """Build a priority queue (Fibonacci heap engine).

    >>> from py_algorithms.data_structures import new_priority_queue
    >>>
    >>> pq = new_priority_queue(lambda x, y: (x > y) - (x < y) == 1)
    >>> pq.push('Important', 10)
    >>> pq.push('Not So Important', -2)
    >>> pq.pop() #=> 'Important'

    :param queue_vector_f2: a functor defining queue order
    :return: a PriorityQueue interface
    """
    return PriorityQueue(queue_vector_f2)
def new_suffix_array(string: str) -> SuffixArray:
    """Build a suffix array for fast substring detection.

    >>> from py_algorithms.data_structures import new_suffix_array
    >>>
    >>> ds = new_suffix_array('python')
    >>> ds.is_sub_str('py')   #=> True
    >>> ds.is_sub_str('blah') #=> False

    :param string: the subject string
    :return: SuffixArray interface
    """
    return SuffixArray(string)
def new_tree_node(element, left=None, right=None):
    """Build a binary tree node holding *element* with optional children."""
    return TreeNode(element=element, left=left, right=right)
| {
"repo_name": "rlishtaba/py-algorithms",
"path": "py_algorithms/data_structures/__init__.py",
"copies": "1",
"size": "3556",
"license": "mit",
"hash": -9074130887530045000,
"line_mean": 23.8671328671,
"line_max": 85,
"alpha_frac": 0.6212035996,
"autogenerated": false,
"ratio": 3.320261437908497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9440647113981955,
"avg_score": 0.0001635847053083715,
"num_lines": 143
} |
__all__ = []
import asyncio
from .log import logger
from .events import send_event, eventhandler
from .common import mpstr
from .exceptions import XMLRPCError
# Page size used when paging through GetPlayerList results.
_MAXREADLIST = 300
class Player():
    """A connected player, with thin async wrappers around server RPCs."""

    def __init__(self, server, login, is_spectator, nickname):
        self.server = server
        self.login = login
        self.is_spectator = is_spectator
        self.nickname = nickname

    async def forcespec(self, mode=3):
        """Force this player into spectator state (mode 3 by default)."""
        await self.server.ForceSpectator(self.login, mode)

    async def kick(self):
        await self.server.Kick(self.login, '')

    async def mute(self):
        await self.server.Ignore(self.login)

    async def unmute(self):
        await self.server.UnIgnore(self.login)

    async def is_admin(self):
        # TODO: admin lookup not implemented yet.
        pass
# @asyncio.coroutine
# def chat_command_forcespec(login, logintoforce, mode = 3):
#
# p = _player_list[logintoforce]
# chat_send(p.nickname + ' forced to Spectator!')
# await p.forcespec(mode)
#
#
# @asyncio.coroutine
# def chat_command_kick(login, logintokick):
#
# p = _player_list[logintokick]
# chat_send(p.nickname + ' kicked!')
# await p.kick()
#
#
# @asyncio.coroutine
# def chat_command_mute(login, logintomute):
#
# p = _player_list[logintomute]
# chat_send(p.nickname + ' muted!')
# await p.mute()
#
#
# @asyncio.coroutine
# def chat_command_unmute(login, logintounmute):
#
# p = _player_list[logintounmute]
# chat_send(p.nickname + ' unmuted!')
# await p.unmute()
@eventhandler('pie.connection_made')
async def _init(server):
    """On connect: seed ``server.players`` from the current player list.

    Fixed: the original bound the fetched list to a local named ``list``,
    shadowing the builtin; also reuses clearer loop-variable names.
    """
    infos = await _get_player_list(server)
    for info in infos:
        # Skip the server's own connection entry.
        if info['Login'] == server.config.login:
            continue
        player = Player(server, info['Login'], False, mpstr(info['NickName']))
        server.players[player.login] = player
        send_event(server, 'pie.PlayerConnect', player)
async def _get_player_list(server):
    """Fetch the full player list by paging GetPlayerList until it errors.

    Fixed: the original accumulated into a local named ``list``, shadowing
    the builtin; also drops the redundant ``pass`` after the log call.
    """
    players = []
    index = 0
    try:
        while True:
            page = await server.rpc.GetPlayerList(_MAXREADLIST, index)
            index = index + _MAXREADLIST
            players.extend(page)
    except XMLRPCError as e:
        # The RPC raises once the offset runs past the end of the list.
        logger.debug('catched exception: ' + str(e))
    return players
@eventhandler('ManiaPlanet.PlayerConnect')
async def _on_player_connect(server, callback):
    """Register a newly connected player and broadcast the event."""
    info = await server.rpc.GetPlayerInfo(callback.login)
    player = Player(server, callback.login, callback.isSpectator, mpstr(info['NickName']))
    send_event(server, 'pie.PlayerConnect', player)
    server.players[callback.login] = player
@eventhandler('ManiaPlanet.PlayerDisconnect')
async def _on_player_disconnect(server, callback):
    """Announce a disconnect and drop the player from the roster."""
    if callback.login == server.config.login:
        # Ignore the server's own (pseudo-)connection.
        return
    player = server.players[callback.login]
    send_event(server, 'pie.PlayerDisconnect', (player, callback.DisconnectionReason))
    server.chat_send('Disconnect: ' + player.nickname)
    del server.players[callback.login]
"repo_name": "juergenz/pie",
"path": "src/pie/players.py",
"copies": "1",
"size": "2878",
"license": "mit",
"hash": -724746819172537500,
"line_mean": 23.1932773109,
"line_max": 82,
"alpha_frac": 0.6445448228,
"autogenerated": false,
"ratio": 3.277904328018223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4422449150818223,
"avg_score": null,
"num_lines": null
} |
__all__ = []
import concurrent.futures._base
import reprlib
from . import events
# Re-export the concurrent.futures exception classes so asyncio code
# and executor-based code can raise/catch the same hierarchy.
Error = concurrent.futures._base.Error
CancelledError = concurrent.futures.CancelledError
TimeoutError = concurrent.futures.TimeoutError


class InvalidStateError(Error):
    """The operation is not allowed in this state."""


# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.

    See comment in Future for more details.
    """
    # The marker must exist on the class, but its value is read from
    # the instance; None explicitly opts out of duck-typing.
    cls = obj.__class__
    if not hasattr(cls, '_asyncio_future_blocking'):
        return False
    return obj._asyncio_future_blocking is not None
def _format_callbacks(cb):
"""helper function for Future.__repr__"""
size = len(cb)
if not size:
cb = ''
def format_cb(callback):
return events._format_callback_source(callback, ())
if size == 1:
cb = format_cb(cb[0])
elif size == 2:
cb = '{}, {}'.format(format_cb(cb[0]), format_cb(cb[1]))
elif size > 2:
cb = '{}, <{} more>, {}'.format(format_cb(cb[0]),
size - 2,
format_cb(cb[-1]))
return 'cb=[%s]' % cb
def _future_repr_info(future):
    # (Future) -> str
    """helper function for Future.__repr__"""
    # The repr always starts with the lower-cased state name.
    info = [future._state.lower()]
    if future._state == _FINISHED:
        if future._exception is not None:
            info.append('exception={!r}'.format(future._exception))
        else:
            # use reprlib to limit the length of the output, especially
            # for very long strings
            result = reprlib.repr(future._result)
            info.append('result={}'.format(result))
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        # frame is presumably (filename, lineno, ...) as produced by
        # traceback.extract_stack — confirm.
        frame = future._source_traceback[-1]
        info.append('created at %s:%s' % (frame[0], frame[1]))
    return info
| {
"repo_name": "mindbender-studio/setup",
"path": "bin/windows/python36/Lib/asyncio/base_futures.py",
"copies": "6",
"size": "2074",
"license": "mit",
"hash": 1523392539046361300,
"line_mean": 28.2112676056,
"line_max": 71,
"alpha_frac": 0.5964320154,
"autogenerated": false,
"ratio": 3.928030303030303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7524462318430304,
"avg_score": null,
"num_lines": null
} |
__all__ = []
import pygame, sys, os
from pygame.locals import *
import cafeinagame
from cafeinagame import *
from .cafeinarobot import CafeinaRobot
cr = None
# ENGLISH DEFINITIONS
def create(world, robot_direction, robot_velocity, out_X=0, out_Y=0):
    """Create the module-level robot instance used by all other helpers."""
    global cr
    # NOTE(review): the import above is `from .cafeinarobot import
    # CafeinaRobot`, so the `cafeinarobot.` prefix here presumably
    # resolves through `from cafeinagame import *` — confirm.
    cr = cafeinarobot.CafeinaRobot(world, robot_direction, robot_velocity, out_X, out_Y)
def turn_on():
    """Power on the shared robot instance."""
    cr.turn_on()

def update():
    """Advance the robot one simulation step and redraw it."""
    cr.update()
    cr.draw()

def forward():
    """Move the robot forward one step."""
    cr.forward()

def left():
    """Turn the robot to the left."""
    cr.turn_left()

def right():
    """Turn the robot to the right."""
    cr.turn_right()

def turn_off():
    """Shut the robot down."""
    cr.shutdown()

def sense():
    """Return the robot's sensor reading (the Spanish alias hay_muro
    suggests wall detection — confirm)."""
    return cr.sense()

def has_finished():
    """Report whether the robot run has finished."""
    return cr.has_finished()
# SPANISH DEFINITIONS
def crear(world, robot_direction, robot_velocity, out_X=0, out_Y=0):
    """Spanish alias of create(): build the shared robot instance.

    Bug fix: the original assigned a *local* ``cr``, so the module-level
    robot was never replaced and the other Spanish wrappers kept
    operating on the previous (or None) instance. Declare ``global cr``
    exactly as create() does.
    """
    global cr
    cr = cafeinarobot.CafeinaRobot(world, robot_direction, robot_velocity, out_X, out_Y)
def encender():
    """Spanish alias of turn_on()."""
    cr.turn_on()

def actualizar():
    """Spanish alias of update(): advance one step and redraw."""
    cr.update()
    cr.draw()

def avanzar():
    """Spanish alias of forward()."""
    cr.forward()

def izquierda():
    """Spanish alias of left()."""
    cr.turn_left()

def derecha():
    """Spanish alias of right()."""
    cr.turn_right()

def apagar():
    """Spanish alias of turn_off()."""
    cr.shutdown()

def hay_muro():
    """Spanish alias of sense()."""
    return cr.sense()

def ha_terminado():
    """Spanish alias of has_finished()."""
    return cr.has_finished()
| {
"repo_name": "dacanizares/CafeinaRobot",
"path": "robot/__init__.py",
"copies": "1",
"size": "1156",
"license": "mit",
"hash": -392269223487442300,
"line_mean": 15.0555555556,
"line_max": 88,
"alpha_frac": 0.6479238754,
"autogenerated": false,
"ratio": 2.7990314769975786,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8784225420414145,
"avg_score": 0.03254598639668673,
"num_lines": 72
} |
__all__ = ()
import reprlib
from . import format_helpers
# States for Future.
_PENDING = 'PENDING'
_CANCELLED = 'CANCELLED'
_FINISHED = 'FINISHED'
def isfuture(obj):
    """Check for a Future.

    This returns True when obj is a Future instance or is advertising
    itself as duck-type compatible by setting _asyncio_future_blocking.

    See comment in Future for more details.
    """
    # Require the marker on the class, then read the instance value;
    # a value of None opts out of duck-typing.
    if not hasattr(obj.__class__, '_asyncio_future_blocking'):
        return False
    return obj._asyncio_future_blocking is not None
def _format_callbacks(cb):
"""helper function for Future.__repr__"""
size = len(cb)
if not size:
cb = ''
def format_cb(callback):
return format_helpers._format_callback_source(callback, ())
if size == 1:
cb = format_cb(cb[0][0])
elif size == 2:
cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
elif size > 2:
cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
size - 2,
format_cb(cb[-1][0]))
return f'cb=[{cb}]'
def _future_repr_info(future):
    # (Future) -> str
    """helper function for Future.__repr__"""
    # The repr always starts with the lower-cased state name.
    info = [future._state.lower()]
    if future._state == _FINISHED:
        if future._exception is not None:
            info.append(f'exception={future._exception!r}')
        else:
            # use reprlib to limit the length of the output, especially
            # for very long strings
            result = reprlib.repr(future._result)
            info.append(f'result={result}')
    if future._callbacks:
        info.append(_format_callbacks(future._callbacks))
    if future._source_traceback:
        # frame is presumably (filename, lineno, ...) as produced by
        # traceback.extract_stack — confirm.
        frame = future._source_traceback[-1]
        info.append(f'created at {frame[0]}:{frame[1]}')
    return info
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/base_futures.py",
"copies": "10",
"size": "1822",
"license": "apache-2.0",
"hash": -361225579306819600,
"line_mean": 28.868852459,
"line_max": 71,
"alpha_frac": 0.5702524698,
"autogenerated": false,
"ratio": 3.7644628099173554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9334715279717355,
"avg_score": null,
"num_lines": null
} |
__all__ = []
import sys
def _load_models():
    """Load every model advertised under the 'pymt.plugins' entry-point group.

    Each plugin is wrapped with bmi_factory; plugins that fail to import
    are reported via error() but do not abort loading the rest.

    :return: a namedtuple whose fields are the plugin names and whose
        values are the wrapped model classes
    """
    from collections import OrderedDict, namedtuple

    import pkg_resources

    from scripting import error, status

    from .framework.bmi_bridge import bmi_factory

    models = OrderedDict()
    failed = []
    for entry_point in pkg_resources.iter_entry_points(group="pymt.plugins"):
        try:
            model = entry_point.load()
        except Exception:
            # Record the failure, keep loading the remaining plugins.
            failed.append(entry_point.name)
        else:
            model = bmi_factory(model)
            models[entry_point.name] = model
    if len(models) > 0:
        status("models: {0}".format(", ".join(models.keys())))
    else:
        status("models: (none)")
    if failed:
        error("failed to load the following models: {0}".format(", ".join(failed)))
    Models = namedtuple("Models", models.keys())
    return Models(*models.values())
# Populate this module's namespace with the discovered model classes and
# advertise them through __all__.
models_loaded = False
# NOTE(review): this guard is always True at import time; it looks
# vestigial — confirm before removing.
if not models_loaded:
    for model in _load_models():
        __all__.append(model.__name__)
        setattr(sys.modules[__name__], model.__name__, model)
    models_loaded = True
try:
    # `model` is undefined when no plugins were discovered.
    del model
except NameError:
    pass
# Keep the module namespace clean of loader machinery.
del sys, _load_models
| {
"repo_name": "csdms/coupling",
"path": "pymt/models.py",
"copies": "1",
"size": "1150",
"license": "mit",
"hash": -7370293149420776000,
"line_mean": 22.4693877551,
"line_max": 83,
"alpha_frac": 0.6052173913,
"autogenerated": false,
"ratio": 3.9383561643835616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024295432458697764,
"num_lines": 49
} |
__all__ = [
'NinjaRule',
'write_header_to',
]
import iga.context
import iga.precond
from iga.registry import RegistryMixin
RULE_VARS_1_0 = frozenset((
'command',
'depfile',
'description',
'generator',
'in',
'in_newline',
'out',
'restat',
'rspfile',
'rspfile_content',
))
RULE_VARS_1_3 = frozenset((
'deps',
))
RULE_VARS_1_5 = frozenset((
'msvc_deps_prefix',
))
RULE_VARS = RULE_VARS_1_0 | RULE_VARS_1_3 | RULE_VARS_1_5
RESERVED_RULE_NAMES = frozenset((
'phony',
))
INDENT = ' '
# Header emitted at the top of every generated build file.
# Fix: typo in the emitted text ("fils" -> "file").
NINJA_HEADER = '''\
# This file is generated by iga.
ninja_required_version = 1.5
'''
NINJA_RULE = '''\
rule {rule.name}
command = {rule.command}
'''
def write_header_to(ninja_file, *, _cxt=None):
    """Write the generated-file header plus top-level variables.

    :param ninja_file: writable text file receiving ninja syntax
    :param _cxt: context override (presumably for tests); defaults to
        the current iga.context — confirm.
    """
    variables = (_cxt or iga.context.current()).get('variables')
    ninja_file.write(NINJA_HEADER)
    if variables:
        _write_vars(ninja_file, variables, indent=0)
    ninja_file.write('\n')
class NinjaRule(RegistryMixin):
    """A ninja ``rule`` declaration: name, command, and extra variables."""

    @staticmethod
    def make(name, command, **kwargs):
        """Validated constructor: rejects reserved rule names and any
        keyword that is not a known rule variable (RULE_VARS)."""
        iga.precond.check(
            name not in RESERVED_RULE_NAMES,
            'cannot use %r as rule name', name,
        )
        variables = dict(kwargs)
        forbiddens = frozenset(variables) - RULE_VARS
        iga.precond.check(
            not forbiddens, 'cannot use %r in a rule', forbiddens
        )
        return NinjaRule(name=name, command=command, variables=variables)

    def __init__(self, name, command, variables):
        self.name = name  # rule name, as written after `rule `
        self.command = command  # command-line template
        self.variables = variables  # extra rule-scope variables

    def write_to(self, ninja_file):
        """Emit ``rule <name>``, its command, then indented variables."""
        ninja_file.write(NINJA_RULE.format(rule=self))
        _write_vars(ninja_file, self.variables, indent=1)
        ninja_file.write('\n')
class NinjaBuildstmt:
    """A ninja ``build`` statement: outputs, rule, and dependency lists."""

    @staticmethod
    def make(**kwargs):
        """Constructor that defaults the optional dependency lists."""
        kwargs.setdefault('explicit_deps', ())
        kwargs.setdefault('implicit_deps', ())
        kwargs.setdefault('orderonly_deps', ())
        kwargs.setdefault('variables', {})
        return NinjaBuildstmt(**kwargs)

    def __init__(self,
                 ninja_rule,
                 outputs,
                 explicit_deps,
                 implicit_deps,
                 orderonly_deps,
                 variables):
        # NOTE(review): write_to formats ninja_rule with %s — it
        # presumably expects the rule *name* (or an object whose str()
        # is the rule name), not a NinjaRule instance; confirm.
        self.ninja_rule = ninja_rule
        self.outputs = outputs
        self.explicit_deps = explicit_deps
        self.implicit_deps = implicit_deps
        self.orderonly_deps = orderonly_deps
        self.variables = variables

    def write_to(self, ninja_file):
        """Emit ``build outs: rule deps | implicit || order-only``."""
        ninja_file.write(
            'build %s: %s' % (_paths(self.outputs), self.ninja_rule)
        )
        # Each dependency class uses the ninja separator syntax.
        for sep, deps in ((' ', self.explicit_deps),
                          (' | ', self.implicit_deps),
                          (' || ', self.orderonly_deps)):
            if deps:
                ninja_file.write(sep)
                ninja_file.write(_paths(deps))
        ninja_file.write('\n')
        _write_vars(ninja_file, self.variables, indent=1)
        ninja_file.write('\n')
def _paths(labels):
return ' '.join(str(label.path) for label in labels)
def _write_vars(ninja_file, variables, indent):
    """Write ``key = value`` lines, sorted by key, at the given indent level."""
    # Convert the numeric indent level into a literal prefix string.
    indent = INDENT * indent
    for key in sorted(variables):
        value = variables[key]
        ninja_file.write('%s%s = %s\n' % (indent, key, value))
| {
"repo_name": "clchiou/iga",
"path": "iga/ninja.py",
"copies": "1",
"size": "3320",
"license": "mit",
"hash": 1482156187257140000,
"line_mean": 22.7142857143,
"line_max": 73,
"alpha_frac": 0.5686746988,
"autogenerated": false,
"ratio": 3.543223052294557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9611897751094557,
"avg_score": 0,
"num_lines": 140
} |
__all__ = []
class _MetaAnnotations:
    """Mixin managing a list of annotations.

    NOTE(review): ``annotations`` is a *class-level* mutable list, so it
    is shared by all instances until clear_annotations() rebinds it on
    an instance — confirm that sharing is intended.
    """
    annotations = list()

    def annotation_iterator(self):
        # TBD — Python lists have no .iterator(); presumably should be
        # iter(self.annotations).
        return self.annotations.iterator()  # TBD

    def clear_annotations(self):
        # Rebinds an instance attribute, shadowing the class-level list.
        self.annotations = list()

    def add_annotation(self, annotation):
        self.annotations.append(annotation)

    def remove_annotation(self, annotation):  # Can be int or obj
        if type(annotation) is int:
            # NOTE(review): removes the first element *equal* to the one
            # at this index, which only matches the index itself when
            # the list holds no duplicates.
            self.annotations.remove(self.annotations[annotation])
        else:
            self.annotations.remove(annotation)  # TBC
class _MetaInfons:
    """Mixin managing a key/value ``infons`` metadata dict.

    NOTE(review): ``infons`` is a class-level mutable dict shared by all
    instances until clear_infons() rebinds it — confirm intended.
    """
    infons = dict()

    def put_infon(self, key, val):
        self.infons[key] = val

    def remove_infon(self, key):
        del(self.infons[key])

    def clear_infons(self):
        # Rebinds an instance attribute, shadowing the class-level dict.
        self.infons = dict()
class _MetaOffset:
    """Mixin carrying an ``offset`` attribute."""
    # Default is the *string* '-1' — presumably marks "unset"; confirm.
    offset = '-1'
class _MetaRelations:
    """Mixin managing a list of relations.

    NOTE(review): ``relations`` is a class-level mutable list shared by
    all instances until clear_relations() rebinds it — confirm intended.
    """
    relations = list()

    def relation_iterator(self):
        # TBD — Python lists have no .iterator(); presumably should be
        # iter(self.relations).
        return self.relations.iterator()  # TBD

    def clear_relations(self):
        # Rebinds an instance attribute, shadowing the class-level list.
        self.relations = list()

    def add_relation(self, relation):
        self.relations.append(relation)

    def remove_relation(self, relation):  # Can be int or obj
        if type(relation) is int:
            # NOTE(review): removes the first element *equal* to the one
            # at this index — only index-accurate without duplicates.
            self.relations.remove(self.relations[relation])
        else:
            self.relations.remove(relation)  # TBC
class _MetaText:
    """Mixin carrying a ``text`` attribute (default empty string)."""
    text = ''

class _MetaId:
    """Mixin carrying an ``id`` attribute (default empty string)."""
    id = ''
| {
"repo_name": "SuLab/PyBioC",
"path": "src/bioc/meta/_bioc_meta.py",
"copies": "1",
"size": "1368",
"license": "bsd-2-clause",
"hash": -3610146353767016400,
"line_mean": 20.375,
"line_max": 65,
"alpha_frac": 0.615497076,
"autogenerated": false,
"ratio": 3.8,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4915497076,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'NN_MSG',
# Extend in _load().
]
from collections import defaultdict, namedtuple
from ctypes import byref, sizeof
import enum
from . import _nanomsg as _nn
NN_MSG = -1
NanomsgVersion = namedtuple('NanomsgVersion', 'current revision age')
#
# Instead of using a plain int object as enum members, we use this
# wrapper class because Enum treats members with the same value as
# alias (and symbol values may be the same).
#
class Symbol(int):
    """An int subclass that also remembers its nanomsg symbol name.

    Equality and hashing are based on the *name* only, so two symbols
    with equal numeric values but different names remain distinct —
    this is what keeps Enum from collapsing them into aliases.
    """

    def __new__(cls, name, value):
        obj = super().__new__(cls, value)
        obj.name = name
        return obj

    def __str__(self):
        return '<%s: %d>' % (self.name, self)

    __repr__ = __str__

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        # Anything that is not a Symbol (including plain ints) compares
        # unequal, regardless of numeric value.
        if isinstance(other, Symbol):
            return self.name == other.name
        return False

    def __ne__(self, other):
        return not self.__eq__(other)
def _load(global_vars, exposed_names):
    """Populate the module namespace from nanomsg's runtime symbol table.

    :param global_vars: the module's globals() dict to populate
    :param exposed_names: the module's __all__ list to extend
    """
    symbols = _load_symbols()
    # Create NS_VERSION.
    syms = dict(symbols['NN_NS_VERSION'])
    global_vars['NS_VERSION'] = NanomsgVersion(
        current=syms['NN_VERSION_CURRENT'],
        revision=syms['NN_VERSION_REVISION'],
        age=syms['NN_VERSION_AGE'],
    )
    exposed_names.append('NS_VERSION')
    # Create NN_NS_FLAG as plain int variables.
    for name, sym in symbols['NN_NS_FLAG']:
        global_vars[name] = sym.value
        exposed_names.append(name)
    # Create NN_NS_LIMIT as plain int variables.
    for name, sym in symbols['NN_NS_LIMIT']:
        global_vars[name] = sym.value
        exposed_names.append(name)
    # Create enum for the rest of namespaces.
    # Use IntEnum when possible.
    enum_decls = [
        # enum name namespace enum type export?
        ('Domain', 'NN_NS_DOMAIN', enum.IntEnum, True),
        ('Transport', 'NN_NS_TRANSPORT', enum.IntEnum, True),
        ('Protocol', 'NN_NS_PROTOCOL', enum.IntEnum, True),
        ('OptionLevel', 'NN_NS_OPTION_LEVEL', enum.IntEnum, True),
        ('SocketOption', 'NN_NS_SOCKET_OPTION', enum.Enum, True),
        ('TransportOption', 'NN_NS_TRANSPORT_OPTION', enum.Enum, True),
        ('OptionType', 'NN_NS_OPTION_TYPE', enum.IntEnum, True),
        ('OptionUnit', 'NN_NS_OPTION_UNIT', enum.IntEnum, True),
        # Don't export error because we will create exception classes
        # for them.
        ('Error', 'NN_NS_ERROR', enum.IntEnum, False),
        ('Event', 'NN_NS_EVENT', enum.IntEnum, True),
        ('Statistic', 'NN_NS_STATISTIC', enum.IntEnum, True),
    ]
    for enum_name, namespace, enum_type, export_members in enum_decls:
        syms = symbols[namespace]
        if enum_type is enum.Enum:
            # Symbol wrappers keep same-valued members distinct (see
            # the Symbol class comment above).
            enum_class = enum.Enum(
                enum_name,
                [(name, Symbol(name, sym.value)) for name, sym in syms],
                module=__name__,
            )
        else:
            assert enum_type is enum.IntEnum
            enum_class = enum.IntEnum(
                enum_name,
                [(name, sym.value) for name, sym in syms],
                module=__name__,
            )
        # Check if members are unique (no alias).
        enum_class = enum.unique(enum_class)
        global_vars[enum_name] = enum_class
        exposed_names.append(enum_name)
        if export_members:
            global_vars.update(enum_class.__members__)
            exposed_names.extend(enum_class.__members__)
    # Sanity check...
    if len(set(exposed_names)) != len(exposed_names):
        raise AssertionError('names conflict: %r' % exposed_names)
    # Attach option type and unit to the options.
    OptionType = global_vars['OptionType']
    OptionUnit = global_vars['OptionUnit']
    SocketOption = global_vars['SocketOption']
    for name, sym in symbols['NN_NS_SOCKET_OPTION']:
        option = SocketOption[name].value
        option.type = OptionType(sym.type)
        option.unit = OptionUnit(sym.unit)
    TransportOption = global_vars['TransportOption']
    for name, sym in symbols['NN_NS_TRANSPORT_OPTION']:
        option = TransportOption[name].value
        option.type = OptionType(sym.type)
        option.unit = OptionUnit(sym.unit)
def _load_symbols():
    """Group nanomsg's runtime symbols by their namespace.

    Symbols with ``ns == 0`` are the namespace markers themselves
    (their names start with NN_NS_); their values index the ``ns``
    field of the remaining symbols.

    :return: dict mapping namespace name -> list of (symbol_name, sym)
    """
    namespace_names = {}
    namespace_symbols = defaultdict(list)
    for sym in _iter_symbols():
        if sym.ns == 0:
            name = sym.name.decode('ascii')
            if not name.startswith('NN_NS_'):
                raise AssertionError(name)
            namespace_names[sym.value] = name
        else:
            namespace_symbols[sym.ns].append(sym)
    symbols = {}
    for index, name in namespace_names.items():
        syms = namespace_symbols[index]
        symbols[name] = [(sym.name.decode('ascii'), sym) for sym in syms]
    return symbols
def _iter_symbols():
    """Yield nn_symbol_properties structs from nn_symbol_info until the
    C library reports the end of the table (returned size == 0)."""
    i = 0
    while True:
        sym = _nn.nn_symbol_properties()
        size = _nn.nn_symbol_info(i, byref(sym), sizeof(sym))
        if size == 0:
            # End of the symbol table.
            break
        if size != sizeof(sym):
            # Struct/ABI mismatch between our definition and the library.
            raise AssertionError('expect %d instead %d' % (sizeof(sym), size))
        yield sym
        i += 1
def _find_value_by_name(symbols, target):
for name, symbol in symbols:
if name == target:
return symbol.value
raise ValueError('%s not in %r' % (target, symbols))
_load(globals(), __all__)
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/constants.py",
"copies": "1",
"size": "5562",
"license": "mit",
"hash": 3039016849730825000,
"line_mean": 29.5604395604,
"line_max": 79,
"alpha_frac": 0.5649047105,
"autogenerated": false,
"ratio": 3.822680412371134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4887585122871134,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'nn_symbol_properties',
'nn_iovec',
'nn_msghdr',
'nn_pollfd',
# Extend in _load()
]
import ctypes
from ctypes import POINTER, c_char_p, c_int, c_short, c_size_t, c_void_p
_LIBNANOMSG = ctypes.cdll.LoadLibrary('libnanomsg.so')
# NOTE: Definitions below are targeting nanomsg 1.0.0.
class nn_symbol_properties(ctypes.Structure):
    """Mirror of the C ``struct nn_symbol_properties`` (nanomsg 1.0.0 ABI)."""
    _fields_ = [
        ('value', c_int),    # numeric value of the symbol
        ('name', c_char_p),  # symbol name, e.g. b'NN_SUB'
        ('ns', c_int),       # namespace index (0 marks a namespace symbol)
        ('type', c_int),     # option type code
        ('unit', c_int),     # option unit code
    ]
class nn_iovec(ctypes.Structure):
    """Mirror of the C ``struct nn_iovec`` (scatter/gather buffer element)."""
    _fields_ = [
        ('iov_base', c_void_p),  # pointer to the buffer
        ('iov_len', c_size_t),   # buffer length in bytes
    ]

class nn_msghdr(ctypes.Structure):
    """Mirror of the C ``struct nn_msghdr`` for nn_sendmsg/nn_recvmsg."""
    _fields_ = [
        ('msg_iov', POINTER(nn_iovec)),  # array of nn_iovec entries
        ('msg_iovlen', c_int),           # number of entries in msg_iov
        ('msg_control', c_void_p),       # ancillary (control) data buffer
        ('msg_controllen', c_size_t),    # length of the control buffer
    ]
class nn_pollfd(ctypes.Structure):
    """Mirror of the C ``struct nn_pollfd`` used by nn_poll().

    Bug fix: the original declared ``_fields`` (missing the trailing
    underscore), which ctypes silently ignores — the structure had zero
    fields and zero size, so nn_poll would have been handed an
    undersized buffer and the attributes would not exist.
    """
    _fields_ = [
        ('fd', c_int),       # socket to poll
        ('events', c_short),  # requested events
        ('revents', c_short), # returned events
    ]
def _load(libnanomsg, global_vars, exposed_names):
    """Declare argtypes/restype for every nanomsg C function and export it.

    :param libnanomsg: the loaded ctypes CDLL handle
    :param global_vars: the module's globals() dict to populate
    :param exposed_names: the module's __all__ list to extend
    """
    #
    # NOTE: Use c_void_p instead of c_char_p so that Python does not
    # convert variables to/from bytes automatically. While this might
    # be inconvenient, it is probably the correct behavior (especially
    # for nn_allocmsg allocated space).
    #
    # Table of (function name, argtypes, restype).
    decls = [
        # Errors.
        ('nn_errno', [], c_int),
        ('nn_strerror', [c_int], c_char_p),
        # Symbols.
        ('nn_symbol', [c_int, POINTER(c_int)], c_char_p),
        ('nn_symbol_info',
         [c_int, POINTER(nn_symbol_properties), c_int], c_int),
        # Helper function for shutting down multi-threaded applications.
        ('nn_term', [], None),
        # Zero-copy support.
        ('nn_allocmsg', [c_size_t, c_int], c_void_p),
        ('nn_reallocmsg', [c_void_p, c_size_t], c_void_p),
        ('nn_freemsg', [c_void_p], c_int),
        # Socket definition.
        ('nn_socket', [c_int, c_int], c_int),
        ('nn_close', [c_int], c_int),
        ('nn_setsockopt',
         [c_int, c_int, c_int, c_void_p, c_size_t], c_int),
        ('nn_getsockopt',
         [c_int, c_int, c_int, c_void_p, POINTER(c_size_t)], c_int),
        ('nn_bind', [c_int, c_char_p], c_int),
        ('nn_connect', [c_int, c_char_p], c_int),
        ('nn_shutdown', [c_int, c_int], c_int),
        ('nn_send', [c_int, c_void_p, c_size_t, c_int], c_int),
        ('nn_recv', [c_int, c_void_p, c_size_t, c_int], c_int),
        ('nn_sendmsg', [c_int, POINTER(nn_msghdr), c_int], c_int),
        ('nn_recvmsg', [c_int, POINTER(nn_msghdr), c_int], c_int),
        # Socket mutliplexing support.
        ('nn_poll', [POINTER(nn_pollfd), c_int, c_int], c_int),
        # Built-in support for devices.
        ('nn_device', [c_int, c_int], c_int),
    ]
    for name, argtypes, restype in decls:
        func = getattr(libnanomsg, name)
        func.argtypes = argtypes
        func.restype = restype
        global_vars[name] = func
    exposed_names.extend(name for name, _, _ in decls)
    # Guard against accidental duplicate declarations.
    if len(set(exposed_names)) != len(exposed_names):
        raise AssertionError('names conflict: %r' % exposed_names)
_load(_LIBNANOMSG, globals(), __all__)
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/_nanomsg.py",
"copies": "1",
"size": "3161",
"license": "mit",
"hash": 1343539544182363400,
"line_mean": 28.820754717,
"line_max": 72,
"alpha_frac": 0.538753559,
"autogenerated": false,
"ratio": 2.88675799086758,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39255115498675797,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'Node',
'BasePlace',
'BaseTransition',
'Arc',
'InputArc',
'OutputArc',
)
import abc
import collections
import itertools
from .annotation import InputAnnotation, OutputAnnotation
from .errors import NodeError, ArcError
from .net_element import NetElement, NamedNetElement
from .sequence_view import SequenceView
class Node(NamedNetElement, metaclass=abc.ABCMeta):
    """Abstract Petri-net node (place or transition) holding its arcs.

    Bug fix: ``add_output_arc``/``add_input_arc`` raised the undefined
    names ``OutputArcError``/``InputArcError`` (a NameError at runtime);
    they now raise the imported ``ArcError``.
    """

    def __init__(self, name, *, net=None):
        super().__init__(name=name, net=net)
        self._inputs = []   # arcs whose target is this node
        self._outputs = []  # arcs whose source is this node

    def bind(self, net):
        """Bind this node and every attached arc to *net*."""
        super().bind(net)
        for arc in self._inputs:
            arc.bind(net)
        for arc in self._outputs:
            arc.bind(net)

    @abc.abstractmethod
    def update(self, **kwargs):
        raise NotImplementedError()

    def _add_input_arc(self, arc):
        # Internal: bind + store, without validating direction.
        arc.bind(self._net)
        self._inputs.append(arc)

    def _add_output_arc(self, arc):
        # Internal: bind + store, without validating direction.
        arc.bind(self._net)
        self._outputs.append(arc)

    def add_output_arc(self, arc):
        """Register *arc* as outgoing; its source must be this node."""
        if arc.source is not self:
            # Original referenced the undefined name OutputArcError.
            raise ArcError("{!r}: cannot add {!r}: invalid source".format(self, arc))
        self._add_output_arc(arc)

    def add_input_arc(self, arc):
        """Register *arc* as incoming; its target must be this node."""
        if arc.target is not self:
            # Original referenced the undefined name InputArcError.
            raise ArcError("{!r}: cannot add {!r}: invalid target".format(self, arc))
        self._add_input_arc(arc)

    @abc.abstractmethod
    def make_connected_node(self, node):
        """Resolve *node* (name or object) to a node of the opposite kind."""
        raise NotImplementedError()

    def add_input(self, source, annotation):
        """Create an arc from *source* into this node and register it on
        both endpoints."""
        arc = self.create_input_arc(source, annotation)
        source = arc.source
        self._add_input_arc(arc)
        source.add_output_arc(arc)
        return arc

    def add_output(self, target, annotation):
        """Create an arc from this node to *target* and register it on
        both endpoints."""
        arc = self.create_output_arc(target, annotation)
        target = arc.target
        self._add_output_arc(arc)
        target.add_input_arc(arc)
        return arc

    def create_input_arc(self, source, annotation):
        # The arc class depends on which side is the place: a place
        # feeding a transition is an InputArc, otherwise an OutputArc.
        source = self.make_connected_node(source)
        if isinstance(source, BasePlace):
            arc_class = InputArc
        else:
            arc_class = OutputArc
        return arc_class(source=source, target=self, annotation=annotation)

    def create_output_arc(self, target, annotation):
        # Mirror of create_input_arc for the outgoing direction.
        target = self.make_connected_node(target)
        if isinstance(target, BasePlace):
            arc_class = OutputArc
        else:
            arc_class = InputArc
        return arc_class(source=self, target=target, annotation=annotation)

    def input_arcs(self):
        """Iterate over the incoming arcs."""
        yield from self._inputs

    def output_arcs(self):
        """Iterate over the outgoing arcs."""
        yield from self._outputs

    def inputs(self):
        """Sequence view over the source nodes of the incoming arcs."""
        return SequenceView((arc.source for arc in self._inputs), length=len(self._inputs))

    def outputs(self):
        """Sequence view over the target nodes of the outgoing arcs."""
        return SequenceView((arc.target for arc in self._outputs), length=len(self._outputs))

    def notify_input_changed(self):
        """Hook invoked when an input changes; default is a no-op."""
        pass

    def label(self):
        """Display label; for plain nodes this is just the name."""
        return self.name
class BasePlace(Node):
    """A place node; it may only connect to transitions."""
    def make_connected_node(self, node):
        # Strings are resolved as transition names in the bound net.
        if isinstance(node, str):
            return self._net.transition(node)
        else:
            if not isinstance(node, BaseTransition):
                raise NodeError("{!r}: cannot connect to {!r}".format(self, node))
            return node
class BaseTransition(Node):
    """A transition node; it may only connect to places."""
    def make_connected_node(self, node):
        # Strings are resolved as place names in the bound net.
        if isinstance(node, str):
            return self._net.place(node)
        else:
            if not isinstance(node, BasePlace):
                raise NodeError("{!r}: cannot connect to {!r}".format(self, node))
            return node
class Arc(NetElement, metaclass=abc.ABCMeta):
    """Directed, annotated connection between a place and a transition.

    Subclasses pin down the allowed endpoint and annotation classes via
    the three class attributes below; __init__ enforces them.
    """
    __annotation_class__ = None       # required annotation type
    __source_node_class__ = None      # required type of `source`
    __target_node_class__ = None      # required type of `target`

    def __init__(self, source, target, annotation, *, net=None):
        super().__init__(net=net)
        if not isinstance(source, self.__source_node_class__):
            raise TypeError("invalid source {!r} for {}: type is not {}".format(
                source, self.__class__.__name__, self.__source_node_class__))
        self._source = source
        if not isinstance(target, self.__target_node_class__):
            raise TypeError("invalid target {!r} for {}: type is not {}".format(
                target, self.__class__.__name__, self.__target_node_class__))
        self._target = target
        if not isinstance(annotation, self.__annotation_class__):
            raise TypeError("invalid arc annotation {!r} for {}: type is not {}".format(
                annotation, self.__class__.__name__, self.__annotation_class__))
        self._annotation = annotation
        self._annotation.bind(self._net)

    def bind(self, net):
        """Bind the arc and its annotation to *net*."""
        super().bind(net)
        self._annotation.bind(net)

    @property
    def source(self):
        """Node this arc starts at (read-only)."""
        return self._source

    @property
    def target(self):
        """Node this arc ends at (read-only)."""
        return self._target

    @property
    def annotation(self):
        """The arc's annotation object (read-only)."""
        return self._annotation

    def __repr__(self):
        args = [repr(self._source), repr(self._target)]
        if self._annotation is not None:
            args.append(repr(self._annotation))
        return "{}({})".format(
            self.__class__.__name__, ', '.join(args))

    def label(self):
        """Display label delegated to the annotation."""
        return self._annotation.label()
class InputArc(Arc):
    """Arc from a place into a transition (token-consuming side)."""
    __annotation_class__ = InputAnnotation
    __source_node_class__ = BasePlace
    __target_node_class__ = BaseTransition

    def filter_substitutions(self):
        """Delegate to the annotation, matched against the place's tokens."""
        return self._annotation.filter_substitutions(self.source.tokens)

    def remove_substitution(self, substitution):
        """Remove the tokens bound by *substitution* from the source place."""
        return self._annotation.remove_substitution(self.source.tokens, substitution)
class OutputArc(Arc):
    """Arc from a transition into a place (token-producing side)."""
    __annotation_class__ = OutputAnnotation
    __source_node_class__ = BaseTransition
    __target_node_class__ = BasePlace

    def produce_token(self, substitution):
        """Build the token the annotation yields for *substitution*."""
        return self._annotation.produce_token(substitution)

    def add_token(self, token):
        """Deposit *token* into the target place's token store."""
        self._annotation.add_token(self.target.tokens, token)
| {
"repo_name": "simone-campagna/petra",
"path": "petra/node.py",
"copies": "1",
"size": "6061",
"license": "apache-2.0",
"hash": 1848341055591178000,
"line_mean": 29.9234693878,
"line_max": 93,
"alpha_frac": 0.6069955453,
"autogenerated": false,
"ratio": 4.089743589743589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.519673913504359,
"avg_score": null,
"num_lines": null
} |
__all__ = []
__private__ = [ # anything not in __all__ must be in __private__
'BadPermutationError', 'block_perm_and_perms_within_blocks',
'check_permutation', 'compose_permutations', 'concatenate_permutations',
'full_block_perm', 'invert_permutation',
'permutation_from_block_permutations', 'permutation_from_disjoint_cycles',
'permutation_to_block_permutations', 'permutation_to_disjoint_cycles',
'permute']
class BadPermutationError(ValueError):
    """Raised to signal that a tuple does not pass the
    :py:func:`check_permutation` test."""
    pass
def check_permutation(permutation):
    """Verify that a tuple of permutation image points ``(sigma(1),
    sigma(2), ..., sigma(n))`` is a valid permutation, i.e. each number
    from 0 to n-1 occurs exactly once. Equivalently, the following
    **set**-equality must hold:

        ``{sigma(1), sigma(2), ..., sigma(n)} == {0, 1, 2, ... n-1}``

    :param permutation: Tuple of permutation image points
    :type permutation: tuple
    :rtype: bool
    """
    expected = list(range(len(permutation)))
    return sorted(permutation) == expected
def invert_permutation(permutation):
    """Compute the image tuple of the inverse permutation.

    Runs in O(n): the original called ``permutation.index`` once per
    position, which is O(n^2) overall; scattering indices directly is
    both faster and equally readable.

    :param permutation: A valid (cf. :py:func:`check_permutation`) permutation.
    :return: The inverse permutation tuple
    :rtype: tuple
    """
    inverse = [0] * len(permutation)
    for index, image in enumerate(permutation):
        inverse[image] = index
    return tuple(inverse)
def permutation_to_disjoint_cycles(permutation):
    """Any permutation sigma can be represented as a product of cycles.

    A cycle (c_1, .. c_n) is a closed sequence of indices such that

        sigma(c_1) == c_2, sigma(c_2) == sigma^2(c_1) == c_3, ...,
        sigma(c_(n-1)) == c_n, sigma(c_n) == c_1

    Any single length-n cycle admits n equivalent representations in
    correspondence with which element one defines as c_1.

        (0,1,2) == (1,2,0) == (2,0,1)

    A decomposition into *disjoint* cycles can be made unique, by requiring
    that the cycles are sorted by their smallest element, which is also the
    left-most element of each cycle. Note that permutations generated by
    disjoint cycles commute. E.g.,

        (1, 0, 3, 2) == ((1,0),(3,2)) --> ((0,1),(2,3)) normal form

    :param permutation: A valid permutation image tuple
    :type permutation: tuple
    :return: A list of disjoint cycles (each a list of indices)
    :rtype: list
    :raise: BadPermutationError
    """
    if not check_permutation(permutation):
        raise BadPermutationError('Malformed permutation %r' % permutation)
    # Walk each orbit: follow image points until we return to the
    # cycle's starting index, then restart from the smallest unvisited.
    p_index = 0
    current_cycle = [0]
    # keep track of all remaining/unvisited indices
    permutation_nums = list(range(1, len(permutation)))
    cycles = []
    while True:
        # find next image point in cycle
        p_index = permutation[p_index]
        # if back at start of cycle
        if p_index == current_cycle[0]:
            # store cycle
            cycles.append(current_cycle)
            try:
                # retrieve the next lowest un-used image point
                p_index = permutation_nums.pop(0)
                current_cycle = [p_index]
            except IndexError:
                # All indices visited — decomposition complete.
                break
        else:
            permutation_nums.remove(p_index)
            current_cycle.append(p_index)
    return cycles
def permutation_from_disjoint_cycles(cycles, offset=0):
    """Reconstruct a permutation image tuple from a list of disjoint cycles.

    :param cycles: sequence of disjoint cycles
    :type cycles: list or tuple
    :param offset: Offset to subtract from the resulting permutation image points
    :type offset: int
    :return: permutation image tuple
    :rtype: tuple
    """
    length = sum(len(cycle) for cycle in cycles)
    image = list(range(length))
    for cycle in cycles:
        shifted = [point - offset for point in cycle]
        # Each element maps to its successor; the last wraps around to
        # the first, closing the cycle.
        for src, dst in zip(shifted, shifted[1:] + shifted[:1]):
            image[src] = dst
    assert sorted(image) == list(range(length))
    return tuple(image)
def permutation_to_block_permutations(permutation):
    """If possible, decompose a permutation into a sequence of permutations
    each acting on individual ranges of the full range of indices.
    E.g.

        ``(1,2,0,3,5,4) --> (1,2,0) [+] (0,2,1)``

    :param permutation: A valid permutation image tuple ``s = (s_0,...s_n)`` with ``n > 0``
    :type permutation: tuple
    :return: A list of permutation tuples ``[t = (t_0,...,t_n1), u = (u_0,...,u_n2),..., z = (z_0,...,z_nm)]`` such that ``s = t [+] u [+] ... [+] z``
    :rtype: list of tuples
    :raise: ValueError

    NOTE(review): when the whole permutation is a single cycle the
    function returns a *tuple* ``(permutation,)`` instead of a list —
    confirm whether callers rely on that inconsistency.
    """
    if len(permutation) == 0 or not check_permutation(permutation):
        raise BadPermutationError()
    cycles = permutation_to_disjoint_cycles(permutation)
    if len(cycles) == 1:
        return (permutation,)
    # Cycles arrive sorted by smallest element; greedily grow a block
    # until the next cycle starts beyond the block's current maximum.
    current_block_start = cycles[0][0]
    current_block_end = max(cycles[0])
    current_block_cycles = [cycles[0]]
    res_permutations = []
    for c in cycles[1:]:
        if c[0] > current_block_end:
            # Block is closed: everything below current_block_end is used.
            res_permutations.append(permutation_from_disjoint_cycles(current_block_cycles, current_block_start))
            assert sum(map(len, current_block_cycles)) == current_block_end - current_block_start + 1
            current_block_start = c[0]
            current_block_end = max(c)
            current_block_cycles = [c]
        else:
            current_block_cycles.append(c)
            if max(c) > current_block_end:
                current_block_end = max(c)
    # Flush the final block.
    res_permutations.append(permutation_from_disjoint_cycles(current_block_cycles, current_block_start))
    assert sum(map(len, current_block_cycles)) == current_block_end - current_block_start + 1
    assert sum(map(len, res_permutations)) == len(permutation)
    return res_permutations
def permutation_from_block_permutations(permutations):
    """Reverse operation to :py:func:`permutation_to_block_permutations`.

    Compute the concatenation of permutations::

        (1,2,0) [+] (0,2,1) --> (1,2,0,3,5,4)

    :param permutations: A list of permutation tuples
        ``[t = (t_0,...,t_n1), u = (u_0,...,u_n2),..., z = (z_0,...,z_nm)]``
    :type permutations: list of tuples
    :return: permutation image tuple
        ``s = t [+] u [+] ... [+] z``
    :rtype: tuple
    """
    combined = []
    base = 0
    for block in permutations:
        # Shift every image point of this block past the blocks before it.
        combined.extend(point + base for point in block)
        base += len(block)
    return tuple(combined)
def compose_permutations(alpha, beta):
    r"""Find the composite permutation

    .. math::
        \sigma := \alpha \cdot \beta \\
        \Leftrightarrow \sigma(j) = \alpha\left(\beta(j)\right) \\

    :param alpha: first permutation image tuple
    :type alpha: tuple
    :param beta: second permutation image tuple
    :type beta: tuple
    :return: permutation image tuple of the composition.
    :rtype: tuple
    """
    # Composition is just applying beta as an index selection on alpha.
    return permute(alpha, beta)
#TODO remove redundant function concatenate_permutations
def concatenate_permutations(a, b):
    """Concatenate two permutations:

        s = a [+] b

    Thin wrapper over :py:func:`permutation_from_block_permutations`.

    :param a: first permutation image tuple
    :type a: tuple
    :param b: second permutation image tuple
    :type b: tuple
    :return: permutation image tuple of the concatenation.
    :rtype: tuple
    """
    return permutation_from_block_permutations([a, b])
def permute(sequence, permutation):
    """Apply a permutation sigma({j}) to an arbitrary sequence.
    :param sequence: Any finite length sequence ``[l_1,l_2,...l_n]``. If it is
        a list, tuple or str, the return type will be the same.
    :param permutation: permutation image tuple
    :type permutation: tuple
    :return: The permuted sequence ``[l_sigma(1), l_sigma(2), ..., l_sigma(n)]``
    :raise: BadPermutationError or ValueError
    """
    if len(sequence) != len(permutation):
        raise ValueError((sequence, permutation))
    if not check_permutation(permutation):
        raise BadPermutationError(str(permutation))
    permuted = [sequence[p] for p in permutation]
    seq_type = type(sequence)
    if seq_type is str:
        # BUG FIX: the previous code did `constructor((genexpr))` with
        # constructor == str, and str() of a generator returns the
        # generator's repr (e.g. "<generator object ...>") rather than the
        # permuted characters.  Join the characters instead.
        return ''.join(permuted)
    if seq_type is tuple:
        return tuple(permuted)
    # Lists and any other sequence type come back as a list, matching the
    # original behavior.
    return permuted
def full_block_perm(block_permutation, block_structure):
    """
    Extend a permutation of blocks to a permutation for the internal signals of all blocks.
    E.g., say we have two blocks of sizes ('block structure') ``(2, 3)``,
    then a block permutation that switches the blocks would be given by the image tuple ``(1,0)``.
    However, to get a permutation of all 2+3 = 5 channels that realizes that block permutation we would need
    ``(2, 3, 4, 0, 1)``
    :param block_permutation: permutation image tuple of block indices
    :type block_permutation: tuple
    :param block_structure: The block channel dimensions, block structure
    :type block_structure: tuple
    :return: A single permutation for all channels of all blocks.
    :rtype: tuple
    """
    inverse = invert_permutation(block_permutation)
    expanded = []
    for block_index, width in enumerate(block_structure):
        target = block_permutation[block_index]
        # Channel offset of the destination slot: total width of all
        # blocks that land before this one.
        start = sum(block_structure[inverse[j]] for j in range(target))
        expanded.extend(range(start, start + width))
    assert sorted(expanded) == list(range(sum(block_structure)))
    return tuple(expanded)
def block_perm_and_perms_within_blocks(permutation, block_structure):
    """Decompose a permutation into a block permutation and into permutations
    acting within each block.
    :param permutation: The overall permutation to be factored.
    :type permutation: tuple
    :param block_structure: The channel dimensions of the blocks
    :type block_structure: tuple
    :return: ``(block_permutation, permutations_within_blocks)``
        Where ``block_permutations`` is an image tuple for a permutation of the block indices
        and ``permutations_within_blocks`` is a list of image tuples for the permutations of the channels
        within each block
    :rtype: tuple
    """
    nblocks = len(block_structure)
    offsets = [sum(block_structure[:k]) for k in range(nblocks)]
    # Slice the overall image tuple into per-block image chunks.
    images = [permutation[offset: offset + length]
              for offset, length in zip(offsets, block_structure)]
    images_mins = [min(image) for image in images]
    # Blocks ordered by the smallest image they contain give the inverse
    # of the block permutation.
    block_perm_inv = tuple(sorted(range(nblocks), key=images_mins.__getitem__))
    block_perm = invert_permutation(block_perm_inv)
    assert images_mins[block_perm_inv[0]] == min(images_mins)
    assert images_mins[block_perm_inv[-1]] == max(images_mins)
    perms_within_blocks = []
    for offset, length, image in zip(offsets, block_structure, images):
        within_inv = sorted(range(length), key=image.__getitem__)
        within = invert_permutation(tuple(within_inv))
        assert permutation[within_inv[0] + offset] == min(image)
        assert permutation[within_inv[-1] + offset] == max(image)
        perms_within_blocks.append(within)
    return block_perm, perms_within_blocks
| {
"repo_name": "mabuchilab/QNET",
"path": "src/qnet/utils/permutations.py",
"copies": "1",
"size": "11312",
"license": "mit",
"hash": 6770833422324667000,
"line_mean": 35.2564102564,
"line_max": 150,
"alpha_frac": 0.6440063649,
"autogenerated": false,
"ratio": 3.7870773351188483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4931083700018848,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'ReleaseRepo',
'PodInstruction',
'SimpleInstruction',
'execute_instructions',
'get_git_stamp',
'get_hg_stamp',
]
from pathlib import Path
import datetime
import logging
import tempfile
import yaml
from foreman import Label
from garage import scripts
from garage.assertions import ASSERT
import shipyard
LOG = logging.getLogger(__name__)
class ReleaseRepo:
    """Access a release repository of build-instruction files.

    Instruction files are YAML documents stored at
    ``<root>/<kind>/<label.path>/<label.name>/<version>.yaml`` where
    ``kind`` is either ``'pods'`` or ``'volumes'``.
    """
    @staticmethod
    def get_instruction_path(root, kind, label, version):
        """Compose an instruction file path (existence is not checked)."""
        # Don't use with_suffix('.yaml') because version may contain
        # dots, e.g., "1.0.3".
        return root / kind / label.path / label.name / (version + '.yaml')
    @staticmethod
    def detect_instruction_path(root, label, version):
        """Find the unique ``(kind, path)`` of an instruction file.

        Raises FileNotFoundError when no instruction exists and
        RuntimeError when both a pod and a volume instruction match.
        """
        paths = {}
        for kind in ('pods', 'volumes'):
            path = ReleaseRepo.get_instruction_path(root, kind, label, version)
            if path.exists():
                paths[kind] = path
        if not paths:
            raise FileNotFoundError(
                'expect instructions under: %s %s %s' % (root, label, version))
        if len(paths) > 1:
            raise RuntimeError(
                'expect unique instruction: %s' % sorted(paths.values()))
        return paths.popitem()
    def __init__(self, release_root, rules):
        # Root directory of the release repository.
        self.root = scripts.ensure_path(release_root)
        # Build-rule registry used to resolve rule labels.
        self.rules = rules
    def load_instructions(self, labels_versions):
        """Load instructions for an iterable of ``(label, version)`` pairs."""
        data_list = []
        rule_list = []
        for label, version in labels_versions:
            LOG.info('load release %s@%s', label, version)
            _, path = self.detect_instruction_path(self.root, label, version)
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; presumably instruction files are trusted,
            # but consider yaml.safe_load — confirm.
            data = yaml.load(path.read_text())
            rule = data.get('rule')
            if not rule:
                raise ValueError('instruction does not specify rule')
            rule_list.append(Label.parse(rule, implicit_path=label.path))
            data_list.append(data)
        self.rules.load_from_labels(rule_list)
        return [
            self._make_instruction(data_list[i], rule_list[i], label, version)
            for i, (label, version) in enumerate(labels_versions)
        ]
    def load_instruction_files(self, paths):
        """Load instructions from explicit file paths; rejects duplicates."""
        blobs = []
        build_ids = set()
        for path in paths:
            LOG.info('load release instruction: %s', path)
            path = self._check_path(path)
            # NOTE(review): same yaml.load safety caveat as above.
            data = yaml.load(path.read_text())
            rule, pod, version = self._parse_rpv(path, data)
            blobs.append((data, rule, pod, version))
            # You should not build the same pod twice
            build_id = (pod, version)
            if build_id in build_ids:
                raise ValueError(
                    'duplicated instruction: %s@%s' % (pod, version))
            build_ids.add(build_id)
        self.rules.load_from_labels(rule for _, rule, _, _ in blobs)
        return [self._make_instruction(*blob) for blob in blobs]
    def _make_instruction(self, data, rule, pod, version):
        """Create a Pod/Simple instruction depending on the rule's rule-type."""
        # Check if instruction file overwrites pod and version.
        pod = data.get('pod', pod)
        version = data.get('version', version)
        parameters = [
            (Label.parse(label, implicit_path=rule.path), value)
            for label, value in sorted(data.get('parameters', {}).items())
        ]
        # Re-sort by parsed Label (the comprehension above sorted by the
        # raw string keys).
        parameters.sort()
        rule_type = self.rules.get_rule(rule).annotations.get('rule-type')
        if rule_type == 'build_pod':
            return self._make_pod_instruction(
                data,
                rule, pod, version,
                parameters,
            )
        elif rule_type == 'build_volume':
            return SimpleInstruction(
                kind='volumes',
                rule=rule, target=pod, version=version,
                parameters=parameters,
            )
        else:
            # FIXME: This is probably confusing: Although this is not a
            # pod, we still put it to `pods` directory. We do this just
            # because it is convenient, not because it is right.
            return SimpleInstruction(
                kind='pods',
                rule=rule, target=pod, version=version,
                parameters=parameters,
            )
    def _make_pod_instruction(self, data, rule, pod, version, parameters):
        """Build a PodInstruction, filling in default images and volumes."""
        self._check_pod(rule, pod)
        pod_parameter = self._get_pod_parameter(rule)
        build_image_rules = shipyard.get_build_image_rules(
            self.rules,
            self.rules.get_rule(rule),
        )
        build_volume_rules = shipyard.get_build_volume_rules(
            self.rules,
            self.rules.get_rule(rule),
        )
        parse_label = lambda l: Label.parse(l, implicit_path=rule.path)
        instruction = PodInstruction(
            rule=rule,
            pod=pod,
            version=version,
            images={
                parse_label(label): version
                for label, version in data.get('images', {}).items()
            },
            image_rules={}, # Set it in _add_default_images().
            volumes={
                parse_label(label): version
                for label, version in data.get('volumes', {}).items()
            },
            volume_mapping={
                parse_label(l1): parse_label(l2)
                for l1, l2 in pod_parameter.default['volume_mapping']
            },
            volume_rules={}, # Set in _add_volume_rules().
            parameters=parameters,
        )
        self._add_default_images(instruction, build_image_rules)
        self._add_default_volumes(instruction, build_image_rules)
        self._add_volume_rules(instruction, build_volume_rules)
        return instruction
    def _check_path(self, path):
        """Normalize a user-supplied path; may resolve relative to `pods/`."""
        path = scripts.ensure_path(path)
        if path.exists():
            if not path.is_absolute():
                path = path.resolve()
        else:
            # I guess it's a path relative to `pods` directory?
            path = scripts.ensure_file(self.root / 'pods' / path)
        if path.suffix != '.yaml':
            LOG.warning('expect file suffix to be .yaml: %s', path)
        return path
    def _parse_rpv(self, path, data):
        """Parse rule, pod, and version."""
        try:
            relpath = path.relative_to(self.root)
        except ValueError:
            # Path is outside the repo; nothing can be inferred.
            inferred_pod = None
            inferred_version = None
        else:
            # relpath should be like:
            #   pods/LABEL_PATH/POD_NAME/VERSION.yaml
            LOG.debug('try to infer instruction data from %s', relpath)
            parts = relpath.parts
            if parts[0] != 'pods':
                raise ValueError('invalid relative path: %s' % relpath)
            inferred_pod = '//%s:%s' % ('/'.join(parts[1:-2]), parts[-2])
            inferred_version = relpath.stem
        pod = data.get('pod', inferred_pod)
        if not pod:
            raise ValueError('instruction does not specify pod')
        if inferred_pod and inferred_pod != pod:
            LOG.warning('actual pod differs from the inferred: %s != %s',
                        pod, inferred_pod)
        pod = Label.parse(pod)
        version = data.get('version', inferred_version)
        if not version:
            raise ValueError('instruction does not specify version')
        if inferred_version is not None and inferred_version != version:
            LOG.warning('actual version differs from the inferred: %s != %s',
                        version, inferred_version)
        rule = data.get('rule')
        if not rule:
            raise ValueError('instruction does not specify rule')
        rule = Label.parse(rule, implicit_path=pod.path)
        return rule, pod, version
    def _get_pod_parameter(self, rule):
        """Look up the pod parameter annotated on a build rule."""
        pod = self.rules.get_rule(rule)
        pod = self.rules.get_parameter(
            pod.annotations['pod-parameter'],
            implicit_path=pod.label.path,
        )
        return pod
    def _check_pod(self, rule, pod):
        """Ensure the build rule's pod name matches the instruction's pod."""
        pod2 = self.rules.get_pod_name(self.rules.get_rule(rule))
        if pod2 != pod:
            fmt = 'pod from build file differs from instruction: %s != %s'
            raise ValueError(fmt % (pod2, pod))
    def _add_default_images(self, instruction, build_image_rules):
        """Default each image to the pod version and record its build rule."""
        for rule in build_image_rules:
            image = self.rules.get_parameter(
                rule.annotations['image-parameter'],
                implicit_path=rule.label.path,
            )
            image = Label.parse_name(rule.label.path, image.default['name'])
            instruction.images.setdefault(image, instruction.version)
            instruction.image_rules[image] = rule.label
    def _add_default_volumes(self, instruction, build_image_rules):
        """Default the data volumes declared by each image's app definition."""
        for rule in build_image_rules:
            specify_app_rule = shipyard.get_specify_app_rule(
                self.rules,
                shipyard.get_specify_image_rule(
                    self.rules,
                    rule,
                ),
            )
            app = self.rules.get_parameter(
                specify_app_rule.annotations['app-parameter'],
                implicit_path=specify_app_rule.label.path,
            )
            # Include only volumes that provide `data` path.
            for volume in app.default['volumes']:
                if not volume.get('data'):
                    continue
                instruction.volumes.setdefault(
                    Label.parse_name(app.label.path, volume['name']),
                    instruction.version,
                )
    def _add_volume_rules(self, instruction, build_volume_rules):
        """Record the build rule for each buildable volume."""
        for rule in build_volume_rules:
            volume = self.rules.get_parameter(
                rule.annotations['volume-parameter'],
                implicit_path=rule.label.path,
            )
            volume = Label.parse_name(rule.label.path, volume.default['name'])
            instruction.volume_rules[volume] = rule.label
def execute_instructions(instructions, repo, builder, input_roots):
    """Run the release instructions in order, stopping at the first failure.

    Returns True when every instruction succeeds, False otherwise.
    """
    for instruction in instructions:
        LOG.info('execute release instruction: %s', instruction)
        succeeded = instruction.execute(repo, builder, input_roots)
        if not succeeded:
            return False  # Fail early.
    return True
class PodInstruction:
    """Release instruction of pods.
    It tracks extra info for building images, etc.
    """
    def __init__(self, **kwargs):
        # Pod build rule.
        self.rule = ASSERT.type_of(kwargs.pop('rule'), Label)
        # Label that refers to the pod (not pod build rule).
        self.pod = ASSERT.type_of(kwargs.pop('pod'), Label)
        # Pod version.
        self.version = kwargs.pop('version')
        # Map image label to version.
        self.images = kwargs.pop('images')
        # Build rules of the images.
        self.image_rules = kwargs.pop('image_rules')
        # Map volume label to version.
        self.volumes = kwargs.pop('volumes')
        # Map volume label to the label it is aliased to.
        self.volume_mapping = kwargs.pop('volume_mapping')
        # Build rules of the volumes.
        self.volume_rules = kwargs.pop('volume_rules')
        # Extra (label, value) build parameters.
        self.parameters = kwargs.pop('parameters')
        if kwargs:
            raise ValueError('unknown names: %s' % ', '.join(sorted(kwargs)))
    def __str__(self):
        return '%s@%s' % (self.pod, self.version)
    def execute(self, repo, builder, input_roots):
        """Build missing images and volumes, then the pod itself."""
        build_name = 'build-%d' % datetime.datetime.now().timestamp()
        # Skip building pod if it is present.
        if self._get_pod_path(repo).exists():
            LOG.info('skip building pod: %s@%s', self.pod, self.version)
            return True
        # Build images that are not present.
        for image in sorted(self.images):
            self._build_image(repo, builder, build_name, image, input_roots)
        # Build volumes that are not present.
        for volume in sorted(self.volumes):
            self._build_volume(repo, builder, build_name, volume, input_roots)
        # Finally we build the pod.
        self._build_pod(repo, builder, build_name)
        return True
    def _get_mapped_to_volume_label(self, volume):
        # Follow the alias chain to the volume that is actually built.
        while volume in self.volume_mapping:
            volume = self.volume_mapping[volume]
        return volume
    def _get_pod_path(self, repo):
        return (repo.root / 'pods' /
                self.pod.path / self.pod.name / self.version)
    def _get_image_path(self, repo, image):
        return (repo.root / 'images' /
                image.path / image.name / self.images[image])
    def _get_volume_path(self, repo, volume):
        # The version comes from the original label; the location comes
        # from the mapped-to (aliased) label.
        version = self.volumes[volume]
        volume = self._get_mapped_to_volume_label(volume)
        return repo.root / 'volumes' / volume.path / volume.name / version
    def _build_image(self, repo, builder, build_name, image, input_roots):
        """Build one image unless it exists or comes from a registry."""
        image_lv = '%s@%s' % (image, self.images[image])
        image_path = self._get_image_path(repo, image)
        if image_path.exists():
            LOG.info('skip building existed image: %s', image_lv)
            return
        image_uri = self._get_image_uri(repo, self.image_rules[image])
        if image_uri:
            LOG.info(
                'skip building image because it is from registry: %s %s',
                image_lv, image_uri,
            )
            return
        LOG.info('build image %s -> %s', self.image_rules[image], image_lv)
        version_label = self._get_version_label(repo, self.image_rules[image])
        with tempfile.TemporaryDirectory() as build_dir:
            args = [
                '--build-name', build_name,
                '--parameter', '%s=%s' % (version_label, self.version),
                '--output', build_dir,
            ]
            input_root, input_path = shipyard.find_input_path(
                input_roots, 'image-data', image)
            if input_root is not None:
                LOG.info('use image data: %s %s', input_root, input_path)
                args.extend(['--input', input_root, input_path])
            add_parameters(args, self.parameters)
            builder.build(self.image_rules[image], extra_args=args)
            scripts.mkdir(image_path.parent)
            scripts.cp(
                Path(build_dir) / image.name, image_path,
                recursive=True,
            )
    def _build_volume(self, repo, builder, build_name, original, input_roots):
        """Build one volume (following any alias) unless it exists."""
        volume = self._get_mapped_to_volume_label(original)
        volume_lv = '%s@%s' % (volume, self.volumes[original])
        volume_path = self._get_volume_path(repo, original)
        if volume_path.exists():
            LOG.info('skip building volume: %s', volume_lv)
            return
        LOG.info('build volume %s -> %s', self.volume_rules[volume], volume_lv)
        version_label = self._get_version_label(
            repo, self.volume_rules[volume])
        tarball_filename = self._get_volume_tarball_filename(
            repo, self.volume_rules[volume])
        with tempfile.TemporaryDirectory() as build_dir:
            args = [
                '--build-name', build_name,
                '--parameter', '%s=%s' % (version_label, self.version),
                '--output', build_dir,
            ]
            input_root, input_path = shipyard.find_input_path(
                input_roots, 'volume-data', volume)
            if input_root is not None:
                LOG.info('use volume data: %s %s', input_root, input_path)
                args.extend(['--input', input_root, input_path])
            add_parameters(args, self.parameters)
            builder.build(self.volume_rules[volume], extra_args=args)
            scripts.mkdir(volume_path)
            scripts.cp(Path(build_dir) / tarball_filename, volume_path)
    def _build_pod(self, repo, builder, build_name):
        """Build the pod and symlink its images and volumes into place."""
        LOG.info('build pod %s -> %s', self.rule, self)
        version_label = self._get_version_label(repo, self.rule)
        images_from_registry = frozenset(
            image
            for image in self.images
            if self._get_image_uri(repo, self.image_rules[image])
        )
        with tempfile.TemporaryDirectory() as build_dir:
            # BUG FIX: TemporaryDirectory yields a str, but the code below
            # joins paths with the `/` operator (str / str raises
            # TypeError); convert to Path once, as the other _build_*
            # methods do with Path(build_dir).
            build_dir = Path(build_dir)
            # Builder is running in a container and so symlinks won't
            # work; to work around this, we copy files to build_dir (and
            # for now all we need to copy is `image-name/sha512`).
            for image in self.images:
                if image in images_from_registry:
                    continue
                image_path = self._get_image_path(repo, image)
                scripts.mkdir(build_dir / image.name)
                scripts.cp(image_path / 'sha512', build_dir / image.name)
            builder.build(self.rule, extra_args=add_parameters(
                [
                    '--build-name', build_name,
                    '--parameter', '%s=%s' % (version_label, self.version),
                    '--output', build_dir,
                ],
                self.parameters,
            ))
            # Undo the workaround.
            for image in self.images:
                if image in images_from_registry:
                    continue
                scripts.rm(build_dir / image.name, recursive=True)
            pod_path = self._get_pod_path(repo)
            scripts.mkdir(pod_path.parent)
            scripts.cp(build_dir, pod_path, recursive=True)
            # Create symlink to images.
            for image in self.images:
                if image in images_from_registry:
                    continue
                image_path = self._get_image_path(repo, image)
                link_path = pod_path / image.name
                if link_path.exists():
                    LOG.warning('refuse to overwrite: %s', link_path)
                    continue
                scripts.symlink_relative(image_path, link_path)
            # Create symlink to volumes.
            for volume in self.volumes:
                volume_path = self._get_volume_path(repo, volume)
                link_path = pod_path / volume.name
                if link_path.exists():
                    LOG.warning('refuse to overwrite: %s', link_path)
                    continue
                scripts.symlink_relative(volume_path, link_path)
    @staticmethod
    def _get_version_label(repo, rule):
        """Label of the version parameter annotated on a build rule."""
        version_parameter = repo.rules.get_parameter(
            repo.rules.get_rule(rule).annotations['version-parameter'],
            implicit_path=rule.path,
        )
        return version_parameter.label
    @staticmethod
    def _get_image_uri(repo, image_rule_label):
        """Registry URI of an image, or a falsy value if built locally."""
        image_rule = repo.rules.get_rule(image_rule_label)
        image_parameter = repo.rules.get_parameter(
            image_rule.annotations['image-parameter'],
            implicit_path=image_rule.label.path,
        )
        return image_parameter.default['image_uri']
    @staticmethod
    def _get_volume_tarball_filename(repo, rule):
        """Tarball file name declared by a volume build rule."""
        volume_parameter = repo.rules.get_parameter(
            repo.rules.get_rule(rule).annotations['volume-parameter'],
            implicit_path=rule.path,
        )
        return volume_parameter.default['tarball_filename']
class SimpleInstruction:
    """Release instruction of a single build rule."""
    def __init__(self, *, kind, rule, target, version, parameters):
        # Output sub-directory: 'pods' or 'volumes'.
        self.kind = kind
        # Build rule label.
        self.rule = rule
        # Label of the build target.
        self.target = target
        # Target version string.
        self.version = version
        # Extra (label, value) build parameters.
        self.parameters = parameters
    def __str__(self):
        return '%s@%s' % (self.target, self.version)
    def execute(self, repo, builder, input_roots):
        """Run the build rule, writing output into the release repo."""
        LOG.info('build %s -> %s', self.rule, self)
        build_name = 'build-%d' % datetime.datetime.now().timestamp()
        output_path = (
            repo.root / self.kind /
            self.target.path / self.target.name / self.version
        )
        args = [
            '--build-name', build_name,
            '--output', output_path,
        ]
        if self.kind == 'volumes':
            # Volumes may take an extra input data directory.
            input_root, input_path = shipyard.find_input_path(
                input_roots, 'volume-data', self.target)
            if input_root is not None:
                LOG.info('use volume data: %s %s', input_root, input_path)
                args.extend(['--input', input_root, input_path])
        add_parameters(args, self.parameters)
        builder.build(self.rule, extra_args=args)
        return True
def get_git_stamp(path):
    """Return ``(origin_url, head_revision, dirty)`` for the git repo at path.

    Raises RuntimeError when the repository has no 'origin' remote.
    """
    with scripts.directory(path):
        cmd = ['git', 'remote', '--verbose']
        remotes = scripts.execute(cmd, capture_stdout=True).stdout
        for remote in remotes.decode('utf8').split('\n'):
            remote = remote.split()
            # BUG FIX: guard against blank lines (split('\n') yields a
            # trailing empty string) so the no-origin case reaches the
            # RuntimeError below instead of raising IndexError here.
            if remote and remote[0] == 'origin':
                url = remote[1]
                break
        else:
            raise RuntimeError('no remote origin for %s' % path)
        cmd = ['git', 'log', '-1', '--format=format:%H']
        revision = scripts.execute(cmd, capture_stdout=True).stdout
        revision = revision.decode('ascii').strip()
        dirty = False
        cmd = ['git', 'status', '--porcelain']
        status = scripts.execute(cmd, capture_stdout=True).stdout
        for status_line in status.decode('utf8').split('\n'):
            # Be careful of empty line!
            if status_line and not status_line.startswith(' '):
                dirty = True
                break
        return url, revision, dirty
def get_hg_stamp(path):
    """Return ``(default_url, tip_revision, dirty)`` for the hg repo at path.

    Raises RuntimeError when the repository has no 'default' remote path.
    """
    with scripts.directory(path):
        cmd = ['hg', 'path']
        remotes = scripts.execute(cmd, capture_stdout=True).stdout
        for remote in remotes.decode('utf8').split('\n'):
            remote = remote.split()
            # BUG FIX: guard against blank lines (split('\n') yields a
            # trailing empty string) so the no-default case reaches the
            # RuntimeError below instead of raising IndexError here.
            if remote and remote[0] == 'default':
                ASSERT.equal(remote[1], '=')
                url = remote[2]
                break
        else:
            raise RuntimeError('no default remote for %s' % path)
        cmd = ['hg', 'log', '--limit', '1', '--template', '{node}']
        revision = scripts.execute(cmd, capture_stdout=True).stdout
        revision = revision.decode('ascii').strip()
        cmd = ['hg', 'status']
        dirty = scripts.execute(cmd, capture_stdout=True).stdout
        # Any status output at all means the working copy is dirty.
        dirty = bool(dirty.strip())
        return url, revision, dirty
def add_parameters(extra_args, parameters):
    """Append ``--parameter label=value`` pairs to a command-line list.

    Mutates ``extra_args`` in place and returns the same list for
    call-site chaining.
    """
    for label, value in parameters:
        extra_args.extend(['--parameter', '%s=%s' % (label, value)])
    return extra_args
| {
"repo_name": "clchiou/garage",
"path": "shipyard/shipyard/release.py",
"copies": "1",
"size": "22372",
"license": "mit",
"hash": -4448393955289626000,
"line_mean": 34.4548335975,
"line_max": 79,
"alpha_frac": 0.5573484713,
"autogenerated": false,
"ratio": 4.1087235996326905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.516607207093269,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'OADao',
'OADbTransaction'
]
import binascii
import os
import psycopg2
import psycopg2.extras
import psycopg2.extensions
from textwrap import dedent as td
from ._env import oactx, oalog
from openarc.exception import OAGraphStorageError
## Exportable classes
class OADao(object):
    """Wrapper around psycopg2 with additional functionality
    for logging, connection management and sql execution"""
    def __init__(self, schema, cdict=True, trans_commit_hold=False):
        """Schema refers to the api entity we're referring
        to: auth, trading etc"""
        # Return rows as RealDictCursor dicts when True.
        self.cdict = cdict
        # Shared connection taken from the global context.
        self.dbconn = oactx.db_conn
        self.schema = schema
        # Nesting depth; OADbTransaction increments/decrements this.
        self.trans_depth = 1
        self._cursor = None
        # When True, commit/rollback is deferred to cur_finalize().
        self._trans_commit_hold = trans_commit_hold
        self.__enter__()
    ##################################################
    # OADaos should not be used in ctx, but whatever #
    ##################################################
    def __enter__(self): #
        return self #
    #
    def __exit__(self, exc, value, traceback): #
        if not self._trans_commit_hold: #
            self.cur_finalize(exc) #
    ##################################################
    def commit(self):
        """Proxy method for committing dbconnection actions"""
        self.dbconn.commit()
    def rollback(self):
        """Proxy method for rolling back any existing action"""
        self.dbconn.rollback()
    @property
    def cur(self):
        # Lazily create a cursor bound to this DAO's schema via search_path.
        if not self._cursor:
            if self.cdict:
                self._cursor = self.dbconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            else:
                self._cursor = self.dbconn.cursor()
            self._cursor.execute("SET search_path TO %s", [self.schema])
        return self._cursor
    def cur_finalize(self, exc):
        """Commit (or roll back when exc is truthy) and drop the cursor."""
        if exc:
            self.rollback()
        else:
            self.commit()
        self._cursor = None
    @property
    def description(self):
        # Column description of the current cursor's last statement.
        return self.cur.description
    def execute(self, query, params=[], savepoint=False, cdict=True, extcur=None):
        """Execute ``query`` and return the fetched rows (or None).

        When ``savepoint`` is set, the statement runs under a SQL
        savepoint and is rolled back to it on failure.  If ``extcur`` is
        a list it is emptied and given the cursor actually used.
        """
        # NOTE(review): mutable default `params=[]` is shared across calls;
        # it is not mutated here, but a tuple default would be safer.
        results = None
        cur = self.cur
        if cdict != self.cdict:
            # Caller wants a different row style than this DAO's default;
            # use a one-off cursor.
            cur = self.dbconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
            cur.execute("SET search_path TO %s", [self.schema])
        if type(extcur)==list:
            while len(extcur)>0:
                extcur.pop()
            extcur.append(cur)
        if savepoint:
            savepoint_name = 'sp_'+binascii.hexlify(os.urandom(7)).decode('utf-8')
            oalog.debug(f"Initializing savepoint [{savepoint_name}]", f='sql')
            cur.execute("SAVEPOINT %s" % savepoint_name)
        oalog.debug(f"{td(cur.mogrify(query, params).decode('utf-8'))}", f='sql')
        try:
            try:
                cur.execute(query, params)
            except Exception as e:
                raise OAGraphStorageError(str(e), e)
            try:
                results = cur.fetchall()
            except:
                # No result set to fetch (e.g. INSERT/UPDATE without RETURNING).
                pass
        except:
            if savepoint:
                oalog.debug(f"Rolling back to savepoint {savepoint_name}", f='sql')
                cur.execute("ROLLBACK TO SAVEPOINT %s" % savepoint_name)
            if not self._trans_commit_hold:
                self.rollback()
            raise
        else:
            if not self._trans_commit_hold:
                self.commit()
        return results
    @property
    def isolation(self):
        # NOTE(review): `_isolation_level` is never assigned in this class
        # (the setter below delegates to psycopg2 without recording the
        # value), so reading this property raises AttributeError — confirm
        # intent.
        return self._isolation_level
    @isolation.setter
    def isolation(self, value):
        self.dbconn.set_isolation_level(value)
    class Isolation(object):
        # Convenience aliases for psycopg2 isolation levels.
        READCMT = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
        READRPT = psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ
        SERIAL = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE
class OADbTransaction(object):
    """Manipulates the global dbconn object so that all OAGs see the same
    cursor. This is the functional equivalent of a semantic transaction. Captures
    non-OAG database transactions, but only as an unintended side effect."""
    def __init__(self, transname):
        # NOTE(review): `transname` is accepted but never used — confirm
        # whether it should be recorded (e.g. for logging).
        self.dao = oactx.db_txndao
    def __enter__(self):
        # The outermost entry creates the shared commit-held DAO.
        if not self.dao:
            oactx.db_txndao = OADao("openarc", trans_commit_hold=True)
            self.dao = oactx.db_txndao
            # Touch the cursor so the schema search_path is set up front.
            self.dao.cur
        self.dao.trans_depth += 1
        return self
    def __exit__(self, exc, value, traceback):
        self.dao.trans_depth -= 1
        # Only the outermost exit finalizes (OADao starts at depth 1).
        if self.dao.trans_depth == 1:
            self.dao.cur_finalize(exc)
            oactx.db_txndao = None
            self.dao = None
| {
"repo_name": "kchoudhu/openarc",
"path": "openarc/_dao.py",
"copies": "1",
"size": "4840",
"license": "bsd-3-clause",
"hash": 2727870853440922000,
"line_mean": 31.4832214765,
"line_max": 96,
"alpha_frac": 0.5495867769,
"autogenerated": false,
"ratio": 4.1261722080136405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005019428733269208,
"num_lines": 149
} |
__all__ = [
'OAG_RootNode',
'OAG_RootD',
'OAG_RpcDiscoverable'
]
import attrdict
import datetime
import hashlib
import inflection
import inspect
import os
import signal
import socket
import sys
from ._db import *
from ._env import *
from ._rdf import *
from ._rpc import reqcls, RpcTransaction, RpcProxy, RestProxy, RpcACL
from ._util import oagprop, staticproperty
from openarc.exception import *
from openarc.time import *
class OAG_RootNode(object):
##### Class variables
_fkframe = []
##### Proxies
@property
def cache(self):
return self._cache_proxy
@property
def db(self):
return self._db_proxy
@property
def rdf(self):
return self._rdf_proxy
@property
def props(self):
return self._prop_proxy
@property
def rpc(self):
return self._rpc_proxy
@property
def REST(self):
return self._rest_proxy
##### User defined via inheritance
@staticproperty
def context(cls):
raise NotImplementedError("Must be implemented in deriving OAGraph class")
@staticproperty
def dbindices(cls): return {}
@staticproperty
def dblocalsql(cls): return {}
@staticproperty
def infname_fields(cls):
"""Override in deriving classes as necessary"""
return sorted([k for k, v in cls.streams.items()])
@staticproperty
def is_unique(cls): return False
@staticproperty
def nonstream_deps(cls): return []
@staticproperty
def restapi(cls): return {}
@staticproperty
def streamable(cls): return True
@staticproperty
def streams(cls):
raise NotImplementedError("Must be implemented in deriving OAGraph class")
##### Derivative fields
@staticproperty
def dbpkname(cls): return "_%s_id" % cls.dbtable
@staticproperty
def dbtable(cls):
ca_prop = getattr(cls, '_dbtable_name', ())
if not ca_prop or (len(ca_prop)>0 and ca_prop[0]!=cls):
db_table_name = inflection.underscore(cls.__name__)[4:]
setattr(cls, '_dbtable_name', (cls, db_table_name))
if not cls.is_reversible:
raise OAError("This table name isn't reversible: [%s]" % cls.__name__)
return cls._dbtable_name[1]
@classmethod
def is_oagnode(cls, stream):
try:
streaminfo = cls.streams[stream][0]
if type(streaminfo).__name__=='type':
return 'OAG_RootNode' in [x.__name__ for x in inspect.getmro(streaminfo)]
else:
return False
except KeyError:
return False
@classmethod
def is_scalar(cls, stream):
try:
return type(cls.streams[stream][0])==str
except KeyError:
# Stay with me here: if there's a key error, you've either fed this
# function junk data, or an internal member beginning with '_'; return
# True.
return True
@classmethod
def is_enum(cls, stream):
try:
return cls.is_scalar(stream)==False and cls.is_oagnode(stream)==False
except KeyError:
return False
@staticproperty
def is_reversible(cls):
ca_prop = getattr(cls, '_is_reversible', ())
if not ca_prop or (len(ca_prop)>0 and ca_prop[0]!=cls):
reverse_class_name = "OAG_"+inflection.camelize(cls.dbtable)
setattr(cls, '_is_reversible', (cls, reverse_class_name == cls.__name__))
return cls._is_reversible[1]
@staticproperty
def stream_db_mapping(cls):
ca_prop = getattr(cls, '_stream_db_mapping', ())
if not ca_prop or (len(ca_prop)>0 and ca_prop[0]!=cls):
schema = {}
for stream, streaminfo in cls.streams.items():
if cls.is_oagnode(stream):
schema[stream] = streaminfo[0].dbpkname[1:]+'_'+stream
else:
schema[stream] = stream
setattr(cls, '_stream_db_mapping', (cls, schema))
return cls._stream_db_mapping[1]
@staticproperty
def db_stream_mapping(cls):
ca_prop = getattr(cls, '_db_stream_mapping', ())
if not ca_prop or (len(ca_prop)>0 and ca_prop[0]!=cls):
setattr(cls, '_db_stream_mapping', (cls, {cls.stream_db_mapping[k]:k for k in cls.stream_db_mapping}))
return cls._db_stream_mapping[1]
##### User API
@property
def id(self):
try:
return self.props._cframe[self.dbpkname]
except:
return None
@property
def infname(self):
if len(self.props._cframe)==0:
raise OAError("Cannot calculate infname if OAG attributes have not set")
hashstr = str()
for stream in self.infname_fields:
node = getattr(self, stream, None)
hashstr += node.infname if self.is_oagnode(stream) and node else str(node)
return hashlib.sha256(hashstr.encode('utf-8')).hexdigest()
@property
def infname_semantic(self):
if None in [self.db.searchidx, self.db.searchprms]:
raise OAError("Cannot calculate infname_semantic if search parameters are not initialized")
hashstr = str()
hashstr += self.context
hashstr += self.__class__.__name__
hashstr += self.db.searchidx
for searchprm in self.db.searchprms:
hashstr += str(searchprm)
return hashlib.sha256(hashstr.encode('utf-8')).hexdigest()
@property
def is_materialized(self):
"""Has been persisted to the database"""
return self.id is not None
def clone(self):
oagcopy = self.__class__()
oagcopy._iteridx = 0
# Clone proxies
oagcopy.rdf.clone(self)
oagcopy.db.clone(self)
oagcopy.props.clone(self)
if oagcopy.is_unique:
oagcopy.props._set_attrs_from_cframe_uniq()
return oagcopy
def reset(self, idxreset=True):
self.rdf._rdf_window = self.rdf._rdf
if idxreset:
self._iteridx = 0
self.props._set_attrs_from_cframe()
return self
@property
def size(self):
if self.rdf._rdf_window is None:
return 0
else:
return len(self.rdf._rdf_window)
@property
def url(self):
return self.rpc.url
##### Stream attributes
##### Internals
def __del__(self):
# If the table isn't reversible, OAG would never have been created
if not self.is_reversible:
return
oalog.debug(f"GC=========>", f='gc')
oalog.debug("Deleting {} {}, {}, proxy: {}".format(
self,
self.rpc.id if self.rpc.is_enabled else str(),
self.rpc.url if self.rpc.is_enabled else str(),
self.rpc.is_proxy
), f='gc')
if self.rpc.is_enabled:
# Tell upstream proxies that we are going away
if self.rpc.is_proxy:
oalog.debug(f"Delete: proxies", f='gc')
oalog.debug(f"--> {self.rpc.proxied_url}", f='gc')
oactx.rm_ka_via_rpc(self.rpc.url, self.rpc.proxied_url, 'proxy')
# Tell upstream registrations that we are going away
oalog.debug(f"Delete: registrations", f='gc')
oalog.debug(f"--> {self.rpc.registrations}", f='gc')
# Tell subnodes we are going away
oalog.debug(f"Delete cache", f='gc')
oalog.debug(f"--> {self.cache.state}", f='gc')
self.cache.clear()
oalog.debug(f"Delete: queue size", f='gc')
oalog.debug(f"--> {oactx.rm_queue_size}", f='gc')
# print("Delete: stop router")
# self.rpc._glets[0].kill()
oalog.debug("<=========GC", f='gc')
def __enter__(self):
self.rpc.discoverable = True
return self
def __exit__(self, type, value, traceback):
self.rpc.discoverable = False
    def __getattribute__(self, attr):
        """Cascade through the following lookups:

        1. Attempt a lookup via the prop proxy
        2. Attempt to retrieve via RPC if applicable.
        3. Attempt a regular attribute lookup.

        Failure at each step is denoted by the generation of an AttributeError.
        ``object.__getattribute__`` is used for internal reads to avoid
        re-entering this method recursively.
        """
        try:
            props = object.__getattribute__(self, '_prop_proxy')
            return props.get(attr, internal_call=True)
        except AttributeError as e:
            # Fall through to the RPC path
            pass
        try:
            if object.__getattribute__(self, 'is_proxy'):
                rpc = object.__getattribute__(self, '_rpc_proxy')
                if attr in rpc.proxied_streams:
                    oalog.debug(f"[{rpc.id}] proxying request for [{attr}] to [{rpc.proxied_url}]", f='rpc')
                    payload = reqcls(self).getstream(rpc.proxied_url, attr)['payload']
                    if payload['value']:
                        if payload['type'] == 'redirect':
                            # A redirect names an OAG subclass to instantiate
                            # against the returned URL
                            for cls in OAG_RootNode.__subclasses__():
                                if cls.__name__==payload['class']:
                                    return cls(initurl=payload['value'])
                        else:
                            return payload['value']
                    else:
                        raise AttributeError("[%s] does not exist at [%s]" % (attr, rpc.proxied_url))
        except AttributeError:
            # Fall through to the plain attribute lookup
            pass
        return object.__getattribute__(self, attr)
    def __getitem__(self, indexinfo, preserve_cache=False):
        """Position the OAG at row ``indexinfo`` (int) or narrow the rdf
        window (slice); returns self. Raises OAError on unique OAGs."""
        self.rdf._rdf_window_index = indexinfo
        if self.is_unique:
            raise OAError("Cannot index OAG that is marked unique")
        # Moving off the current row invalidates cached oagprops unless the
        # caller explicitly asks to preserve them
        if not preserve_cache and self._iteridx != self.rdf._rdf_window_index:
            self.cache.clear()
        if type(self.rdf._rdf_window_index)==int:
            self.props._cframe = self.rdf._rdf_window[self.rdf._rdf_window_index]
        elif type(self.rdf._rdf_window_index)==slice:
            # Slicing narrows the window in place; the cframe tracks its head
            self.rdf._rdf_window = self.rdf._rdf_window[self.rdf._rdf_window_index]
            self.props._cframe = self.rdf._rdf_window[0]
        self.props._set_attrs_from_cframe()
        return self
@classmethod
def __graphsubclasses__(cls):
subclasses = cls.__subclasses__()
for subclass in cls.__subclasses__():
subclasses += subclass.__graphsubclasses__()
return subclasses
def __init__(
self,
# Implied positional args
searchprms=[],
searchidx='id',
searchwin=None,
searchoffset=None,
searchdesc=False,
# Actual Named args
heartbeat=True,
initprms={},
initurl=None,
initschema=True,
rest=False,
rpc=True,
rpc_acl=RpcACL.LOCAL_ALL,
rpc_dbupdate_listen=False,
rpc_discovery_timeout=0):
# Initialize environment
oainit(oag=self)
# Alphabetize
self._iteridx = 0
self.is_proxy = not initurl is None
#### Set up proxies
# Database API
self._db_proxy = DbProxy(self, searchprms, searchidx, searchwin, searchoffset, searchdesc, initschema)
# Relational Dataframe manipulation
self._rdf_proxy = RdfProxy(self)
# Set attributes on OAG and keep them in sync with cframe
self._prop_proxy = PropProxy(self)
# Manage oagprop state
self._cache_proxy = CacheProxy(self)
# All RPC operations
self._rpc_proxy = RpcProxy(self,
initurl=initurl,
rpc_enabled=rpc,
rpc_acl_policy=rpc_acl,
rpc_dbupdate_listen=rpc_dbupdate_listen,
rpc_discovery_timeout=rpc_discovery_timeout,
heartbeat_enabled=heartbeat)
# All REST operations
self._rest_proxy = RestProxy(self, rest_enabled=rest)
if not self._rpc_proxy.is_proxy:
self._prop_proxy._set_cframe_from_userprms(initprms, force_attr_refresh=True)
if self.db.searchprms:
self.db.search()
if self.is_unique:
self.props._set_attrs_from_cframe_uniq()
else:
self._rpc_proxy.proxied_streams = reqcls(self).register_proxy(self._rpc_proxy.proxied_url, 'proxy')['payload']
oalog.debug("Create {}, {}, {}".format(
self,
self.rpc.id if self.rpc.is_enabled else str(),
f"listening on {self.rpc.url}" if self.rpc.is_enabled else str()
), f='gc')
def __iter__(self):
if self.is_unique:
raise OAError("__iter__: Unique OAGraph object is not iterable")
else:
return self
def __next__(self):
if self.is_unique:
raise OAError("__next__: Unique OAGraph object is not iterable")
else:
if self._iteridx < self.size:
# Clear propcache
self.props.clear()
# Clear oagcache
self.cache.clear()
# Set cframe according to rdf
self.props._cframe = self.rdf._rdf_window[self._iteridx]
# Set attributes from cframe
self.props._set_attrs_from_cframe()
# Set up next iteration
self._iteridx += 1
return self
else:
self._iteridx = 0
self.cache.clear()
raise StopIteration()
    def __setattr__(self, attr, newval, fastiter=False):
        """Route attribute writes through the prop proxy, falling back to the
        default ``__setattr__`` during early construction or for stoplisted
        properties."""
        try:
            # Sanity check
            if self.rpc.is_proxy and attr in self.rpc.proxied_streams:
                raise OAError("Cannot set value on a proxy OAG")
            # Set new value
            currval = self.props.add(attr, newval, None, None, False, False, fastiter)
        except (AttributeError, OAGraphIntegrityError):
            # Attribute errors means object has not been completely
            # initialized yet; graph integrity errors mean we used
            # property manager to manage property on the stoplist.
            #
            # In either case, default to using the default __setattr__
            super(OAG_RootNode, self).__setattr__(attr, newval)
class OAG_RpcDiscoverable(OAG_RootNode):
    """Registry entry describing an RPC-discoverable OAG endpoint."""

    @property
    def is_unique(self):
        return False

    @property
    def context(self):
        return "openarc"

    @staticproperty
    def dbindices(cls):
        """Unique index over the rpc interface name."""
        return {
            # Index Name---------Elements--------Unique--Partial
            'rpcinfname_idx': [['rpcinfname'], True, None],
        }

    @staticproperty
    def streams(cls):
        return {
            'envid':      ['text',      "",   None],
            'heartbeat':  ['timestamp', "",   None],
            'listen':     ['boolean',   True, None],
            'rpcinfname': ['text',      "",   None],
            'stripe':     ['int',       0,    None],
            'type':       ['text',      "",   None],
            'url':        ['text',      "",   None],
        }

    @property
    def is_valid(self):
        """True while the last heartbeat is within the configured rpc timeout."""
        age = OATime().now - self.heartbeat
        return age < datetime.timedelta(seconds=oaenv.rpctimeout)
class OAG_RootD(OAG_RootNode):
    """Base class for daemonized OAGs: manages a pid file, a database row
    describing the running instance, and REST startup on a free stripe port."""
    @staticproperty
    def context(cls): return "openarc"
    @staticproperty
    def daemonname(cls): return cls.dbtable
    @staticproperty
    def dbindices(cls): return {
        # Non-unique index so every daemon on a host can be found
        'host' : [ ['host'], False, None ]
    }
    @staticproperty
    def streams(cls): return {
        'host' : [ 'text', str, None ],
        'port' : [ 'int', int, None ],
    }
    @staticproperty
    def streamable(cls): return False
    def __enter__(self):
        """Persist this daemon's row and write the pid file; on failure,
        exit with a non-zero code via __exit__."""
        try:
            self.db.create()
            with open(self.pidfile, 'w') as f:
                f.write(str(os.getpid()))
            return self
        except Exception as e:
            print('[STARTUP ERROR]', e)
            self.__exit__(code=1)
    def __exit__(self, *args, code=0):
        """Best-effort cleanup (db row, pid file), then exit the process.

        Also installed directly as a signal handler, so *args absorbs the
        (signum, frame) arguments as well as the context-manager triple."""
        try:
            self.db.delete()
        except Exception as e:
            print('[CLEANUP WARNING]', e)
        try:
            os.unlink(self.pidfile)
        except Exception as e:
            print('[CLEANUP WARNING]', e)
        sys.exit(code)
    def start(self, pidfile=None, cfgfile=None):
        """Locate config and pid files, claim a free stripe port, then run the
        daemon's REST service until terminated."""
        def get_cfg_file_path():
            # If cfgfile has been specified, you are lucky. If not, do song
            # and dance to figure out where it is.
            if cfgfile:
                cfg_file_path = cfgfile
            else:
                cfgname = f'{self.daemonname}.conf'
                cfg_dir = os.environ.get("XDG_CONFIG_HOME")
                if not cfg_dir:
                    # Fall back to conventional locations, first hit wins
                    for l in [f'~/.config/{cfgname}', f'/usr/local/etc/{cfgname}' ]:
                        cfg_file_path = os.path.expanduser(l)
                        if os.path.exists(cfg_file_path):
                            break
                else:
                    cfg_file_path = os.path.join(cfg_dir, f'{cfgname}')
            return cfg_file_path
        oaenv.merge_app_cfg(get_cfg_file_path())
        def get_pid_file_path():
            # Prefer an explicit pidfile, then XDG_RUNTIME_DIR, then /var/run
            if pidfile:
                return pidfile
            else:
                pidname = f'{self.daemonname}.pid'
                xdg_rdir = os.environ.get("XDG_RUNTIME_DIR")
                rdir = xdg_rdir if xdg_rdir else '/var/run'
                return f'{rdir}/{pidname}'
        self.pidfile = get_pid_file_path()
        hostname = socket.gethostname()
        # Are there too many stripes?
        allowed_ports = [oaenv.app.startport+stripe for stripe in range(oaenv.app.stripes)]
        try:
            _d = self.__class__(hostname, 'by_host')
            occupied_ports = [dd.port for dd in _d]
        except OAGraphRetrieveError as e:
            # No daemons registered on this host yet
            occupied_ports = []
        if len(occupied_ports)==len(allowed_ports):
            raise OAError("All necessary stripes are already running")
        # set up and run this daemon
        self.host = hostname
        self.port = list(set(allowed_ports)-set(occupied_ports))[0]
        with self as daemon:
            # Route termination signals through __exit__ so cleanup runs
            signal.signal(signal.SIGTERM, self.__exit__)
            signal.signal(signal.SIGINT, self.__exit__)
            daemon.REST.start(port=self.port)
| {
"repo_name": "kchoudhu/openarc",
"path": "openarc/_graph.py",
"copies": "1",
"size": "18621",
"license": "bsd-3-clause",
"hash": 1393845554706332200,
"line_mean": 30.5076142132,
"line_max": 122,
"alpha_frac": 0.5389613877,
"autogenerated": false,
"ratio": 4.00968992248062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504865131018062,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'OAuthToken',
]
class OAuthToken(object):
    """OAuth credentials base-class. Several implementations are available:
    this class:
        provides crawler what is require to authenticate against sources
        via OAuth.
    """

    def __init__(self, access_token, refresh_token=None,
                 token_secret=None, consumer_key=None, expires=None):
        self.__access_token = access_token
        self.__refresh_token = refresh_token
        self.__token_secret = token_secret
        self.__consumer_key = consumer_key
        self.__expires = expires

    @property
    def access_token(self):
        """Read-only property accessor over the
        OAuth granted access token used by Docido to gain access
        to the protected resources on behalf of the user, instead
        of using the user's service provider credentials.

        :rtype: string
        """
        return self.__access_token

    @property
    def refresh_token(self):
        """Read-only property accessor over the
        OAuth refresh token used to recreate the access token.

        :rtype: string
        """
        return self.__refresh_token

    @property
    def token_secret(self):
        """Read-only property accessor over the
        secret token provided by a service when retrieving an OAuth token.
        This property is set only when required provided by the authentication
        mechanism of the crawled service and required by crawler to fetch data.

        :rtype: string
        """
        return self.__token_secret

    @property
    def consumer_key(self):
        """Read-only property accessor over the
        Docido consumer key. This property is set when required by
        the crawler to fetch data.

        :rtype: string
        """
        return self.__consumer_key

    @property
    def expires(self):
        """Read-only property accessor over the expires field provided
        by authentication mechanism of the crawled service when token was
        acquired.

        :rtype: string
        """
        return self.__expires
| {
"repo_name": "LilliJane/docido-python-sdk",
"path": "docido_sdk/oauth/api.py",
"copies": "1",
"size": "2123",
"license": "apache-2.0",
"hash": 9063012606218279000,
"line_mean": 29.3285714286,
"line_max": 79,
"alpha_frac": 0.6241168158,
"autogenerated": false,
"ratio": 4.5853131749460045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5709429990746004,
"avg_score": null,
"num_lines": null
} |
__all__ = [
# OcTree
'OcTreeReader',
'OcTreeAppender',
]
__displayname__ = 'OcTree Mesh'
import vtk
from vtk.util import numpy_support as nps
from .. import _helpers, interface
from .two_file_base import ModelAppenderBase, ubcMeshReaderBase
with _helpers.HiddenPrints():
import discretize
class OcTreeReader(ubcMeshReaderBase):
    """This class reads a UBC OcTree Mesh file and builds a
    ``vtkUnstructuredGrid`` of the data in the file. Model File is optional.
    Reader will still construct ``vtkUnstructuredGrid`` safely.
    """

    __displayname__ = 'UBC OcTree Mesh Reader'
    __category__ = 'reader'
    description = 'PVGeo: UBC OcTree Mesh'

    def __init__(self, nOutputPorts=1, outputType='vtkUnstructuredGrid', **kwargs):
        ubcMeshReaderBase.__init__(
            self, nOutputPorts=nOutputPorts, outputType=outputType, **kwargs
        )
        # The discretize.TreeMesh once read; None until the first read
        self.__mesh = None
        # One model (array or dict of arrays) per timestep
        self.__models = []

    def ubc_octree_mesh(self, FileName, pdo=None):
        """Reads a UBC OcTree Mesh file and builds a ``vtkUnstructuredGrid``
        of the data in the file, without any data attributes.

        Args:
            FileName (str): The mesh filename as an absolute path for the input
                mesh file in UBC OcTree format.
            pdo (vtkUnstructuredGrid): A pointer to the output data object.

        Return:
            vtkUnstructuredGrid:
                a ``vtkUnstructuredGrid`` generated from the UBCMesh grid.
                Mesh is defined by the input mesh file.
                No data attributes here, simply an empty mesh. Use the
                ``place_model_on_octree_mesh()`` method to associate with model data.
        """
        try:
            self.__mesh = discretize.TreeMesh.readUBC(FileName)
        except (IOError, OSError) as fe:
            # Surface file errors as pipeline errors
            raise _helpers.PVGeoError(str(fe))
        if pdo is None:
            pdo = self.__mesh.to_vtk()
        else:
            pdo.DeepCopy(self.__mesh.to_vtk())
        return pdo

    @staticmethod
    def place_model_on_octree_mesh(mesh, model, data_name='Data'):
        """Places model data onto a mesh. This is for the UBC Grid data readers
        to associate model data with the mesh grid.

        Args:
            mesh (vtkUnstructuredGrid): The ``vtkUnstructuredGrid`` that is the
                mesh to place the model data upon. Needs to have been read in by ubcOcTree.
            model (np.ndarray or dict): A NumPy float array that holds all of the data
                to place inside of the mesh's cells, or a dict of name -> array.
            data_name (str): The name of the model data array once placed on the
                ``vtkUnstructuredGrid``.

        Return:
            vtkUnstructuredGrid:
                The input ``vtkUnstructuredGrid`` with model data appended.
        """
        if isinstance(model, dict):
            # Recurse: place each named model array individually
            for key in model.keys():
                mesh = OcTreeReader.place_model_on_octree_mesh(
                    mesh, model[key], data_name=key
                )
            return mesh
        # Make sure this model file fits the dimensions of the mesh
        numCells = mesh.GetNumberOfCells()
        if numCells < len(model):
            raise _helpers.PVGeoError(
                'This model file has more data than the given mesh has cells to hold.'
            )
        elif numCells > len(model):
            raise _helpers.PVGeoError(
                'This model file does not have enough data to fill the given mesh\'s cells.'
            )
        # This is absolutely crucial!
        # Do not play with unless you know what you are doing!
        # Also note that this assumes ``discretize`` handles adding this array
        ind_reorder = nps.vtk_to_numpy(mesh.GetCellData().GetArray('index_cell_corner'))
        model = model[ind_reorder]
        # Convert data to VTK data structure and append to output
        c = interface.convert_array(model, name=data_name, deep=True)
        # THIS IS CELL DATA! Add the model data to CELL data:
        mesh.GetCellData().AddArray(c)
        return mesh

    def __ubc_octree(self, filename_mesh, filename_models, output):
        """Wrapper to Read UBC GIF OcTree mesh and model file pairs. UBC OcTree
        models are defined using a 2-file format. The "mesh" file describes how
        the data is discretized. The "model" file lists the physical property
        values for all cells in a mesh. A model file is meaningless without an
        associated mesh file. This only handles OcTree formats.

        Args:
            filename_mesh (str): The OcTree Mesh filename as an absolute path
                for the input mesh file in UBC OcTree Mesh Format
            filename_models (list(str)): The model filenames as absolute paths for
                the input model timesteps in UBC OcTree Model Format.
            output (vtkUnstructuredGrid): The output data object

        Return:
            vtkUnstructuredGrid:
                A ``vtkUnstructuredGrid`` generated from the UBC 2D/3D Mesh grid.
                Mesh is defined by the input mesh file. Cell data is defined by
                the input model file.
        """
        if self.need_to_readMesh():
            # Construct/read the mesh
            self.ubc_octree_mesh(filename_mesh, pdo=output)
            self.need_to_readMesh(flag=False)
        output.DeepCopy(self.__mesh.to_vtk())
        if self.need_to_readModels() and self.this_has_models():
            # Read the model data
            self.__models = []
            for f in filename_models:
                # Read the model data
                self.__models.append(ubcMeshReaderBase.ubc_model_3d(f))
            self.need_to_readModels(flag=False)
        return output

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get output:
        output = self.GetOutputData(outInfo, 0)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        self.__ubc_octree(self.get_mesh_filename(), self.get_model_filenames(), output)
        # Place the model data for given timestep onto the mesh
        if len(self.__models) > i:
            self.place_model_on_octree_mesh(
                output, self.__models[i], self.get_data_name()
            )
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Pipeline method for handling requests about the grid extents and time
        step values
        """
        # Call parent to handle time stuff
        ubcMeshReaderBase.RequestInformation(self, request, inInfo, outInfo)
        # Now set whole output extent
        if self.need_to_readMesh():
            ext = self._read_extent()
            info = outInfo.GetInformationObject(0)
            # Set WHOLE_EXTENT: This is absolutely necessary
            info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1

    def clear_mesh(self):
        """Use to clean/rebuild the mesh."""
        self.__mesh = vtk.vtkUnstructuredGrid()
        ubcMeshReaderBase.clear_models(self)

    def clear_models(self):
        """Use to clean the models and reread the data"""
        self.__models = []
        ubcMeshReaderBase.clear_models(self)
###############################################################################
class OcTreeAppender(ModelAppenderBase):
    """This filter reads a timeseries of models and appends it to an input
    ``vtkUnstructuredGrid``
    """

    __displayname__ = 'UBC OcTree Mesh Appender'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        ModelAppenderBase.__init__(
            self,
            inputType='vtkUnstructuredGrid',
            outputType='vtkUnstructuredGrid',
            **kwargs
        )

    def _read_up_front(self):
        """Internal helper to read all model files once, up front."""
        load = ubcMeshReaderBase.ubc_model_3d
        self._models = [load(fname) for fname in self._model_filenames]
        self.need_to_read(flag=False)

    def _place_on_mesh(self, output, idx=0):
        """Internal helper to place the model at ``idx`` on the mesh."""
        OcTreeReader.place_model_on_octree_mesh(
            output, self._models[idx], self.get_data_name()
        )
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/ubc/octree.py",
"copies": "1",
"size": "8435",
"license": "bsd-3-clause",
"hash": 5094223481506190000,
"line_mean": 37.3409090909,
"line_max": 92,
"alpha_frac": 0.6033195021,
"autogenerated": false,
"ratio": 4.126712328767123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5230031830867123,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Node', 'BinaryTree']
class Node():
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class BinaryTree():
    """Fixed sample tree with traversal helpers.

         5
        / \
       2   3
      / \ / \
    -1  4 2  0

    Note: each traversal accumulates into a per-order list, so each
    traversal method should be called at most once per instance.
    """

    def __init__(self):
        self.root = Node(5)
        self.node1_0 = Node(2)
        self.node1_1 = Node(3)
        self.node2_0 = Node(-1)
        self.node2_1 = Node(4)
        self.node2_2 = Node(2)
        self.node2_3 = Node(0)
        self.root.left = self.node1_0
        self.root.right = self.node1_1
        self.node1_0.left = self.node2_0
        self.node1_0.right = self.node2_1
        self.node1_1.left = self.node2_2
        self.node1_1.right = self.node2_3
        # Accumulators for each traversal order
        self.seq_preorder = []
        self.seq_inorder = []
        self.seq_postorder = []

    def preorder(self):
        """Return the pre-order (root, left, right) sequence of values."""
        self.preorder_helper(self.root)
        return self.seq_preorder

    def preorder_helper(self, node):
        if node is None:
            return
        self.seq_preorder.append(node.val)
        self.preorder_helper(node.left)
        self.preorder_helper(node.right)

    def inorder(self):
        """Return the in-order (left, root, right) sequence of values."""
        self.inorder_helper(self.root)
        return self.seq_inorder

    def inorder_helper(self, node):
        if node is None:
            return
        self.inorder_helper(node.left)
        self.seq_inorder.append(node.val)
        self.inorder_helper(node.right)

    def postorder(self):
        """Return the post-order (left, right, root) sequence of values."""
        self.postorder_helper(self.root)
        # BUG FIX: previously returned the bound method ``self.postorder``
        # instead of the accumulated sequence.
        return self.seq_postorder

    def postorder_helper(self, node):
        if node is None:
            return
        self.postorder_helper(node.left)
        self.postorder_helper(node.right)
        self.seq_postorder.append(node.val)
if __name__ == "__main__":
    # Demo: build the sample tree and show its pre-order traversal
    tree = BinaryTree()
    result = tree.preorder()
print(result) | {
"repo_name": "euccas/CodingPuzzles-Python",
"path": "leet/source/binarytree/lib_binary_tree.py",
"copies": "1",
"size": "1857",
"license": "mit",
"hash": -3876208416940346400,
"line_mean": 24.8055555556,
"line_max": 51,
"alpha_frac": 0.5519655358,
"autogenerated": false,
"ratio": 3.263620386643234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43155859224432336,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Node']
import math
import uuid
import time
import struct
import random
import socket
import asyncio
import marshal
from print_colors import PrintColors
from contact import Contact
from routing_table import RoutingTable
from protocol_command import ProtocolCommand
from ping_protocol_command import PingProtocolCommand
from discover_protocol_command import DiscoverProtocolCommand
class Node(object):
    """A UDP peer in the routing-table overlay.

    Owns the socket, the routing table and the registered protocol commands;
    fragments outgoing messages into UDP packs and reassembles incoming ones.

    Fix: message ids are now drawn from [0, 2**64 - 1]. The previous
    ``random.randint(0, 2 ** 64)`` could (rarely) return 2**64, which does
    not fit struct format 'Q' and would make ``struct.pack`` raise.
    """

    def __init__(self, loop, id=None, listen_host='0.0.0.0', listen_port=6633, bootstrap=False):
        self.loop = loop
        # Generate a random identity when none is supplied
        if id is None:
            id = str(uuid.uuid4())
        self.id = id
        self.listen_host = listen_host
        self.listen_port = listen_port
        self.bootstrap = bootstrap

        # routing table
        self.rt = RoutingTable()

        # default protocol_commands
        self.protocol_commands = {}
        protocol_command = PingProtocolCommand(self, 1, 0, 0)
        self.add_protocol_command(protocol_command)
        protocol_command = DiscoverProtocolCommand(self, 1, 0, 1)
        self.add_protocol_command(protocol_command)

        # socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind((self.listen_host, self.listen_port))
        self.recv_buffer = {}  # {(remote_host: remote_port): [socket_data, ...]}
        self.recv_packs = {}  # {msg_id: {pack_index: pack_data}}
        self.loop.add_reader(self.sock, self.rect_sock_data)

        # tasks
        self.loop.call_soon(self.check_recv_buffer)
        self.loop.call_soon(self.remove_dead_contacts)

    def __repr__(self):
        return '<{} id={}>'.format(
            self.__class__.__name__,
            self.id,
        )

    #
    # protocol commands
    #
    def get_protocol_command(self, protocol_major_version, protocol_minor_version, protocol_command_code):
        """Look up a registered command by (major, minor, code); KeyError if absent."""
        k = (
            protocol_major_version,
            protocol_minor_version,
            protocol_command_code,
        )
        protocol_command = self.protocol_commands[k]
        return protocol_command

    def add_protocol_command(self, protocol_command):
        """Register a command under its version/code triple and start it."""
        k = (
            protocol_command.protocol_major_version,
            protocol_command.protocol_minor_version,
            protocol_command.protocol_command_code,
        )
        self.protocol_commands[k] = protocol_command
        protocol_command.start()

    def remove_protocol_command(self, protocol_command):
        """Stop a command and deregister it."""
        protocol_command.stop()
        k = (
            protocol_command.protocol_major_version,
            protocol_command.protocol_minor_version,
            protocol_command.protocol_command_code,
        )
        del self.protocol_commands[k]

    #
    # tasks
    #
    def remove_dead_contacts(self):
        """Periodic task: demote contacts not seen recently, then forget them.

        Timeouts scale with table size so busy tables are more tolerant.
        Reschedules itself with a jittered 15-30s delay.
        """
        t = time.time()
        move_remove_contacts = []
        remove_remove_contacts = []

        for c in self.rt.contacts:
            if c.id == self.id:
                # Never expire ourselves
                continue
            if t - c.last_seen > 15.0 + len(self.rt.contacts) + len(self.rt.add_contacts):
                move_remove_contacts.append(c)

        for c in move_remove_contacts:
            self.rt.contacts.remove(c)
            self.rt.remove_contacts.add(c)
            print(PrintColors.YELLOW + 'remove_dead_contacts:', self, c, PrintColors.END)

        for c in self.rt.remove_contacts:
            if t - c.last_seen > 30.0 + (len(self.rt.contacts) + len(self.rt.add_contacts)) * 2.0:
                remove_remove_contacts.append(c)

        for c in remove_remove_contacts:
            self.rt.remove_contacts.remove(c)
            print(PrintColors.RED + 'remove_dead_contacts:', self, c, PrintColors.END)

        self.loop.call_later(15.0 + random.random() * 15.0, self.remove_dead_contacts)

    #
    # socket
    #
    def check_recv_buffer(self):
        """Periodic task: re-drive parsing for peers with buffered bytes."""
        for remote_address, recv_buffer in self.recv_buffer.items():
            if not len(recv_buffer):
                continue
            self.process_sock_data(b'', remote_address)
        self.loop.call_later(random.random(), self.check_recv_buffer)

    def rect_sock_data(self):
        # (sic) registered as the event-loop reader callback in __init__
        data, remote_address = self.sock.recvfrom(1500)
        self.process_sock_data(data, remote_address)

    def process_sock_data(self, data, remote_address):
        """Buffer incoming bytes, peel off one pack if complete, and hand a
        fully reassembled message to parse_message."""
        remote_host, remote_port = remote_address

        if remote_address not in self.recv_buffer:
            self.recv_buffer[remote_address] = []

        self.recv_buffer[remote_address].append(data)
        recv_buffer = b''.join(self.recv_buffer[remote_address])

        pack_header_size = struct.calcsize('!QIIII')
        if len(recv_buffer) < pack_header_size:
            # Not even a header yet; keep buffering
            return

        del self.recv_buffer[remote_address][:]
        pack_header = recv_buffer[:pack_header_size]
        recv_buffer_rest = recv_buffer[pack_header_size:]
        msg_id, msg_size, msg_n_packs, pack_size, pack_index = struct.unpack('!QIIII', pack_header)

        if pack_size > len(recv_buffer_rest):
            # Incomplete pack: push header and payload back onto the buffer
            self.recv_buffer[remote_address].append(pack_header)
            self.recv_buffer[remote_address].append(recv_buffer_rest)
            return

        pack_data = recv_buffer_rest[:pack_size]
        rest_data = recv_buffer_rest[pack_size:]
        self.recv_buffer[remote_address].append(rest_data)

        if msg_id not in self.recv_packs:
            self.recv_packs[msg_id] = {}

        self.recv_packs[msg_id][pack_index] = pack_data

        if len(self.recv_packs[msg_id]) < msg_n_packs:
            # Still waiting for more packs of this message
            return

        msg = b''.join([self.recv_packs[msg_id][i] for i in range(msg_n_packs)])
        del self.recv_packs[msg_id]
        self.parse_message(msg, remote_host, remote_port)

    #
    # message
    #
    def build_message(self, protocol_major_version, protocol_minor_version, protocol_message_type, protocol_command_code, obj):
        """Serialize a 4-byte protocol header followed by the marshalled obj."""
        obj_data = marshal.dumps(obj)
        message_data = struct.pack(
            '!BBBB',
            protocol_major_version,
            protocol_minor_version,
            protocol_message_type,
            protocol_command_code,
        )
        message_data += obj_data
        return message_data

    def send_message(self, message_data, remote_host, remote_port):
        """Fragment a message into packs and send each over UDP."""
        for pack in self.build_packs(message_data):
            self.sock.sendto(pack, (remote_host, remote_port))

    def build_packs(self, message_data):
        """Yield header-prefixed packs covering message_data."""
        # 'Q' holds at most 2**64 - 1; the old inclusive 2**64 upper bound
        # could overflow struct.pack.
        message_id = random.randint(0, 2 ** 64 - 1)
        step = 1400 - 3 * 4
        pack_index = 0
        message_n_packs = int(math.ceil(len(message_data) / step))

        for s in range(0, len(message_data), step):
            e = s + step
            pack_data = message_data[s:e]
            pack = self.build_pack(message_id, len(message_data), message_n_packs, len(pack_data), pack_index, pack_data)
            pack_index += 1
            yield pack

    def build_pack(self, message_id, message_size, message_n_packs, pack_size, pack_index, pack_data):
        """Prefix pack_data with the '!QIIII' pack header."""
        pack = struct.pack('!QIIII', message_id, message_size, message_n_packs, pack_size, pack_index)
        pack += pack_data
        return pack

    def parse_message(self, message, remote_host, remote_port):
        """Decode the protocol header and dispatch to the matching command's
        request or response handler."""
        message_header_size = struct.calcsize('!BBBB')
        message_header = message[:message_header_size]
        message_data = message[message_header_size:]
        protocol_version_major, protocol_version_minor, protocol_message_type, protocol_command_code = struct.unpack('!BBBB', message_header)
        protocol_command = self.get_protocol_command(protocol_version_major, protocol_version_minor, protocol_command_code)

        if protocol_message_type == ProtocolCommand.PROTOCOL_REQ:
            if message_data:
                args, kwargs = marshal.loads(message_data)
            else:
                args, kwargs = (), {}
            protocol_command.on_req(remote_host, remote_port, *args, **kwargs)
        elif protocol_message_type == ProtocolCommand.PROTOCOL_RES:
            obj = marshal.loads(message_data)
            protocol_command.on_res(remote_host, remote_port, obj)
| {
"repo_name": "mtasic85/routingtable",
"path": "node.py",
"copies": "1",
"size": "8212",
"license": "mit",
"hash": 3484754578402063000,
"line_mean": 33.6497890295,
"line_max": 141,
"alpha_frac": 0.6109352168,
"autogenerated": false,
"ratio": 3.710799819249887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9806270441003689,
"avg_score": 0.003092919009239418,
"num_lines": 237
} |
__all__ = ["node", "remove_tail", "remove_head"]
class node(object):
    """Class representing single-linked list"""
    __slots__ = ["data", "next", "previous"]

    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def __repr__(self):
        # Show the successor's payload (or None at the tail)
        successor = None if self.next is None else self.next.data
        return "(" + repr(self.data) + ", Next: " + repr(successor) + ")"

    def __str__(self):
        successor = None if self.next is None else self.next.data
        return "(" + str(self.data) + ", Next: " + str(successor) + ")"

    def set_next(self, next):
        self.next = next
def remove_tail(node):
    """Remove the last element of the list headed by ``node``.

    Returns the (possibly new) head: the unchanged head for lists of two or
    more elements, or None when the single element was the tail.
    Raises ValueError on an empty list.

    BUG FIX: the original only rebound a local variable (``node = None``)
    after walking to the tail, so the list was returned unchanged.
    """
    if node is None:
        raise ValueError
    if node.next is None:
        # Single element: removing the tail empties the list
        return None
    cur = node
    # Stop at the second-to-last element and detach the tail
    while cur.next.next:
        cur = cur.next
    cur.next = None
    return node
def remove_head(node):
    """Return the list without its first element; raise ValueError when empty."""
    if node is None:
        raise ValueError
    rest = node.next
    return rest
if __name__ == "__main__":
    # Demo: drain a three-element list from the head, then trim a two-element
    # list's tail.
    #
    # BUG FIX: the original looped on ``while node:`` — ``node`` is the class
    # object, which is always truthy — and relied on remove_head(None)
    # raising ValueError to break out, printing a spurious "None" on the way.
    head = node(1, node(2, node(3)))
    while head is not None:
        print(head)
        head = remove_head(head)
    head = node(1, node(2))
    print(remove_tail(head))
| {
"repo_name": "vyzyv/university",
"path": "python/Zestaw9/node.py",
"copies": "1",
"size": "1264",
"license": "apache-2.0",
"hash": -1451620853688562200,
"line_mean": 24.28,
"line_max": 78,
"alpha_frac": 0.5189873418,
"autogenerated": false,
"ratio": 3.482093663911846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.938368164983423,
"avg_score": 0.02347987117552335,
"num_lines": 50
} |
# All nodes are of the form [path1, child1, path2, child2]
# or <value>
from ethereum import utils
from ethereum.db import EphemDB, ListeningDB
import rlp, sys
import copy
hashfunc = utils.sha3
HASHLEN = 32
# Binary string such as '0100000101010111...' -> ASCII text
def decode_bin(x):
    """Decode a string of '0'/'1' characters into text, 8 bits per character."""
    chars = [chr(int(x[pos:pos + 8], 2)) for pos in range(0, len(x), 8)]
    return ''.join(chars)
# ASCII text -> binary string such as '0100000101010111...'
def encode_bin(x):
    """Encode text into a string of '0'/'1' characters, 8 bits per character.

    Fix: uses floor division (``//=``) so the bit extraction behaves
    identically under Python 2 and Python 3 — the original ``c /= 2`` only
    truncates on Python 2.
    """
    o = ''
    for c in x:
        c = ord(c)
        p = ''
        for i in range(8):
            # Build the 8-bit group LSB-first into the string's tail
            p = str(c % 2) + p
            c //= 2
        o += p
    return o
# Encodes a binary list [0,1,0,1,1,0] of any length into bytes
def encode_bin_path(li):
    """Pack a list of 0/1 bits into a compact byte string; a 2-bit prefix
    records the bit-count modulo 4 so the exact length survives round-trips."""
    if li == []:
        return ''
    bits = ''.join(str(bit) for bit in li)
    padded = '0' * ((4 - len(bits)) % 4) + bits
    prefix = ['00', '01', '10', '11'][len(bits) % 4]
    if len(padded) % 8 == 4:
        return decode_bin('00' + prefix + padded)
    return decode_bin('100000' + prefix + padded)
# Decodes bytes into a binary list
def decode_bin_path(p):
    """Unpack a byte string produced by ``encode_bin_path`` back into a
    list of 0/1 bits."""
    if p == '':
        return []
    bits = encode_bin(p)
    # A leading '1' marks the long form with an extra 4-bit pad
    if bits[0] == '1':
        bits = bits[4:]
    assert bits[0:2] == '00'
    length_mod = ['00', '01', '10', '11'].index(bits[2:4])
    # Drop the prefix plus the zero padding implied by length mod 4
    bits = bits[4 + ((4 - length_mod) % 4):]
    return [1 if ch == '1' else 0 for ch in bits]
# Get a node from a database if needed
def dbget(node, db):
    """Resolve ``node``: a HASHLEN-byte value is a db key to dereference;
    anything else is already an inline node."""
    if len(node) != HASHLEN:
        return node
    return rlp.decode(db.get(node))
# Place a node into a database if needed
def dbput(node, db):
    """Store ``node`` under its hash when its RLP form is large (or exactly
    hash-sized, to stay unambiguous); otherwise return the node inline."""
    encoded = rlp.encode(node)
    if len(encoded) != HASHLEN and len(encoded) <= HASHLEN * 2:
        return node
    key = hashfunc(encoded)
    db.put(key, encoded)
    return key
# Get a value from a tree
def get(node, db, key):
    """Look up ``key`` (a list of 0/1 bits) in the trie rooted at ``node``;
    returns the stored value, or '' when the key is absent."""
    node = dbget(node, db)
    if key == []:
        # Terminal node: the value is its single element
        return node[0]
    elif len(node) == 1 or len(node) == 0:
        # Leaf/empty node but key bits remain -> not present
        return ''
    else:
        # Descend into the child selected by the first bit
        sub = dbget(node[key[0]], db)
        if len(sub) == 2:
            subpath, subnode = sub
        else:
            subpath, subnode = '', sub[0]
        subpath = decode_bin_path(subpath)
        # The child's compressed path must match the next key bits exactly
        if key[1:len(subpath)+1] != subpath:
            return ''
        return get(subnode, db, key[len(subpath)+1:])
# Get length of shared prefix of inputs
def get_shared_length(l1, l2):
    """Return the length of the longest common prefix of two sequences."""
    limit = min(len(l1), len(l2))
    for pos in range(limit):
        if l1[pos] != l2[pos]:
            return pos
    return limit
# Replace ['', v] with [v] and compact nodes into hashes
# if needed
def contract_node(n, db):
    """Normalize a two-child node in place and store oversized children
    by hash via dbput; returns the (possibly hashed) node reference.

    Fix: compares child lengths against the module constant HASHLEN rather
    than the magic literal 32 used elsewhere-inconsistently (same value,
    clearer intent, consistent with dbget/dbput).
    """
    # Collapse ['', v] (empty-path child) down to [v]
    if len(n[0]) == 2 and n[0][0] == '':
        n[0] = [n[0][1]]
    if len(n[1]) == 2 and n[1][0] == '':
        n[1] = [n[1][1]]
    # Children that are not already hash references get compacted
    if len(n[0]) != HASHLEN:
        n[0] = dbput(n[0], db)
    if len(n[1]) != HASHLEN:
        n[1] = dbput(n[1], db)
    return dbput(n, db)
# Update a trie
def update(node, db, key, val):
    """Set ``key`` (list of 0/1 bits) to ``val`` in the trie rooted at
    ``node``; returns the new root reference."""
    node = dbget(node, db)
    # Unfortunately this particular design does not allow
    # a node to have one child, so at the root for empty
    # tries we need to add two dummy children
    if node == '':
        node = [dbput([encode_bin_path([]), ''], db),
                dbput([encode_bin_path([1]), ''], db)]
    if key == []:
        # Key fully consumed: this node becomes a terminal [val]
        node = [val]
    elif len(node) == 1:
        raise Exception("DB must be prefix-free")
    else:
        assert len(node) == 2, node
        # Descend into the child selected by the first key bit
        sub = dbget(node[key[0]], db)
        if len(sub) == 2:
            _subpath, subnode = sub
        else:
            _subpath, subnode = '', sub[0]
        subpath = decode_bin_path(_subpath)
        sl = get_shared_length(subpath, key[1:])
        if sl == len(subpath):
            # Whole compressed path matches: recurse past it
            node[key[0]] = [_subpath, update(subnode, db, key[sl+1:], val)]
        else:
            # Paths diverge: split the child at the divergence point into a
            # new two-way node holding the old subtree and the new value
            subpath_next = subpath[sl]
            n = [0, 0]
            n[subpath_next] = [encode_bin_path(subpath[sl+1:]), subnode]
            n[(1 - subpath_next)] = [encode_bin_path(key[sl+2:]), [val]]
            n = contract_node(n, db)
            node[key[0]] = dbput([encode_bin_path(subpath[:sl]), n], db)
    return contract_node(node, db)
# Compression algorithm specialized for merkle proof databases
# The idea is similar to standard compression algorithms, where
# you replace an instance of a repeat with a pointer to the repeat,
# except that here you replace an instance of a hash of a value
# with the pointer of a value. This is useful since merkle branches
# usually include nodes which contain hashes of each other
magic = '\xff\x39'


def compress_db(db):
    """Serialize every value in ``db`` with embedded hash references replaced
    by magic-escaped 2-byte indices; returns the RLP of the resulting list.

    NOTE: Python 2 byte-string code (db values are ``str``).
    """
    out = []
    values = db.kv.values()
    keys = [hashfunc(x) for x in values]
    # Indices are encoded in two bytes, so the db must stay small enough
    assert len(keys) < 65300
    for v in values:
        o = ''
        pos = 0
        while pos < len(v):
            done = False
            # A literal occurrence of the escape marker is doubled
            if v[pos:pos+2] == magic:
                o += magic + magic
                done = True
                pos += 2
            # A hash of another stored value becomes magic + 2-byte index
            for i, k in enumerate(keys):
                if v[pos:].startswith(k):
                    o += magic + chr(i // 256) + chr(i % 256)
                    done = True
                    pos += len(k)
                    break
            if not done:
                o += v[pos]
                pos += 1
        out.append(o)
    return rlp.encode(out)
def decompress_db(ins):
    """Inverse of ``compress_db``: rebuild an EphemDB from the RLP-encoded
    list of index-compressed values.

    NOTE: Python 2 byte-string code (``ord`` over str slices).
    """
    ins = rlp.decode(ins)
    vals = [None] * len(ins)
    def decipher(i):
        # Lazily (and recursively) reconstruct value i, memoized in vals
        if vals[i] is None:
            v = ins[i]
            o = ''
            pos = 0
            while pos < len(v):
                if v[pos:pos+2] == magic:
                    if v[pos+2:pos+4] == magic:
                        # Doubled marker decodes to a literal magic
                        o += magic
                    else:
                        # 2-byte index -> hash of the referenced value
                        ind = ord(v[pos+2]) * 256 + ord(v[pos+3])
                        o += hashfunc(decipher(ind))
                    pos += 4
                else:
                    o += v[pos]
                    pos += 1
            vals[i] = o
        return vals[i]
    for i in range(len(ins)):
        decipher(i)
    o = EphemDB()
    for v in vals:
        o.put(hashfunc(v), v)
    return o
# Convert a merkle branch directly into RLP (ie. remove
# the hashing indirection). As it turns out, this is a
# really compact way to represent a branch
def compress_branch(db, root):
    """Encode the merkle branch rooted at *root* directly as nested RLP.

    Every hash reference into *db* is replaced inline by the node it points
    to, removing the hashing indirection entirely.  Inverse of
    ``decompress_branch``.
    """
    tree = dbget(copy.copy(root), db)
    def evaluate_node(x):
        for i in range(len(x)):
            if len(x[i]) == HASHLEN and x[i] in db.kv:
                # Hash reference: replace it with the expanded node.
                x[i] = evaluate_node(dbget(x[i], db))
            elif isinstance(x[i], list):
                # BUGFIX: recurse only into list children.  The original
                # tested ``isinstance(x, list)`` (the parent, always a list
                # here), which sent plain string children through a useless
                # extra character-by-character traversal.
                x[i] = evaluate_node(x[i])
        return x
    return rlp.encode(evaluate_node(tree))
def decompress_branch(branch):
    """Rebuild an ``EphemDB`` from an RLP-encoded merkle branch.

    Inverse of ``compress_branch``: every list node in the decoded structure
    is stored back into a fresh database, re-creating the hash indirection
    that ``compress_branch`` removed.
    """
    decoded = rlp.decode(branch)
    store = EphemDB()
    def rebuild(item):
        # Store each (recursively rebuilt) list node and return its reference;
        # non-list items pass through unchanged.
        if isinstance(item, list):
            item = dbput([rebuild(child) for child in item], store)
        return item
    rebuild(decoded)
    return store
# Test with n nodes and k branch picks
def test(n, m=100):
    """Build a trie with *n* key/value pairs, then audit *m* lookups.

    For each probed key the listened-to subset of the database (the merkle
    proof) is compressed and decompressed both ways, verifying both round
    trips.  Returns a dict of aggregate size statistics.
    """
    assert m <= n
    db = EphemDB()
    x = ''
    for i in range(n):
        k = hashfunc(str(i))
        v = hashfunc('v'+str(i))
        x = update(x, db, [int(a) for a in encode_bin(rlp.encode(k))], v)
    print(x)
    # BUGFIX: EphemDB stores its entries in ``kv`` (see every other usage in
    # this function); the original ``db.db.items()`` raised AttributeError.
    print(sum([len(val) for key, val in db.kv.items()]))
    l1 = ListeningDB(db)
    o = 0  # accumulated raw proof size
    p = 0  # accumulated compressed-proof size
    q = 0  # accumulated branch size
    ecks = x
    for i in range(m):
        x = copy.deepcopy(ecks)
        k = hashfunc(str(i))
        v = hashfunc('v'+str(i))
        l2 = ListeningDB(l1)
        v2 = get(x, l2, [int(a) for a in encode_bin(rlp.encode(k))])
        assert v == v2
        o += sum([len(val) for key, val in l2.kv.items()])
        # Round-trip the proof database through the compressor.
        cdb = compress_db(l2)
        p += len(cdb)
        assert decompress_db(cdb).kv == l2.kv
        # Round-trip the branch representation as well.
        cbr = compress_branch(l2, x)
        q += len(cbr)
        dbranch = decompress_branch(cbr)
        assert v == get(x, dbranch, [int(a) for a in encode_bin(rlp.encode(k))])
        # for k in l2.kv:
        #     assert k in dbranch.kv
    return {
        'total_db_size': sum([len(val) for key, val in l1.kv.items()]),
        # BUGFIX: average the accumulated proof sizes; the original repeated
        # the total database size here instead of using the accumulator ``o``.
        'avg_proof_size': (o / min(n, m)),
        'avg_compressed_proof_size': (p / min(n, m)),
        'avg_branch_size': (q / min(n, m)),
        'compressed_db_size': len(compress_db(l1))
    }
| {
"repo_name": "harlantwood/pyethereum",
"path": "ethereum/tests/bintrie.py",
"copies": "7",
"size": "8038",
"license": "mit",
"hash": 8548760521041949000,
"line_mean": 26.8131487889,
"line_max": 81,
"alpha_frac": 0.5051007713,
"autogenerated": false,
"ratio": 3.132501948558067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7137602719858067,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'OMFReader',
]
import omf
import omfvista
import vtk
from .. import _helpers
from ..base import ReaderBaseBase
class OMFReader(ReaderBaseBase):
    """Reads an OMF (Open Mining Format) project file into a
    ``vtkMultiBlockDataSet`` with one block per user-selected element."""
    __displayname__ = 'OMF Project Reader'
    __category__ = 'reader'
    extensions = 'omf'
    description = 'PVGeo: Open Mining Format Project'
    def __init__(self):
        ReaderBaseBase.__init__(self, nOutputPorts=1, outputType='vtkMultiBlockDataSet')
        # Properties:
        # User-facing selection of which project elements to load; changes
        # mark this algorithm modified so the pipeline re-executes.
        self._dataselection = vtk.vtkDataArraySelection()
        self._dataselection.AddObserver(
            "ModifiedEvent", _helpers.create_modified_callback(self)
        )
        # Element names discovered in the project (filled by _read_up_front).
        self.__names = []
        # Cache of already-wrapped elements, keyed by element name.
        self.__data = dict()
        # The parsed omf project object.
        self.__project = None
    def Modified(self, read_again=False):
        """Ensure default is overridden to be false so array selector can call."""
        ReaderBaseBase.Modified(self, read_again=read_again)
    def modified(self, read_again=False):
        """Ensure default is overridden to be false so array selector can call."""
        return self.Modified(read_again=read_again)
    def get_file_name(self):
        """Super class has file names as a list but we will only handle a single
        project file. This provides a convenient way of making sure we only
        access that single file.
        A user could still access the list of file names using ``get_file_names()``.
        """
        return ReaderBaseBase.get_file_names(self, idx=0)
    #### Methods for performing the read ####
    def _read_up_front(self):
        """Internal function to read all data at the start"""
        # Read all elements
        reader = omf.OMFReader(self.get_file_name())
        self.__project = reader.get_project()
        self.__names = [e.name for e in self.__project.elements]
        # Register every element name with the selection widget.
        for n in self.__names:
            self._dataselection.AddArray(n)
        self.need_to_read(flag=False)
        return 1
    def _get_raw_data(self):
        """Converts OMF data to VTK data objects."""
        # Now iterate over the elements and add converted data to the data dict:
        data = dict()
        for e in self.__project.elements:
            if self._dataselection.ArrayIsEnabled(e.name):
                # Wrap each element lazily and cache the result.
                if e.name not in self.__data:
                    self.__data[e.name] = omfvista.wrap(e)
                data[e.name] = self.__data[e.name]
        return data
    #### pipeline methods ####
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get data for current timestep and populate the output data object."""
        # Get output:
        # output = self.GetOutputData(outInfo, 0)
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        # Perform the read
        if self.need_to_read():
            self._read_up_front()
        data = self._get_raw_data()
        # Set number of blocks based on user choice in the selection
        output.SetNumberOfBlocks(self._dataselection.GetNumberOfArraysEnabled())
        blk = 0
        # iterate over data set to produce output based on users selection
        keys = data.keys()
        for name in keys:
            output.SetBlock(blk, data[name])
            # Label the block so it shows up with its element name in ParaView.
            output.GetMetaData(blk).Set(vtk.vtkCompositeDataSet.NAME(), name)
            blk += 1
        return 1
    #### Getters / Setters ####
    def GetDataSelection(self):
        """Return the current user selection of data elements"""
        if self.need_to_read():
            self._read_up_front()
        return self._dataselection
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/gmggroup/reader.py",
"copies": "1",
"size": "3555",
"license": "bsd-3-clause",
"hash": 7241441361133668000,
"line_mean": 34.55,
"line_max": 100,
"alpha_frac": 0.6129395218,
"autogenerated": false,
"ratio": 4.086206896551724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199146418351723,
"avg_score": null,
"num_lines": null
} |
"""All non private names (no leading underscore) here are part of the tox API.
They live in the tox namespace and can be accessed as tox.[NAMESPACE.]NAME
"""
import os
import re
import sys
_THIS_FILE = os.path.realpath(os.path.abspath(__file__))
class PYTHON:
    # Matches environment factors like "py27", "pypy3" or "jython"; a bare
    # "py" is excluded by the negative lookahead.
    PY_FACTORS_RE = re.compile("^(?!py$)(py|pypy|jython)([2-9][0-9]?[0-9]?)?$")
    CURRENT_RELEASE_ENV = "py37"
    """Should hold currently released py -> for easy updating"""
    QUICKSTART_PY_ENVS = ["py27", "py35", "py36", CURRENT_RELEASE_ENV, "pypy", "jython"]
    """For choices in tox-quickstart"""
class INFO:
    # Default and candidate configuration file names, in lookup order.
    DEFAULT_CONFIG_NAME = "tox.ini"
    CONFIG_CANDIDATES = ("pyproject.toml", "tox.ini", "setup.cfg")
    # Host interpreter/platform flags.
    IS_WIN = sys.platform == "win32"
    IS_PYPY = hasattr(sys, "pypy_version_info")
class PIP:
    # Single-letter pip options that take an argument (e.g. ``-r file``).
    SHORT_OPTIONS = ["c", "e", "r", "b", "t", "d"]
    # Long-form pip options that take an argument.
    LONG_OPTIONS = [
        "build",
        "cache-dir",
        "client-cert",
        "constraint",
        "download",
        "editable",
        "exists-action",
        "extra-index-url",
        "global-option",
        "find-links",
        "index-url",
        "install-options",
        "prefix",
        "proxy",
        "no-binary",
        "only-binary",
        "requirement",
        "retries",
        "root",
        "src",
        "target",
        "timeout",
        "trusted-host",
        "upgrade-strategy",
    ]
    # The same options rendered exactly as they appear on a command line.
    INSTALL_SHORT_OPTIONS_ARGUMENT = ["-{}".format(option) for option in SHORT_OPTIONS]
    INSTALL_LONG_OPTIONS_ARGUMENT = ["--{}".format(option) for option in LONG_OPTIONS]
# Helper scripts shipped with tox that are executed inside target interpreters.
_HELP_DIR = os.path.join(os.path.dirname(_THIS_FILE), "helper")
VERSION_QUERY_SCRIPT = os.path.join(_HELP_DIR, "get_version.py")
SITE_PACKAGE_QUERY_SCRIPT = os.path.join(_HELP_DIR, "get_site_package_dir.py")
BUILD_REQUIRE_SCRIPT = os.path.join(_HELP_DIR, "build_requires.py")
BUILD_ISOLATED = os.path.join(_HELP_DIR, "build_isolated.py")
# Naming scheme for per-environment result files written during parallel runs.
PARALLEL_RESULT_JSON_PREFIX = ".tox-result"
PARALLEL_RESULT_JSON_SUFFIX = ".json"
| {
"repo_name": "gaborbernat/tox",
"path": "src/tox/constants.py",
"copies": "2",
"size": "1972",
"license": "mit",
"hash": -1714734453821942500,
"line_mean": 29.3384615385,
"line_max": 88,
"alpha_frac": 0.5978701826,
"autogenerated": false,
"ratio": 3.175523349436393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.977077092862921,
"avg_score": 0.0005245206814366056,
"num_lines": 65
} |
"""All non private names (no leading underscore) here are part of the tox API.
They live in the tox namespace and can be accessed as tox.[NAMESPACE.]NAME
"""
import re
import sys
def _construct_default_factors(cpython_versions, pypy_versions, other_interpreters):
    """Build the mapping from tox environment factor to interpreter name.

    ``cpython_versions`` and ``pypy_versions`` are iterables of
    ``(major, minor)`` tuples; ``other_interpreters`` are interpreter names
    mapped to themselves.
    """
    factors = {"py": sys.executable, "py2": "python2", "py3": "python3"}
    for major, minor in cpython_versions:
        factors["py{}{}".format(major, minor)] = "python{}.{}".format(major, minor)
    # Version-less pypy factors resolve to the executable of the same name.
    for alias in ("pypy", "pypy2", "pypy3"):
        factors[alias] = alias
    for major, minor in pypy_versions:
        factors["pypy{}{}".format(major, minor)] = "pypy{}.{}".format(major, minor)
    for interpreter in other_interpreters:
        factors[interpreter] = interpreter
    return factors
class PYTHON:
    # Matches environment factors like "py27", "pypy3" or "jython"; a bare
    # "py" is excluded by the negative lookahead and handled separately.
    PY_FACTORS_RE = re.compile("^(?!py$)(py|pypy|jython)([2-9][0-9]?)?$")
    PY_FACTORS_MAP = {"py": "python", "pypy": "pypy", "jython": "jython"}
    # (major, minor) pairs used to build the default factor -> executable map.
    CPYTHON_VERSION_TUPLES = [(2, 7), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8)]
    PYPY_VERSION_TUPLES = [(2, 7), (3, 5)]
    OTHER_PYTHON_INTERPRETERS = ["jython"]
    DEFAULT_FACTORS = _construct_default_factors(
        CPYTHON_VERSION_TUPLES, PYPY_VERSION_TUPLES, OTHER_PYTHON_INTERPRETERS
    )
    CURRENT_RELEASE_ENV = "py36"
    """Should hold currently released py -> for easy updating"""
    QUICKSTART_PY_ENVS = ["py27", "py34", "py35", CURRENT_RELEASE_ENV, "pypy", "jython"]
    """For choices in tox-quickstart"""
class INFO:
    # Default and candidate configuration file names, in lookup order.
    DEFAULT_CONFIG_NAME = "tox.ini"
    CONFIG_CANDIDATES = ("pyproject.toml", "tox.ini", "setup.cfg")
    # True when running on Windows.
    IS_WIN = sys.platform == "win32"
class PIP:
    # Single-letter pip options that take an argument (e.g. ``-r file``).
    SHORT_OPTIONS = ["c", "e", "r", "b", "t", "d"]
    # Long-form pip options that take an argument.
    LONG_OPTIONS = [
        "build",
        "cache-dir",
        "client-cert",
        "constraint",
        "download",
        "editable",
        "exists-action",
        "extra-index-url",
        "global-option",
        "find-links",
        "index-url",
        "install-options",
        "prefix",
        "proxy",
        "no-binary",
        "only-binary",
        "requirement",
        "retries",
        "root",
        "src",
        "target",
        "timeout",
        "trusted-host",
        "upgrade-strategy",
    ]
    # The same options rendered exactly as they appear on a command line.
    INSTALL_SHORT_OPTIONS_ARGUMENT = ["-{}".format(option) for option in SHORT_OPTIONS]
    INSTALL_LONG_OPTIONS_ARGUMENT = ["--{}".format(option) for option in LONG_OPTIONS]
| {
"repo_name": "Avira/tox",
"path": "src/tox/constants.py",
"copies": "1",
"size": "2525",
"license": "mit",
"hash": -3817310658239025700,
"line_mean": 31.3717948718,
"line_max": 92,
"alpha_frac": 0.575049505,
"autogenerated": false,
"ratio": 3.3938172043010755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9464446394085684,
"avg_score": 0.0008840630430784743,
"num_lines": 78
} |
__all__ = [
'openvpn',
]
from pathlib import Path
import logging
from garage import apps
from garage import scripts
LOG = logging.getLogger(__name__)
@apps.with_prog('copy-client')
@apps.with_help('copy generated client data')
@apps.with_argument('cadir', type=Path, help='provide easy-rsa cadir')
@apps.with_argument('client', help='provide client name')
@apps.with_argument('target', type=Path, help='set target directory')
def copy_client(args):
    """Copy generated client data to another directory."""
    keys_dir = args.cadir / 'keys'
    sources = [keys_dir / (args.client + suffix) for suffix in ('.crt', '.key')]
    if safe_copy(sources, args.target):
        return 0
    return 1
@apps.with_prog('copy-server')
@apps.with_help('copy generated server data')
@apps.with_argument('cadir', type=Path, help='provide easy-rsa cadir')
@apps.with_argument('target', type=Path, help='set target directory')
def copy_server(args):
    """Copy generated server data to another directory."""
    keys_dir = args.cadir / 'keys'
    filenames = ('ca.crt', 'ca.key', 'server.crt', 'server.key', 'dh2048.pem')
    sources = [keys_dir / name for name in filenames]
    if safe_copy(sources, args.target):
        return 0
    return 1
def safe_copy(srcs, dst_dir):
    """Copy each of *srcs* into *dst_dir* without overwriting anything.

    Every problem (missing source, pre-existing destination) is logged before
    giving up so the user sees all issues at once.  Returns True on success,
    False otherwise.
    """
    problems = False
    for src in srcs:
        if not src.exists():
            LOG.error('source file does not exist: %s', src)
            problems = True
        destination = dst_dir / src.name
        if destination.exists():
            LOG.error('attempt to overwrite: %s', destination)
            problems = True
    if problems:
        return False
    scripts.mkdir(dst_dir)
    for src in srcs:
        scripts.cp(src, dst_dir)
    return True
@apps.with_prog('make-ovpn')
@apps.with_help('make .ovpn file')
@apps.with_argument(
    'server_dir', type=Path, help='provide directory of server credentials')
@apps.with_argument(
    'client_dir', type=Path, help='provide directory of client credentials')
@apps.with_argument('config', help='provide config file name')
@apps.with_argument('client', help='provide client name')
@apps.with_argument('output', type=Path, help='set output .ovpn file')
def make_ovpn(args):
    """Make .ovpn file."""
    config_path = args.client_dir / args.config
    ca_path = args.server_dir / 'ca.crt'
    cert_path = args.client_dir / (args.client + '.crt')
    key_path = args.client_dir / (args.client + '.key')
    # By the way, ta.key (not ca.key) is the TLS-auth key, which is
    # generated by server, not by easy-rsa
    tls_auth_path = args.server_dir / 'ta.key'
    parts = [config_path, ca_path, cert_path, key_path, tls_auth_path]
    missing = [part for part in parts if not part.exists()]
    for part in missing:
        LOG.error('file does not exist: %s', part)
    if missing:
        return 1
    if args.output.exists():
        LOG.warning('overwrite %s', args.output)
    # Inline the credentials into the config under their respective tags.
    sections = [config_path.read_text()]
    for tag, path in (('ca', ca_path), ('cert', cert_path),
                      ('key', key_path), ('tls-auth', tls_auth_path)):
        sections.extend(['<%s>\n' % tag, path.read_text(), '</%s>\n' % tag])
    scripts.ensure_contents(args.output, ''.join(sections))
    return 0
@apps.with_help('manage openvpn')
@apps.with_apps(
    'operation', 'operation on openvpn',
    copy_client,
    copy_server,
    make_ovpn,
)
def openvpn(args):
    """Manage OpenVPN.
    We assume that you are using easy-rsa because that seems to be
    what OpenVPN recommends.
    """
    # Dispatch to the sub-command selected on the command line.
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/mob/openvpn.py",
"copies": "1",
"size": "3445",
"license": "mit",
"hash": -1464552997948908300,
"line_mean": 28.9565217391,
"line_max": 76,
"alpha_frac": 0.6040638607,
"autogenerated": false,
"ratio": 3.4142715559960357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45183354166960354,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'OrionClient',
)
import json
import logging
import os
import re
import socket
import time
import warnings
from datetime import date, datetime

import requests
__verify__=False
try:
import certifi
__verify__ = certifi.where(), # Path to the Certifi bundle.
except:
if os.path.isfile("/etc/ssl/certs/ca-certificates.crt"):
__verify__="/etc/ssl/certs/ca-certificates.crt"
warnings.filterwarnings("ignore", message="^.*Unverified HTTPS request is being made.*$")
warnings.filterwarnings("ignore", message="^.*A true SSLContext object is not available.*$")
class ErrorValue(object):
    """Falsy container for error details returned by ``OrionClient.do_request``.

    Whatever keyword arguments are supplied become attributes, so callers can
    inspect e.g. ``err.status_code`` after a failed request.
    """
    def __init__(self, **details):
        self.__dict__.update(details)
    def __nonzero__(self):
        # Python 2 truthiness hook: an ErrorValue always evaluates False.
        return False
    def __repr__(self):
        rendered = '\n,'.join('%s=%r' % pair for pair in self.__dict__.iteritems())
        return 'ErrorValue(%s)' % rendered
class OrionClient(object):
    '''
    OrionClient creates a client for a fiware orion server

    NOTE: this module targets Python 2 (it relies on ``iteritems`` and
    ``basestring``).

    orion_host_url is the prefix of the orion requests
    orion_token_url is the token request url
                    eg "https://orion.lab.fiware.org/token"
    username user id
    password user password
    authMethod one of None, 'fiware-token'
    logger None or the name of a logger to use
    timeout 10 seconds
    verify None or path to a requests certs folder
    '''
    authMethods = ('fiware-token','inline','request')
    _orion_version_str = 'v1'
    def __init__(self,
            orion_host_url,
            orion_host_port=1026,
            orion_token_url=None,
            username=None,
            password=None,
            authMethod='fiware-token',
            logger=None,
            timeout=10,
            verify=__verify__,
            ):
        if authMethod:
            if authMethod not in self.authMethods:
                raise ValueError('authMethod=%r not in %r' % (authMethod,self.authMethods))
            if None in (username,password):
                raise ValueError('need both username and password for authMethod=%r' % authMethod)
        self.authMethod = authMethod
        self.username = username
        self.password = password
        self.orion_host_url = self.clean_url(orion_host_url)
        self.orion_host_port = orion_host_port
        self.orion_token_url = self.clean_url(orion_token_url)
        # Cached auth token and its expiry time (epoch seconds).
        self.fiware_auth_token = None
        self.fiware_auth_expiry = None
        self.logger = logging.getLogger(logger) if logger else None
        self.timeout = timeout
        self.verify = verify
    @property
    def token(self,duration=3600):
        '''return an authorisation token from the token request url

        The token is cached and reused until it expires.  ``duration`` cannot
        be overridden through property access, so tokens are assumed valid
        for one hour.
        '''
        if self.fiware_auth_token:
            if self.fiware_auth_expiry > time.time():
                return self.fiware_auth_token
        r = requests.post(
            self.orion_token_url,
            json=dict(
                username=self.username,
                password=self.password,
            ),
            timeout=self.timeout,
        )
        if r.status_code != 200:
            raise ValueError("Cannot get Orion token: %s" % r.text)
        token = r.text
        self.fiware_auth_token = token
        self.fiware_auth_expiry = time.time() + duration - 1
        return token
    @property
    def orion_host_prefix(self):
        '''base url of the orion server, host plus port'''
        return "%s:%s" % (self.orion_host_url,self.orion_host_port)
    @property
    def orion_entities_url(self):
        '''base url for entity requests'''
        return "%s/%s/contextEntities" % (self.orion_host_prefix,self._orion_version_str)
    @property
    def orion_entitytypes_url(self):
        '''base url for entity-type requests'''
        return "%s/%s/contextEntityTypes" % (self.orion_host_prefix,self._orion_version_str)
    @staticmethod
    def _make_url(*args):
        '''join url fragments with single slashes, skipping None values'''
        return '/'.join(a.strip('/') for a in args if a is not None)
    @staticmethod
    def clean_url(url):
        '''normalise *url*: ensure an http/https scheme and strip any
        trailing slash; falsy values are returned unchanged'''
        if url:
            if (not url.startswith('http://')) and (not url.startswith('https://')):
                url = "http://" + url
            return url.rstrip('/')
        return url
    @staticmethod
    def pydict_to_orion(d):
        """Convert Python dictionary to the top level format for Orion
        The top level in Orion has to be expanded to name/type/value
        MY_DATA = {
            "attributes": [
                {
                    "name": "race_name",
                    "type": "string",
                    "value": "Sunday Fun Run"
                },
                {
                    "name": "start_list",
                    "type": "T",
                    "value": START_LIST
                }
            ]
        }
        """
        orion_attrs = []
        for key, val in d.iteritems():
            if key != '_about':
                # NOTE: bool must be tested before int in a reordering --
                # here the int test comes first, so True/False are encoded
                # as "integer"; preserved as-is from the original.
                if isinstance(val, int):
                    orion_attrs.append(dict(name=key, type="integer", value=val))
                elif isinstance(val, datetime):
                    orion_attrs.append(dict(name=key, type="datetime",
                                            value=val.isoformat()))
                elif isinstance(val, bool):
                    orion_attrs.append(dict(name=key, type="boolean", value=val))
                elif isinstance(val, basestring):
                    orion_attrs.append(dict(name=key, type="string",
                                            value=val))
                elif isinstance(val, float):
                    orion_attrs.append(dict(name=key, type="float", value=val))
                elif isinstance(val, (list, dict)):
                    orion_attrs.append(dict(name=key, type="T", value=val))
                else:
                    raise ValueError("Don't know how to encode top-level attribute %s in orion" % val)
        return dict(attributes=orion_attrs)
    @staticmethod
    def orion_to_py(orion):
        '''convert an orion context response into plain python dicts'''
        if 'contextResponses' in orion:
            return dict((x['contextElement']['id'],OrionClient.orion_to_py(x)) for x in orion['contextResponses'] if 'contextElement' in x)
        #We get this extra level with the Santander thing, but not needed for ourselves
        if "contextElement" in orion:
            orion = orion["contextElement"]
        return dict((attrdict["name"],attrdict["value"]) for attrdict in orion.get("attributes",[]))
    def get_headers(self, exclude_content_type=False):
        '''build the request headers, adding the auth token when needed'''
        HEADERS = {
            'Accept': 'application/json',
        }
        if not exclude_content_type:
            HEADERS['Content-Type'] = 'application/json'
        if self.authMethod=='fiware-token':
            HEADERS['X-Auth-Token'] = self.token
        return HEADERS
    @property
    def orion_version(self):
        '''return the orion version'''
        url = "%s/version" % self.orion_host_prefix
        headers = self.get_headers()
        r = requests.get(
            url,
            verify=self.verify,
            headers=headers,
        )
        return r.status_code,r.json()
    def create_entity(self, entity_id, type_id="", **attributes):
        """Create or replace the given entity in Orion
        create_entity(
            entity_id,                      #string id
            type_id=optional_type_id,       #string
            attr0=value0,
            attr1=value1,
            ....
            )
        """
        data =self.pydict_to_orion(attributes)
        if type_id:
            data['type'] = type_id
        return self.do_request(
            'post',
            self._make_url(self.orion_entities_url,entity_id),
            data = json.dumps(data, cls=self.DateTimeEncoder),
            )
    def update_entity(self, entity_id, orionify=True, **kwds):
        '''update a specified entitity
        update_entity(
            'entity_id',
            attr0=value0,
            .....
            )'''
        return self.do_request(
            'post',
            self._make_url(self.orion_entities_url,entity_id,"attributes"),
            data = json.dumps(self.pydict_to_orion(kwds) if orionify else kwds, cls=self.DateTimeEncoder),
            )
    def fetch_entity(self, entity_id=None, type_id=None, attribute=None):
        '''fetch some or a specified entity
        fetch_entity(
            entity_id='entity_id',
            type_id='type_id',
            attribute='attribute',
            )
        if entity_id is specified then that specific entity is returned
        else if type_id is specified we return entities of that type.
        If attribute is specified only that attribute will be returned else
        all attributes will be returned.
        In the multiple return case we return a dictionary with keys the entity ids.
        '''
        if entity_id:
            url = (self.orion_entities_url,entity_id)
        elif type_id:
            url = (self.orion_entitytypes_url,type_id)
        else:
            url = (self.orion_entities_url,)
        if attribute:
            url = url + ('attributes',attribute)
        r = self.do_request(
            'get',
            self._make_url(*url),
            )
        if r:
            if 'statusCode' in r:
                if r["statusCode"].get("code", None) == u'404':
                    return None
            return self.orion_to_py(r)
        else:
            # Propagate the falsy ErrorValue unchanged.
            return r
    def fetch_attribute(self, entity_id, attribute):
        '''
        fetch_attribute(
            'entity_id',
            'attribute'.
            )
        return the specified attribute from the specified entity.
        '''
        # BUGFIX: do_request already returns parsed JSON (or a falsy
        # ErrorValue); the original called ``.json()`` on that dict and
        # always raised AttributeError.
        resp = self.do_request(
            'get',
            self._make_url(self.orion_entities_url,entity_id,"attributes",attribute),
            )
        if not resp:
            # Propagate the falsy ErrorValue unchanged.
            return resp
        if 'statusCode' in resp:
            if resp["statusCode"].get("code", None) == u'404':
                return None
        return resp["attributes"][0]["value"]
    def update_attribute(self, entity_id, attribute, newdata):
        '''update_attribute(
            'entity_id',
            'attribute'.
            )
        update the speficied attribute of the specified entity'''
        return self.do_request(
            'put',
            self._make_url(self.orion_entities_url,entity_id,"attributes",attribute),
            data = json.dumps({"value": newdata}),
            )
    def delete_entity(self, entity_id):
        '''delete_entity('entity_id')
        delete the specified entity'''
        return self.do_request(
            'delete',
            self._make_url(self.orion_entities_url,entity_id),
            )
    def cancel_subscription(self,subscr_id):
        '''cancel a previously created change subscription'''
        return self.do_request(
            'post',
            "%s/%s/unsubscribeContext" % (self.orion_host_prefix,self._orion_version_str),
            json={'subscriptionId': subscr_id},
            )
    def setup_notification(self, entity_id, attributes=[],
            callback_url="localhost:8000/fw/orion-notify/",
            duration="1M", # defaults to 1 Month
            poll_period="1S", # defaults to 1 second
            ):
        """Register for the Orion server to post change notices on the entity.
        :: duration := follows https://en.wikipedia.org/wiki/ISO_8601, already
        prefixed with 'P'
        :: poll_period := follows https://en.wikipedia.org/wiki/ISO_8601, already
        prefixed with 'PT' (we only accept periods below 24h :) )
        """
        if not callback_url.startswith('http'):
            callback_url = "%s%s" % (get_local_host(), callback_url)
        msg = {
            "entities": [
                {
                    "type": "",
                    "isPattern": "false",
                    "id": entity_id,
                }
            ],
            "attributes": attributes,
            "reference": callback_url,
            "duration": "P%s" % duration,
            "notifyConditions": [
                {
                    "type": "ONCHANGE",
                    "condValues": attributes
                }
            ],
            "throttling": "PT%s" % poll_period
        }
        # BUGFIX: there is no module-level ``logger``; use the optional
        # instance logger instead of raising NameError on every call.
        if self.logger:
            self.logger.info('Subscribing: %s', json.dumps(msg))
        return self.do_request(
            'post',
            "%s/%s/subscribeContext" % (self.orion_host_prefix,self._orion_version_str),
            json=msg,
            )
    def do_request(self,verb,url,**kwds):
        '''perform an HTTP request; return parsed JSON on success, otherwise
        a falsy ErrorValue describing the failure'''
        r = getattr(requests,verb)(
            url,
            headers=self.get_headers(),
            # BUGFIX: honour the verify setting given to the constructor
            # (as orion_version already does) instead of always disabling
            # certificate verification.
            verify=self.verify,
            timeout=self.timeout,
            **kwds
            )
        if r.status_code == 200:
            errors = self.has_orion_error(r.json())
            if errors:
                return ErrorValue(orion_errors=errors)
            else:
                return r.json()
        try:
            return ErrorValue(status_code=r.status_code,json=r.json())
        except:
            return ErrorValue(status_code=r.status_code,content=r.content)
    class DateTimeEncoder(json.JSONEncoder):
        '''JSON encoder serialising datetime/date values as strings'''
        def default(self, obj):
            if isinstance(obj, datetime):
                return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
            elif isinstance(obj, date):
                # ``date`` must be imported from datetime at module level;
                # the original referenced it without importing it.
                return obj.strftime('%Y-%m-%d')
            # Let the base class default method raise the TypeError
            return json.JSONEncoder.default(self, obj)
    @staticmethod
    def has_orion_error(content):
        '''return a formatted error string if *content* carries an
        orionError, otherwise False'''
        if 'orionError' in content:
            err = json.dumps(content['orionError'], indent=4)
            return err
        return False
| {
"repo_name": "openath/orion-python-client",
"path": "src/orionclient.py",
"copies": "1",
"size": "14597",
"license": "apache-2.0",
"hash": -2659943635836631000,
"line_mean": 35.9544303797,
"line_max": 139,
"alpha_frac": 0.4996232102,
"autogenerated": false,
"ratio": 4.365131578947368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5364754789147368,
"avg_score": null,
"num_lines": null
} |
__all__ = ['normalize_baseline',
'normalize_spikes',
'moving_average']
import numpy as np
def normalize_baseline(signals, beta=1, epsilon=0.01,
mode='online', function=None):
if function is None:
function = np.sum
if mode == 'online':
norm_signals = _normalize_baseline_online(signals, beta, epsilon, function)
elif mode == 'offline':
norm_signals = _normalize_baseline_offline(signals, beta, epsilon, function)
else:
raise ValueError
return norm_signals
def _normalize_baseline_online(signals, beta, epsilon, function):
e = epsilon
norm_signals = [[((signal[i] + e) / (function(signal[0:(i + 1)], 0) + e))
** beta for i in range(len(signal))] for signal in signals]
return np.array(norm_signals)
def _normalize_baseline_offline(signals, beta, epsilon, function):
e = epsilon
norm = function(signals, 1)[:, None]
norm_signals = ((signals + e) / (norm + e)) ** beta
return norm_signals
def normalize_spikes(signals, alpha=1.2):
norm_signals = (np.abs(signals[:, 1:] - signals[:, 0:-1])) ** alpha
return norm_signals
def moving_average(signals, n=1):
cs = np.cumsum(signals, 1)
cs[:, n:] = cs[:, n:] - cs[:, :-n]
norm_signals = cs[:, (n - 1):] / n
return norm_signals
| {
"repo_name": "norbert/hearsay",
"path": "hearsay/normalizations.py",
"copies": "1",
"size": "1348",
"license": "mit",
"hash": 2889178657685772000,
"line_mean": 28.9555555556,
"line_max": 84,
"alpha_frac": 0.5979228487,
"autogenerated": false,
"ratio": 3.613941018766756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4711863867466756,
"avg_score": null,
"num_lines": null
} |
__all__ = ('Notification')
from datetime import datetime
from pocketthrone.entities.enum import MessageImportancy, NotificationCategory
class Notification:
    """An in-game notification carrying an importancy level, a category,
    a title and a body text."""
    # Identifier assigned by the creator; -1 while unset.
    _id = -1
    # properties defining unset type & category of this notification
    importancy = None
    category = None
    # title & message content
    title = "<untitled>"
    body = "<no content>"
    def __init__(self, _id, importancy=MessageImportancy.IMPORTANCY_UNSET, category=NotificationCategory.NOTI_UNSET, body=None, title=None):
        # set initial properties
        self._id = _id
        self.importancy = importancy
        self.category = category
        self.title = title
        # BUGFIX: the original assigned ``self.content = content``, which
        # raised NameError -- the parameter is named ``body``.
        self.body = body
    def get_importancy(self):
        '''returns the MessageImportancy of this Notification'''
        return self.importancy
    def set_importancy(self, value):
        '''sets MessageImportancy of this Notification'''
        self.importancy = value
    def get_category(self):
        '''returns the NotificationCategory of this Notification'''
        return self.category
    def set_category(self, value):
        '''sets NotificationCategory of this Notfication'''
        self.category = value
    def set_title_n_body(self, title, body):
        '''sets both the title and the body text at once'''
        self.title = title
        self.body = body
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/entities/notification.py",
"copies": "2",
"size": "1162",
"license": "bsd-2-clause",
"hash": 1006579319018396700,
"line_mean": 25.4090909091,
"line_max": 137,
"alpha_frac": 0.7375215146,
"autogenerated": false,
"ratio": 3.5753846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026500836972326237,
"num_lines": 44
} |
"""All notifications."""
ERR_NO_LOGIN = 0
NOTIF_LOGIN_STAFF = 1
NOTIF_LOGIN_STUDENT = 2
NOTIF_SETTING_UPDATED = 3
NOTIF_SETTING_ONE_TYPE = 4
NOTIF_INQUIRY_PLACED = 5
NOTIF_HELP_DONE = 6
notifications = {
ERR_NO_LOGIN: 'You are attempting to <b>disable both login methods</b> - '
'Google login and Default login. To change from one login method to the '
'other, enable both first, then disable one! Settings unchanged.',
NOTIF_LOGIN_STAFF: 'Hello there! <b>Click on "start helping" in the '
'bottom-right corner</b> to start helping.',
NOTIF_LOGIN_STUDENT: 'Hello there! Click on <b>"request help" in the '
'bottom-right corner</b> to enqueue yourself.',
NOTIF_SETTING_UPDATED: 'Settings have been updated.',
NOTIF_SETTING_ONE_TYPE: 'You have "Inquiry Types" enabled, but there is'
' only one type for students to choose from! Recommended: update the '
'"Inquiry Types" field below with a comma-separated list of values, or '
'disable it.',
NOTIF_INQUIRY_PLACED: 'Your request has been placed! A staff member will '
'be in touch soon.',
NOTIF_HELP_DONE: 'All inquiries for your location are processed!'
}
| {
"repo_name": "alvinwan/quupod",
"path": "quupod/notifications.py",
"copies": "2",
"size": "1166",
"license": "apache-2.0",
"hash": 6732502609909076000,
"line_mean": 42.1851851852,
"line_max": 78,
"alpha_frac": 0.6981132075,
"autogenerated": false,
"ratio": 3.303116147308782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 27
} |
__all__ = ['Notifier']
# #############################################################################
# ########## Libraries #############
# ##################################
# standard library
from ctypes import *
# 3rd party module
from win10toast import ToastNotifier
# ############################################################################
# ########### Classes ##############
# ##################################
class PowerClass(Structure):
    """ctypes structure filled in by ``kernel32.GetSystemPowerStatus``.

    Field layout mirrors the Win32 SYSTEM_POWER_STATUS structure -- see the
    Windows API documentation for the exact value semantics of each field.
    """
    _fields_ = [('ACLineStatus', c_byte),
                ('BatteryFlag', c_byte),
                ('BatteryLifePercent', c_byte),  # 0-100, or 255 when unknown (per Win32 docs -- verify)
                ('Reserved1', c_byte),
                ('BatteryLifeTime', c_ulong),
                ('BatteryFullLifeTime', c_ulong)]
class Notifier():
    """Static helpers around the Windows power API and toast notifications."""
    @staticmethod
    def get_battery_level():
        """Return the battery charge percentage reported by Windows."""
        status = PowerClass()
        windll.kernel32.GetSystemPowerStatus(byref(status))
        return status.BatteryLifePercent
    @staticmethod
    def get_battery_line():
        """Return the AC line status reported by Windows."""
        status = PowerClass()
        windll.kernel32.GetSystemPowerStatus(byref(status))
        return status.ACLineStatus
    @staticmethod
    def create_plug_out_toast():
        """Show a 10-second toast asking the user to unplug the charger."""
        ToastNotifier().show_toast(
            "Battery Charged",
            "To maintain the optimal battery life,kindly unplug the charger now!!",
            duration=10)
        return None
    @staticmethod
    def create_plug_in_toast():
        """Show a 10-second toast asking the user to plug in the charger."""
        ToastNotifier().show_toast(
            "Low Battery",
            "To maintain the optimal battery life,kindly plug in the charger now!!",
            duration=10)
        return None
| {
"repo_name": "arunkumarpalaniappan/win10batteryoptimizer",
"path": "win10batteryoptimizer/__init__.py",
"copies": "1",
"size": "1927",
"license": "mit",
"hash": -6732947108844285000,
"line_mean": 34.0363636364,
"line_max": 88,
"alpha_frac": 0.5256875973,
"autogenerated": false,
"ratio": 4.5663507109004735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5592038308200473,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'OutlineContinents',
'GlobeSource',
]
import numpy as np
import pyvista as pv
import vtk
from .. import interface
from ..base import AlgorithmBase
class OutlineContinents(AlgorithmBase):
    """A simple data source to produce a ``vtkEarthSource`` outlining the
    Earth's continents. This works well with our ``GlobeSource``.
    """
    __displayname__ = 'Outline Continents'
    __category__ = 'source'
    def __init__(self, radius=6371.0e6):
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1, outputType='vtkPolyData'
        )
        # Sphere radius used when generating the outline.
        self.__radius = radius
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate the output"""
        pdo = self.GetOutputData(outInfo, 0)
        earth = vtk.vtkEarthSource()
        earth.SetRadius(self.__radius)
        earth.OutlineOn()  # produce outlines rather than filled continents
        earth.Update()
        foo = pv.wrap(earth.GetOutput())
        # Copy the geometries only
        continents = pv.PolyData()
        continents.points = foo.points.copy()
        continents.lines = foo.lines.copy()
        del foo
        pdo.ShallowCopy(continents)
        return 1
    def set_radius(self, radius):
        """Set the radius of the globe. Default is 6371.0e6 (meters),
        matching the constructor default."""
        if self.__radius != radius:
            self.__radius = radius
            self.Modified()
###############################################################################
class GlobeSource(AlgorithmBase):
    """Create a globe/sphere the size of the Earth with texture coordinates
    already mapped. The globe's center is assumed to be (0,0,0).

    Args:
        radius (float): the radius to use
        npar (int): the number of parallels (latitude)
        nmer (int): the number of meridians (longitude)
    """

    __displayname__ = 'Globe Source'
    __category__ = 'source'

    def __init__(self, radius=6371.0e6, npar=15, nmer=36, **kwargs):
        AlgorithmBase.__init__(
            self, nInputPorts=0, nOutputPorts=1, outputType='vtkPolyData'
        )
        self.__radius = radius
        self.__npar = npar
        self.__nmer = nmer
        # TODO: use **kwargs

    def spherical_to_cartesian(self, meridian, parallel):
        """Convert longitude/latitude (given in degrees) to Cartesian XYZ."""
        theta = np.radians(meridian)
        phi = np.radians(parallel)
        xyz = (
            self.__radius * np.cos(phi) * np.cos(theta),
            self.__radius * np.cos(phi) * np.sin(theta),
            self.__radius * np.sin(phi),
        )
        return np.vstack(xyz).T

    def create_sphere(self):
        """Build longitude/latitude positions as 2D points along with the
        matching texture coordinates for those positions."""
        meridians = np.linspace(-180.0, 180.0, self.__nmer)
        parallels = np.linspace(-90.0, 90.0, self.__npar)
        mer_grid, par_grid = np.meshgrid(meridians, parallels, indexing='ij')
        pos = np.vstack([mer_grid.ravel(), par_grid.ravel()]).T
        # Texture coordinates span the unit square over the same grid.
        u_grid, v_grid = np.meshgrid(
            np.linspace(0.0, 1.0, len(meridians)),
            np.linspace(0.0, 1.0, len(parallels)),
            indexing='ij',
        )
        tex = np.vstack([u_grid.ravel(), v_grid.ravel()]).T
        return pos, tex

    def build_globe(self):
        """Generate the globe as ``vtkPolyData``."""
        # NOTE: https://gitlab.kitware.com/paraview/paraview/issues/19417
        from scipy.spatial import Delaunay

        pos, tex = self.create_sphere()
        xyz = self.spherical_to_cartesian(pos[:, 0], pos[:, 1])
        points = interface.points_to_poly_data(xyz).GetPoints()
        texcoords = interface.convert_array(tex, name='Texture Coordinates')
        # Triangulate in lon/lat space to obtain the cell connectivity.
        connectivity = Delaunay(pos).simplices.astype(int)
        cells = vtk.vtkCellArray()
        cells.SetNumberOfCells(connectivity.shape[0])
        cells.SetCells(
            connectivity.shape[0], interface.convert_cell_conn(connectivity)
        )
        # Assemble the output polydata.
        globe = vtk.vtkPolyData()
        globe.SetPoints(points)
        globe.GetPointData().SetTCoords(texcoords)
        globe.SetPolys(cells)
        return globe

    def RequestData(self, request, inInfo, outInfo):
        """Called by the VTK pipeline to generate the output."""
        pdo = self.GetOutputData(outInfo, 0)
        pdo.ShallowCopy(self.build_globe())
        return 1

    def set_radius(self, radius):
        """Set the radius of the globe (default: 6371.0e6)."""
        if self.__radius != radius:
            self.__radius = radius
            self.Modified()

    def set_n_meridians(self, n):
        """Set how many meridians (longitude lines) to use."""
        if self.__nmer != n:
            self.__nmer = n
            self.Modified()

    def set_n_parallels(self, n):
        """Set how many parallels (latitude lines) to use."""
        if self.__npar != n:
            self.__npar = n
            self.Modified()
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/model_build/earth.py",
"copies": "1",
"size": "5033",
"license": "bsd-3-clause",
"hash": 3898846965213843000,
"line_mean": 32.7785234899,
"line_max": 86,
"alpha_frac": 0.5827538248,
"autogenerated": false,
"ratio": 3.700735294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4783489118917647,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'override_settings',
)
try:
    from django.test.utils import override_settings
except ImportError:
    # Back-port override_settings from Django 1.4
    # https://github.com/django/django/blob/stable/1.4.x/django/test/utils.py
    from django.conf import settings, UserSettingsHolder
    from django.utils.functional import wraps
    class override_settings(object):
        """
        Acts as either a decorator, or a context manager. If it's a decorator it
        takes a function and returns a wrapped function. If it's a contextmanager
        it's used with the ``with`` statement. In either event entering/exiting
        are called before and after, respectively, the function/block is executed.
        """
        def __init__(self, **kwargs):
            # Settings to override, plus the pristine settings wrapper so
            # disable() can restore it afterwards.
            self.options = kwargs
            self.wrapped = settings._wrapped
        def __enter__(self):
            self.enable()
        def __exit__(self, exc_type, exc_value, traceback):
            self.disable()
        def __call__(self, test_func):
            from django.test import TransactionTestCase
            if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
                # Decorating a TestCase class: wrap its setup/teardown hooks
                # so the overrides are active for every test method.
                original_pre_setup = test_func._pre_setup
                original_post_teardown = test_func._post_teardown
                def _pre_setup(innerself):
                    self.enable()
                    original_pre_setup(innerself)
                def _post_teardown(innerself):
                    original_post_teardown(innerself)
                    self.disable()
                test_func._pre_setup = _pre_setup
                test_func._post_teardown = _post_teardown
                return test_func
            else:
                # Decorating a plain function: run it inside this context
                # manager so the overrides apply only for the call.
                @wraps(test_func)
                def inner(*args, **kwargs):
                    with self:
                        return test_func(*args, **kwargs)
            return inner
        def enable(self):
            # Layer the overrides on top of the current settings object.
            override = UserSettingsHolder(settings._wrapped)
            for key, new_value in self.options.items():
                setattr(override, key, new_value)
            settings._wrapped = override
            # No setting_changed signal in Django 1.3
            # for key, new_value in self.options.items():
            #     setting_changed.send(sender=settings._wrapped.__class__,
            #                          setting=key, value=new_value)
        def disable(self):
            # Restore the settings wrapper captured at construction time.
            settings._wrapped = self.wrapped
            # No setting_changed signal in Django 1.3
            # for key in self.options:
            #     new_value = getattr(settings, key, None)
            #     setting_changed.send(sender=settings._wrapped.__class__,
            #                          setting=key, value=new_value)
| {
"repo_name": "simpleenergy/djsailthru",
"path": "djsailthru/tests/utils.py",
"copies": "1",
"size": "2785",
"license": "mit",
"hash": -7606871024102275000,
"line_mean": 38.7857142857,
"line_max": 90,
"alpha_frac": 0.5587073609,
"autogenerated": false,
"ratio": 4.595709570957096,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006796857817665787,
"num_lines": 70
} |
__all__ = [
'PackedBinariesReader',
'MadagascarReader',
]
__displayname__ = 'Binary/Serialized File I/O'
import warnings
import numpy as np
import vtk
from .. import _helpers, interface
from ..base import ReaderBase
class PackedBinariesReader(ReaderBase):
    """This reads in float or double data that is packed into a binary file
    format. It will treat the data as one long array and make a ``vtkTable``
    with one column of that data. The reader uses defaults to import as floats
    with native endianness. Use the Table to Uniform Grid or the Reshape Table
    filters to give more meaning to the data. We chose to use a ``vtkTable``
    object as the output of this reader because it gives us more flexibility in
    the filters we can apply to this data down the pipeline and keeps thing
    simple when using filters in this repository.
    """

    __displayname__ = 'Packed Binaries Reader'
    __category__ = 'reader'
    extensions = 'H@ bin rsf rsf@ HH npz'
    description = 'PVGeo: Packed Binaries Reader'

    def __init__(self, **kwargs):
        ReaderBase.__init__(self, nOutputPorts=1, outputType='vtkTable', **kwargs)
        # Other Parameters
        self.__data_name = kwargs.get('dataname', 'Data')
        # Keep the one-character type code separate from the resolved numpy
        # dtype so the setters below can detect genuine changes.
        self.__dtypechar = kwargs.get('dtype', 'f')
        self.__endian = kwargs.get('endian', '')
        self.__dtype, self.__vtktype = interface.get_dtypes(
            dtype=self.__dtypechar, endian=self.__endian
        )
        # Data objects to hold the read data for access by the pipeline methods
        self.__data = []

    def _read_raw_file(self, filename):
        """Internal helper to read the raw data from the file."""
        dtype = self.__dtype
        if dtype == np.dtype('>f'):
            # Checks if big-endian and fixes read
            dtype = np.dtype('f')
        try:
            arr = np.fromfile(filename, dtype=dtype)
        except (IOError, OSError) as fe:
            raise _helpers.PVGeoError(str(fe))
        # Cast back to the requested dtype (restores the endianness).
        return np.asarray(arr, dtype=self.__dtype)

    def _get_file_contents(self, idx=None):
        """Internal helper to get the contents of all files (or one by index)."""
        if idx is not None:
            filenames = [self.get_file_names(idx=idx)]
        else:
            filenames = self.get_file_names()
        contents = []
        for f in filenames:
            contents.append(self._read_raw_file(f))
        if idx is not None:
            return contents[0]
        return contents

    def _read_up_front(self):
        """Should not need to be overridden."""
        # Perform Read
        self.__data = self._get_file_contents()
        self.need_to_read(flag=False)
        return 1

    def _get_raw_data(self, idx=0):
        """Return the data array for the given timestep."""
        return self.__data[idx]

    def convert_array(self, arr):
        """Convert the numpy array to a ``vtkDataArray``."""
        # Put raw data into vtk array
        data = interface.convert_array(
            arr, name=self.__data_name, deep=True, array_type=self.__vtktype
        )
        return data

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to request data for current timestep."""
        # Get output:
        output = vtk.vtkTable.GetData(outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        # Generate the data object
        arr = self._get_raw_data(idx=i)
        data = self.convert_array(arr)
        output.AddColumn(data)
        return 1

    #### Seters and Geters ####

    def set_endian(self, endian):
        """Set the endianness of the data file.

        Args:
            endian (int or char): no preference = '' or 0, Little = 1 or `<` or Big = 2 `>`.
        """
        pos = ['', '<', '>']
        if isinstance(endian, int):
            endian = pos[endian]
        if endian != self.__endian:
            self.__endian = endian
            self.__dtype, self.__vtktype = interface.get_dtypes(
                dtype=self.__dtypechar, endian=self.__endian
            )
            self.Modified()

    def get_endian(self):
        """Get the endianness of the data file."""
        return self.__endian

    def set_data_type(self, dtype):
        """Set the data type of the binary file: `double='d'`, `float='f'`, `int='i'`"""
        pos = ['d', 'f', 'i']
        if isinstance(dtype, int):
            dtype = pos[dtype]
        # BUG FIX: compare against the stored type *character* (symmetric with
        # set_endian). The previous comparison was against the resolved numpy
        # dtype, mixing a char code with a dtype object, so change detection
        # was unreliable (e.g. when a non-native endianness was in effect).
        if dtype != self.__dtypechar:
            self.__dtypechar = dtype
            self.__dtype, self.__vtktype = interface.get_dtypes(
                dtype=self.__dtypechar, endian=self.__endian
            )
            self.Modified()

    def get_data_types(self):
        """Get the numpy and VTK data types of the binary file."""
        return self.__dtype, self.__vtktype

    def set_data_name(self, data_name):
        """The string name of the data array generated from the inut file."""
        if data_name != self.__data_name:
            self.__data_name = data_name
            self.Modified(read_again=False)  # Don't re-read. Just request data again

    def get_data_name(self):
        """Get name used for the data array."""
        return self.__data_name
class MadagascarReader(PackedBinariesReader):
    """This reads in float or double data that is packed into a Madagascar
    binary file format with a leader header. The reader ignores all of the ascii
    header details by searching for the sequence of three special characters:
    EOL EOL EOT and it will treat the followng binary packed data as one long
    array and make a ``vtkTable`` with one column of that data. The reader uses
    defaults to import as floats with native endianness. Use the Table to
    Uniform Grid or the Reshape Table filters to give more meaning to the data.
    We will later implement the ability to create a gridded volume from the
    header info. This reader is a quick fix for Samir. We chose to use a
    ``vtkTable`` object as the output of this reader because it gives us more
    flexibility in the filters we can apply to this data down the pipeline and
    keeps thing simple when using filters in this repository.
    `Details Here`_.

    .. _Details Here: http://www.ahay.org/wiki/RSF_Comprehensive_Description#Single-stream_RSF
    """

    __displayname__ = 'Madagascar SSRSF Reader'
    __category__ = 'reader'
    # extensions are inherrited from PackedBinariesReader
    description = 'PVGeo: Madagascar Single Stream RSF Files'

    def __init__(self, **kwargs):
        PackedBinariesReader.__init__(self, **kwargs)

    def _read_raw_file(self, filename):
        """Read the binary payload of a Madagascar SSRSF file.

        The ASCII header is skipped by locating the EOL EOL EOT control
        sequence; everything after it is parsed as packed binary data.
        """
        dtype, _ = self.get_data_types()
        CTLSEQ = b'\014\014\004'  # Control sequence separating header from data
        with open(filename, 'rb') as handle:
            raw = handle.read()
        idx = raw.find(CTLSEQ)
        if idx == -1:
            warnings.warn(
                'This is not a single stream RSF format file. Treating entire file as packed binary data.'
            )
        else:
            raw = raw[idx:]  # deletes the header
            raw = raw.replace(CTLSEQ, b'')  # removes the control sequence
        # BUG FIX: ``np.fromstring`` is deprecated (and removed in newer
        # numpy) for binary input; ``np.frombuffer`` is the supported
        # equivalent. Copy so the returned array stays writable, matching
        # the old ``fromstring`` behavior.
        return np.frombuffer(raw, dtype=dtype).copy()
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/readers/binaries.py",
"copies": "1",
"size": "7433",
"license": "bsd-3-clause",
"hash": -9131626530113603000,
"line_mean": 37.1179487179,
"line_max": 110,
"alpha_frac": 0.6056773846,
"autogenerated": false,
"ratio": 4.015667206915181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.512134459151518,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Parser',
'LineParser',
]
class Parser(object):
    """Abstract incremental data parser.

    Subclasses implement :meth:`parseRaw`; this base class manages the
    cache of unparsed data and the end-of-stream flag.
    """

    # Class-level defaults; instances shadow these on first assignment.
    done = False
    cache = ''

    def clear(self):
        """Empty the cache and hand back whatever it held."""
        pending = self.cache
        self.cache = ''
        return pending

    def prepend(self, data):
        """Push data back onto the front of the cache."""
        if data:
            self.cache = data + self.cache

    def append(self, data):
        """Add data at the end of the cache."""
        if data:
            self.cache = self.cache + data

    def parse(self, data):
        """Feed a chunk of data to the parser. Returns parsed bits if available."""
        if data:
            self.cache += data
        if self.done:
            return ()
        output = []
        while self.cache and not self.done:
            chunk = self.clear()
            bits = self.parseRaw(chunk)
            if bits is None:
                # parseRaw needs more data before it can produce anything
                break
            output.extend(bits)
        return output

    def finish(self):
        """Signal end of input. Returns parsed bits if available."""
        self.done = True
        return ()

    def parseRaw(self, data):
        """Process one chunk of data; must be supplied by subclasses."""
        raise NotImplementedError
class LineParser(Parser):
    """Parser that alternates between line-oriented and raw-data modes."""

    # True: split input into lines; False: hand raw chunks to parseData.
    linemode = True

    def parseRaw(self, data):
        """Dispatch one chunk either as a complete line or as raw data."""
        if not self.linemode:
            return self.parseData(data)
        pos = data.find('\n')
        if pos < 0:
            # No complete line yet; stash the partial data for later.
            self.prepend(data)
            return None
        # Strip an optional '\r' before the '\n' (CRLF line endings).
        end = pos - 1 if pos > 0 and data[pos - 1] == '\r' else pos
        line = data[:end]
        self.prepend(data[pos + 1:])
        return self.parseLine(line)

    def setLineMode(self, extra=''):
        """Switch to line mode, optionally pushing back leftover data."""
        if extra:
            self.prepend(extra)
        self.linemode = True

    def setDataMode(self, extra=''):
        """Switch to data mode, optionally pushing back leftover data."""
        if extra:
            self.prepend(extra)
        self.linemode = False

    def parseLine(self, line):
        """Handle one complete line; must be supplied by subclasses."""
        raise NotImplementedError

    def parseData(self, data):
        """Handle one raw chunk; must be supplied by subclasses."""
        raise NotImplementedError
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/parsers.py",
"copies": "1",
"size": "2477",
"license": "mit",
"hash": -9059625923754811000,
"line_mean": 26.5222222222,
"line_max": 82,
"alpha_frac": 0.5151392814,
"autogenerated": false,
"ratio": 4.503636363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5518775645036365,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'patch_getaddrinfo',
]
import functools
import socket
import threading
# Make an alias because we will monkey-patch it.
from socket import getaddrinfo as _getaddrinfo
from garage.collections import LruCache
class CachedGetaddrinfo:
    """Cache getaddrinfo result for a certain number for queries."""

    class CacheEntry:
        # One cached lookup result plus how many times it has been served.
        def __init__(self, result):
            self.result = result
            self.num_queried = 0

    def __init__(
            self,
            expiration=1024,
            capacity=32,
            *,
            getaddrinfo_func=None):
        self._lock = threading.Lock()
        self._expiration = expiration
        self._cache = LruCache(capacity)
        self._getaddrinfo_func = getaddrinfo_func or _getaddrinfo

    @functools.wraps(_getaddrinfo)
    def __call__(self, host, port, family=0, type=0, proto=0, flags=0):
        key = (host, port, family, type, proto, flags)
        with self._lock:
            entry = self._cache[key] if key in self._cache else None
            if entry is not None:
                entry.num_queried += 1
                if entry.num_queried < self._expiration:
                    return entry.result
            # Miss (or expired entry): resolve again and cache the result.
            # NOTE(review): the DNS lookup runs while holding the lock.
            fresh = self.CacheEntry(
                self._getaddrinfo_func(
                    host, port,
                    family=family,
                    type=type,
                    proto=proto,
                    flags=flags,
                ),
            )
            self._cache[key] = fresh
            return fresh.result
def patch_getaddrinfo():
    """Monkey-patch ``socket.getaddrinfo`` with a caching wrapper.

    The original function remains reachable through the module-level
    ``_getaddrinfo`` alias captured at import time.
    """
    socket.getaddrinfo = CachedGetaddrinfo()
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/sockets.py",
"copies": "1",
"size": "1599",
"license": "mit",
"hash": 1472404502663211800,
"line_mean": 27.0526315789,
"line_max": 71,
"alpha_frac": 0.5428392745,
"autogenerated": false,
"ratio": 4.28686327077748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 57
} |
__all__ = [
'PJS'
]
def escape_value(value):
    """Wrap string values in double quotes; pass everything else through."""
    # Deliberate exact-type check (Python 2 code: both str and unicode).
    if type(value) in (str, unicode):
        return '"{0}"'.format(value)
    return value
class Attr(dict):
    """A dict describing a JS attribute; renders as ``namespace.attr_name``."""

    def __str__(self):
        if not self['namespace']:
            return self['attr_name']
        return u'{namespace}.{attr_name}'.format(**self)
class AttrAccessor(object):
    """Descriptor exposing one DOM attribute on a Node-like owner."""

    def __init__(self, attr_name):
        self.attr_name = attr_name

    def __get__(self, obj, owner):
        # Reading produces an Attr bound to the owner's JS namespace.
        attr = Attr()
        attr['namespace'] = obj.namespace
        attr['attr_name'] = self.attr_name
        return attr

    def __set__(self, obj, value):
        # Writing records a JS assignment statement on the root context.
        statement = u'{0}.{1} = {2}'.format(
            obj.namespace, self.attr_name, escape_value(value))
        obj.root.context.nodes.append(statement)

    def __del__(self, obj):
        pass
class Node(object):
    """Builds a JavaScript IIFE around a DOM element selection."""

    html = AttrAccessor('innerHTML')
    text = AttrAccessor('innerText')

    def __init__(self, selector=None, parent=None, level=0, multi=False, var=None, namespace=''):
        self.selector = selector
        self.parent = parent
        self.level = level + 1
        self.multi = multi
        self.nodes = []
        self.var = var
        # Derive a fresh JS identifier: extend the parent namespace by
        # repeating its last character, or fall back to the var's initial.
        if namespace:
            self.namespace = namespace + namespace[-1]
        else:
            self.namespace = var[0]
        self.delim = u'\n' + '\t' * self.level
        if parent:
            self.root = parent.root
        else:
            self.root = self
            self.context = self

    def __enter__(self):
        self.root.context = self
        return self

    def __exit__(self, e, v, t):
        self.root.context = self.parent
        if self.parent:
            self.parent.add_node(self)

    def add_node(self, node):
        """Record a rendered child node on this node."""
        self.nodes.append(node.render())

    def get_selector(self):
        """Return the JS expression that selects this node's element."""
        if self.selector:
            suffix = 'All' if self.multi else ''
            return u'{0}.querySelector{1}("{2}")'.format(
                self.parent.namespace, suffix, self.selector)
        if self.parent:
            return u'{0}.{1}'.format(self.parent.namespace, self.var)
        return self.var

    def e(self, selector):
        """Select a single matching child element."""
        return Node(selector=selector, parent=self, level=self.level,
                    multi=False, namespace=self.namespace)

    def el(self, selector):
        """Select all matching child elements."""
        return Node(selector=selector, parent=self, level=self.level,
                    multi=True, namespace=self.namespace)

    def render(self):
        """Render this node and its children as a JS IIFE string."""
        body = ';{0}'.format(self.delim).join(self.nodes)
        return u'(function({0}){{{3}{2};{4}}})({1})'.format(
            self.namespace,
            self.get_selector(),
            body,
            self.delim,
            self.delim[:-1],
        )
class Window(Node):
    """A top-level Node representing the browser ``window`` object."""
    @property
    def document(self):
        """Return a child Node bound to ``window.document``."""
        return Node(var='document', parent=self, level=1)
class PJSDescriptor(object):
    """Descriptor that builds a fresh node of ``klass`` on every read."""

    def __init__(self, klass, kwargs):
        self.klass = klass
        self.kwargs = kwargs

    def __get__(self, obj, owner):
        # Instantiate lazily and remember the node on the owning object.
        node = self.klass(**self.kwargs)
        obj._node = node
        return node

    def __set__(self, obj, value):
        # Assignment is deliberately a no-op.
        pass
class PJS(object):
    """Entry point for building PhantomJS-style scripts.

    The ``window``/``document`` descriptors create root nodes; attribute
    assignment on this object records JS statements on the active node
    context.
    """

    window = PJSDescriptor(Window, {'var': 'window'})
    document = PJSDescriptor(Node, {'var': 'document'})
    _node = None

    def __init__(self):
        self._node = None

    def __enter__(self):
        return self

    def __exit__(self, e, v, t):
        pass

    def __getattr__(self, attr):
        # BUG FIX: ``dict.has_key`` was removed in Python 3; the ``in``
        # operator is the equivalent (and identical on Python 2).
        if attr in vars(self):
            return vars(self)[attr]
        else:
            # Unknown names become bare Attr objects usable in generated JS.
            return Attr(attr_name=attr, namespace=None)

    def __setattr__(self, attr, value):
        if attr in ['_node', 'window', 'document']:
            vars(self)[attr] = value
        else:
            # Any other assignment is recorded as a JS statement.
            self._node.context.nodes.append(u'{0} = {1}'.format(attr, escape_value(value)))

    def var(self, var):
        """Record a ``var`` declaration from a name, a list of names, or a
        name-to-value mapping."""
        defines = []
        if type(var) == list:
            defines.extend(var)
        elif type(var) == dict:
            defines.extend(['{0} = {1}'.format(name, escape_value(value)) for name, value in var.items()])
        else:
            defines.append(var)
        self._node.context.nodes.append(u'var {0}'.format(', '.join(defines)))

    def render(self):
        """Render the accumulated script for the current root node."""
        return self._node.render()
| {
"repo_name": "veeloox/ramen",
"path": "ramen/pjs.py",
"copies": "1",
"size": "4180",
"license": "apache-2.0",
"hash": 8469163223985016000,
"line_mean": 25.125,
"line_max": 122,
"alpha_frac": 0.5397129187,
"autogenerated": false,
"ratio": 3.7965485921889193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.980314092465862,
"avg_score": 0.00662411724605991,
"num_lines": 160
} |
__all__ = [
'pods',
]
from pathlib import Path
import json
import logging
from garage import apps
from garage import scripts
from garage.assertions import ASSERT
from ops import models
from . import deps
from . import repos
LOG = logging.getLogger(__name__)
# Note that
# deploy_copy
# deploy_create_pod_manifest
# deploy_create_volumes
# deploy_fetch
# deploy_install_units
# and
# undeploy_remove
# are inverse operations to each other.
### Deploy
def deploy_copy(_, bundle_pod, local_pod):
    """Copy pod data from bundle to the pod directory, and return a new
    pod object representing the pod directory.
    All remote files will be fetched and stored in the pod directory
    except images.
    Specifically, this will create these under POD_DIR:
    * pod.json
    * images/...
    * systemd/...
    * volume-data/...
    """
    LOG.info('%s - copy', bundle_pod)
    with scripts.using_sudo():
        # Generate new pod.json
        scripts.mkdir(local_pod.pod_object_path.parent)
        scripts.tee(
            (json.dumps(local_pod.to_pod_data(), indent=4, sort_keys=True)
             .encode('ascii')),
            local_pod.pod_object_path,
        )
        # Copy systemd unit files
        scripts.mkdir(local_pod.pod_systemd_path)
        # Bundle and local pod must describe the same units, pairwise.
        ASSERT.equal(
            len(bundle_pod.systemd_units),
            len(local_pod.systemd_units),
        )
        pairs = zip(bundle_pod.systemd_units, local_pod.systemd_units)
        for bundle_unit, local_unit in pairs:
            ASSERT.equal(bundle_unit.unit_name, local_unit.unit_name)
            # The unit file may be a local path or a remote URI.
            _cp_or_wget(bundle_unit, 'unit_file', local_unit.unit_file_path)
            if local_unit.checksum:
                scripts.ensure_checksum(
                    local_unit.unit_file_path, local_unit.checksum)
        # Copy image files (but if it's URI, leave it to `rkt fetch`)
        scripts.mkdir(local_pod.pod_images_path)
        ASSERT.equal(len(bundle_pod.images), len(local_pod.images))
        pairs = zip(bundle_pod.images, local_pod.images)
        for bundle_image, local_image in pairs:
            ASSERT.equal(bundle_image.id, local_image.id)
            if bundle_image.image_path:
                scripts.cp(bundle_image.image_path, local_image.image_path)
            if bundle_image.signature:
                scripts.cp(bundle_image.signature, local_image.signature)
        # Copy volume data
        scripts.mkdir(local_pod.pod_volume_data_path)
        ASSERT.equal(len(bundle_pod.volumes), len(local_pod.volumes))
        pairs = zip(bundle_pod.volumes, local_pod.volumes)
        for bundle_volume, local_volume in pairs:
            ASSERT.equal(bundle_volume.name, local_volume.name)
            # Volume data may also be a local path or a remote URI.
            _cp_or_wget(bundle_volume, 'data', local_volume.data_path)
            if local_volume.checksum:
                scripts.ensure_checksum(
                    local_volume.data_path, local_volume.checksum)
    return local_pod
def deploy_create_pod_manifest(repo, pod):
    """Generate an Appc pod manifest for every instance of the pod.

    Volume paths and host ports are resolved at deployment time and baked
    into the generated manifests under POD_DIR.
    """
    LOG.info('%s - create pod manifest', pod)
    scripts.ensure_directory(repo.get_pod_dir(pod))
    # Deployment-time volume allocation.
    get_volume_path = lambda _, volume: pod.pod_volumes_path / volume.name
    # Deployment-time port allocation.
    ports = repo.get_ports()
    def get_host_port(instance, port_name):
        # Look for a static port allocation declared on the pod.
        for port_allocation in pod.ports:
            if port_allocation.name == port_name:
                break
        else:
            port_allocation = None
        if port_allocation:
            # Okay, this is a statically assigned port; pick the first
            # unassigned port number.
            for port_number in port_allocation.host_ports:
                if not ports.is_assigned(port_number):
                    break
            else:
                port_number = port_allocation.host_ports[0]
                LOG.info(
                    'all are assigned; re-use the first one: %d',
                    port_number,
                )
            action_name = 'assign'
            action = ports.assign
        else:
            # No static allocation: take the next free dynamic port.
            port_number = ports.next_available_port()
            action_name = 'allocate'
            action = ports.allocate
        LOG.info(
            '%s%s%s - %s %s port %d',
            pod,
            ' ' if instance.name else '',
            instance.name or '',
            action_name,
            port_name,
            port_number,
        )
        # Record the assignment/allocation in the repo's port table.
        action(ports.Port(
            pod_name=pod.name,
            pod_version=pod.version,
            instance=instance.name,
            name=port_name,
            port=port_number,
        ))
        return port_number
    with scripts.using_sudo():
        scripts.mkdir(pod.pod_manifests_path)
        for instance in pod.iter_instances():
            # Generate Appc pod manifest.
            manifest_base = json.dumps(
                pod.make_manifest(
                    instance=instance,
                    get_volume_path=get_volume_path,
                    get_host_port=get_host_port,
                ),
                indent=4,
                sort_keys=True,
            )
            # TODO It might not be a great idea to do text substitution
            # on JSON string, but it seems to be the only way to
            # customize pod instances, and probably relatively safe to
            # do. Hopefully I will find another way without text
            # substitution.
            manifest = instance.resolve_specifier(manifest_base)
            scripts.tee(
                manifest.encode('ascii'),
                pod.get_pod_manifest_path(instance),
            )
def deploy_create_volumes(pod):
    """Create data volumes under POD_DIR/volumes."""
    LOG.info('%s - create data volumes', pod)
    with scripts.using_sudo():
        root = pod.pod_volumes_path
        for volume in pod.volumes:
            _create_volume(root, volume)
def deploy_fetch(pod, *, image_ids=None):
    """Fetch the pod's container images from local files or from remote."""
    LOG.info('%s - fetch images', pod)
    if image_ids is None:
        image_ids = _list_image_ids()
    for image in pod.images:
        if _match_image_id(image.id, image_ids):
            # Already present in the local image store.
            LOG.debug('skip fetching image %s', image.id)
            continue
        cmd = ['rkt', 'fetch']
        if image.signature:
            cmd += ['--signature', image.signature]
        if image.image_path:
            # Local image file; warn when it is unsigned.
            if not image.signature:
                LOG.warning('no signature for %s', image.image_path)
            cmd += ['--insecure-options=image', image.image_path]
        else:
            ASSERT.true(image.image_uri)
            # Docker registries do not provide Appc signatures.
            if image.image_uri.startswith('docker://'):
                cmd.append('--insecure-options=image')
            cmd.append(image.image_uri)
        scripts.execute(cmd)
def deploy_install_units(pod):
    """Install and load systemd units."""
    for unit in pod.systemd_units:
        # TODO: Don't use `systemctl link` for now and figure out why it
        # doesn't behave as I expect.
        scripts.ensure_file(unit.unit_file_path)
        with scripts.using_sudo():
            # Copy the unit into systemd's unit directory and write the
            # per-pod drop-in configuration for it.
            scripts.cp(unit.unit_file_path, unit.unit_path)
            _make_dropin_file(pod, unit)
    with scripts.using_sudo():
        # Make systemd pick up the newly installed units.
        scripts.systemctl_daemon_reload()
def deploy_enable(pod, predicate):
    """Enable the pod's default systemd unit instances."""
    LOG.info('%s - enable pod units', pod)
    count = 0
    for instance in pod.filter_instances(predicate):
        count += 1
        if scripts.systemctl_is_enabled(instance.unit_name):
            continue
        LOG.info('%s - enable unit instance: %s', pod, instance.unit_name)
        with scripts.using_sudo():
            scripts.systemctl_enable(instance.unit_name)
        # Verify that enabling actually took effect.
        if not scripts.systemctl_is_enabled(instance.unit_name):
            raise RuntimeError(
                'unit %s is not enabled' % instance.unit_name)
    if not count:
        LOG.warning('%s - no unit to enable', pod)
def deploy_start(pod, predicate):
    """Start the pod's default systemd unit instances."""
    LOG.info('%s - start pod units', pod)
    count = 0
    for instance in pod.filter_instances(predicate):
        count += 1
        if scripts.systemctl_is_active(instance.unit_name):
            continue
        LOG.info('%s - start unit instance: %s', pod, instance.unit_name)
        with scripts.using_sudo():
            scripts.systemctl_start(instance.unit_name)
        # Verify that starting actually took effect.
        if not scripts.systemctl_is_active(instance.unit_name):
            raise RuntimeError(
                'unit %s is not started' % instance.unit_name)
    if not count:
        LOG.warning('%s - no unit to start', pod)
### Undeploy
# NOTE: These undeploy functions are resilient against the situations
# that pod state is unexpected (e.g., undeploy_stop is called when pod
# state is UNDEPLOYED). Meaning, you may call them without ensuring pod
# state beforehand.
def undeploy_stop(pod, predicate):
    """Stop default systemd units of the pod (best effort)."""
    LOG.info('%s - stop pod units', pod)
    count = 0
    for instance in pod.filter_instances(predicate):
        count += 1
        LOG.info('%s - stop unit instance: %s', pod, instance.unit_name)
        with scripts.checking(False), scripts.using_sudo():
            scripts.systemctl_stop(instance.unit_name)
        # Only warn on failure; undeploy must keep going regardless.
        if scripts.systemctl_is_active(instance.unit_name):
            LOG.warning('unit %s is still active', instance.unit_name)
    if not count:
        LOG.warning('%s - no unit to stop', pod)
def undeploy_disable(pod, predicate):
    """Disable default systemd units of the pod (best effort)."""
    LOG.info('%s - disable pod units', pod)
    count = 0
    for instance in pod.filter_instances(predicate):
        count += 1
        LOG.info('%s - disable unit instance: %s', pod, instance.unit_name)
        with scripts.checking(False), scripts.using_sudo():
            scripts.systemctl_disable(instance.unit_name)
        # Only warn on failure; undeploy must keep going regardless.
        if scripts.systemctl_is_enabled(instance.unit_name):
            LOG.warning('unit %s is still enabled', instance.unit_name)
    if not count:
        LOG.warning('%s - no unit to disable', pod)
def undeploy_remove(repo, pod):
    """Remove container images and the pod directory."""
    LOG.info('%s - remove pod', pod)
    # Undo deploy_install_units.
    for unit in pod.systemd_units:
        with scripts.using_sudo():
            scripts.rm(unit.unit_path)
            for instance in unit.instances:
                scripts.rm(instance.dropin_path, recursive=True)
    with scripts.checking(False), scripts.using_sudo():
        scripts.systemctl_daemon_reload()
    image_to_pod_table = repo.get_images()
    # Undo deploy_fetch.
    for image in pod.images:
        # Keep images shared with other deployed pods.
        if len(image_to_pod_table[image.id]) > 1:
            LOG.debug('not remove image which is still in use: %s', image.id)
            continue
        cmd = ['rkt', 'image', 'rm', image.id]
        # Best effort: a failed removal only warns.
        if scripts.execute(cmd, check=False).returncode != 0:
            LOG.warning('cannot remove image: %s', image.id)
    # Undo deploy_copy and related actions, and if this is the last pod,
    # remove the pods directory, too.
    with scripts.using_sudo():
        scripts.rm(repo.get_pod_dir(pod), recursive=True)
        scripts.rmdir(repo.get_pods_dir(pod.name))
### Command-line interface
# Shared argparse decorators reused by several subcommands below.
with_argument_instance = apps.with_decorators(
    apps.with_argument(
        '--instance-all', action='store_true',
        help='select all instances',
    ),
    apps.with_argument(
        '--instance', action='append',
        help='select instance(s) by name (the part after "@")',
    ),
)
with_argument_tag = apps.with_argument(
    'tag',
    help='set pod tag (format "name@version")',
)
@apps.with_prog('list')
@apps.with_help('list deployed pods')
def list_pods(_, repo):
    """Print every deployed pod, one per line."""
    for dir_name in repo.get_pod_dir_names():
        for pod in repo.iter_pods(dir_name):
            print(pod)
    return 0
@apps.with_prog('list-units')
@apps.with_help('list systemd units of deployed pods')
@apps.with_argument(
    '--show-state', action='store_true',
    help='also print unit state',
)
def list_units(args, repo):
    """Print the systemd unit instances of every deployed pod."""
    for dir_name in repo.get_pod_dir_names():
        for pod in repo.iter_pods(dir_name):
            for instance in pod.iter_instances():
                columns = [pod, instance.unit_name]
                if args.show_state:
                    # Append enabled/started markers when requested.
                    if scripts.systemctl_is_enabled(instance.unit_name):
                        columns.append('enabled')
                    if scripts.systemctl_is_active(instance.unit_name):
                        columns.append('started')
                print(*columns)
    return 0
@apps.with_prog('is-deployed')
@apps.with_help('check if a pod is deployed')
@with_argument_tag
def is_deployed(args, repo):
    """Exit 0 when the tagged pod is deployed, 1 otherwise."""
    return 0 if repo.is_pod_tag_deployed(args.tag) else 1
@apps.with_prog('is-enabled')
@apps.with_help('check if default unit instances are enabled')
@with_argument_instance
@with_argument_tag
def is_enabled(args, repo):
    """Exit 0 when the selected unit instances are all enabled."""
    predicate = _make_instance_predicate(args, models.Pod.should_enable)
    return _check_unit_state(
        args, repo, 'enabled', predicate, scripts.systemctl_is_enabled)
@apps.with_prog('is-started')
@apps.with_help('check if default unit instances are started')
@with_argument_instance
@with_argument_tag
def is_started(args, repo):
    """Exit 0 when the selected unit instances are all active."""
    predicate = _make_instance_predicate(args, models.Pod.should_start)
    return _check_unit_state(
        args, repo, 'started', predicate, scripts.systemctl_is_active)
def _check_unit_state(args, repo, state_name, predicate, check):
try:
pod = repo.get_pod_from_tag(args.tag)
except FileNotFoundError:
LOG.debug('no pod dir for: %s', args.tag)
return 1
okay = True
has_instance = False
for instance in pod.filter_instances(predicate):
has_instance = True
if not check(instance.unit_name):
LOG.debug('unit is not %s: %s', state_name, instance.unit_name)
okay = False
if not has_instance:
LOG.warning('%s - no unit to check for', pod)
return 0 if okay else 1
@apps.with_help('deploy a pod')
@apps.with_argument('pod_file', type=Path, help='set path to the pod file')
def deploy(args, repo):
    """Deploy a pod from a bundle."""
    pod_file = args.pod_file
    if pod_file.is_dir():
        # A directory bundle keeps its metadata in pod.json inside it.
        pod_file = pod_file / models.POD_JSON
    scripts.ensure_file(pod_file)
    # The bundle pod is rooted at the bundle directory; the local pod is
    # rooted at this repo's pod directory.
    bundle_pod = models.Pod(
        json.loads(pod_file.read_text()),
        pod_file.parent.absolute(),
    )
    pod = bundle_pod.make_local_pod(repo.get_pod_dir(bundle_pod))
    if repo.is_pod_deployed(pod):
        # Idempotent: re-deploying an already-deployed pod is a no-op.
        LOG.info('%s - pod has been deployed', pod)
        return 0
    LOG.info('%s - deploy', pod)
    try:
        # NOTE(review): these steps look order dependent (files copied
        # before manifests/volumes, units installed last) — confirm
        # before reordering.
        deploy_copy(repo, bundle_pod, pod)
        deploy_create_pod_manifest(repo, pod)
        deploy_create_volumes(pod)
        deploy_fetch(pod)
        deploy_install_units(pod)
    except Exception:
        # Roll back the partially-deployed pod before propagating.
        undeploy_remove(repo, pod)
        raise
    return 0
@apps.with_help('enable a pod')
@with_argument_instance
@with_argument_tag
def enable(args, repo):
    """Enable a deployed pod."""
    predicate = _make_instance_predicate(args, models.Pod.should_enable)
    return _deploy_operation(
        args, repo, 'enable', predicate, deploy_enable, undeploy_disable)
@apps.with_help('start a pod')
@with_argument_instance
@with_argument_tag
def start(args, repo):
    """Start a deployed pod."""
    predicate = _make_instance_predicate(args, models.Pod.should_start)
    return _deploy_operation(
        args, repo, 'start', predicate, deploy_start, undeploy_stop)
def _deploy_operation(
        args, repo,
        operator_name,
        predicate,
        operator, reverse_operator):
    """Apply ``operator`` to the tagged pod; undo on failure.

    ``reverse_operator`` is invoked to roll back when ``operator`` raises,
    and the original exception is re-raised.
    """
    pod = repo.get_pod_from_tag(args.tag)
    LOG.info('%s - %s', pod, operator_name)
    try:
        operator(pod, predicate)
    except Exception:
        # Best-effort rollback before propagating the original error.
        reverse_operator(pod, predicate)
        raise
    return 0
@apps.with_help('stop a pod')
@with_argument_instance
@with_argument_tag
def stop(args, repo):
    """Stop a pod."""
    predicate = _make_instance_predicate(args, models.Pod.should_start)
    return _undeploy_operation(args, repo, 'stop', predicate, undeploy_stop)
@apps.with_help('disable a pod')
@with_argument_instance
@with_argument_tag
def disable(args, repo):
    """Disable a pod."""
    predicate = _make_instance_predicate(args, models.Pod.should_enable)
    return _undeploy_operation(
        args, repo, 'disable', predicate, undeploy_disable)
def _undeploy_operation(args, repo, operator_name, predicate, operator):
    """Apply a reversing operation to the tagged pod.

    A pod that was never deployed is treated as success (idempotent).
    """
    try:
        pod = repo.get_pod_from_tag(args.tag)
    except FileNotFoundError:
        LOG.warning('%s - pod has not been deployed', args.tag)
        return 0
    LOG.info('%s - %s', pod, operator_name)
    operator(pod, predicate)
    return 0
@apps.with_help('undeploy a pod')
@with_argument_tag
def undeploy(args, repo):
    """Undeploy a deployed pod."""
    try:
        pod = repo.get_pod_from_tag(args.tag)
    except FileNotFoundError:
        # Nothing to do; treat as success for idempotency.
        LOG.warning('%s - pod has not been deployed', args.tag)
        return 0
    LOG.info('%s - undeploy', pod)
    _undeploy_pod(repo, pod)
    return 0
@apps.with_help('clean up pods')
@apps.with_argument(
    '--keep', type=int, default=8,
    help='keep latest number of versions (default to %(default)d)'
)
def cleanup(args, repo):
    """Undeploy old pods, keeping at least ``--keep`` versions per pod."""
    if args.keep < 0:
        raise ValueError('negative keep: %d' % args.keep)

    def _is_enabled_or_started(pod):
        # A pod is "busy" when any of its instances is enabled or active.
        return any(
            scripts.systemctl_is_enabled(instance.unit_name)
            or scripts.systemctl_is_active(instance.unit_name)
            for instance in pod.iter_instances()
        )

    for pod_dir_name in repo.get_pod_dir_names():
        LOG.info('%s - cleanup', pod_dir_name)
        candidates = list(repo.iter_pods(pod_dir_name))
        num_left = len(candidates)
        for pod in candidates:
            if num_left <= args.keep:
                break
            if _is_enabled_or_started(pod):
                # Never remove a pod that is still in use.
                LOG.info('refuse to undeploy pod: %s', pod)
                continue
            _undeploy_pod(repo, pod)
            num_left -= 1
    return 0
def _undeploy_pod(repo, pod):
    """Fully undeploy a pod: stop and disable all instances, then remove."""
    # Use ``pod.all_instances`` (not the default predicates) so that every
    # instance is stopped and disabled before removal.
    undeploy_stop(pod, predicate=pod.all_instances)
    undeploy_disable(pod, predicate=pod.all_instances)
    undeploy_remove(repo, pod)
@apps.with_help('manage pods')
@apps.with_apps(
    'operation', 'operation on pods',
    list_pods,
    list_units,
    is_deployed,
    is_enabled,
    is_started,
    deploy,
    enable,
    start,
    stop,
    disable,
    undeploy,
    cleanup,
)
def pods(args):
    """Manage containerized application pods."""
    # Dispatch to the chosen sub-command with a repo rooted at args.root.
    return args.operation(args, repo=repos.Repo(args.root))
@apps.with_prog('refetch-all')
@apps.with_help('re-fetch all container images')
def refetch_all(args):
    """Re-fetch container images for every deployed pod.

    Use this to work around the known issue that `rkt image gc` also
    collects still-in-use images.
    """
    # Sadly, the rkt projects has been terminated.
    #scripts.execute([
    #    'rkt', 'fetch',
    #    'coreos.com/rkt/stage1-coreos:' + deps.PACKAGES['rkt'].version,
    #])
    repo = repos.Repo(args.root)
    known_image_ids = _list_image_ids()
    for dir_name in repo.get_pod_dir_names():
        for deployed_pod in repo.iter_pods(dir_name):
            deploy_fetch(deployed_pod, image_ids=known_image_ids)
    return 0
### Helper functions
def _make_instance_predicate(args, default):
if args.instance_all:
predicate = None
elif args.instance:
instance_names = set(args.instance)
predicate = lambda instance: instance.name in instance_names
else:
predicate = default
return predicate
def _list_image_ids():
    """Return the set of full image ids known to rkt (empty tuple if none)."""
    command = ['rkt', 'image', 'list', '--fields=id', '--full', '--no-legend']
    stdout = scripts.execute(command, capture_stdout=True).stdout
    if not stdout:
        return ()
    lines = stdout.decode('ascii').split('\n')
    # Strip whitespace and drop blank lines.
    return frozenset(line.strip() for line in lines if line.strip())
def _match_image_id(target_id, image_ids):
for image_id in image_ids:
if image_id.startswith(target_id) or target_id.startswith(image_id):
return True
return False
def _cp_or_wget(obj, name, dst):
src_path = getattr(obj, name + '_path')
if src_path:
scripts.cp(src_path, dst)
return True
src_uri = getattr(obj, name + '_uri')
if src_uri:
scripts.wget(src_uri, dst)
return True
return False
def _create_volume(volume_root, volume):
# Create volume directory and change its owner
volume_path = volume_root / volume.name
if volume_path.exists():
raise RuntimeError('volume exists: %s' % volume_path)
scripts.mkdir(volume_path)
scripts.execute(
['chown', '%s:%s' % (volume.user, volume.group), volume_path])
# Extract contents for volume
if volume.data_path:
scripts.tar_extract(
volume.data_path,
volume_path,
tar_extra_flags=[
# This is default when sudo but just be explicit
'--preserve-permissions',
],
)
def _make_dropin_file(pod, unit):
    """Write the systemd drop-in pointing each instance at its manifest."""
    template = (
        '[Service]\n'
        'Environment="POD_MANIFEST={pod_manifest}"\n'
        # Metadata of this pod instance.
        'Environment="POD_NAME={pod_name}"\n'
        'Environment="POD_VERSION={pod_version}"\n'
    )
    for instance in unit.instances:
        scripts.mkdir(instance.dropin_path)
        dropin = template.format(
            pod_manifest=pod.get_pod_manifest_path(instance),
            pod_name=pod.name,
            pod_version=pod.version,
        )
        scripts.tee(
            dropin.encode('ascii'),
            instance.dropin_path / '10-pod-manifest.conf',
        )
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/onboard/pods.py",
"copies": "1",
"size": "22475",
"license": "mit",
"hash": -3201115213704644000,
"line_mean": 29.6616643929,
"line_max": 77,
"alpha_frac": 0.6001779755,
"autogenerated": false,
"ratio": 3.727197346600332,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4827375322100331,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'pods',
]
from pathlib import Path
import json
from garage import apps
from ops import models
@apps.with_prog('read-tag')
@apps.with_help('read tag in pod file')
@apps.with_argument(
    '--suitable-for-filename', action='store_true',
    help='transform tag value to be suitable for filename',
)
@apps.with_argument('pod_file', type=Path, help='provide pod file path')
def read_tag(args):
    """Read tag in pod file (useful in scripting)."""
    path = args.pod_file
    if path.is_dir():
        # A directory means "the pod.json inside it".
        path = path / models.POD_JSON
    pod = models.Pod(json.loads(path.read_text()), path.parent.absolute())
    if args.suitable_for_filename:
        print('%s--%s' % (pod.name.make_suitable_for_filename(), pod.version))
    else:
        print(pod)
    return 0
@apps.with_help('manage pods')
@apps.with_apps(
    'operation', 'operation on pods',
    read_tag,
)
def pods(args):
    """Manage containerized application pods."""
    # Dispatch directly; these commands need no repository object.
    return args.operation(args)
| {
"repo_name": "clchiou/garage",
"path": "py/ops/ops/mob/pods.py",
"copies": "1",
"size": "1030",
"license": "mit",
"hash": -4897685150875935000,
"line_mean": 24.1219512195,
"line_max": 78,
"alpha_frac": 0.6524271845,
"autogenerated": false,
"ratio": 3.149847094801223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4302274279301223,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Poller',
'Polls',
# Poller implementations.
#
# TODO: Only epoll is supported as cross-platform is not priority.
'Epoll',
]
import enum
import errno
import math
import select
import threading
from typing import Sequence, Tuple, Union
from g1.bases.assertions import ASSERT
class Polls(enum.Enum):
    """Type of polls.

    A task may either read or write a file, but never both at the same
    time (at least I can't think of a use case of that).
    """

    # Values come from enum.auto(); only member identity matters.
    READ = enum.auto()
    WRITE = enum.auto()
class Poller:
    """Abstract interface of an I/O readiness poller."""

    def close(self):
        """Close the poller."""
        raise NotImplementedError

    def notify_open(self, fd: int):
        """Add the given file descriptor to the poller."""
        raise NotImplementedError

    def notify_close(self, fd: int):
        """Remove the given file descriptor from the poller.

        NOTE: This might be called in another thread.
        """
        raise NotImplementedError

    def poll(
        self,
        timeout: Union[float, None],
    ) -> Tuple[Sequence[int], Sequence[int]]:
        """Poll and return readable and writeable file descriptors.

        NOTE: This could return extra file descriptors, like write-end
        of pipes as readable file descriptors.
        """
        raise NotImplementedError
class Epoll(Poller):
    """``Poller`` implementation backed by ``select.epoll`` (edge-triggered)."""

    # Register every fd for read and write readiness, edge-triggered.
    _EVENT_MASK = (
        select.EPOLLIN | select.EPOLLOUT | select.EPOLLET | select.EPOLLRDHUP
    )
    # Add EPOLLHUP, EPOLLRDHUP, EPOLLERR to the mask.  This should
    # unblock all tasks whenever a file is readable or writeable, at the
    # cost of (rare?) spurious wakeup or "extra" file descriptors.
    _EVENT_IN = (
        select.EPOLLIN | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLERR
    )
    _EVENT_OUT = (
        select.EPOLLOUT | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLERR
    )

    def __init__(self):
        # The lock guards _closed_fds because notify_close may run in
        # another thread (see Poller.notify_close).
        self._lock = threading.Lock()
        self._epoll = select.epoll()
        # Fds closed since the last poll; reported once as both readable
        # and writable so tasks blocked on them can observe the close.
        self._closed_fds = set()

    def close(self):
        self._epoll.close()

    def notify_open(self, fd):
        ASSERT.false(self._epoll.closed)
        try:
            self._epoll.register(fd, self._EVENT_MASK)
        except FileExistsError:
            # Already registered; registration is idempotent here.
            pass

    def notify_close(self, fd):
        ASSERT.false(self._epoll.closed)
        with self._lock:
            self._closed_fds.add(fd)
        try:
            self._epoll.unregister(fd)
        except OSError as exc:
            # The fd may already have been closed; ignore EBADF only.
            if exc.errno != errno.EBADF:
                raise

    def poll(self, timeout):
        """Return (readable_fds, writable_fds); see Poller.poll."""
        ASSERT.false(self._epoll.closed)
        with self._lock:
            if self._closed_fds:
                # Drain pending closed fds first; return them as both
                # readable and writable so all waiters wake up.
                closed_fds, self._closed_fds = self._closed_fds, set()
                return closed_fds, closed_fds
        if timeout is None:
            pass
        elif timeout <= 0:
            timeout = 0
        else:
            # epoll_wait() has a resolution of 1 millisecond.
            timeout = math.ceil(timeout * 1e3) * 1e-3
        can_read = []
        can_write = []
        # Since Python 3.5, poll retries with a re-computed timeout
        # rather than raising InterruptedError (see PEP 475).
        for fd, events in self._epoll.poll(timeout=timeout):
            if events & self._EVENT_IN:
                can_read.append(fd)
            if events & self._EVENT_OUT:
                can_write.append(fd)
        return can_read, can_write
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/kernels/g1/asyncs/kernels/pollers.py",
"copies": "1",
"size": "3423",
"license": "mit",
"hash": -8486900153026107000,
"line_mean": 26.1666666667,
"line_max": 79,
"alpha_frac": 0.5889570552,
"autogenerated": false,
"ratio": 3.8986332574031892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.998759031260319,
"avg_score": 0,
"num_lines": 126
} |
__all__ = [
'POS_INF',
'NEG_INF',
'BoundType',
'IntegerInterval',
'parse',
]
import re
from enum import Enum
from garage.assertions import ASSERT
POS_INF = float('+inf')
NEG_INF = float('-inf')
class BoundType(Enum):
    """Whether an interval endpoint is excluded (OPEN) or included (CLOSED)."""

    OPEN = 'OPEN'
    CLOSED = 'CLOSED'

    def __invert__(self):
        """Flip OPEN <-> CLOSED."""
        return BoundType.CLOSED if self is BoundType.OPEN else BoundType.OPEN
class IntervalMixin:
    """Shared set-algebra operators and filtering for interval types."""

    def __repr__(self):
        return 'Interval<%s>' % str(self)

    def __and__(self, other):
        # Intersection: an item must be in all member intervals.
        return JointInterval(all, [self, other])

    def __or__(self, other):
        # Union: an item must be in any member interval.
        return JointInterval(any, [self, other])

    def __xor__(self, other):
        # Symmetric difference expressed through &, |, and ~.
        return (self & ~other) | (~self & other)

    def filter(self, iterable, key=None):
        """Yield items of ``iterable`` whose key lies inside this interval."""
        if key is None:
            key = lambda item: item
        return filter(lambda item: key(item) in self, iterable)
class IntegerInterval(IntervalMixin):
    """An interval over integers with an open or closed bound on each side."""

    def __init__(self, left, left_type, right, right_type):
        ASSERT.less_or_equal(left, right)
        ASSERT.type_of(left_type, BoundType)
        ASSERT.type_of(right_type, BoundType)
        self.left = left
        self.left_type = left_type
        self.right = right
        self.right_type = right_type

    def __str__(self):
        # Standard interval notation: '(' / '[' for open / closed bounds.
        return ('%s%s, %s%s' % (
            '(' if self.left_type is BoundType.OPEN else '[',
            self.left,
            self.right,
            ')' if self.right_type is BoundType.OPEN else ']',
        ))

    def __bool__(self):
        # True when the interval contains at least one integer.
        if self.left == self.right:
            # [x, x] contains x; any open end makes it empty.
            return (self.left_type is BoundType.CLOSED and
                    self.right_type is BoundType.CLOSED)
        elif self.left + 1 == self.right:
            # Adjacent integer bounds: empty only when both ends are open.
            return (self.left_type is BoundType.CLOSED or
                    self.right_type is BoundType.CLOSED)
        else:
            return True

    def __contains__(self, item):
        if not self:
            # Empty intervals contain nothing.
            return False
        elif self.left < item < self.right:
            return True
        elif item == self.left:
            return self.left_type is BoundType.CLOSED
        elif item == self.right:
            return self.right_type is BoundType.CLOSED
        else:
            return False

    def __invert__(self):
        # Complement: everything below left plus everything above right,
        # flipping each bound's open/closed type.
        return (
            IntegerInterval(
                NEG_INF, BoundType.CLOSED, self.left, ~self.left_type) |
            IntegerInterval(
                self.right, ~self.right_type, POS_INF, BoundType.CLOSED))
class JointInterval(IntervalMixin):
    """Several intervals combined with ``all`` (and) or ``any`` (or)."""

    def __init__(self, join, intervals):
        ASSERT.in_(join, (all, any))
        self.join = join
        self.intervals = intervals

    def __str__(self):
        separator = ' | ' if self.join is any else ' & '
        return separator.join(map(str, self.intervals))

    def __bool__(self):
        return self.join(map(bool, self.intervals))

    def __contains__(self, item):
        return self.join(item in interval for interval in self.intervals)

    def __invert__(self):
        # De Morgan: flip the join and invert every member.
        flipped = all if self.join is any else any
        return JointInterval(flipped, [~interval for interval in self.intervals])
PATTERN_INTERVAL = re.compile(r'(\d*)-(\d*)')
PATTERN_NUMBER = re.compile(r'\d+')
def parse(interval_specs):
    """Parse comma-separated specs (e.g. "1-3,7,10-") into a union interval."""
    return JointInterval(any, list(map(_parse, interval_specs.split(','))))
def _parse(interval_spec):
    """Parse one spec: "a-b" (either bound optional) or a single number.

    Raises SyntaxError for anything else.
    """
    match = PATTERN_INTERVAL.fullmatch(interval_spec)
    if match:
        # A missing bound means unbounded on that side.
        left = int(match.group(1)) if match.group(1) else NEG_INF
        right = int(match.group(2)) if match.group(2) else POS_INF
        return IntegerInterval(left, BoundType.CLOSED, right, BoundType.CLOSED)
    if PATTERN_NUMBER.fullmatch(interval_spec):
        # A single number denotes the degenerate interval [n, n].
        point = int(interval_spec)
        return IntegerInterval(
            point, BoundType.CLOSED, point, BoundType.CLOSED)
    raise SyntaxError('Cannot parse %r' % interval_spec)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/intervals.py",
"copies": "1",
"size": "4014",
"license": "mit",
"hash": 6651482846127976000,
"line_mean": 26.1216216216,
"line_max": 79,
"alpha_frac": 0.5742401594,
"autogenerated": false,
"ratio": 3.7867924528301886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.986009417379175,
"avg_score": 0.00018768768768768769,
"num_lines": 148
} |
__all__ = (
"pre_init",
"post_init",
"pre_save",
"pre_save_post_validation",
"post_save",
"pre_delete",
"post_delete",
)
signals_available = False  # flipped to True below when blinker imports
try:
    from blinker import Namespace

    signals_available = True
except ImportError:
    # blinker is optional: fall back to no-op signals that allow ``send``
    # but raise on any attempt to connect receivers.

    class Namespace:
        def signal(self, name, doc=None):
            return _FakeSignal(name, doc)

    class _FakeSignal:
        """If blinker is unavailable, create a fake class with the same
        interface that allows sending of signals but will fail with an
        error on anything else. Instead of doing anything on send, it
        will just ignore the arguments and do nothing instead.
        """

        def __init__(self, name, doc=None):
            self.name = name
            self.__doc__ = doc

        def _fail(self, *args, **kwargs):
            raise RuntimeError(
                "signalling support is unavailable "
                "because the blinker library is "
                "not installed."
            )

        send = lambda *a, **kw: None  # noqa
        # Chained assignment: connect, disconnect, has_receivers_for,
        # receivers_for, and temporarily_connected_to all raise via _fail.
        connect = (
            disconnect
        ) = has_receivers_for = receivers_for = temporarily_connected_to = _fail
        del _fail
# the namespace for code signals. If you are not mongoengine code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()

pre_init = _signals.signal("pre_init")
post_init = _signals.signal("post_init")
pre_save = _signals.signal("pre_save")
pre_save_post_validation = _signals.signal("pre_save_post_validation")
post_save = _signals.signal("post_save")
pre_delete = _signals.signal("pre_delete")
post_delete = _signals.signal("post_delete")
# NOTE(review): the bulk-insert signals are not listed in __all__ above —
# confirm whether that omission is intentional.
pre_bulk_insert = _signals.signal("pre_bulk_insert")
post_bulk_insert = _signals.signal("post_bulk_insert")
| {
"repo_name": "MongoEngine/mongoengine",
"path": "mongoengine/signals.py",
"copies": "1",
"size": "1789",
"license": "mit",
"hash": 6367641472977445000,
"line_mean": 29.3220338983,
"line_max": 80,
"alpha_frac": 0.6193404136,
"autogenerated": false,
"ratio": 3.863930885529158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9980341815973686,
"avg_score": 0.0005858966310943712,
"num_lines": 59
} |
__all__ = [
"ProtocolError",
"LocalProtocolError",
"RemoteProtocolError",
"validate",
"make_sentinel",
"bytesify",
]
class ProtocolError(Exception):
    """Exception indicating a violation of the HTTP/1.1 protocol.

    This is an abstract base class with two concrete subclasses:
    :exc:`LocalProtocolError` (you tried to do something HTTP/1.1 says is
    illegal) and :exc:`RemoteProtocolError` (the remote peer did).  See
    :ref:`error-handling` for details.

    .. attribute:: error_status_hint

       A suggested HTTP status code for a server responding to this
       error.  For a :exc:`RemoteProtocolError` it suggests how to answer
       a misbehaving peer; for a :exc:`LocalProtocolError` it suggests
       how your peer might have responded had h11 let you continue.
       Defaults to 400 Bad Request, a generic catch-all for protocol
       violations.
    """

    def __init__(self, msg, error_status_hint=400):
        # Abstract: only the two subclasses may be instantiated.
        if type(self) is ProtocolError:
            raise TypeError("tried to directly instantiate ProtocolError")
        super().__init__(msg)
        self.error_status_hint = error_status_hint
# Strategy: there are a number of public APIs where a LocalProtocolError can
# be raised (send(), all the different event constructors, ...), and only one
# public API where RemoteProtocolError can be raised
# (receive_data()). Therefore we always raise LocalProtocolError internally,
# and then receive_data will translate this into a RemoteProtocolError.
#
# Internally:
# LocalProtocolError is the generic "ProtocolError".
# Externally:
# LocalProtocolError is for local errors and RemoteProtocolError is for
# remote errors.
class LocalProtocolError(ProtocolError):
    """Raised when the local side violates HTTP/1.1 (see ProtocolError)."""

    def _reraise_as_remote_protocol_error(self):
        # After catching a LocalProtocolError, use this method to re-raise it
        # as a RemoteProtocolError. This method must be called from inside an
        # except: block.
        #
        # An easy way to get an equivalent RemoteProtocolError is just to
        # modify 'self' in place.
        self.__class__ = RemoteProtocolError
        # But the re-raising is somewhat non-trivial -- you might think that
        # now that we've modified the in-flight exception object, that just
        # doing 'raise' to re-raise it would be enough. But it turns out that
        # this doesn't work, because Python tracks the exception type
        # (exc_info[0]) separately from the exception object (exc_info[1]),
        # and we only modified the latter. So we really do need to re-raise
        # the new type explicitly.
        # On py3, the traceback is part of the exception object, so our
        # in-place modification preserved it and we can just re-raise:
        raise self
class RemoteProtocolError(ProtocolError):
    """Raised when the remote peer violates HTTP/1.1 (see ProtocolError)."""
    pass
def validate(regex, data, msg="malformed data", *format_args):
    """Fullmatch ``data`` against ``regex`` and return the named groups.

    Raises LocalProtocolError (with ``msg``, optionally formatted with
    ``format_args``) when the match fails.
    """
    match = regex.fullmatch(data)
    if match is None:
        if format_args:
            msg = msg.format(*format_args)
        raise LocalProtocolError(msg)
    return match.groupdict()
# Sentinel values
#
# - Inherit identity-based comparison and hashing from object
# - Have a nice repr
# - Have a *bonus property*: type(sentinel) is sentinel
#
# The bonus property is useful if you want to take the return value from
# next_event() and do some sort of dispatch based on type(event).
class _SentinelBase(type):
    """Metaclass that gives sentinels a bare-name repr."""

    def __repr__(self):
        return self.__name__


def make_sentinel(name):
    """Create a unique sentinel object such that ``type(sentinel) is sentinel``.

    Sentinels compare and hash by identity; the self-type bonus property
    makes type-based dispatch on next_event() results convenient.
    """
    sentinel = _SentinelBase(name, (_SentinelBase,), {})
    sentinel.__class__ = sentinel
    return sentinel
# Used for methods, request targets, HTTP versions, header names, and header
# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always
# returns bytes.
def bytesify(s):
    """Coerce an ascii str or a bytes-like object to ``bytes``."""
    if type(s) is bytes:
        # Fast path: already exactly bytes.
        return s
    if isinstance(s, int):
        # bytes(int) would build a zero-filled buffer; reject explicitly.
        raise TypeError("expected bytes-like object, not int")
    if isinstance(s, str):
        return s.encode("ascii")
    return bytes(s)
| {
"repo_name": "python-hyper/h11",
"path": "h11/_util.py",
"copies": "1",
"size": "4387",
"license": "mit",
"hash": 6638147678113622000,
"line_mean": 34.9590163934,
"line_max": 78,
"alpha_frac": 0.6838386141,
"autogenerated": false,
"ratio": 4.115384615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 122
} |
__all__ = [
'Publisher',
]
import logging
import nng
import nng.asyncs
from g1.asyncs.bases import queues
from g1.bases import classes
LOG = logging.getLogger(__name__)
class Publisher:
    """Publishes messages from a queue over an nng PUB0 socket."""

    def __init__(self, queue, wiredata, *, drop_when_full=True):
        # queue: source of outgoing messages; wiredata: serializer whose
        # ``to_lower`` converts a message to its wire representation.
        self._queue = queue
        self._wiredata = wiredata
        # When True, publish_nonblocking drops on a full queue; when
        # False it re-raises queues.Full.
        self._drop_when_full = drop_when_full
        # For convenience, create socket before ``__enter__``.
        self.socket = nng.asyncs.Socket(nng.Protocols.PUB0)

    __repr__ = classes.make_repr('{self.socket!r}')

    def __enter__(self):
        self.socket.__enter__()
        return self

    def __exit__(self, *args):
        # Close the queue first and report messages that were never sent.
        messages = self._queue.close(graceful=False)
        if messages:
            LOG.warning('drop %d messages', len(messages))
        return self.socket.__exit__(*args)

    async def serve(self):
        """Drain the queue, serializing and publishing each message."""
        LOG.info('start publisher: %r', self)
        try:
            while True:
                message = await self._queue.get()
                try:
                    raw_message = self._wiredata.to_lower(message)
                except Exception:
                    # A serialization failure drops the message, not the loop.
                    LOG.exception('to_lower error: %r', message)
                    continue
                try:
                    # For now we publish with no topic.
                    await self.socket.send(raw_message)
                except nng.Errors.ETIMEDOUT:
                    LOG.warning('send timeout; drop message: %r', message)
                    continue
        except (queues.Closed, nng.Errors.ECLOSED):
            # Normal shutdown: the queue or the socket was closed.
            pass
        self._queue.close()
        LOG.info('stop publisher: %r', self)

    def shutdown(self):
        """Stop ``serve`` by closing the queue."""
        self._queue.close()

    async def publish(self, message):
        """Enqueue a message, waiting when the queue is full."""
        await self._queue.put(message)

    def publish_nonblocking(self, message):
        """Enqueue without blocking; drop or raise when the queue is full."""
        try:
            self._queue.put_nonblocking(message)
        except queues.Full:
            if self._drop_when_full:
                LOG.warning('queue full; drop message: %r', message)
            else:
                raise
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/pubsub/publishers.py",
"copies": "1",
"size": "2045",
"license": "mit",
"hash": -5538128058392402000,
"line_mean": 27.8028169014,
"line_max": 74,
"alpha_frac": 0.5422982885,
"autogenerated": false,
"ratio": 4.081836327345309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 71
} |
__all__ = [
'PVGeoError',
'ErrorObserver',
'HiddenPrints',
]
import os
import re
import sys
class PVGeoError(Exception):
    """Custom error type for failures while processing on the VTK pipeline.

    On the VTK pipeline, errors are not really raised but passed over and
    printed to the console, so messages are wrapped in distinctive
    qualifier markers.  That makes them easy to find and extract from the
    error stream in ParaView, and easy to clean up when used in plain
    Python outside of ParaView.
    """

    QUALIFIER_L = '@@@@PVGeoError ---> '
    QUALIFIER_R = ' <--- PVGeoError@@@@'
    SEARCHER = re.compile(
        r'@@@@PVGeoError --->.+?<--- PVGeoError@@@@', re.MULTILINE | re.DOTALL
    )

    def __init__(self, message):
        # Wrap the message in qualifiers (padded with blank lines) so it
        # can be located in the stream and extracted later.
        self.message = '\n\n\n\n%s%s%s\n\n\n\n' % (
            self.QUALIFIER_L, message, self.QUALIFIER_R,
        )

    def __str__(self):
        return self.message

    @staticmethod
    def clean_message(message):
        """Strip the qualifier markers from an extracted message."""
        cleaned = message.replace(PVGeoError.QUALIFIER_L, '')
        return cleaned.replace(PVGeoError.QUALIFIER_R, '')
class ErrorObserver:
    """Catches errors raised while processing on a VTK pipeline.

    The ``AlgorithmBase`` class handles setting up this observer on
    initialization.

    Example:
        >>> import PVGeo
        >>> # Only use this observer on sub classes of the AlgorithmBase:
        >>> f = PVGeo.AlgorithmBase()
        >>> f.Update()
        >>> if f.error_occurred():
        >>>     print(f.get_error_message())
        ERROR: ...
    """

    def __init__(self):
        self._has_error = False
        self._last_message = None
        self._last_message_etc = None
        # VTK reads this attribute to know the callback signature.
        self.CallDataType = 'string0'
        # Whether this observer is already attached to an algorithm.
        self._observing = False

    def __call__(self, obj, event, message):
        self._has_error = True
        # Search the message stream for PVGeoError qualifiers to extract.
        extracted = PVGeoError.SEARCHER.findall(message)
        if len(extracted) > 0:
            info = '\nPVGeoError: '
            message = info + info.join(
                PVGeoError.clean_message(m) for m in extracted)
        elif self._last_message is not None:
            # No qualifier and a message already captured: stash the rest.
            self._last_message_etc = message
            return
        # With no qualifier and nothing captured yet, keep the whole stream.
        self._last_message = message
        print(message)

    def error_occurred(self):
        """Return whether an error occurred, resetting the flag."""
        occurred, self._has_error = self._has_error, False
        return occurred

    def get_error_message(self, etc=False):
        """Get the last set error message

        Return:
            str: the last set error message
        """
        return self._last_message_etc if etc else self._last_message

    def make_observer(self, algorithm):
        """Attach this observer to ``algorithm``'s error events."""
        if self._observing:
            raise RuntimeError('This error observer is already observing an algorithm.')
        algorithm.GetExecutive().AddObserver('ErrorEvent', self)
        algorithm.AddObserver('ErrorEvent', self)
        self._observing = True
        return
class HiddenPrints:
    """Context manager that suppresses printed output while active.

    This is used to suppress printed warnings from discretize on import:

    Example:
        >>> with HiddenPrints():
        ...     import discretize
    """

    def __enter__(self):
        # Swap stdout for a null sink; keep the original for restoration.
        self._original_stdout = sys.stdout
        sys.stdout = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/_helpers/errors.py",
"copies": "1",
"size": "3859",
"license": "bsd-3-clause",
"hash": 7504543315171796000,
"line_mean": 30.8925619835,
"line_max": 105,
"alpha_frac": 0.6079295154,
"autogenerated": false,
"ratio": 4.028183716075157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136113231475157,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Q'
]
def operand_maker(operand):
    """Build a Query method that applies a MongoDB operator to its value."""
    def apply_operand(self, value):
        return self.value({operand: value})
    return apply_operand
def list_maker(operand):
    """Build a Query method joining self with another Query under ``operand``."""
    def join_queries(self, q):
        if not isinstance(q, Query):
            raise TypeError('Expected Query object, got: {}'.format(q))
        return Query({operand: [self, q]})
    return join_queries
class QueryError(Exception): pass  # raised when a Query is built or combined incorrectly
class Query(dict):
    """Builds MongoDB-style query documents through attribute access and
    operator overloading, e.g. ``Q.age > 5`` yields ``{'age': {'$gt': 5}}``.
    """

    def __init__(self, *a, **k):
        super(Query, self).__init__(*a, **k)
        # Dotted field path this query is currently bound to, or None.
        self.field = None

    def _dup(self):
        # Shallow copy that preserves the bound field.
        q = Query(self)
        q.field = self.field
        return q

    def value(self, value):
        """Bind ``value`` to the current field, returning a new Query."""
        if self.field is None:
            raise QueryError('Cannot add a value to a query bound to multiple fields')
        q = self._dup()
        if isinstance(value, dict) and self.field in self:
            # Merge into the existing sub-document for this field.
            if not isinstance(q[self.field], dict):
                raise QueryError('Can only add keys to documents, not to: {}'.format(q[self.field]))
            self._verify_not_overrides(q[self.field], value)
            q[self.field] = Query(q[self.field], **value)
        else:
            q[self.field] = value
        return q

    def __getattr__(self, field):
        # Bind (or extend) the field path: ``Q.a.b`` yields field 'a.b'.
        if field == 'id':
            # Convenience alias for MongoDB's primary key.
            field = '_id'
        if self:
            raise QueryError('Generating a Query on a sub document will abandon current query')
        q = self._dup()
        if self.field is not None:
            q.field += '.' + field
        else:
            q.field = field
        return q

    def _verify_not_overrides(self, d1, d2):
        # Refuse to silently overwrite keys shared by both documents.
        overriden = set(d1) & set(d2)
        if overriden:
            raise QueryError("The following fields will be overriden: {} for Queries: {}, {}".format(list(overriden), d1, d2))

    def __add__(self, other):
        # Merge two queries, refusing overlapping fields.
        self._verify_not_overrides(self, other)
        return Query(self, **other)

    def __invert__(self):
        return Query({'$not': self})

    def mod(self, d, m):
        # Match ``field % d == m`` via MongoDB's $mod operator.
        return self.value({'$mod': [d, m]})

    # Comparison / combination operators map directly onto MongoDB
    # operators; ``==`` binds a plain value to the current field.
    __eq__ = value
    __or__ = list_maker('$or')
    __and__ = list_maker('$and')
    nor = list_maker('$nor')
    __ne__ = operand_maker('$ne')
    __gt__ = operand_maker('$gt')
    __lt__ = operand_maker('$lt')
    __ge__ = operand_maker('$gte')
    __le__ = operand_maker('$lte')
    exists = operand_maker('$exists')
    contained = operand_maker('$in')
    notcontained = operand_maker('$nin')
    regex = operand_maker('$regex')
    options = operand_maker('$options')
    all = operand_maker('$all')
    type = operand_maker('$type')
    where = operand_maker('$where')
    size = operand_maker('$size')
    elemMatch = operand_maker('$elemMatch')
# The method factories are only needed while the Query class body
# executes; drop them from the module namespace.
del operand_maker, list_maker

# Root query object: start building queries as e.g. ``Q.age > 5``.
Q = Query()
| {
"repo_name": "veeloox/ramen",
"path": "ramen/db/query.py",
"copies": "1",
"size": "2713",
"license": "apache-2.0",
"hash": 3460813710402306000,
"line_mean": 27.2604166667,
"line_max": 126,
"alpha_frac": 0.5503133063,
"autogenerated": false,
"ratio": 3.6811397557666212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4731453062066621,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'reader', 'writer',
]
# Import the migrator modules themselves (not just their classes): the
# registry code below reads e.g. ``migrator_csv.sources``, and a bare
# ``from migrator_csv import ...`` does not bind the module name.
import migrator_couchbase
import migrator_couchdb
import migrator_csv
import migrator_dir
import migrator_json
import migrator_zip

from migrator_csv import CSVReader, CSVWriter
from migrator_json import JSONReader, JSONWriter
from migrator_couchdb import CouchdbReader, CouchdbWriter
from migrator_couchbase import CouchbaseReader, CouchbaseWriter
from migrator_dir import DirReader, DirWriter
from migrator_zip import ZipReader, ZipWriter
# Aggregate the per-module reader/writer registries into module-level
# lists.  Requires the migrator_* modules themselves to be imported (the
# from-imports above only bind the class names, not the module names).
sources = []
destinations = []

sources.extend(migrator_couchbase.sources)
sources.extend(migrator_csv.sources)
sources.extend(migrator_json.sources)
sources.extend(migrator_couchdb.sources)
sources.extend(migrator_dir.sources)
sources.extend(migrator_zip.sources)

destinations.extend(migrator_couchbase.destinations)
destinations.extend(migrator_csv.destinations)
destinations.extend(migrator_json.destinations)
destinations.extend(migrator_couchdb.destinations)
destinations.extend(migrator_dir.destinations)
destinations.extend(migrator_zip.destinations)
def reader(loc):
    """Create a reader for a location string "kind:spec".

    Returns None when the kind is not recognized (matching the original
    fall-through behavior).
    """
    kind, fp = loc.split(':', 1)
    factories = {
        'csv': CSVReader,
        'json': JSONReader,
        'couchdb': CouchdbReader,
        'couchbase': CouchbaseReader,
        'dir': DirReader,
        'zip': ZipReader,
    }
    factory = factories.get(kind.lower())
    if factory is not None:
        return factory(fp)
def writer(loc):
    """Create a writer for a location string "kind:spec".

    Returns None when the kind is not recognized (matching the original
    fall-through behavior).
    """
    kind, fp = loc.split(':', 1)
    factories = {
        'csv': CSVWriter,
        'json': JSONWriter,
        'couchdb': CouchdbWriter,
        'couchbase': CouchbaseWriter,
        'dir': DirWriter,
        'zip': ZipWriter,
    }
    factory = factories.get(kind.lower())
    if factory is not None:
        return factory(fp)
| {
"repo_name": "TOTVS/mdmpublic",
"path": "couchbase-cli/lib/python/couchbase/migrator/__init__.py",
"copies": "1",
"size": "1784",
"license": "bsd-2-clause",
"hash": 125794568742709490,
"line_mean": 29.2372881356,
"line_max": 63,
"alpha_frac": 0.6978699552,
"autogenerated": false,
"ratio": 3.3533834586466167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45512534138466165,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'recvfile',
]
import contextlib
import http.client
import logging
import re
import socket
from g1.asyncs.bases import adapters
from g1.bases import loggings
from g1.bases import pools
from g1.bases.assertions import ASSERT
LOG = logging.getLogger(__name__)
# Size of each receive buffer, in bytes.
_CHUNK_SIZE = 8192
# Shared pool of reusable receive buffers; buffers need no cleanup on
# release, hence the no-op ``release``.
_BUFFER_POOL = pools.TimeoutPool(
    pool_size=128,
    allocate=lambda: bytearray(_CHUNK_SIZE),
    release=lambda _: None,
)
async def recvfile(response, file):
    """Receive response body into a file.
    The caller must set ``stream`` to true when make the request.
    DANGER! This breaks the multiple levels of encapsulation, from
    requests.Response all the way down to http.client.HTTPResponse.
    As a result, the response object is most likely unusable after a
    recvfile call, and you should probably close it immediately.
    """
    # requests sets _content to False initially.
    ASSERT.is_(response._content, False)
    ASSERT.false(response._content_consumed)
    urllib3_response = ASSERT.not_none(response.raw)
    chunked = urllib3_response.chunked
    # Dig through urllib3 down to the http.client response and its raw
    # socket so the read loop can drive them directly.
    httplib_response = ASSERT.isinstance(
        urllib3_response._fp, http.client.HTTPResponse
    )
    ASSERT.false(httplib_response.closed)
    sock = ASSERT.isinstance(httplib_response.fp.raw._sock, socket.socket)
    output = DecoderChain(file)
    if chunked:
        # Chunked transfer: end-of-body is signalled by the decoder
        # seeing the zero-length terminating chunk.
        chunk_decoder = ChunkDecoder()
        output.add(chunk_decoder)
        num_to_read = 0
        eof = lambda: chunk_decoder.eof
    else:
        # Non-chunked: a positive Content-Length must be known up front.
        num_to_read = ASSERT.greater(
            ASSERT.not_none(httplib_response.length), 0
        )
        eof = lambda: num_to_read <= 0
    # Use urllib3's decoder code.
    urllib3_response._init_decoder()
    if urllib3_response._decoder is not None:
        # Content-Encoding decoding runs after de-chunking.
        output.add(ContentDecoder(urllib3_response._decoder))
    with contextlib.ExitStack() as stack:
        src = adapters.FileAdapter(httplib_response.fp)
        # disown: give the underlying file back rather than closing it.
        stack.callback(src.disown)
        # Switch to non-blocking for the async reads; restored on exit.
        sock.setblocking(False)
        stack.callback(sock.setblocking, True)
        buffer = memoryview(stack.enter_context(_BUFFER_POOL.using()))
        while not eof():
            if chunked:
                # TODO: If server sends more data at the end, like
                # response of the next request, for now recvfile might
                # read them, and then err out. Maybe recvfile should
                # check this, and not read more than it should instead?
                num_read = await src.readinto1(buffer)
            else:
                # Never read past the declared content length.
                num_read = await src.readinto1(
                    buffer[:min(num_to_read, _CHUNK_SIZE)]
                )
            if num_read == 0:
                break
            output.write(buffer[:num_read])
            num_to_read -= num_read
        output.flush()
    # Sanity check.
    if not chunked:
        ASSERT.equal(num_to_read, 0)
    # Trick requests to release the connection back to the connection
    # pool, rather than closing/discarding it.
    response._content_consumed = True
    # http.client.HTTPConnection tracks the last response; so you have
    # to close it to make the connection object useable again.
    httplib_response.close()
    # Close the response for the caller since response is not useable
    # after recvfile.
    response.close()
    loggings.ONCE_PER(
        1000, LOG.info, 'buffer pool stats: %r', _BUFFER_POOL.get_stats()
    )
class DecoderChain:
    """Feed byte pieces through an ordered pipeline of decoders into a file."""

    def __init__(self, file):
        self._file = file
        self._decoders = []

    def add(self, decoder):
        """Append ``decoder`` to the end of the pipeline."""
        self._decoders.append(decoder)

    def write(self, data):
        """Run ``data`` through every decoder stage, then write the result."""
        pieces = [data]
        for stage in self._decoders:
            pieces = stage.decode(pieces)
        self._write(pieces)

    def flush(self):
        """Flush each stage and push its residue through all later stages."""
        for index, stage in enumerate(self._decoders):
            pieces = stage.flush()
            for later in self._decoders[index + 1:]:
                pieces = later.decode(pieces)
            self._write(pieces)

    def _write(self, pieces):
        # Skip empty pieces so downstream files never see zero-length writes.
        for piece in pieces:
            if piece:
                self._file.write(piece)
class ChunkDecoder:
    """Decoder stage that strips HTTP chunked transfer encoding.

    ``_chunk_remaining`` doubles as the parser state:
      * > 0 -- this many payload bytes of the current chunk remain;
      * 0   -- payload finished; expect the CRLF chunk terminator;
      * -1  -- consumed only the CR of the terminator; expect the LF;
      * -2  -- at the start of a chunk-size line.
    """
    _CRLF_PATTERN = re.compile(br'\r\n')
    def __init__(self):
        # Set once the zero-length terminating chunk has been seen.
        self.eof = False
        self._chunk_remaining = -2
        # Buffer for residual chunk size data from the last `_decode`.
        # It is fairly small for now because we do not expect big chunk
        # parameter.
        self._buffer = memoryview(bytearray(64))
        self._pos = 0
    def decode(self, pieces):
        """Decode non-empty pieces, returning the de-chunked payload pieces."""
        ASSERT.false(self.eof)
        output = []
        for data in pieces:
            if data:
                self._decode(data, output)
        return output
    def _decode(self, data, output):
        # Consume ``data``, appending payload byte ranges to ``output``.
        def move(n):
            """Move ``n`` bytes from ``data`` to ``output``."""
            nonlocal data
            ASSERT.greater_or_equal(self._chunk_remaining, n)
            output.append(data[:n])
            data = data[n:]
            self._chunk_remaining -= n
        def expect(pattern):
            """Drop ``pattern`` prefix from ``data``."""
            nonlocal data
            # Only the part of the pattern that fits in ``data`` is
            # checked/consumed; the return value says how much matched.
            n = min(len(pattern), len(data))
            ASSERT.equal(pattern[:n], data[:n])
            data = data[n:]
            return n
        while data:
            if self._chunk_remaining > 0:
                # Inside a chunk: pass payload bytes straight through.
                move(min(self._chunk_remaining, len(data)))
                continue
            if self._chunk_remaining == 0:
                # End of payload: swallow the CRLF terminator.  A partial
                # match (CR only) leaves state -1 for the next call.
                self._chunk_remaining -= expect(b'\r\n')
                continue
            if self._chunk_remaining == -1:
                # Only the CR arrived last time; swallow the LF now.
                self._chunk_remaining -= expect(b'\n')
                continue
            # State -2: parse a chunk-size line, buffering partial lines
            # across calls in self._buffer.
            match = self._CRLF_PATTERN.search(data)
            if not match:
                self._append(data)
                # The CRLF itself may straddle two reads; re-search the
                # accumulated residue.
                match = self._CRLF_PATTERN.search(self._buffer[:self._pos])
                if not match:
                    break
                data = self._reset()
            chunk_size = data[:match.start()]
            if self._pos > 0:
                # Join residue from previous reads with this piece.
                self._append(chunk_size)
                chunk_size = self._reset()
            # TODO: Handle parameters (stuff after ';').
            chunk_size = int(
                bytes(chunk_size).split(b';', maxsplit=1)[0],
                base=16,
            )
            if chunk_size == 0:
                # TODO: Handle trailers.
                self.eof = True
            else:
                ASSERT.false(self.eof)
            data = data[match.end():]
            self._chunk_remaining = chunk_size
        if self.eof:
            # Nothing may follow the terminating chunk.
            ASSERT.empty(data)
    def _append(self, data):
        # Stash ``data`` into the residue buffer (asserts it fits in 64B).
        end = ASSERT.less_or_equal(self._pos + len(data), len(self._buffer))
        self._buffer[self._pos:end] = data
        self._pos = end
    def _reset(self):
        # Return the buffered residue and mark the buffer empty.
        data = self._buffer[:self._pos]
        self._pos = 0
        return data
    def flush(self):
        """Assert the body ended cleanly; chunk decoding buffers no payload."""
        ASSERT.true(self.eof)
        ASSERT.equal(self._chunk_remaining, -2)
        return []
class ContentDecoder:
    """Adapt a urllib3 content decoder (gzip/deflate) to the chain API."""

    def __init__(self, urllib3_decoder):
        self._decoder = urllib3_decoder

    def decode(self, pieces):
        """Decompress every non-empty piece."""
        output = []
        for piece in pieces:
            if piece:
                output.append(self._decoder.decompress(piece))
        return output

    def flush(self):
        """Return whatever the underlying decompressor still buffers."""
        return [self._decoder.flush()]
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/clients/g1/http/clients/recvfiles.py",
"copies": "1",
"size": "7242",
"license": "mit",
"hash": -448979069576053700,
"line_mean": 28.3198380567,
"line_max": 76,
"alpha_frac": 0.5691797846,
"autogenerated": false,
"ratio": 4.063973063973064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133152848573064,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'register',
'type_strategy',
'new_registry',
]
import lollipop.types as lt
import lollipop.validators as lv
import lollipop.utils as lu
import hypothesis.strategies as hs
import hypothesis.extra.datetime as hsd
import inspect
import six
try:
import hypothesis_regex
except ImportError:
hypothesis_regex = None
def find_validators(validators, validator_type):
return [validator
for validator in validators
if isinstance(validator, validator_type)]
def validate_with(v, x, context):
try:
v(x, context)
return True
except lv.ValidationError:
return False
def apply_validators(strategy, validators, context=None):
for validator in validators:
if isinstance(validator, lv.Predicate):
strategy = strategy.filter(
lambda x: validator.predicate(x, context)
)
else:
strategy = strategy.filter(
lambda x: validate_with(validator, x, context)
)
return strategy
class Registry(object):
def __init__(self):
self._converters = {}
self._type_converters = []
def __copy__(self):
registry = self.__class__()
for k, v in six.iteritems(self._converters):
registry.register(k, v)
for k, v in self._type_converters:
registry.register(k, v)
return registry
def register(self, type_or_class, converter):
if not callable(converter):
raise ValueError('Converter should be callable')
if isinstance(type_or_class, lt.Type):
self._converters[type_or_class] = converter
elif inspect.isclass(type_or_class) and issubclass(type_or_class, lt.Type):
self._type_converters.insert(0, (type_or_class, converter))
else:
raise ValueError('Type should be schema type or schema type class')
def convert(self, type, context=None):
if type in self._converters:
return self._converters[type](self, type, context=context)
any_of_validators = find_validators(type.validators, lv.AnyOf)
if any_of_validators:
allowed_values = set(any_of_validators[0].choices)
for validator in any_of_validators[1:]:
allowed_values = allowed_values.intersection(set(validator.choices))
if not allowed_values:
raise ValueError('Type %s does not match any value' % type)
if len(allowed_values) == 1:
return hs.just(list(allowed_values)[0])
return hs.one_of(map(hs.just, allowed_values))
for type_class, converter in self._type_converters:
if isinstance(type, type_class):
strategy = converter(self, type, context=context)
return apply_validators(strategy, type.validators)
raise ValueError('Unsupported type')
def any_strategy(registry, type, context=None):
return hs.text()
def string_strategy(registry, type, context=None):
if hypothesis_regex:
regex_validators = find_validators(type.validators, lv.Regexp)
if regex_validators:
validator = regex_validators[0]
return hypothesis_regex.regex(validator.regexp)
length_validators = find_validators(type.validators, lv.Length)
min_length, max_length = None, None
for validator in length_validators:
if validator.exact is not None or validator.min is not None:
value = validator.exact or validator.min
min_length = value if min_length is None else max([min_length, value])
if validator.exact is not None or validator.max is not None:
value = validator.exact or validator.min
max_length = value if max_length is None else min([max_length, value])
if min_length is not None and max_length is not None:
if min_length > max_length:
raise ValueError('Invalid settings for length validators')
return hs.text(min_size=min_length, max_size=max_length)
def integer_strategy(registry, type, context=None):
range_validators = find_validators(type.validators, lv.Range)
min_value, max_value = None, None
for validator in range_validators:
if validator.min is not None:
min_value = validator.min \
if min_value is None else max([min_value, validator.min])
if validator.max is not None:
max_value = validator.max \
if max_value is None else min([max_value, validator.max])
return hs.integers(min_value=min_value, max_value=max_value)
def float_strategy(registry, type, context=None):
range_validators = find_validators(type.validators, lv.Range)
min_value, max_value = None, None
for validator in range_validators:
if validator.min is not None:
min_value = validator.min \
if min_value is None else max([min_value, validator.min])
if validator.max is not None:
max_value = validator.max \
if max_value is None else min([max_value, validator.max])
return hs.floats(min_value=min_value, max_value=max_value)
def boolean_strategy(registry, type, context=None):
return hs.booleans()
def datetime_strategy(registry, type, context=None):
return hsd.datetimes()
def date_strategy(registry, type, context=None):
return hsd.dates()
def time_strategy(registry, type, context=None):
return hsd.times()
def list_strategy(registry, type, context=None):
min_length, max_length = None, None
for validator in find_validators(type.validators, lv.Length):
if validator.exact is not None or validator.min is not None:
value = validator.exact or validator.min
min_length = value if min_length is None else max([min_length, value])
if validator.exact is not None or validator.max is not None:
value = validator.exact or validator.min
max_length = value if max_length is None else min([max_length, value])
if min_length is not None and max_length is not None:
if min_length > max_length:
raise ValueError('Invalid settings for length validators')
unique_key = None
for validator in find_validators(type.validators, lv.Unique):
unique_key = validator.key
item_strategy = registry.convert(type.item_type, context=context)
for validator in find_validators(type.validators, lv.Each):
item_strategy = apply_validators(item_strategy, validator.validators)
return hs.lists(item_strategy,
min_size=min_length, max_size=max_length, unique_by=unique_key)
def tuple_strategy(registry, type, context=None):
return hs.tuples(*(registry.convert(item_type, context=context)
for item_type in type.item_types))
def dict_strategy(registry, type, context=None):
if getattr(type.value_types, 'default', None):
return hs.dictionaries(
keys=registry.convert(type.key_type, context=context),
values=registry.convert(type.value_types.default, context=context),
)
else:
return hs.fixed_dictionaries({
k: registry.convert(v, context=context)
for k, v in six.iteritems(type.value_types)
})
def object_strategy(registry, type, context=None):
constructor = type.constructor or (lambda **kwargs: lu.OpenStruct(kwargs))
return hs.builds(constructor, **{
k: registry.convert(v.field_type, context=context)
for k, v in six.iteritems(type.fields)
})
def constant_strategy(registry, type, context=None):
return hs.just(type.value)
def one_of_strategy(registry, type, context=None):
types = type.types
if hasattr(types, 'values'):
types = types.values()
return hs.one_of(*[registry.convert(t, context=context) for t in types])
def optional_strategy(registry, type, context=None):
inner_strategy = registry.convert(type.inner_type, context=context)
if type.load_default is not None:
return inner_strategy
return hs.one_of(hs.none(), inner_strategy)
def dump_only_strategy(registry, type, context=None):
return hs.nothing()
def inner_type_strategy(registry, type, context=None):
return registry.convert(type.inner_type, context=context)
def transform_strategy(registry, type, context=None):
return registry.convert(type.inner_type, context=context)\
.map(lambda x: type.post_load(x, context))
def new_registry():
registry = Registry()
for k, v in [
(lt.Any, any_strategy),
(lt.String, string_strategy),
(lt.Integer, integer_strategy),
(lt.Float, float_strategy),
(lt.Boolean, boolean_strategy),
(lt.DateTime, datetime_strategy),
(lt.Date, date_strategy),
(lt.Time, time_strategy),
(lt.List, list_strategy),
(lt.Tuple, tuple_strategy),
(lt.Dict, dict_strategy),
(lt.Object, object_strategy),
(lt.Constant, constant_strategy),
(lt.OneOf, one_of_strategy),
(lt.Optional, optional_strategy),
(lt.DumpOnly, dump_only_strategy),
(lt.LoadOnly, inner_type_strategy),
(lt.Transform, transform_strategy),
]:
registry.register(k, v)
return registry
DEFAULT_REGISTRY = new_registry()
register = DEFAULT_REGISTRY.register
type_strategy = DEFAULT_REGISTRY.convert
| {
"repo_name": "maximkulkin/lollipop-hypothesis",
"path": "lollipop_hypothesis/strategy.py",
"copies": "1",
"size": "9474",
"license": "mit",
"hash": -4621665009658123000,
"line_mean": 31.7820069204,
"line_max": 84,
"alpha_frac": 0.6476673,
"autogenerated": false,
"ratio": 3.980672268907563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128339568907563,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'RegistryMixin',
'get',
'reset',
]
import logging
from collections import defaultdict
from iga.core import WriteOnceDict
from iga.error import IgaError
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
# Global namespace -> {name: object} table; WriteOnceDict forbids
# re-registration of the same name within a namespace.
_REGISTRY = defaultdict(WriteOnceDict)
def get(namespace):
    """Return the (mutable) registry mapping for ``namespace``."""
    return _REGISTRY[namespace]
def reset():
    """Drop every registered namespace (useful for tests)."""
    _REGISTRY.clear()
class RegistryMixin:
    """Mixin giving a class its own per-class object registry."""

    @classmethod
    def register(cls, named_obj):
        """Register ``named_obj`` under its own ``name`` attribute."""
        cls.register_with_name(named_obj.name, named_obj)

    @classmethod
    def register_with_name(cls, name, obj):
        """Register ``obj`` under ``name``; ``obj`` must be an instance of ``cls``."""
        if not isinstance(obj, cls):
            raise IgaError('%r is not of type %r' % (obj, cls))
        namespace = _make_namespace(cls)
        LOG.info('add object named %r to %s', name, namespace)
        _REGISTRY[namespace][name] = obj

    @classmethod
    def get_all_objects(cls):
        """Return the name -> object mapping for this class's namespace."""
        return _REGISTRY[_make_namespace(cls)]

    @classmethod
    def get_object(cls, name):
        """Look up a registered object by name (KeyError if absent)."""
        return cls.get_all_objects()[name]
def _make_namespace(klass):
return '%s.%s' % (klass.__module__, klass.__qualname__)
| {
"repo_name": "clchiou/iga",
"path": "iga/registry.py",
"copies": "1",
"size": "1132",
"license": "mit",
"hash": 792481317270191200,
"line_mean": 19.962962963,
"line_max": 63,
"alpha_frac": 0.6422261484,
"autogenerated": false,
"ratio": 3.5709779179810726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47132040663810726,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'repr_object',
'str_value',
'get_schema_id',
'camel_to_lower_snake',
'camel_to_upper_snake',
'snake_to_lower_camel',
'dicts_get',
]
import re
from . import native
def repr_object(obj):
    """The default __repr__ implementation: ``<module.QualName 0xID str(obj)>``."""
    cls = type(obj)
    return '<{}.{} 0x{:x} {}>'.format(
        cls.__module__, cls.__qualname__, id(obj), obj)
def str_value(value):
    """Format Python value to look like Cap'n Proto textual format."""
    # Identity checks first: True/False are ints, so they must be
    # handled before any isinstance dispatch.
    if value is None:
        return 'void'
    if value is True:
        return 'true'
    if value is False:
        return 'false'
    if isinstance(value, str):
        return '"%s"' % value.replace('"', '\\"')
    if isinstance(value, bytes):
        return '0x"%s"' % ' '.join('%02x' % octet for octet in value)
    return str(value)
def get_schema_id(schema):
    """Return a hashable unique id for ``schema``.

    List schemas and branded (generic) schemas are not identified by a
    single node id, so they get structured tuple ids instead.
    """
    if isinstance(schema, native.ListSchema):
        return _get_list_schema_id(schema)
    node = schema.getProto()
    if schema.isBranded():
        return _get_branded_schema_id(schema, node)
    else:
        # Reuse the node fetched above instead of calling getProto() again.
        return node.getId()
def _get_branded_schema_id(schema, node):
node_id = node.getId()
schema_id = ['g', node_id]
balist = schema.getBrandArgumentsAtScope(node_id)
schema_id.extend(balist[i].hashCode() for i in range(balist.size()))
return tuple(schema_id)
def _get_list_schema_id(schema):
"""Generate an unique id for list schema.
We cannot call schema.getProto().getId() to generate an unique id
because ListSchema is different - it is not associated with a Node.
"""
type_ = schema.getElementType()
level = 0
while type_.isList():
type_ = type_.asList().getElementType()
level += 1
return ('l', level, type_.hashCode())
# An upper case word followed a lower case letter. For now a "word" is
# anything longer than 1 letter.
CAMEL_PATTERN_1 = re.compile(r'([A-Z]{2})([a-z0-9])')
# A lower case letter followed an upper case letter.
CAMEL_PATTERN_2 = re.compile(r'([a-z0-9])([A-Z])')
def _camel_to_snake(camel):
    """Insert underscores at the word boundaries of a camelCase name."""
    with_breaks = CAMEL_PATTERN_1.sub(r'\1_\2', camel)
    return CAMEL_PATTERN_2.sub(r'\1_\2', with_breaks)
def camel_to_upper_snake(camel):
    """Turn a CamelCase or camelCase name into a SNAKE_CASE one."""
    return _camel_to_snake(camel).upper()
def camel_to_lower_snake(camel):
    """Turn a CamelCase or camelCase name into a snake_case one."""
    return _camel_to_snake(camel).lower()
SNAKE_PATTERN = re.compile(r'_([a-z])')
def snake_to_lower_camel(snake):
    """Turn a SNAKE_CASE or snake_case name into camelCase."""
    def _upper(match):
        return match.group(1).upper()
    return SNAKE_PATTERN.sub(_upper, snake.lower())
def dicts_get(dicts, key, default=None):
    """Do get() on multiple dict.
    NOTE: While `d1.get(k) or d2.get(k)` looks cool, it is actually
    incorrect because d1 might contain false value (like an empty tuple)
    and you should return that instead of going on to d2.
    """
    # Deliberately use EAFP indexing rather than .get() so that exotic
    # mappings (e.g. defaultdict) keep their __getitem__ semantics.
    for mapping in dicts:
        try:
            return mapping[key]
        except KeyError:
            continue
    return default
| {
"repo_name": "clchiou/garage",
"path": "py/capnp/capnp/bases.py",
"copies": "1",
"size": "3113",
"license": "mit",
"hash": -4218830512741482500,
"line_mean": 25.6068376068,
"line_max": 77,
"alpha_frac": 0.6186957918,
"autogenerated": false,
"ratio": 3.2494780793319413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4368173871131941,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Request',
'RequestParser',
]
import re
from kitsu.http.errors import *
from kitsu.http.headers import Headers
from kitsu.http.parsers import LineParser
class Request(object):
    """An HTTP request line plus headers.

    Python 2 code (note the ``unicode`` check in toLines).  Parsing is
    line-driven via parseLine(); serialization via toLines()/toString().
    """
    def __init__(self, method="GET", target="/", version=(1,1), headers=(), body=None):
        self.method = method
        self.target = target
        self.version = version
        self.headers = Headers(headers)
        self.body = body
        # Parser state machine: COMMAND -> HEADERS -> DONE.
        self.__parserState = 'COMMAND'
    def toLines(self, lines=None):
        # Serialize request line + headers as CRLF-terminated lines,
        # appending to ``lines`` if given.
        if lines is None:
            lines = []
        target = self.target
        # Whitespace in the target would corrupt the request line.
        target = re.sub(r"\s", "+", target)
        if isinstance(target, unicode):
            target = target.encode('utf-8')
        lines.append("%s %s HTTP/%d.%d\r\n" % (self.method, target, self.version[0], self.version[1]))
        self.headers.toLines(lines)
        lines.append("\r\n")
        return lines
    def toString(self):
        return ''.join(self.toLines())
    def __str__(self):
        return self.toString()
    def __parseCommand(self, line):
        # Parse "METHOD target HTTP/n.n" into method/target/version.
        parts = line.split(None, 2)
        if len(parts) != 3:
            raise HTTPDataError("request must be in 'METHOD target HTTP/n.n' format: %r" % (line,))
        method, target, version = parts
        if not version.startswith('HTTP/'):
            raise HTTPDataError("protocol must be HTTP: %r" % (line,))
        version = version[5:].split('.')
        if len(version) != 2:
            raise HTTPDataError("invalid version: %r" % (line,))
        try:
            version = (int(version[0]), int(version[1]))
        except ValueError:
            raise HTTPDataError("invalid version: %r" % (line,))
        self.method = method
        self.target = target
        self.version = version
    def parseLine(self, line):
        # Feed one line; returns True while more lines are expected,
        # False once the header block is complete.
        if self.__parserState == 'COMMAND':
            if not line:
                # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html
                # Just ignore all empty lines for maximum compatibility
                return True
            self.__parseCommand(line)
            self.__parserState = 'HEADERS'
            return True
        elif self.__parserState == 'HEADERS':
            if not self.headers.parseLine(line):
                self.__parserState = 'DONE'
                return False
            return True
        return False
class RequestParser(LineParser):
    """Line-driven parser that yields a single Request when complete."""
    def __init__(self):
        self.request = Request()
    def parseLine(self, line):
        # Keep feeding lines until the request reports completion.
        if self.request.parseLine(line):
            return ()
        self.done = True
        return (self.request,)
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/request.py",
"copies": "1",
"size": "2667",
"license": "mit",
"hash": -5490158163749073000,
"line_mean": 31.5243902439,
"line_max": 102,
"alpha_frac": 0.5519310086,
"autogenerated": false,
"ratio": 4.096774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5148705202148387,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Response',
'ResponseParser',
]
from kitsu.http.errors import *
from kitsu.http.headers import Headers
from kitsu.http.parsers import LineParser
class Response(object):
    """An HTTP response status line plus headers (body attached separately)."""
    def __init__(self, version=(1,1), code=200, phrase='OK', headers=(), body=None):
        self.version = version
        self.code = code
        self.phrase = phrase
        self.headers = Headers(headers)
        self.body = body
        # Parser state machine: STATUS -> HEADERS -> DONE.
        self.__parserState = 'STATUS'
    def toLines(self, lines=None):
        # Serialize status line + headers as CRLF-terminated lines,
        # appending to ``lines`` if given.
        if lines is None:
            lines = []
        lines.append("HTTP/%d.%d %d %s\r\n" % (self.version[0], self.version[1], self.code, self.phrase))
        self.headers.toLines(lines)
        lines.append("\r\n")
        return lines
    def toString(self):
        return ''.join(self.toLines())
    def __str__(self):
        return self.toString()
    def __parseStatus(self, line):
        # Parse "HTTP/n.n code [phrase]"; the reason phrase is optional.
        parts = line.split(None, 2)
        if len(parts) not in (2, 3):
            raise HTTPDataError("response must be in 'HTTP/n.n status message' format: %r" % (line,))
        version = parts[0]
        code = parts[1]
        phrase = parts[2] if len(parts) == 3 else ""
        if not version.startswith('HTTP/'):
            raise HTTPDataError("protocol must be HTTP: %r" % (line,))
        version = version[5:].split('.')
        if len(version) != 2:
            raise HTTPDataError("invalid version: %r" % (line,))
        try:
            version = (int(version[0]), int(version[1]))
        except ValueError:
            raise HTTPDataError("invalid version: %r" % (line,))
        try:
            code = int(code)
        except ValueError:
            raise HTTPDataError("status code must be a number: %r" % (line,))
        self.version = version
        self.code = code
        self.phrase = phrase
    def parseLine(self, line):
        # Feed one line; returns True while more lines are expected,
        # False once the header block is complete.
        state = self.__parserState
        if state == 'STATUS':
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html
            # Just ignore all empty lines for maximum compatibility
            if line:
                self.__parseStatus(line)
                self.__parserState = 'HEADERS'
            return True
        if state == 'HEADERS':
            if self.headers.parseLine(line):
                return True
            self.__parserState = 'DONE'
            return False
        return False
class ResponseParser(LineParser):
    """Line-driven parser that yields a single Response when complete."""
    def __init__(self):
        self.response = Response()
    def parseLine(self, line):
        # Keep feeding lines until the response reports completion.
        if self.response.parseLine(line):
            return []
        self.done = True
        return [self.response]
| {
"repo_name": "snaury/kitsu.http",
"path": "kitsu/http/response.py",
"copies": "1",
"size": "2717",
"license": "mit",
"hash": -1713036753206793000,
"line_mean": 31.734939759,
"line_max": 105,
"alpha_frac": 0.5483989695,
"autogenerated": false,
"ratio": 4.043154761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01823910514421727,
"num_lines": 83
} |
# NOTE: Python 2 module (see ``import __builtin__`` near the bottom).
__all__=[]
# Debug output sinks; by default a log file under /tmp, opened as an
# import-time side effect.
_rgb_files = [open('/tmp/rgb_debug.txt','w')]
import sys, time
# When truthy, debug writes flush every sink immediately.
_rgb_auto_flush=1
def rgb_time(offs=1):
    """Return seconds since module import (offs truthy) or since the epoch."""
    now = time.time()
    if offs:
        return now - _rgb_t0
    return now
# Import timestamp used as the origin for relative times.
_rgb_t0 = rgb_time(0)
def rgb_debug(*msg,**kwds):
    """Write the space-joined, newline-terminated message to every sink."""
    line = ' '.join(map(str, msg)) + '\n'
    for sink in _rgb_files:
        sink.write(line)
        if _rgb_auto_flush:
            flush = getattr(sink, 'flush', None)
            if flush:
                flush()
    # NOTE(review): **kwds is accepted but unused here -- presumably for
    # call-compatibility with rgb_stack; confirm before removing.
def rgb_pprint(*obj):
    """Pretty-print objects to every sink.

    A leading string argument (Python 2 ``str``/``unicode``) is emitted
    as a plain header line via rgb_debug instead of pretty-printed.
    """
    if obj:
        if isinstance(obj[0],(str,unicode)):
            rgb_debug(obj[0])
            obj = obj[1:]
        import pprint
        for o in obj:
            for f in _rgb_files:
                pprint.pprint(o,f)
def rgb_stack(*msg,**kwds):
    """Print ``msg`` followed by a formatted stack trace of the caller.

    Keyword options: ``_frameCount`` limits how many frames are shown
    (0 means all); ``_showLocals`` also dumps each frame's locals.
    """
    import inspect
    rgb_debug(*msg)
    # Skip frames that belong to this module: they are recognized by
    # sharing our _rgb_t0 global object.
    i = 1
    while 1:
        f = sys._getframe(i)
        if f.f_globals.get('_rgb_t0',None) is not _rgb_t0: break
        i += 1
    F = inspect.stack()
    frameCount = kwds.get('_frameCount',0)
    showLocals = kwds.get('_showLocals',0)
    if not frameCount:
        F = F[i:]
    else:
        F = F[i:i+frameCount]
    for f in F:
        # Frame record: f[1]=filename, f[2]=lineno, f[3]=function name,
        # f[4]=source context lines, f[0]=the frame object itself.
        rgb_debug('file:',f[1],'line:',f[2],'in',f[3])
        for l in f[4]: rgb_debug(l)
        if showLocals:
            rgb_debug(' locals=%r' % f[0].f_locals)
class _RGB_Wrapper(object):
def __init__(self,func,funcname=None,show=0,show_kwds={}):
self.func = func
self.funcname = funcname or func.__name__
if not callable(show):
show=show and rgb_stack or rgb_debug
self.show = show
self.show_kwds= show_kwds
self.called = 0
def __call__(self,*args,**kwds):
func = self.func
if not self.called:
self.called = 1
try:
self.show('%s(*%r,**%r) called' % (self.funcname,args,kwds),**self.show_kwds)
finally:
self.called = 0
return func(*args,**kwds)
def rgb_wrap(func,show=1,funcname=None,show_kwds={}):
    """Return ``func`` wrapped so calls are reported via rgb_debug/rgb_stack."""
    return _RGB_Wrapper(func, funcname=funcname, show=show, show_kwds=show_kwds)
class rgb_watch_writes:
    """File proxy that invokes a callback before every write."""

    def __init__(self,f,cb,*cbargs):
        self._f = f
        self._cb = cb
        self._cbargs = cbargs

    def write(self,msg):
        # Report first, then forward to the real file.
        self._cb(msg, *self._cbargs)
        self._f.write(msg)

    def __getattr__(self,a):
        # Everything except write() is delegated to the wrapped file.
        return getattr(self._f, a)
def rgb_print_exc(*msg):
    """Print the current traceback (preceded by ``msg``, if any) to every sink."""
    if msg:
        rgb_debug(*msg)
    import traceback
    for sink in _rgb_files:
        traceback.print_exc(file=sink)
    if _rgb_auto_flush:
        rgb_flush()
def rgb_add_file(f):
    """Register ``f`` as an additional debug sink (idempotent)."""
    if f not in _rgb_files:
        _rgb_files.append(f)
def rgb_auto_flush(v=1):
    """Enable (truthy ``v``) or disable auto-flushing after debug writes.

    Bug fix: the original assigned a *local* ``_rgb_auto_flush``, so the
    module-level flag was never actually updated; it must be declared
    global for the assignment to take effect.
    """
    global _rgb_auto_flush
    _rgb_auto_flush = v
def rgb_flush():
    """Flush every sink that exposes a flush() method."""
    for sink in _rgb_files:
        flush = getattr(sink, 'flush', None)
        if flush:
            flush()
# Publish all helpers as builtins so any module can call them without
# importing this one (Python 2: ``__builtin__``).
import __builtin__
__builtin__.rgb_debug = rgb_debug
__builtin__.rgb_stack = rgb_stack
__builtin__.rgb_pprint = rgb_pprint
__builtin__.rgb_time = rgb_time
__builtin__.rgb_print_exc = rgb_print_exc
__builtin__.rgb_auto_flush = rgb_auto_flush
__builtin__.rgb_flush = rgb_flush
__builtin__.rgb_wrap = rgb_wrap
__builtin__.rgb_add_file = rgb_add_file
__builtin__.rgb_watch_writes = rgb_watch_writes
# Replace the module object in sys.modules with the rgb_debug function,
# so ``import rgb_debug`` then ``rgb_debug(...)`` works directly.
# NOTE(review): __module__ is normally a string; storing the module
# object on the function presumably keeps the module (and its globals)
# alive after it is displaced from sys.modules -- confirm.
rgb_debug.__module__=sys.modules['rgb_debug']
sys.modules['rgb_debug'] = rgb_debug
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/576381_mad_debugging_using_builtin/recipe-576381.py",
"copies": "1",
"size": "3168",
"license": "mit",
"hash": 7865288659716538000,
"line_mean": 27.8,
"line_max": 93,
"alpha_frac": 0.5596590909,
"autogenerated": false,
"ratio": 3.0787172011661808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9021459353813713,
"avg_score": 0.023383387650493446,
"num_lines": 110
} |
__all__ = [
'RpcConnectionError',
'RpcError',
'Connector',
]
import contextlib
import functools
import logging
import pickle
from multiprocessing.connection import Client
from garage.assertions import ASSERT
LOG = logging.getLogger(__name__)
class RpcError(Exception):
    """Base error for RPC failures against the worker-process server."""
    pass
class RpcConnectionError(RpcError):
    """Raised when the server socket cannot be reached or hangs up."""
    pass
class Connector:
    """Client-side handle for connecting to a worker-process RPC server."""
    def __init__(self, server_proc, address, protocol, authkey):
        self.server_proc = server_proc
        self.address = address
        # Pickle protocol for requests (server may be an older Python).
        self.protocol = protocol
        self.authkey = bytes(authkey, encoding='ascii')
    @contextlib.contextmanager
    def connect(self):
        """Yield a connected ServerStub; the stub is closed on exit."""
        try:
            with Client(self.address, authkey=self.authkey) as conn:
                # The server greets each connection with its version info.
                version_info = conn.recv()['version_info']
                LOG.debug('server version %s', version_info)
                server = ServerStub(conn, self.address, self.protocol)
                try:
                    yield server
                finally:
                    server.close()
        except (FileNotFoundError, EOFError) as exc:
            # Socket file missing, or server hung up mid-handshake.
            raise RpcConnectionError(
                'cannot connect to %s' % self.address) from exc
    def shutdown(self):
        """Ask the server to shut down; logs instead of raising on failure."""
        # close/shutdown should never fail (i.e., no-throw).
        try:
            with self.connect() as server:
                server.shutdown()
        except (
            ConnectionRefusedError, ConnectionResetError, RpcConnectionError
        ) as exc:
            LOG.warning('cannot shutdown server: %r', exc)
class ServerStub:
    """Per-connection stub bundling variable access (vars) and calls (funcs)."""
    def __init__(self, conn, address, protocol):
        self.stub = Stub(conn, address, protocol)
        self.vars = Vars(self.stub)
        self.funcs = Funcs(self.stub)
        # Don't call close/shutdown if it has been closed. Although
        # this doesn't make the program "more right", it keeps logs
        # cleaner.
        self._closed = False
    def shutdown(self):
        """Send the 'shutdown' command; return the error (falsy on success)."""
        # close/shutdown should never fail (i.e., no-throw).
        if self._closed:
            return False
        try:
            _, err = self.stub({'command': 'shutdown'})
            if err:
                LOG.error('cannot shutdown server due to %s', err)
        except RpcError as exc:
            LOG.warning('cannot send shutdown request', exc_info=True)
            err = exc
        if not err:
            self._closed = True
        return err
    def close(self):
        """Send the 'close' command; return the error (falsy on success)."""
        # close/shutdown should never fail (i.e., no-throw).
        if self._closed:
            return False
        try:
            _, err = self.stub({'command': 'close'})
            if err:
                LOG.error('cannot close stub due to %s', err)
        except RpcError as exc:
            # A broken pipe just means the server is already gone; no
            # need for a full traceback in that case.
            LOG.warning(
                'cannot send close request: %r',
                exc,
                exc_info=not isinstance(exc.__cause__, BrokenPipeError),
            )
            err = exc
        if not err:
            self._closed = True
        return err
    def execute(self, source):
        """Execute Python ``source`` text in the server process."""
        _, err = self.stub({
            'command': 'execute',
            'source': source,
            'filename': self.stub.address,
        })
        if err:
            raise RpcError('cannot execute %r due to %s' % (source, err))
class Vars:
    """Attribute-style proxy for variables living in the server process.

    Implemented with __getattr__/__setattr__/__delattr__; the stub must
    therefore be stored via object.__setattr__ so that storing it does
    not recurse into the remote 'set' command.
    """
    def __init__(self, stub):
        object.__setattr__(self, '_stub', stub)
    def __getattr__(self, name):
        response, err = self._stub({'command': 'get', 'name': name})
        # The server replies with exactly one of: a value, or an error.
        ASSERT(
            ('value' in response) != bool(err),
            'expect either %r or %r but not both', response, err,
        )
        if err:
            raise AttributeError('cannot get %r due to %s' % (name, err))
        return response.get('value')
    def __setattr__(self, name, value):
        _, err = self._stub({'command': 'set', 'name': name, 'value': value})
        if err:
            raise AttributeError('cannot set %r due to %s' % (name, err))
    def __delattr__(self, name):
        _, err = self._stub({'command': 'del', 'name': name})
        if err:
            raise AttributeError('cannot delete %r due to %s' % (name, err))
class Funcs:
    """Proxy exposing server-side functions as plain attributes."""

    def __init__(self, stub):
        self._stub = stub

    def __getattr__(self, name):
        # Bind the remote function name; the actual call happens later.
        return functools.partial(self._call_stub, name)

    def _call_stub(self, name, *args, **kwargs):
        request = {'command': 'call', 'name': name, 'args': args, 'kwargs': kwargs}
        response, err = self._stub(request)
        # The server replies with exactly one of: a value, or an error.
        ASSERT(
            ('value' in response) != bool(err),
            'expect either %r or %r but not both', response, err,
        )
        if err:
            raise RpcError('cannot call %r due to %s' % (name, err))
        return response.get('value')
class Stub:
    """Lowest transport layer: pickle a request, read back the response."""

    def __init__(self, conn, address, protocol):
        self.conn = conn
        self.address = address
        self.protocol = protocol

    def __call__(self, request):
        try:
            payload = pickle.dumps(request, protocol=self.protocol)
            self.conn.send_bytes(payload)
            response = self.conn.recv()
        except IOError as exc:
            raise RpcError('cannot send request %r' % request) from exc
        return response, response.get('error')
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/multiprocessing/client.py",
"copies": "1",
"size": "5176",
"license": "mit",
"hash": 6295408970142538000,
"line_mean": 28.4090909091,
"line_max": 79,
"alpha_frac": 0.5459814529,
"autogenerated": false,
"ratio": 4.150761828388132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 176
} |
__all__ = [
'RpcConnectionError',
'RpcError',
'python',
]
import contextlib
import logging
import os
import os.path
import random
import shutil
import subprocess
import tempfile
import time
import garage.multiprocessing.server
from garage.multiprocessing.client import Connector
from garage.multiprocessing.client import RpcConnectionError
from garage.multiprocessing.client import RpcError
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def python(
        executable='python2',
        protocol=2,
        authkey=None,
        max_workers=8,
        popen_kwargs=None):
    """Start a server and return a Connector object
    (default to python2).
    """
    # A random key suffices: it only authenticates the local unix socket.
    authkey = authkey or str(random.randint(1, 1e8))
    with contextlib.ExitStack() as stack:
        address = stack.enter_context(create_socket())
        server_proc = stack.enter_context(start_server(
            executable, address, authkey, max_workers, popen_kwargs or {}))
        connector = Connector(server_proc, address, protocol, authkey)
        try:
            yield connector
        finally:
            # Always ask the server to exit, even if the body raised.
            connector.shutdown()
@contextlib.contextmanager
def create_socket():
    """Yield a unix-socket path inside a fresh temporary directory.

    The directory (and any socket file created in it) is removed on exit.
    """
    workdir = tempfile.mkdtemp()
    try:
        path = os.path.join(workdir, 'server.socket')
        LOG.info('socket path %s', path)
        yield path
    finally:
        LOG.info('remove socket path %s', path)
        shutil.rmtree(workdir)
@contextlib.contextmanager
def start_server(executable, address, authkey, max_workers, popen_kwargs):
    """Spawn the RPC server subprocess and wait for its socket to appear.

    The authkey is handed to the child via the AUTHKEY environment
    variable rather than argv.
    """
    script_path = garage.multiprocessing.server.__file__
    args = [
        executable, script_path,
        '--listen-sock', address,
        '--max-workers', str(max_workers),
    ]
    # Mirror this process's log verbosity onto the server.
    if LOG.isEnabledFor(logging.DEBUG):
        args.append('-vv')
    elif LOG.isEnabledFor(logging.INFO):
        args.append('-v')
    env = dict(os.environ)
    env['AUTHKEY'] = authkey
    # start_new_session detaches the child from this process's group.
    server_proc = subprocess.Popen(
        args, start_new_session=True, env=env, **popen_kwargs)
    try:
        # The server signals readiness by creating the socket file.
        wait_file_creation(address, timeout=3)
        yield server_proc
    finally:
        # NOTE(review): wait() assumes the server has already been told to
        # shut down (e.g. via Connector.shutdown); if startup failed before
        # any shutdown could be issued, this may block — confirm intended.
        if server_proc.wait() != 0:
            LOG.warning('server returns %d', server_proc.returncode)
def wait_file_creation(path, timeout):
    """Block until *path* exists, polling every 0.1 s.

    Raises:
        Exception: if *path* still does not exist after *timeout* seconds.
    """
    # Use a monotonic clock so wall-clock adjustments cannot stretch or
    # shrink the timeout, and check the deadline before sleeping so we
    # never oversleep past it (the original slept first, then checked).
    deadline = time.monotonic() + timeout
    while not os.path.exists(path):
        if time.monotonic() > deadline:
            raise Exception('timeout')
        time.sleep(0.1)
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/multiprocessing/__init__.py",
"copies": "1",
"size": "2456",
"license": "mit",
"hash": 3500680503647709000,
"line_mean": 26.2888888889,
"line_max": 75,
"alpha_frac": 0.6486156352,
"autogenerated": false,
"ratio": 3.8922345483359746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5040850183535974,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Rule',
'RuleData',
'RuleFunc',
'RuleType',
]
import logging
from collections import namedtuple
import iga.context
import iga.filetype
import iga.precond
from iga.core import KeyedSets
from iga.fargparse import FuncArgsParser
from iga.registry import RegistryMixin
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class RuleType(RegistryMixin):
    """Describes a category of build rules and how outputs are derived."""

    # Sentinel meaning "any registered file type is acceptable".
    ANY_FILE_TYPE = object()

    @staticmethod
    def make(**kwargs):
        """Construct a RuleType, defaulting to producing no outputs."""
        kwargs.setdefault('make_outputs', _make_no_outputs)
        return RuleType(**kwargs)

    def __init__(self, name, input_types, output_types, make_outputs,
                 ninja_rules, generate_buildstmts):
        self.name = name
        self.input_types = input_types
        self.output_types = output_types
        self._make_outputs = make_outputs
        self.ninja_rules = ninja_rules
        self.generate_buildstmts = generate_buildstmts

    def make_outputs(self, inputs):
        """Build a KeyedSets of outputs for the given inputs."""
        key_types = (
            iga.filetype.get_all()
            if self.output_types is RuleType.ANY_FILE_TYPE else
            self.output_types
        )
        outputs = KeyedSets(key_types)
        outputs.update(self._make_outputs(inputs))
        return outputs
def _make_no_outputs(_):
return {}
class RuleFunc(RegistryMixin):
    """Wraps a user-supplied rule function together with its arg parser."""

    @staticmethod
    def make(rule_func):
        """Construct a RuleFunc from a plain function."""
        return RuleFunc(
            name=rule_func.__name__,
            rule_func=rule_func,
            parser=FuncArgsParser.make(rule_func),
        )

    def __init__(self, name, rule_func, parser):
        self.name = name
        self.rule_func = rule_func
        self.parser = parser

    def __call__(self, *args, **kwargs):
        """Parse the arguments, invoke the rule function, and record the
        resulting RuleData in the current context."""
        parsed_args, parsed_kwargs, ignored = self.parser.parse((args, kwargs))
        if ignored:
            LOG.debug('%s ignores %r', self.name, ignored)
        rule_data = self.rule_func(*parsed_args, **parsed_kwargs)
        iga.precond.check_type(rule_data, RuleData)
        iga.context.current()['rule_data'].append(rule_data)
class Rule(RegistryMixin):
    """A concrete build rule: a RuleType resolved with inputs and outputs."""

    @staticmethod
    def _make_keyed_sets(declared_types, entries):
        """Build a KeyedSets keyed by declared_types (or all registered file
        types when the rule type accepts any) and fill it with entries."""
        if declared_types is RuleType.ANY_FILE_TYPE:
            keyed_sets = KeyedSets(iga.filetype.get_all())
        else:
            keyed_sets = KeyedSets(declared_types)
        keyed_sets.update(entries)
        return keyed_sets

    @staticmethod
    def make(rule_data):
        """Resolve rule_data against its registered RuleType.

        The KeyedSets construction used to be duplicated inline for both
        the input and output directions; it is now shared via
        _make_keyed_sets.
        """
        rule_type = RuleType.get_object(rule_data.rule_type)
        return Rule(
            name=rule_data.name,
            rule_type=rule_type,
            inputs=Rule._make_keyed_sets(
                rule_type.input_types, rule_data.inputs),
            outputs=Rule._make_keyed_sets(
                rule_type.output_types, rule_data.outputs),
            variables=rule_data.variables,
        )

    def __init__(self, name, rule_type, inputs, outputs, variables):
        self.name = name
        self.rule_type = rule_type
        self.inputs = inputs
        self.outputs = outputs
        self.variables = variables

    def write_to(self, ninja_file):
        """Emit this rule's ninja build statements to ninja_file."""
        for buildstmt in self.rule_type.generate_buildstmts(self):
            buildstmt.write_to(ninja_file)
class RuleData(namedtuple('RuleData', '''
    name
    rule_type
    inputs
    input_patterns
    outputs
    variables
''')):
    """Immutable record of one build rule declaration."""

    @staticmethod
    def make(**kwargs):
        """Create a RuleData, filling absent fields with empty containers.

        A fresh container is created per call, so instances never share
        their default dicts/lists.
        """
        for key, empty in (
                ('inputs', {}),
                ('input_patterns', []),
                ('outputs', {}),
                ('variables', {})):
            kwargs.setdefault(key, empty)
        return RuleData(**kwargs)
| {
"repo_name": "clchiou/iga",
"path": "iga/rule.py",
"copies": "1",
"size": "3714",
"license": "mit",
"hash": -2099060486041543000,
"line_mean": 25.5285714286,
"line_max": 68,
"alpha_frac": 0.584006462,
"autogenerated": false,
"ratio": 3.9892588614393127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 140
} |
__all__ = [
"Schedule",
"Season",
"Team",
"team_from_str"
]
class Season(object):
    """String codes for the phases of a season."""
    PRE = 'PRE'
    REGULAR = 'REG'
    POST = 'POST'
    PRO = 'PRO'
class Schedule(object):
    """Data model of a season's week's schedule."""

    def __init__(self, season, week, stype=Season.REGULAR, data=None,
                 games=None):
        self.season = season
        self.week = week
        self.stype = stype
        self.data = data
        self.games = games

    def __str__(self):
        # Prefer the parsed games when available; otherwise the raw data.
        if self.games is None:
            return self.data
        return str(self.games)
class Team(object):
    """Data model of a team, identified by a short code and a nickname."""

    def __init__(self, code, name):
        self._code = code
        self._name = name

    def __str__(self):
        return '{0} ({1})'.format(self._name, self._code)

    @property
    def code(self):
        """Short team code, e.g. 'SF'."""
        return self._code

    @property
    def name(self):
        """Team nickname, e.g. '49ers'."""
        return self._name
_STR_TO_TEAM_MAPPING = {}
# (code, nickname) pairs for all 32 teams.
_TEAM_METADATA = [
    ('SF', '49ers'),
    ('CHI', 'bears'),
    ('CIN', 'bengals'),
    ('BUF', 'bills'),
    ('DEN', 'broncos'),
    ('CLE', 'browns'),
    ('TB', 'buccaneers'),
    ('ARI', 'cardinals'),
    ('LAC', 'chargers'),
    ('KC', 'chiefs'),
    ('IND', 'colts'),
    ('DAL', 'cowboys'),
    ('MIA', 'dolphins'),
    ('PHI', 'eagles'),
    ('ATL', 'falcons'),
    ('NYG', 'giants'),
    ('JAX', 'jaguars'),
    ('NYJ', 'jets'),
    ('DET', 'lions'),
    ('GB', 'packers'),
    ('CAR', 'panthers'),
    ('OAK', 'raiders'),
    ('LA', 'rams'),
    ('BAL', 'ravens'),
    ('WAS', 'redskins'),
    ('NO', 'saints'),
    ('SEA', 'seahawks'),
    ('PIT', 'steelers'),
    ('HOU', 'texans'),
    ('TEN', 'titans'),
    ('NE', 'patriots'),
    ('MIN', 'vikings')
]
# Index every team under both its code and its nickname.  Tuple unpacking
# plus underscore-prefixed loop variables keep the loop from leaking
# public-looking names (the original leaked `team` and `team_metadatum`
# into the module namespace).
for _code, _name in _TEAM_METADATA:
    _team = Team(_code, _name)
    _STR_TO_TEAM_MAPPING[_code] = _team
    _STR_TO_TEAM_MAPPING[_name] = _team
def team_from_str(team_str):
    """Look up a Team by its code or nickname.

    Raises:
        RuntimeError: when no team matches team_str.
    """
    try:
        return _STR_TO_TEAM_MAPPING[team_str]
    except KeyError:
        raise RuntimeError(
            'No team with code or name of `{}`'.format(team_str)) from None
| {
"repo_name": "YuHChen/fantasy-football-picker",
"path": "ffpicker/data/models.py",
"copies": "1",
"size": "2101",
"license": "mit",
"hash": -3838611471386020000,
"line_mean": 22.3444444444,
"line_max": 82,
"alpha_frac": 0.5173726797,
"autogenerated": false,
"ratio": 2.9675141242937855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3984886803993785,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'schema_cp2k_general_settings', 'schema_derivative_couplings', 'schema_single_points',
'schema_distribute_absorption_spectrum',
'schema_distribute_derivative_couplings',
'schema_distribute_single_points',
'schema_absorption_spectrum']
from numbers import Real
from schema import (And, Optional, Or, Schema, Use)
import os
import pkg_resources as pkg
def merge(d1, d2):
    """Return a new dict with d2's entries layered over d1's.

    Neither input dictionary is modified.
    """
    return {**d1, **d2}
schema_cp2k_general_settings = Schema({
    # "Basis set to carry out the quantum chemistry simulation"
    "basis": str,
    # "Pseudo-potential to carry out the quantum chemistry simulation"
    "potential": str,
    # Charge of the system
    Optional("charge", default=0): int,
    # Multiplicity
    Optional("multiplicity", default=1): int,
    # Specify the Cartesian components for the cell vector
    "cell_parameters": Or(
        Real,
        lambda xs: len(xs) == 3 and isinstance(xs, list),
        lambda xs: len(xs) == 3 and all(len(r) == 3 for r in xs)),
    # Type of periodicity (matches CP2K's PERIODIC keyword).  The original
    # tuple listed "xy" twice and omitted "xz", so valid "xz" periodicity
    # was silently rejected.
    "periodic": And(
        str, Use(str.lower), lambda s: s in (
            "none", "x", "y", "z", "xy", "xz", "yz", "xyz")),
    # Specify the angles between the vectors defining the unit cell
    Optional("cell_angles"): list,
    # Path to the folder containing the basis set specifications
    Optional("path_basis", default=pkg.resource_filename("nac", "basis")): os.path.isdir,
    # Settings describing the input of the quantum package
    "cp2k_settings_main": object,
    # Settings describing the input of the quantum package
    # to compute the guess wavefunction
    "cp2k_settings_guess": object,
    # Restart File Name
    Optional("wfn_restart_file_name", default=None): Or(str, None),
    # File containing the Parameters of the cell if those
    # parameters change during the MD simulation.
    Optional("file_cell_parameters", default=None): Or(str, None),
    # Quality of the auxiliar basis cFIT
    Optional("aux_fit", default="verygood"): And(
        str, Use(str.lower), lambda s: s in
        ("low", "medium", "good", "verygood", "excellent"))
})
dict_general_options = {
    # Number of occupied/virtual orbitals to use
    'active_space': And(list, lambda xs: len(xs) == 2),
    # Index of the HOMO
    Optional("nHOMO"): int,
    # Index of the orbitals to compute the couplings
    Optional("mo_index_range"): tuple,
    # "default quantum package used"
    Optional("package_name", default="cp2k"): str,
    # project
    Optional("project_name", default="namd"): str,
    # Working directory
    Optional("scratch_path", default=None): str,
    # path to the HDF5 to store the results
    Optional("path_hdf5", default="quantum.hdf5"): str,
    # path to xyz trajectory of the Molecular dynamics
    "path_traj_xyz": os.path.exists,
    # Index from which to start enumerating the folders created for each
    # point in the MD
    Optional("enumerate_from", default=0): int,
    # Ignore the warnings issued by the quantum package and keep computing
    Optional("ignore_warnings", default=False): bool,
    # Calculate the guess wave function in either the first point of the
    # trajectory or in all points
    Optional("calculate_guesses", default="first"):
    And(str, Use(str.lower), lambda s: s in ("first", "all")),
    # Units of the molecular geometry on the MD file
    Optional("geometry_units", default="angstrom"):
    And(str, Use(str.lower), lambda s: s in (
        "angstrom", "au")),
    # Integration time step used for the MD (femtoseconds)
    Optional("dt", default=1): Real,
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings
}
dict_derivative_couplings = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "derivative_couplings"),
    # Algorithm used to compute the derivative couplings.  The validator
    # must test membership: the original lambda returned the tuple itself,
    # which is always truthy, so any algorithm string was accepted.
    Optional("algorithm", default="levine"):
    And(str, Use(str.lower), lambda s: s in ("levine", "3points")),
    # Use MPI to compute the couplings
    Optional("mpi", default=False): bool,
    # Track the crossing between states
    Optional("tracking", default=True): bool,
    # Write the overlaps in ascii
    Optional("write_overlaps", default=False): bool,
    # Compute the overlap between molecular geometries using a dephase
    Optional("overlaps_deph", default=False): bool
}
dict_merged_derivative_couplings = merge(
    dict_general_options, dict_derivative_couplings)
schema_derivative_couplings = Schema(
    dict_merged_derivative_couplings)
schema_job_scheduler = Schema({
    # The original lambda returned the tuple ("SLURM", "PBS"), which is
    # always truthy, so any scheduler string validated; test membership.
    Optional("scheduler", default="SLURM"):
    And(str, Use(str.upper), lambda s: s in ("SLURM", "PBS")),
    Optional("nodes", default=1): int,
    Optional("tasks", default=1): int,
    Optional("wall_time", default="01:00:00"): str,
    Optional("job_name", default="namd"): str,
    Optional("queue_name", default="short"): str,
    Optional("load_modules", default=""): str
})
dict_distribute = {
    # NOTE(review): os.getcwd() is evaluated once at import time, so the
    # default workdir is wherever the module was first imported — confirm
    # that is intended rather than the cwd at validation time.
    Optional("workdir", default=os.getcwd()): str,
    # Number of chunks to split the trajectory
    "blocks": int,
    # Resource manager configuration
    "job_scheduler": schema_job_scheduler,
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings,
}
dict_distribute_derivative_couplings = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_derivative_couplings")
}
# Distributed variant: distribution options layered over the merged
# derivative-couplings options.
schema_distribute_derivative_couplings = Schema(
    merge(dict_distribute, merge(
        dict_merged_derivative_couplings, dict_distribute_derivative_couplings)))
dict_absorption_spectrum = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "absorption_spectrum"),
    # Type of TDDFT calculations. Available: sing_orb, stda, stddft
    # ("stddft" was previously misspelled "stdft" in the validator, so the
    # documented value was rejected; the historical typo is still accepted
    # for backward compatibility.)
    Optional("tddft", default="stda"): And(
        str, Use(str.lower),
        lambda s: s in ("sing_orb", "stda", "stddft", "stdft")),
    # Interval between MD points where the oscillators are computed
    Optional("stride", default=1): int,
    # Exchange-correlation functional used in the DFT calculations
    Optional("xc_dft", default="pbe"): str
}
dict_merged_absorption_spectrum = merge(
    dict_general_options, dict_absorption_spectrum)
schema_absorption_spectrum = Schema(dict_merged_absorption_spectrum)
dict_distribute_absorption_spectrum = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_absorption_spectrum")
}
# Distributed absorption-spectrum schema: distribution options layered over
# the merged absorption-spectrum options.
schema_distribute_absorption_spectrum = Schema(
    merge(dict_distribute, merge(
        dict_merged_absorption_spectrum, dict_distribute_absorption_spectrum)))
dict_single_points = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "single_points"),
    # General settings
    "cp2k_general_settings": schema_cp2k_general_settings
}
dict_distribute_single_points = {
    # Name of the workflow to run
    "workflow": And(
        str, Use(str.lower), lambda s: s == "distribute_single_points")
}
# Single-point schemas follow the same merge pattern as the other workflows.
dict_merged_single_points = merge(dict_general_options, dict_single_points)
schema_single_points = Schema(dict_merged_single_points)
schema_distribute_single_points = Schema(
    merge(dict_distribute, merge(
        dict_merged_single_points, dict_distribute_single_points)))
| {
"repo_name": "felipeZ/nonAdiabaticCoupling",
"path": "nac/workflows/schemas.py",
"copies": "1",
"size": "7466",
"license": "mit",
"hash": -484774629108433100,
"line_mean": 28.864,
"line_max": 93,
"alpha_frac": 0.6707741763,
"autogenerated": false,
"ratio": 3.6348588120740017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4805632988374002,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'SchemaLoader',
]
import logging
from g1.bases import classes
from g1.bases import labels
from g1.bases.assertions import ASSERT
from . import _capnp
# pylint: disable=c-extension-no-member
from . import bases
#
# TODO: We expose classes and methods as needed for now; so don't expect
# them to be comprehensive.
#
LOG = logging.getLogger(__name__)
# Annotation node id from capnp/c++.capnp.
CXX_NAMESPACE = 0xb9c6f99ebf805f2c
CXX_NAME = 0xf264a779fef191ce
class SchemaLoader:
    """Load Cap'n Proto schema nodes from codegen request messages.

    Loaded schemas are indexed into per-kind tables (``files``,
    ``struct_schemas``, ``enum_schemas``, ...); the non-file tables are
    keyed by ``labels.Label`` objects derived from each schema's C++
    namespace annotation (or, failing that, its display name).
    """

    def __init__(self):
        self._loader = _capnp.SchemaLoader()
        # Lookup tables populated by _update(); see class docstring.
        self.files = {}
        self.struct_schemas = {}
        self.enum_schemas = {}
        self.interface_schemas = {}
        self.const_schemas = {}
        self.annotations = {}

    def __enter__(self):
        return self

    def __exit__(self, *_):
        # Release the native loader; the object is unusable afterwards.
        loader, self._loader = self._loader, None
        loader._reset()

    def load(self, codegen_request_bytes):
        """Load all schema nodes from a serialized codegen request."""
        self._do_load(
            codegen_request_bytes,
            ASSERT.not_none(self._loader).load,
        )

    def load_once(self, codegen_request_bytes):
        """Like ``load`` but use the native loadOnce entry point."""
        self._do_load(
            codegen_request_bytes,
            ASSERT.not_none(self._loader).loadOnce,
        )

    def _do_load(self, codegen_request_bytes, load):
        """Feed every node of the codegen request to ``load``, then rebuild
        the lookup tables."""
        reader = _capnp.FlatArrayMessageReader(codegen_request_bytes)
        try:
            codegen_request = reader.getRoot()
            for node in codegen_request.getNodes():
                load(node)
        finally:
            reader._reset()
        self._update()

    def _update(self):
        """Rebuild the lookup tables from everything loaded so far.

        Existing entries are kept; only unseen schemas are added.
        """
        ASSERT.not_none(self._loader)
        LOG.debug('update schema tables')
        id_to_schema = {
            schema.proto.id: schema
            for schema in map(Schema, self._loader.getAllLoaded())
        }
        for schema in id_to_schema.values():
            if schema.proto.is_file():
                # File nodes are keyed by display name, not by label.
                path = schema.proto.display_name
                if path not in self.files:
                    LOG.debug('add file node: %s', path)
                    self.files[path] = schema
                continue
            # First pass: pick the table for this schema kind.
            if schema.proto.is_struct():
                table = self.struct_schemas
            elif schema.proto.is_enum():
                table = self.enum_schemas
            elif schema.proto.is_interface():
                table = self.interface_schemas
            elif schema.proto.is_const():
                table = self.const_schemas
            elif schema.proto.is_annotation():
                table = self.annotations
            else:
                ASSERT.unreachable('unexpected schema kind: {}', schema)
            label = labels.Label(
                self._get_module_path(schema, id_to_schema),
                self._get_object_path(schema, id_to_schema),
            )
            if label in table:
                continue
            LOG.debug('add schema: %s', label)
            # Second pass: downcast to the kind-specific wrapper and store.
            if schema.proto.is_struct():
                table[label] = schema.as_struct()
            elif schema.proto.is_enum():
                table[label] = schema.as_enum()
            elif schema.proto.is_interface():
                table[label] = schema.as_interface()
            elif schema.proto.is_const():
                table[label] = schema.as_const()
            elif schema.proto.is_annotation():
                table[label] = schema
            else:
                ASSERT.unreachable('unexpected schema kind: {}', schema)

    @staticmethod
    def _get_module_path(schema, id_to_schema):
        """Derive a module path from the enclosing file's Cxx.namespace
        annotation, falling back to the file's display name."""
        # Walk up the scope chain to the enclosing file node.
        while schema and not schema.proto.is_file():
            schema = id_to_schema.get(schema.proto.scope_id)
        ASSERT.not_none(schema)
        for annotation in schema.proto.annotations:
            if annotation.id == CXX_NAMESPACE:
                return annotation.value.text.replace('::', '.').strip('.')
        LOG.debug('no Cxx.namespace annotation: %r', schema)
        # As a last resort, derive module path from display name.
        path = schema.proto.display_name
        if path.endswith('.capnp'):
            path = path[:-len('.capnp')]
        return path.replace('/', '.')

    @staticmethod
    def _get_object_path(schema, id_to_schema):
        """Join the names of all enclosing scopes, outermost first."""
        parts = []
        while schema and not schema.proto.is_file():
            parts.append(schema.name)
            schema = id_to_schema.get(schema.proto.scope_id)
        parts.reverse()
        return '.'.join(parts)
#
# C++ ``capnp::schema`` namespace types.
#
# Use this to work around cyclic reference to ``_Schema.Value``.
def _to_value(raw):
    """Wrap a raw value in ``_Schema.Value`` (defined below; this indirection
    breaks the cyclic reference)."""
    return _Schema.Value(raw)
# Namespace class to avoid conflicts.
class _Schema:
    """Namespace of thin wrappers over C++ ``capnp::schema`` reader types."""
    class Node(bases.Base):
        """Wrapper of ``capnp::schema::Node``: one declaration in a schema."""
        class Struct(bases.Base):
            _raw_type = _capnp.schema.Node.Struct
            is_group = bases.def_p(_raw_type.getIsGroup)
        _raw_type = _capnp.schema.Node
        __repr__ = classes.make_repr(
            'id={self.id} '
            'scope_id={self.scope_id} '
            'display_name={self.display_name!r} '
            'which={self.which}'
        )
        id = bases.def_p(_raw_type.getId)
        display_name = bases.def_mp(
            'display_name',
            bases.to_str,
            _raw_type.getDisplayName,
        )
        display_name_prefix_length = bases.def_p(
            _raw_type.getDisplayNamePrefixLength
        )
        scope_id = bases.def_p(_raw_type.getScopeId)
        @classes.memorizing_property
        def annotations(self):
            """Annotations attached to this node, wrapped and memoized."""
            return tuple(map(_Schema.Annotation, self._raw.getAnnotations()))
        which = bases.def_p(_raw_type.which)
        is_file = bases.def_f0(_raw_type.isFile)
        is_struct = bases.def_f0(_raw_type.isStruct)
        is_enum = bases.def_f0(_raw_type.isEnum)
        is_interface = bases.def_f0(_raw_type.isInterface)
        is_const = bases.def_f0(_raw_type.isConst)
        is_annotation = bases.def_f0(_raw_type.isAnnotation)
        struct = bases.def_mp('struct', Struct, _raw_type.getStruct)
    class Field(bases.Base):
        """Wrapper of ``capnp::schema::Field``."""
        class Slot(bases.Base):
            _raw_type = _capnp.schema.Field.Slot
            had_explicit_default = bases.def_p(_raw_type.getHadExplicitDefault)
        _raw_type = _capnp.schema.Field
        __repr__ = classes.make_repr(
            'name={self.name!r} code_order={self.code_order}'
        )
        name = bases.def_mp('name', bases.to_str, _raw_type.getName)
        code_order = bases.def_p(_raw_type.getCodeOrder)
        which = bases.def_p(_raw_type.which)
        is_slot = bases.def_f0(_raw_type.isSlot)
        is_group = bases.def_f0(_raw_type.isGroup)
        slot = bases.def_mp('slot', Slot, _raw_type.getSlot)
    class Enumerant(bases.Base):
        """Wrapper of ``capnp::schema::Enumerant``."""
        _raw_type = _capnp.schema.Enumerant
        __repr__ = classes.make_repr(
            'name={self.name!r} code_order={self.code_order}'
        )
        name = bases.def_mp('name', bases.to_str, _raw_type.getName)
        code_order = bases.def_p(_raw_type.getCodeOrder)
    class Value(bases.Base):
        """Wrapper of ``capnp::schema::Value``; ``which``/``is_*`` report the
        active member."""
        _raw_type = _capnp.schema.Value
        __repr__ = classes.make_repr('which={self.which}')
        which = bases.def_p(_raw_type.which)
        is_void = bases.def_f0(_raw_type.isVoid)
        is_bool = bases.def_f0(_raw_type.isBool)
        is_int8 = bases.def_f0(_raw_type.isInt8)
        is_int16 = bases.def_f0(_raw_type.isInt16)
        is_int32 = bases.def_f0(_raw_type.isInt32)
        is_int64 = bases.def_f0(_raw_type.isInt64)
        is_uint8 = bases.def_f0(_raw_type.isUint8)
        is_uint16 = bases.def_f0(_raw_type.isUint16)
        is_uint32 = bases.def_f0(_raw_type.isUint32)
        is_uint64 = bases.def_f0(_raw_type.isUint64)
        is_float32 = bases.def_f0(_raw_type.isFloat32)
        is_float64 = bases.def_f0(_raw_type.isFloat64)
        is_text = bases.def_f0(_raw_type.isText)
        is_data = bases.def_f0(_raw_type.isData)
        is_list = bases.def_f0(_raw_type.isList)
        is_enum = bases.def_f0(_raw_type.isEnum)
        is_struct = bases.def_f0(_raw_type.isStruct)
        is_interface = bases.def_f0(_raw_type.isInterface)
        is_any_pointer = bases.def_f0(_raw_type.isAnyPointer)
        @classes.memorizing_property
        def text(self):
            """The text payload, decoded as UTF-8; asserts is_text()."""
            ASSERT.true(self._raw.isText())
            return str(self._raw.getText(), 'utf-8')
    class Annotation(bases.Base):
        """Wrapper of ``capnp::schema::Annotation``."""
        _raw_type = _capnp.schema.Annotation
        __repr__ = classes.make_repr('id={self.id} value={self.value}')
        id = bases.def_p(_raw_type.getId)
        value = bases.def_mp('value', _to_value, _raw_type.getValue)
#
# C++ ``capnp`` namespace types.
#
# Use this to work around cyclic reference to ``Type``.
def _to_type(raw):
    """Wrap a raw type in ``Type`` (defined below; this indirection breaks
    the cyclic reference)."""
    return Type(raw)
class Schema(bases.Base):
    """Wrapper of ``capnp::Schema``: a loaded schema object."""
    _raw_type = _capnp.Schema
    __repr__ = classes.make_repr('proto={self.proto!r}')
    proto = bases.def_mp('proto', _Schema.Node, _raw_type.getProto)
    is_branded = bases.def_f0(_raw_type.isBranded)
    # Use explicit functional form to work around cyclic reference in
    # the ``asX`` methods below.
    def as_struct(self):
        """Downcast to StructSchema."""
        return StructSchema(self._raw.asStruct())
    def as_enum(self):
        """Downcast to EnumSchema."""
        return EnumSchema(self._raw.asEnum())
    def as_interface(self):
        """Downcast to InterfaceSchema."""
        return InterfaceSchema(self._raw.asInterface())
    def as_const(self):
        """Downcast to ConstSchema."""
        return ConstSchema(self._raw.asConst())
    short_display_name = bases.def_mp(
        'short_display_name',
        bases.to_str,
        _raw_type.getShortDisplayName,
    )
    @classes.memorizing_property
    def name(self):
        """Schema name: the Cxx.name annotation value when present,
        otherwise the short display name."""
        for annotation in self.proto.annotations: # pylint: disable=no-member
            if annotation.id == CXX_NAME:
                name = annotation.value.text
                break
        else:
            name = self.short_display_name
        return ASSERT.not_none(name)
class StructSchema(Schema):
    """Wrapper of ``capnp::StructSchema`` with field lookup tables."""
    class Field(bases.Base):
        """One field of a struct schema."""
        _raw_type = _capnp.StructSchema.Field
        __repr__ = classes.make_repr(
            'proto={self.proto!r} index={self.index} type={self.type!r}'
        )
        proto = bases.def_mp('proto', _Schema.Field, _raw_type.getProto)
        index = bases.def_p(_raw_type.getIndex)
        type = bases.def_mp('type', _to_type, _raw_type.getType)
    _raw_type = _capnp.StructSchema
    @classes.memorizing_property
    def fields(self):
        """All fields keyed by field name (memoized)."""
        return {
            f.proto.name: f
            for f in map(StructSchema.Field, self._raw.getFields())
        }
    @classes.memorizing_property
    def union_fields(self):
        """Fields returned by ``getUnionFields``, keyed by name."""
        return {
            f.proto.name: f
            for f in map(StructSchema.Field, self._raw.getUnionFields())
        }
    @classes.memorizing_property
    def non_union_fields(self):
        """Fields returned by ``getNonUnionFields``, keyed by name."""
        return {
            f.proto.name: f
            for f in map(StructSchema.Field, self._raw.getNonUnionFields())
        }
class EnumSchema(Schema):
    """Wrapper of ``capnp::EnumSchema`` with an enumerant lookup table."""
    class Enumerant(bases.Base):
        """One member of an enum schema."""
        _raw_type = _capnp.EnumSchema.Enumerant
        __repr__ = classes.make_repr(
            'proto={self.proto!r} ordinal={self.ordinal} index={self.index}'
        )
        proto = bases.def_mp('proto', _Schema.Enumerant, _raw_type.getProto)
        ordinal = bases.def_p(_raw_type.getOrdinal)
        index = bases.def_p(_raw_type.getIndex)
    _raw_type = _capnp.EnumSchema
    @classes.memorizing_property
    def enumerants(self):
        """All enumerants keyed by name (memoized)."""
        return {
            e.proto.name: e
            for e in map(EnumSchema.Enumerant, self._raw.getEnumerants())
        }
class InterfaceSchema(Schema):
    """Wrapper of ``capnp::InterfaceSchema`` (no extra accessors yet)."""
    _raw_type = _capnp.InterfaceSchema
class ConstSchema(Schema):
    """Wrapper of ``capnp::ConstSchema``."""
    _raw_type = _capnp.ConstSchema
    __repr__ = classes.make_repr('type={self.type!r}')
    type = bases.def_mp('type', _to_type, _raw_type.getType)
class ListSchema(bases.Base):
    """Wrapper of ``capnp::ListSchema``."""
    _raw_type = _capnp.ListSchema
    __repr__ = classes.make_repr('element_type={self.element_type!r}')
    # NOTE(review): the memo key is 'type' while the attribute is named
    # element_type (cf. ConstSchema, where both are 'type') — possibly
    # copy-paste; confirm against bases.def_mp's keying semantics.
    element_type = bases.def_mp('type', _to_type, _raw_type.getElementType)
class Type(bases.Base):
    """Wrapper of ``capnp::Type``; ``which``/``is_*`` report the kind and
    the ``as_*`` accessors downcast to the matching schema wrapper."""
    _raw_type = _capnp.Type
    __repr__ = classes.make_repr('which={self.which}')
    which = bases.def_p(_raw_type.which)
    as_struct = bases.def_f0(StructSchema, _raw_type.asStruct)
    as_enum = bases.def_f0(EnumSchema, _raw_type.asEnum)
    as_interface = bases.def_f0(InterfaceSchema, _raw_type.asInterface)
    as_list = bases.def_f0(ListSchema, _raw_type.asList)
    is_void = bases.def_f0(_raw_type.isVoid)
    is_bool = bases.def_f0(_raw_type.isBool)
    is_int8 = bases.def_f0(_raw_type.isInt8)
    is_int16 = bases.def_f0(_raw_type.isInt16)
    is_int32 = bases.def_f0(_raw_type.isInt32)
    is_int64 = bases.def_f0(_raw_type.isInt64)
    # NOTE: the raw API here spells these isUInt8/... (capital I), unlike
    # _Schema.Value's isUint8/...
    is_uint8 = bases.def_f0(_raw_type.isUInt8)
    is_uint16 = bases.def_f0(_raw_type.isUInt16)
    is_uint32 = bases.def_f0(_raw_type.isUInt32)
    is_uint64 = bases.def_f0(_raw_type.isUInt64)
    is_float32 = bases.def_f0(_raw_type.isFloat32)
    is_float64 = bases.def_f0(_raw_type.isFloat64)
    is_text = bases.def_f0(_raw_type.isText)
    is_data = bases.def_f0(_raw_type.isData)
    is_list = bases.def_f0(_raw_type.isList)
    is_enum = bases.def_f0(_raw_type.isEnum)
    is_struct = bases.def_f0(_raw_type.isStruct)
    is_interface = bases.def_f0(_raw_type.isInterface)
    is_any_pointer = bases.def_f0(_raw_type.isAnyPointer)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/third-party/capnp/capnp/schemas.py",
"copies": "1",
"size": "13293",
"license": "mit",
"hash": 6981049713775603000,
"line_mean": 28.1513157895,
"line_max": 79,
"alpha_frac": 0.5884300008,
"autogenerated": false,
"ratio": 3.356818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44452481826181817,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"serialize",
"deserialize"
]
__doc__ = "Interaction between Optizelle with JSON formatted files"
import numpy
import Optizelle.Exception
from Optizelle.Properties import *
class Extendable(object):
    """Allows a function to be extended with per-type implementations."""
    def __init__(self, fn):
        # Maps vector type -> specialized implementation.
        self.fns = {}
        # Fallback used when no specialization matches.
        self.fn = fn
    def register(self,fn,vector_type):
        """Extends the current function with fn. This function will only be
        called if the first argument matches vector_type."""
        # Check our arguments
        checkFunction("(de)serialize",fn)
        checkType("vector_type",vector_type)
        # Register the function
        self.fns[vector_type]=fn
    def __call__(self,*args):
        """Dispatch on type(args[0]), falling back to the base function.

        Only a failed lookup (no arguments, or no registered implementation
        for the type) triggers the fallback; the original bare ``except``
        also swallowed exceptions raised *inside* the chosen implementation
        and silently re-invoked the fallback, masking real errors.
        """
        try:
            fn = self.fns[type(args[0])]
        except (IndexError, KeyError):
            fn = self.fn
        return fn(*args)
@ Extendable
def serialize(x,name,iter):
    """Converts a vector to a JSON formatted string"""
    # Fallback: raised when no serializer was registered for type(x).
    raise Optizelle.Exception.t(
        "The serialize function for the vector %s not defined." % str(x))
@ Extendable
def deserialize(x,x_json):
    """Converts a JSON formatted string to a vector"""
    # Fallback: raised when no deserializer was registered for type(x).
    raise Optizelle.Exception.t(
        "The deserialize function for the vector %s not defined." % str(x))
def serialize_Rm(x, name, iter):
    """Serializes a numpy array for the vector space Optizelle.Rm.

    Args:
        x: 1-D numpy array to serialize.
        name: unused (kept for the serialize interface).
        iter: unused (kept for the serialize interface).

    Returns:
        A JSON array string such as "[ 1.0, 2.0 ]".
    """
    # Handle the empty vector explicitly; the original produced the
    # malformed string " ]" for it.
    if x.size == 0:
        return "[ ]"
    # Single-pass join instead of quadratic += concatenation.
    return "[ " + ", ".join(str(x[i]) for i in range(x.size)) + " ]"
def deserialize_Rm(x, x_json):
    """Deserializes a JSON array string for the vector space Optizelle.Rm.

    Args:
        x: unused reference vector (kept for the deserialize interface).
        x_json: string such as "[ 1.0, 2.0 ]".

    Raises:
        TypeError: if x_json is not delimited by '[' and ']'.
    """
    # Eliminate all whitespace
    compact = "".join(x_json.split())
    # Require the [...] delimiters of a JSON array
    if compact[0:1] != "[" or compact[-1:] != "]":
        raise TypeError("Attempted to deserialize a non-numpy.array vector.")
    # Strip the delimiters, split the comma separated entries, and convert;
    # a plain list comprehension replaces the needless numpy.vectorize over
    # a float() lambda.
    return numpy.array([float(entry) for entry in compact[1:-1].split(",")])
# Register the serialization routines for numpy arrays
# (numpy.ndarray is the concrete type backing Optizelle.Rm vectors).
serialize.register(serialize_Rm,numpy.ndarray)
deserialize.register(deserialize_Rm,numpy.ndarray)
| {
"repo_name": "OptimoJoe/Optizelle",
"path": "src/python/Optizelle/json/Serialization.py",
"copies": "1",
"size": "2316",
"license": "bsd-2-clause",
"hash": -5691979469940897000,
"line_mean": 26.5714285714,
"line_max": 77,
"alpha_frac": 0.6424870466,
"autogenerated": false,
"ratio": 3.519756838905775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4662243885505775,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Server',
]
import dataclasses
import logging
import nng
import nng.asyncs
from g1.bases import classes
from g1.bases.assertions import ASSERT
from . import utils
LOG = logging.getLogger(__name__)
class Server:
    """Expose an (asynchronous) application object on a socket.
    This is a fairly simple server for providing remote method calls.
    If application defines context management (i.e., ``__enter__``), it
    will be called when server's context management is called.  This
    provides some sorts of server start/stop callbacks to application.
    """
    def __init__(
        self,
        application,
        request_type,
        response_type,
        wiredata,
        *,
        warning_level_exc_types=(),
        invalid_request_error=None,
        internal_server_error=None,
    ):
        """Wire the application to a REP0 socket.

        Args:
            application: object whose async methods serve requests.
            request_type: dataclass type for incoming requests.
            response_type: dataclass type for outgoing responses; its
                Error member declares the catchable error types.
            wiredata: codec with to_upper/to_lower for (de)serialization.
            warning_level_exc_types: exception types logged as warnings
                rather than errors.
            invalid_request_error: error sent back for malformed requests.
            internal_server_error: error sent back for unexpected failures.
        """
        self._application = application
        self._request_type = request_type
        self._response_type = response_type
        self._wiredata = wiredata
        self._warning_level_exc_types = frozenset(warning_level_exc_types)
        self._declared_error_types = utils.get_declared_error_types(
            self._response_type
        )
        # For convenience, create socket before ``__enter__``.
        self.socket = nng.asyncs.Socket(nng.Protocols.REP0)
        # Prepared errors (serialized once so error paths need no codec).
        self._invalid_request_error_wire = self._lower_error_or_none(
            invalid_request_error
        )
        self._internal_server_error_wire = self._lower_error_or_none(
            internal_server_error
        )
    def _lower_error_or_none(self, error):
        """Serialize a declared error into wire form, or pass None through."""
        if error is None:
            return None
        ASSERT.isinstance(error, Exception)
        error_name = ASSERT(
            self._match_error_type(error), 'unknown error type: {!r}', error
        )
        return self._wiredata.to_lower(
            self._response_type(
                error=self._response_type.Error(**{error_name: error})
            )
        )
    def _match_error_type(self, error):
        """Map an exception to its declared error field name, or None."""
        # NOTE: We match the exact type rather than calling isinstance
        # because error types could form a hierarchy, and isinstance
        # might match a parent error type rather than a child type.
        return self._declared_error_types.get(type(error))
    __repr__ = classes.make_repr('{self.socket!r}')
    def __enter__(self):
        self.socket.__enter__()
        return self
    def __exit__(self, *args):
        return self.socket.__exit__(*args)
    async def serve(self):
        """Serve requests sequentially.
        To serve requests concurrently, just spawn multiple tasks
        running this.
        """
        LOG.info('start server: %r', self)
        try:
            with nng.asyncs.Context(ASSERT.not_none(self.socket)) as context:
                while True:
                    response = await self._serve(await context.recv())
                    if response is not None:
                        await context.send(response)
        except nng.Errors.ECLOSED:
            # Raised when shutdown() closes the socket; normal exit path.
            pass
        LOG.info('stop server: %r', self)
    def shutdown(self):
        """Close the socket, unblocking serve() via ECLOSED."""
        self.socket.close()
    async def _serve(self, request):
        """Handle one wire request and return a wire response (or None)."""
        LOG.debug('wire request: %r', request)
        # Deserialize; malformed bytes get the prepared invalid-request error.
        try:
            request = self._wiredata.to_upper(self._request_type, request)
        except Exception:
            LOG.warning('to_upper error: %r', request, exc_info=True)
            return self._invalid_request_error_wire
        # Pick out which method the request selects and its arguments.
        try:
            method_name, method_args = utils.select(request.args)
        except Exception:
            LOG.warning('invalid request: %r', request, exc_info=True)
            return self._invalid_request_error_wire
        try:
            method = getattr(self._application, method_name)
        except AttributeError:
            LOG.warning('unknown method: %s: %r', method_name, request)
            return self._invalid_request_error_wire
        # Invoke the application method with the request's fields as kwargs.
        try:
            result = await method(
                **{
                    field.name: getattr(method_args, field.name)
                    for field in dataclasses.fields(method_args)
                }
            )
        except Exception as exc:
            if type(exc) in self._warning_level_exc_types: # pylint: disable=unidiomatic-typecheck
                log = LOG.warning
                exc_info = False
            else:
                log = LOG.error
                exc_info = True
            log('server error: %r -> %r', request, exc, exc_info=exc_info)
            # Declared errors go back typed; anything else is internal.
            response = self._make_error_response(exc)
            if response is None:
                return self._internal_server_error_wire
        else:
            response = self._response_type(
                result=self._response_type.Result(**{method_name: result})
            )
        try:
            response = self._wiredata.to_lower(response)
        except Exception:
            # It should be an error when a response object that is fully
            # under our control cannot be lowered correctly.
            LOG.exception('to_lower error: %r, %r', request, response)
            return self._internal_server_error_wire
        LOG.debug('wire response: %r', response)
        return response
    def _make_error_response(self, error):
        """Build a typed error response, or None for undeclared errors."""
        error_name = self._match_error_type(error)
        if error_name is None:
            return None
        return self._response_type(
            error=self._response_type.Error(**{error_name: error})
        )
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/reqrep/servers.py",
"copies": "1",
"size": "5491",
"license": "mit",
"hash": -5754546632971647000,
"line_mean": 31.6845238095,
"line_max": 99,
"alpha_frac": 0.5760335094,
"autogenerated": false,
"ratio": 4.253292021688614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5329325531088613,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Session',
]
from . import bases
class Session:
    """HTTP client session.

    For most use cases, this is your go-to choice.  It supports local
    cache, rate limit, retry, and priority (when given a priority
    executor).
    """

    def __init__(
        self,
        *,
        executor=None,
        num_pools=0,
        num_connections_per_pool=0,
        **kwargs,
    ):
        # The base session owns the connection pools; the sender layers
        # additional policies (cache/rate-limit/retry) on top of the
        # base session's send method.
        self._base_session = bases.BaseSession(
            executor=executor,
            num_pools=num_pools,
            num_connections_per_pool=num_connections_per_pool,
        )
        self._sender = bases.Sender(self._base_session.send, **kwargs)

    @property
    def headers(self):
        """Headers of the underlying base session."""
        return self._base_session.headers

    @property
    def cookies(self):
        """Cookies of the underlying base session."""
        return self._base_session.cookies

    def update_cookies(self, cookie_dict):
        """Merge ``cookie_dict`` into the session's cookies."""
        return self._base_session.update_cookies(cookie_dict)

    async def send(self, request, **kwargs):
        """Send a request asynchronously through the sender's policies."""
        return await self._sender(request, **kwargs)

    def send_blocking(self, request, **kwargs):
        """Send a request synchronously via the base session directly."""
        return self._base_session.send_blocking(request, **kwargs)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/http/clients/g1/http/clients/clients.py",
"copies": "1",
"size": "1129",
"license": "mit",
"hash": -6673328851724814000,
"line_mean": 23.5434782609,
"line_max": 70,
"alpha_frac": 0.5987599646,
"autogenerated": false,
"ratio": 3.9337979094076654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5032557874007665,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Session',
'SessionError',
'Stream',
'StreamClosed',
# HTTP/2 entities
'Request',
'Response',
# HTTP/2 entity properties
'Method',
'Scheme',
'Status',
# Helpers
'get_library_version',
'make_ssl_context',
]
from http import HTTPStatus as Status # Rename for consistency
import ctypes
import enum
import functools
import io
import logging
from curio import socket
from curio import ssl
import curio
from garage.assertions import ASSERT
from garage.asyncs import queues
from .nghttp2 import *
LOG = logging.getLogger(__name__)
py_object_p = ctypes.POINTER(ctypes.py_object)
def get_library_version():
    """Return version info of the loaded nghttp2 library as a dict."""
    info = nghttp2_version(0).contents
    return dict(
        age=info.age,
        version_num=info.version_num,
        version_str=info.version_str.decode('utf-8'),
        proto_str=info.proto_str.decode('utf-8'),
    )
def make_ssl_context(certfile, keyfile, *, client_authentication=False):
    """Build a server-side SSL context that advertises HTTP/2.

    When ``client_authentication`` is set, client certificates are
    required and verified against ``certfile``.
    """
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(certfile, keyfile)
    if client_authentication:
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(cafile=certfile)
    # Advertise the HTTP/2 protocol via ALPN and/or NPN when available.
    if ssl.HAS_ALPN:
        context.set_alpn_protocols([NGHTTP2_PROTO_VERSION_ID])
    if ssl.HAS_NPN:
        context.set_npn_protocols([NGHTTP2_PROTO_VERSION_ID])
    return context
class SessionError(Exception):
    """Base exception for errors in an HTTP/2 session."""
    pass
class StreamClosed(SessionError):
    """Raised when submitting to a stream that has already been closed."""
    pass
class Method(enum.Enum):
    """HTTP request methods, with byte-string values for header use."""
    OPTIONS = b'OPTIONS'
    GET = b'GET'
    HEAD = b'HEAD'
    POST = b'POST'
    PUT = b'PUT'
    DELETE = b'DELETE'
    TRACE = b'TRACE'
    CONNECT = b'CONNECT'
class Scheme(enum.Enum):
    """URI schemes, with byte-string values for the :scheme pseudo-header."""
    HTTP = b'http'
    HTTPS = b'https'
class Session:
    """Represent an HTTP/2 session to the server.

    You spawn a serve() task which will process the HTTP/2 traffic, and
    you interact with the serve() task via the public interface of the
    Session object.
    """
    INCOMING_BUFFER_SIZE = 65536  # TCP packet <= 64KB
    MAX_CONCURRENT_STREAMS = 128
    SETTINGS_TIMEOUT = 5  # Unit: seconds
    def __init__(self, sock):
        self._sock = sock
        # Guard self._sendall()
        self._lock = curio.Lock()
        self._session = None  # Own nghttp2_session object
        self._user_data = None  # Own `py_object(self)`
        self._streams = {}  # Own Stream objects
        self._stream_queue = queues.Queue()
        # Set to non-None to start settings timer
        self._settings_timeout = None
        # Track the current callback for better logging
        self._current_callback = None
        # For PushPromise
        self._scheme = self._guess_scheme(self._sock)
        self._host = self._sock.getsockname()[0].encode('ascii')
    @property
    def _id(self):
        """Hex address of the nghttp2_session object, for logging."""
        if self._session is not None:
            return hex(ctypes.addressof(self._session.contents))
        else:
            return None
    @staticmethod
    def _guess_scheme(sock):
        """Guess http/https from whether the socket has an SSL context."""
        try:
            sock.context
        except AttributeError:
            return Scheme.HTTP
        else:
            return Scheme.HTTPS
    def _log(self, logger, message, *args):
        """Log ``message`` prefixed with session id and current callback."""
        logger('session=%s: %s: ' + message,
               self._id, self._current_callback or '?', *args)
    _debug = functools.partialmethod(_log, LOG.debug)
    _info = functools.partialmethod(_log, LOG.info)
    _warning = functools.partialmethod(_log, LOG.warning)
    async def serve(self):
        """Process HTTP/2 traffic until the connection is done."""
        if self._session is not None:
            raise SessionError('session is already active: %s' % self._id)
        # It is said that when many clients rapidly connect then close,
        # getpeername might raise "Transport endpoint is not connected"
        # error (I cannot reproduce this locally, but I do see this sort
        # of issue on the production server).
        try:
            peer_name = self._sock.getpeername()
        except OSError as exc:
            LOG.debug('connection has already been closed: %r', exc)
            await self._sock.close()
            return
        # Create nghttp2_session object
        self._session, self._user_data = self._make_session()
        LOG.debug('session=%s: create %s session for client: %s',
                  self._id, self._scheme.name, peer_name)
        try:
            # Disable Nagle algorithm
            self._sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            # Set SETTINGS frame
            settings = (nghttp2_settings_entry * 2)()
            settings[0].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS
            settings[0].value = self.MAX_CONCURRENT_STREAMS
            settings[1].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE
            settings[1].value = NGHTTP2_INITIAL_WINDOW_SIZE
            nghttp2_submit_settings(
                self._session, NGHTTP2_FLAG_NONE, settings, len(settings))
            # Start serving!
            error_code = NGHTTP2_NO_ERROR
            try:
                # _settings_timeout is None until we send a SETTINGS
                # frame; timeout_after(None) means no timeout.
                while True:
                    async with curio.timeout_after(self._settings_timeout):
                        if not await self._serve_tick():
                            break
            except curio.TaskTimeout:
                LOG.debug('session=%s: settings timeout', self._id)
                error_code = NGHTTP2_SETTINGS_TIMEOUT
            # Graceful exit
            for stream in self._streams.values():
                stream._on_close(NGHTTP2_NO_ERROR)
            nghttp2_session_terminate_session(self._session, error_code)
            await self._sendall()
        finally:
            LOG.debug('session=%s: destroy session', self._id)
            nghttp2_session_del(self._session)
            # Disown objects
            self._session = None
            self._user_data = None
            self._streams.clear()
            self._stream_queue.close()
            await self._sock.close()
    async def _serve_tick(self):
        """Receive once, feed nghttp2, flush output; False when done."""
        try:
            data = await self._sock.recv(self.INCOMING_BUFFER_SIZE)
        except (BrokenPipeError, ConnectionResetError):
            return False
        except OSError as exc:
            LOG.warning('session=%s: %r', self._id, exc)
            return False
        LOG.debug('session=%s: recv %d bytes', self._id, len(data))
        if not data:
            LOG.debug('session=%s: connection is closed', self._id)
            return False
        try:
            rc = nghttp2_session_mem_recv(self._session, data, len(data))
        except Nghttp2Error as exc:
            if exc.error_code == NGHTTP2_ERR_BAD_CLIENT_MAGIC:
                LOG.debug('session=%s: bad client magic', self._id)
                return False
            raise
        if rc != len(data):
            # In the current implementation, nghttp2_session_mem_recv
            # always tries to process all input data normally.
            raise SessionError(
                'expect nghttp2_session_mem_recv to process %d bytes but only %d' %
                (len(data), rc))
        if not await self._sendall():
            LOG.debug('session=%s: bye!', self._id)
            return False
        return True
    def _make_session(self):
        """Create the nghttp2_session with our callbacks installed."""
        session = ctypes.POINTER(nghttp2_session)()
        # You should own user_data to prevent it from being garbage
        # collected
        user_data = ctypes.py_object(self)
        callbacks = ctypes.POINTER(nghttp2_session_callbacks)()
        nghttp2_session_callbacks_new(ctypes.byref(callbacks))
        try:
            nghttp2_session_callbacks_set_on_frame_recv_callback(
                callbacks, self._on_frame_recv)
            nghttp2_session_callbacks_set_on_data_chunk_recv_callback(
                callbacks, self._on_data_chunk_recv)
            nghttp2_session_callbacks_set_on_frame_send_callback(
                callbacks, self._on_frame_send)
            nghttp2_session_callbacks_set_on_frame_not_send_callback(
                callbacks, self._on_frame_not_send)
            nghttp2_session_callbacks_set_on_stream_close_callback(
                callbacks, self._on_stream_close)
            nghttp2_session_callbacks_set_on_begin_headers_callback(
                callbacks, self._on_begin_headers)
            nghttp2_session_callbacks_set_on_header_callback(
                callbacks, self._on_header)
            nghttp2_session_server_new(
                ctypes.byref(session),
                callbacks,
                _addrof(user_data),
            )
            return session, user_data
        finally:
            nghttp2_session_callbacks_del(callbacks)
    async def _sendall(self):
        """Flush pending output; serialized by self._lock."""
        async with self._lock:
            return await self._sendall_impl()
    async def _sendall_impl(self):
        """Drain nghttp2's output buffers onto the socket.

        Returns False when the peer is gone or nghttp2 wants neither
        read nor write (i.e. the session is finished).
        """
        ASSERT.not_none(self._session)
        buffers = []
        total_length = 0
        while True:
            buffer = ctypes.c_void_p()
            length = nghttp2_session_mem_send(
                self._session, ctypes.byref(buffer))
            if length == 0:
                break
            buffers.append(ctypes.string_at(buffer, length))
            total_length += length
        LOG.debug('session=%s: send %d bytes from %d parts',
                  self._id, total_length, len(buffers))
        # Unfortunately SSLSocket disallows scatter/gather sendmsg.
        try:
            await self._sock.sendall(b''.join(buffers))
        except (BrokenPipeError, ConnectionResetError):
            return False
        except OSError as exc:
            LOG.warning('session=%s: %r', self._id, exc)
            return False
        return (nghttp2_session_want_read(self._session) != 0 or
                nghttp2_session_want_write(self._session) != 0)
    class _CallbackReturn(Exception):
        """Raised inside a callback to short-circuit with a return code."""
        def __init__(self, code):
            super().__init__()
            self.code = code
    def declare_callback(c_func_signature):
        """Decorator: wrap a method as a C callback of the given signature.

        The trampoline recovers ``self`` from the user_data pointer
        (last C argument) and converts exceptions into error codes.
        """
        def wrap(py_func):
            def trampoline(session, *args):
                try:
                    self = ctypes.cast(args[-1], py_object_p).contents.value
                    # Callbacks should not be nested
                    ASSERT.none(self._current_callback)
                    self._current_callback = py_func.__name__
                    try:
                        return py_func(self, session, *args[:-1])
                    finally:
                        self._current_callback = None
                except Session._CallbackReturn as ret:
                    return ret.code
                except Exception:
                    LOG.exception('session=0x%x: err when calling %s',
                                  ctypes.addressof(session.contents),
                                  py_func.__name__)
                    return NGHTTP2_ERR_CALLBACK_FAILURE
            return c_func_signature(trampoline)
        return wrap
    @declare_callback(nghttp2_on_frame_recv_callback)
    def _on_frame_recv(self, session, frame):
        """Handle SETTINGS acks and end-of-request frames."""
        frame = frame.contents
        self._debug('type=%d, stream=%d', frame.hd.type, frame.hd.stream_id)
        if (frame.hd.type == NGHTTP2_SETTINGS and
                (frame.hd.flags & NGHTTP2_FLAG_ACK) != 0):
            self._debug('clear settings timeout')
            self._settings_timeout = None
        # A request ends either with END_STREAM on HEADERS (no body)...
        if (frame.hd.type == NGHTTP2_HEADERS and
                frame.headers.cat == NGHTTP2_HCAT_REQUEST and
                (frame.hd.flags & NGHTTP2_FLAG_END_STREAM) != 0):
            stream = self._get_stream(frame.hd.stream_id)
            if stream._on_request_done():
                self._stream_queue.put_nowait(stream)
            else:
                self._rst_stream_if_not_closed(frame.hd.stream_id)
        # ...or with END_STREAM on the last DATA frame.
        if (frame.hd.type == NGHTTP2_DATA and
                (frame.hd.flags & NGHTTP2_FLAG_END_STREAM) != 0):
            stream = self._get_stream(frame.hd.stream_id)
            if stream._on_request_done():
                self._stream_queue.put_nowait(stream)
            else:
                self._rst_stream_if_not_closed(frame.hd.stream_id)
        return 0
    @declare_callback(nghttp2_on_data_chunk_recv_callback)
    def _on_data_chunk_recv(self, session, flags, stream_id, data, length):
        """Forward a request-body chunk to its Stream object."""
        self._debug('stream=%d, length=%d', stream_id, length)
        self._get_stream(stream_id)._on_data(ctypes.string_at(data, length))
        return 0
    @declare_callback(nghttp2_on_frame_send_callback)
    def _on_frame_send(self, session, frame):
        """Start the settings timer and handle sent HEADERS/PUSH_PROMISE."""
        frame = frame.contents
        self._debug('type=%d, stream=%d', frame.hd.type, frame.hd.stream_id)
        if frame.hd.type == NGHTTP2_SETTINGS:
            if (frame.hd.flags & NGHTTP2_FLAG_ACK) != 0:
                return 0
            # Expect the peer to ACK our SETTINGS within the timeout.
            self._debug('set settings timeout: %f', self.SETTINGS_TIMEOUT)
            self._settings_timeout = self.SETTINGS_TIMEOUT
        if (frame.hd.type == NGHTTP2_HEADERS and
                (frame.hd.flags & NGHTTP2_FLAG_END_STREAM) != 0):
            return self._rst_stream_if_not_closed(frame.hd.stream_id)
        if frame.hd.type == NGHTTP2_PUSH_PROMISE:
            # For PUSH_PROMISE, send push response immediately
            stream = self._get_stream(frame.push_promise.promised_stream_id)
            stream._submit_response_nowait(stream.response)
        return 0
    @declare_callback(nghttp2_on_frame_not_send_callback)
    def _on_frame_not_send(self, session, frame, error_code):
        frame = frame.contents
        self._debug('type=%d, stream=%d, error_code=%d',
                    frame.hd.type, frame.hd.stream_id, error_code)
        if frame.hd.type == NGHTTP2_PUSH_PROMISE:
            # We have to remove stream here; otherwise, it is not
            # removed until session is terminated
            self._warning('remove stream %d', frame.hd.stream_id)
            self._get_stream(frame.hd.stream_id, remove=True)
        return 0
    @declare_callback(nghttp2_on_stream_close_callback)
    def _on_stream_close(self, session, stream_id, error_code):
        """Remove the Stream object and notify it of the close."""
        self._debug('stream=%d, error_code=%d', stream_id, error_code)
        self._get_stream(stream_id, remove=True)._on_close(error_code)
        return 0
    @declare_callback(nghttp2_on_begin_headers_callback)
    def _on_begin_headers(self, session, frame):
        """Create a Stream object when a new request starts."""
        frame = frame.contents
        self._debug('type=%d, stream=%d', frame.hd.type, frame.hd.stream_id)
        if (frame.hd.type == NGHTTP2_HEADERS and
                frame.headers.cat == NGHTTP2_HCAT_REQUEST):
            self._make_stream(frame.hd.stream_id)
        return 0
    @declare_callback(nghttp2_on_header_callback)
    def _on_header(
            self, session, frame, name, namelen, value, valuelen, flags):
        """Forward one (possibly NUL-joined multi-valued) header."""
        frame = frame.contents
        name = ctypes.string_at(name, namelen)
        values = ctypes.string_at(value, valuelen).split(b'\x00')
        self._debug('type=%d, stream=%d, %r=%r',
                    frame.hd.type, frame.hd.stream_id, name, values)
        self._get_stream(frame.hd.stream_id)._on_header(name, values)
        return 0
    @declare_callback(nghttp2_data_source_read_callback)
    def _on_data_source_read(
            self, session, stream_id, buf, length, data_flags, source):
        """Pull response-body bytes from the Python read callable."""
        self._debug('stream=%d', stream_id)
        source = source.contents
        read = ctypes.cast(source.ptr, py_object_p).contents.value
        data, error_code = read(length)
        if error_code != 0:
            return error_code
        num_read = len(data)
        if num_read:
            ctypes.memmove(buf, data, num_read)
        if num_read == 0:
            # No data and no error means EOF.
            data_flags[0] = NGHTTP2_DATA_FLAG_EOF
            self._rst_stream_if_not_closed(stream_id)
        return num_read
    # Remove the decorator helper from the class namespace.
    del declare_callback
    def _make_stream(self, stream_id):
        """Create and register a Stream; no-op (code 0) if it exists."""
        if stream_id in self._streams:
            self._warning('stream=%d: stream object exist', stream_id)
            raise Session._CallbackReturn(0)
        stream = Stream(self, stream_id)
        self._streams[stream_id] = stream
        return stream
    def _get_stream(self, stream_id, *, remove=False):
        """Look up (and optionally remove) a Stream by id."""
        try:
            if remove:
                return self._streams.pop(stream_id)
            else:
                return self._streams[stream_id]
        except KeyError:
            self._warning('stream=%d: no stream object', stream_id)
            raise Session._CallbackReturn(0) from None
    def _rst_stream(self, stream_id, error_code=NGHTTP2_INTERNAL_ERROR):
        """Queue an RST_STREAM frame for the given stream."""
        self._debug('stream=%d: rst_stream due to %d', stream_id, error_code)
        return nghttp2_submit_rst_stream(
            self._session, NGHTTP2_FLAG_NONE, stream_id, error_code)
    def _rst_stream_if_not_closed(self, stream_id):
        """Reset the stream (NO_ERROR) unless the remote already closed it."""
        rc = nghttp2_session_get_stream_remote_close(self._session, stream_id)
        if rc == 0:
            return self._rst_stream(stream_id, NGHTTP2_NO_ERROR)
        return 0
    async def next_stream(self):
        """Return next stream or None when the session is closed."""
        try:
            return await self._stream_queue.get()
        except queues.Closed:
            return None
    def __aiter__(self):
        return self
    async def __anext__(self):
        try:
            return await self._stream_queue.get()
        except queues.Closed:
            raise StopAsyncIteration from None
class Stream:
    """Represent HTTP/2 stream."""
    def __init__(self, session, stream_id):
        # Store a copy of session ID so that we may print stream even
        # after the session is closed.
        self._session_id = session._id
        self._id = stream_id
        self._session = session  # Cyclic reference :(
        self.request = None
        self._headers = []
        self._data_chunks = []
        self.response = None  # Own response
    def __str__(self):
        return '<Stream session=%s stream=%d>' % (self._session_id, self._id)
    # For these callbacks (the `_on_X` methods), Session should not call
    # them after the stream is closed; otherwise it is a bug, and thus
    # we raise AssertionError.
    def _on_header(self, name, values):
        """Collect one (possibly multi-valued) request header."""
        ASSERT.not_none(self._session)
        ASSERT.none(self.request)
        for value in values:
            self._headers.append((name, value))
    def _on_data(self, data):
        """Collect one chunk of the request body."""
        ASSERT.not_none(self._session)
        ASSERT.none(self.request)
        self._data_chunks.append(data)
    def _on_request_done(self):
        """Assemble the Request object; return False if it is invalid."""
        ASSERT.not_none(self._session)
        ASSERT.none(self.request)
        if self._data_chunks:
            body = b''.join(self._data_chunks)
        else:
            body = None
        try:
            self.request = Request._make(self._headers, body)
        except Exception as exc:
            LOG.warning('invalid request headers: %r', exc)
            return False
        else:
            return True
        finally:
            # Release the raw header/body buffers either way.
            del self._headers
            del self._data_chunks
    def _on_close(self, error_code):
        ASSERT.not_none(self._session)
        LOG.debug('%s: close due to %d', self, error_code)
        self._session = None  # Break cycle
    # For the submit_X methods below, it is possible that they are
    # called after the stream is closed; thus we throw StreamClosed.
    def _ensure_not_closed(self):
        if self._session is None:
            raise StreamClosed
    # Non-blocking version of submit() that should be called in the
    # Session object's callback functions.
    def _submit_response_nowait(self, response):
        ASSERT.in_(self.response, (None, response))
        # Assign response before _ensure_not_closed so that even if the
        # stream is closed, upper layer may still have access to the
        # response object.
        self.response = response
        self._ensure_not_closed()
        LOG.debug('%s: submit response', self)
        owners = []
        nva, nvlen = response._make_headers(self._session, owners)
        try:
            nghttp2_submit_response(
                self._session._session,
                self._id,
                nva, nvlen,
                response._make_data_provider_ptr(),
            )
        except Nghttp2Error:
            self._session._rst_stream(self._id)
            raise
    async def submit_response(self, response):
        """Send response to client."""
        self._submit_response_nowait(response)
        await self._session._sendall()
    async def submit_push_promise(self, request, response):
        """Push resource to client.

        Note that this must be used before submit().
        """
        self._ensure_not_closed()
        LOG.debug('%s: submit push promise', self)
        owners = []
        nva, nvlen = request._make_headers(self._session, owners)
        promised_stream_id = nghttp2_submit_push_promise(
            self._session._session,
            NGHTTP2_FLAG_NONE,
            self._id,
            nva, nvlen,
            None,
        )
        LOG.debug('%s: push promise stream: %d', self, promised_stream_id)
        # The pushed response is sent from the _on_frame_send callback.
        promised_stream = self._session._make_stream(promised_stream_id)
        promised_stream.response = response
        await self._session._sendall()
    async def submit_rst_stream(self, error_code=NGHTTP2_INTERNAL_ERROR):
        """Reset this stream with the given error code."""
        self._ensure_not_closed()
        self._session._rst_stream(self._id, error_code)
        await self._session._sendall()
    class Buffer:
        """Response body buffer."""
        def __init__(self, stream):
            self._stream = stream  # Cyclic reference :(
            self._data_chunks = []
            self._deferred = False
            self._aborted = False
            self._closed = False
        async def __aenter__(self):
            return self
        async def __aexit__(self, exc_type, *_):
            # Abort on error, close on normal exit.
            if exc_type:
                await self.abort()
            else:
                await self.close()
        def _read(self, length):
            """Data-source read; return (data, error_code) for nghttp2."""
            if self._aborted:
                return b'', NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE
            elif not self._data_chunks:
                if self._closed:
                    return b'', 0  # EOF
                else:
                    # Tell nghttp2 to pause until write() resumes us.
                    self._deferred = True
                    return b'', NGHTTP2_ERR_DEFERRED
            elif length >= len(self._data_chunks[0]):
                return bytes(self._data_chunks.pop(0)), 0
            else:
                data = self._data_chunks[0][:length]
                self._data_chunks[0] = self._data_chunks[0][length:]
                return bytes(data), 0
        async def write(self, data):
            """Append ``data`` to the buffer and kick the sender."""
            ASSERT(
                not self._aborted and not self._closed,
                'expect Buffer state: not %r and not %r == True',
                self._aborted, self._closed,
            )
            if data:
                self._data_chunks.append(memoryview(data))
            await self._send()
            return len(data)
        # Note that while Session.serve() will continue sending data to
        # the client after buffer is aborted or closed, we still need to
        # call self._send() in abort() and close() since Session.serve()
        # could be blocked on socket.recv() and make no progress.
        async def abort(self):
            """Abort the body; nghttp2 will fail the stream."""
            ASSERT(
                not self._aborted and not self._closed,
                'expect Buffer state: not %r and not %r == True',
                self._aborted, self._closed,
            )
            self._aborted = True
            await self._send()
            self._stream = None  # Break cycle
        async def close(self):
            """Mark end-of-body; remaining chunks are still sent."""
            ASSERT(
                not self._aborted and not self._closed,
                'expect Buffer state: not %r and not %r == True',
                self._aborted, self._closed,
            )
            self._closed = True
            await self._send()
            self._stream = None  # Break cycle
        async def _send(self):
            if self._stream._session is None:
                return  # This stream was closed
            if self._deferred:
                # Wake up the deferred data source before flushing.
                nghttp2_session_resume_data(
                    self._stream._session._session, self._stream._id)
                self._deferred = False
            await self._stream._session._sendall()
    def make_buffer(self):
        """Create a Buffer for streaming this stream's response body."""
        return self.Buffer(self)
class Entity:
    """Base for HTTP/2 entities that lower headers into nghttp2_nv arrays.

    Subclasses implement ``_get_num_headers`` and ``_iter_headers``;
    ``_make_headers`` turns the yielded pairs into a C array whose
    backing buffers are appended to ``owners`` to keep them alive.
    """

    def _make_headers(self, session, owners):
        """Build the nghttp2_nv array; return (array, count)."""
        count = self._get_num_headers()
        array = (nghttp2_nv * count)()
        pairs = self._iter_headers(session)
        for entry, (name, value) in zip(array, pairs):
            self._set_nv(entry, name, value, owners)
        return array, count

    def _get_num_headers(self):
        """Subclasses return how many header pairs they will yield."""
        raise NotImplementedError

    def _iter_headers(self, session):
        """Subclasses yield (name, value) byte-string pairs."""
        raise NotImplementedError

    @staticmethod
    def _set_nv(nv, name, value, owners):
        """Fill one nghttp2_nv entry in place."""
        nv.name = Entity._bytes_to_void_ptr(name, owners)
        nv.value = Entity._bytes_to_void_ptr(value, owners)
        nv.namelen = len(name)
        nv.valuelen = len(value)
        nv.flags = NGHTTP2_NV_FLAG_NONE

    @staticmethod
    def _bytes_to_void_ptr(byte_string, owners):
        """Copy bytes into a C buffer (kept alive via ``owners``) and
        return its address as a void pointer.
        """
        copy = ctypes.create_string_buffer(byte_string, len(byte_string))
        owners.append(copy)
        return _addrof(copy)
class Request(Entity):
    """HTTP/2 request entity."""

    @classmethod
    def _make(cls, headers, body):
        """Build a Request from raw (name, value) header pairs and body.

        Raises ValueError when any of the four required pseudo-headers
        (:method, :scheme, :authority, :path) cannot be determined.
        """
        kwargs = {}
        extra_headers = []
        for name, value in headers:
            if name == b':method':
                kwargs['method'] = Method(value)
            elif name == b':scheme':
                kwargs['scheme'] = Scheme(value)
            elif name == b':authority':
                kwargs['authority'] = value
            elif name.lower() == b'host' and 'authority' not in kwargs:
                # If this request is translated from a HTTP/1 request by
                # nghttpx, the ':authority' header might be omitted. I
                # do not know whether this is standard conforming, but
                # this is what is implemented in nghttpx; check
                # Http2DownstreamConnection::push_request_headers in
                # shrpx_http2_downstream_connection.cc, either one of
                # :authority or host is set, but not both.
                kwargs['authority'] = value
            elif name == b':path':
                kwargs['path'] = value
            else:
                extra_headers.append((name, value))
        # All four of method, scheme, authority, and path must be set.
        if len(kwargs) != 4:
            raise ValueError('miss HTTP/2 headers: %r' % headers)
        return cls(headers=extra_headers, body=body, **kwargs)
    def __init__(self, *,
                 method=Method.GET,
                 scheme=None,
                 authority=None,
                 path,
                 headers=None,
                 body=None):
        self.method = method
        self.scheme = scheme
        self.authority = authority
        self.path = path
        self.headers = headers or []
        self.body = body
    def _get_num_headers(self):
        # Extra four for method, scheme, authority, and path
        return 4 + len(self.headers)
    def _iter_headers(self, session):
        # When a session is given (push promises), fall back to its
        # scheme/host for the :scheme and :authority pseudo-headers.
        if session:
            ASSERT.not_none(session._scheme)
            ASSERT.not_none(session._host)
        yield (b':method', self.method.value)
        yield (b':scheme', (self.scheme or session._scheme).value)
        yield (b':authority', self.authority or session._host)
        yield (b':path', self.path)
        yield from self.headers
    def iter_headers(self):
        """Public header iterator; scheme and authority must be set."""
        ASSERT.not_none(self.scheme)
        ASSERT.not_none(self.authority)
        yield from self._iter_headers(None)
class Response(Entity):
    """HTTP/2 response entity."""

    def __init__(self, *, status=Status.OK, headers=None, body=None):
        self.status = status
        self.headers = headers or []
        # body may be bytes or a Stream.Buffer (checked in
        # _make_data_provider_ptr).
        self.body = body
        # Keep ctypes objects alive while nghttp2 holds pointers to them.
        self._owners = []
    def _get_num_headers(self):
        # Extra one for status
        return 1 + len(self.headers)
    def _iter_headers(self, _):
        yield (b':status', b'%d' % self.status)
        yield from self.headers
    def _make_data_provider_ptr(self):
        """Create an nghttp2 data provider for the body, or None."""
        if not self.body:
            return None
        if isinstance(self.body, bytes):
            buffer = io.BytesIO(self.body)
            read = lambda length: (buffer.read(length), 0)
        elif isinstance(self.body, Stream.Buffer):
            read = self.body._read
        else:
            raise TypeError('body is neither bytes nor Buffer: %r' % self.body)
        read = ctypes.py_object(read)
        # Own the py_object so it is not garbage collected while the
        # data provider still points at it.
        self._owners.append(read)
        provider = nghttp2_data_provider()
        provider.read_callback = Session._on_data_source_read
        provider.source.ptr = _addrof(read)
        return ctypes.byref(provider)
    def iter_headers(self):
        """Public iterator over the response headers."""
        yield from self._iter_headers(None)
def _addrof(obj):
return ctypes.cast(ctypes.byref(obj), ctypes.c_void_p)
| {
"repo_name": "clchiou/garage",
"path": "py/http2/http2/__init__.py",
"copies": "1",
"size": "28903",
"license": "mit",
"hash": 7926566957729146000,
"line_mean": 33.4493444577,
"line_max": 83,
"alpha_frac": 0.5701484275,
"autogenerated": false,
"ratio": 3.940959912735206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00003439083962709748,
"num_lines": 839
} |
__all__ = [
'set_storage',
'scalar',
]
import core
dtypes = ("float", "double", "int32", "int64")
def IM(dir, mode="read", msecs=500):
    """Create an information-maintainer service over a storage directory.

    :param dir: str
        directory of the underlying storage
    :param mode: str
        "read" starts a read service; any other value (conventionally
        "write") starts a write service
    :param msecs: int
        service polling interval in milliseconds
    :return: a started ``core.Im`` object
    """
    im = core.Im()
    if mode == "read":
        im.start_read_service(dir, msecs)
    else:
        im.start_write_service(dir, msecs)
    return im
class _Scalar(object):
'''
Python syntax wrapper for the core.ScalarHelper object.
'''
def __init__(self, core_object):
self._core_object = core_object
def add(self, id, vs):
'''
add a scalar record
:param id: int
id in the x-corrdinate
:param vs: list
values
:return: None
'''
self._core_object.add_record(id, vs)
def set_captions(self, cs):
'''
set the captions, one caption for one line.
:param cs: list of str
:return: None
'''
self._core_object.set_captions(cs)
@property
def captions(self):
return self._core_object.get_captions()
@property
def records(self):
'''
get all the records, format like
[
[0.1, 0.2], # first record
[0.2, 0.3], # second record
# ...
]
:return: list of list
'''
return self._core_object.get_records()
@property
def ids(self):
'''
get all the ids for the records
:return: list of int
'''
return self._core_object.get_ids()
@property
def timestamps(self):
'''
get all the timestamps for the records
:return: list of int
'''
return self._core_object.get_timestamps()
@property
def size(self):
return self._core_object.get_record_size()
def scalar(im, tag, dtype='float'):
    '''
    create a scalar component.
    :param im: the information-maintainer service object
    :param tag: str
        name of this component.
    :param dtype: string
        the data type that will be used in underlying storage; must be
        one of ``dtypes``.
    :return: a ``_Scalar`` wrapper over the created tablet
    '''
    assert dtype in dtypes, "invalid dtype(%s), should be one of %s" % (
        dtype, str(dtypes))
    tablet = im.add_tablet(tag, -1)
    # Map each supported dtype to the tablet factory that creates the
    # corresponding typed scalar helper.
    factories = {
        'float': tablet.as_float_scalar,
        'double': tablet.as_double_scalar,
        'int32': tablet.as_int32_scalar,
        'int64': tablet.as_int64_scalar,
    }
    return _Scalar(factories[dtype](im))
| {
"repo_name": "VisualDL/VisualDL",
"path": "visualdl/python/summary.py",
"copies": "1",
"size": "2418",
"license": "apache-2.0",
"hash": -5370777447925581000,
"line_mean": 22.0285714286,
"line_max": 72,
"alpha_frac": 0.5421836228,
"autogenerated": false,
"ratio": 3.680365296803653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47225489196036524,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'SGeMSGridReader',
'WriteImageDataToSGeMS',
]
__displayname__ = 'SGeMS File I/O'
import re
import numpy as np
import pandas as pd
import vtk
from .. import _helpers, interface
from ..base import WriterBase
from .gslib import GSLibReader
class SGeMSGridReader(GSLibReader):
    """Generates ``vtkImageData`` from the uniform grid defined in the input
    file in the SGeMS grid format. This format is simply the GSLIB format where
    the header line defines the dimensions of the uniform grid.
    """

    __displayname__ = 'SGeMS Grid Reader'
    __category__ = 'reader'
    extensions = GSLibReader.extensions + 'gslibgrid mtxset'
    description = 'PVGeo: SGeMS Uniform Grid'

    def __init__(self, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0), **kwargs):
        GSLibReader.__init__(self, outputType='vtkImageData', **kwargs)
        self.__extent = None  # (n1, n2, n3) parsed from the file header
        self.__origin = origin
        self.__spacing = spacing
        # Cell values equal to this mask are replaced with NaN on read
        self.__mask = kwargs.get("mask", -9966699.0)

    def __parse_extent(self, header):
        """Parse the grid dimensions from a header line.

        Supports both the ``name (n1xn2xn3)`` style and the plain
        ``n1 n2 n3`` style.

        Raises:
            PVGeoError: if neither style matches.
        """
        regex = re.compile(r'\S\s\((\d+)x(\d+)x(\d+)\)')
        dims = regex.findall(header)
        if len(dims) < 1:
            # Fall back to the whitespace-separated style
            regex = re.compile(r'(\d+) (\d+) (\d+)')
            dims = regex.findall(header)
            if len(dims) < 1:
                raise _helpers.PVGeoError('File not in proper SGeMS Grid format.')
        dims = dims[0]
        return int(dims[0]), int(dims[1]), int(dims[2])

    def _read_extent(self):
        """Reads the input file for the SGeMS format to get output extents.
        Computationally inexpensive method to discover whole output extent.

        Return:
            tuple :
                This returns a tuple of the whole extent for the uniform
                grid to be made of the input file (0,n1-1, 0,n2-1, 0,n3-1).
                This output should be directly passed to set the whole output
                extent.
        """
        # Read first file... extent cannot vary with time
        # TODO: make more efficient to only read the header of the file
        file_lines = self._get_file_contents(idx=0)
        header = file_lines[0 + self.get_skip_rows()]
        n1, n2, n3 = self.__parse_extent(header)
        return (0, n1, 0, n2, 0, n3)

    def _extract_header(self, content):
        """Internal helper to parse header info for the SGeMS file format"""
        titles, content = GSLibReader._extract_header(self, content)
        h = self.get_file_header()
        try:
            if self.__extent is None:
                self.__extent = self.__parse_extent(h)
            elif self.__extent != (self.__parse_extent(h)):
                # The extent must be constant across a file time series
                raise _helpers.PVGeoError('Grid dimensions change in file time series.')
        except ValueError:
            raise _helpers.PVGeoError('File not in proper SGeMS Grid format.')
        return titles, content

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to get output data object for given time step.
        Constructs the ``vtkImageData``
        """
        # Get output:
        output = vtk.vtkImageData.GetData(outInfo)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        if self.need_to_read():
            self._read_up_front()
        # Generate the data object
        n1, n2, n3 = self.__extent
        dx, dy, dz = self.__spacing
        ox, oy, oz = self.__origin
        # Dimensions are points; data is attached to cells (one fewer
        # per axis), hence the +1 here.
        output.SetDimensions(n1 + 1, n2 + 1, n3 + 1)
        output.SetSpacing(dx, dy, dz)
        output.SetOrigin(ox, oy, oz)
        # Use table generator and convert because it's easy:
        table = vtk.vtkTable()
        df = self._get_raw_data(idx=i)
        # Replace all masked values with NaN
        df.replace(self.__mask, np.nan, inplace=True)
        interface.data_frame_to_table(df, table)
        # Now get arrays from table and add to cell data of the output.
        # Use a distinct loop variable so the requested time index ``i``
        # is not shadowed.
        for j in range(table.GetNumberOfColumns()):
            output.GetCellData().AddArray(table.GetColumn(j))
        del table
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to set grid extents."""
        # Call parent to handle time stuff
        GSLibReader.RequestInformation(self, request, inInfo, outInfo)
        # Now set whole output extent
        ext = self._read_extent()
        info = outInfo.GetInformationObject(0)
        # Set WHOLE_EXTENT: This is absolutely necessary
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        return 1

    def set_spacing(self, dx, dy, dz):
        """Set the spacing for each axial direction"""
        spac = (dx, dy, dz)
        if self.__spacing != spac:
            self.__spacing = spac
            self.Modified(read_again=False)

    def set_origin(self, ox, oy, oz):
        """Set the origin corner of the grid"""
        origin = (ox, oy, oz)
        if self.__origin != origin:
            self.__origin = origin
            self.Modified(read_again=False)
class WriteImageDataToSGeMS(WriterBase):
    """Writes a ``vtkImageData`` object to the SGeMS uniform grid format.
    Note that this only handles CellData, as that is the convention with
    SGeMS.
    """
    __displayname__ = 'Write ``vtkImageData`` To SGeMS Grid Format'
    __category__ = 'writer'
    def __init__(self, inputType='vtkImageData'):
        WriterBase.__init__(self, inputType=inputType, ext='SGeMS')
    def perform_write_out(self, input_data_object, filename, object_name):
        """Write out the input ``vtkImageData`` to the SGeMS file format"""
        # NOTE(review): object_name is unused here; other writers may use it.
        # Get the input data object
        grd = input_data_object
        # Get grid dimensions and minus one because this defines nodes
        # (the SGeMS header counts cells, not points)
        nx, ny, nz = grd.GetDimensions()
        nx -= 1
        ny -= 1
        nz -= 1
        numArrs = grd.GetCellData().GetNumberOfArrays()
        arrs = []
        titles = []
        # Get data arrays
        for i in range(numArrs):
            vtkarr = grd.GetCellData().GetArray(i)
            arrs.append(interface.convert_array(vtkarr))
            titles.append(vtkarr.GetName())
        datanames = '\n'.join(titles)
        # One column per data array, one row per cell
        df = pd.DataFrame(np.array(arrs).T)
        with open(filename, 'w') as f:
            f.write('%d %d %d\n' % (nx, ny, nz))
            f.write('%d\n' % len(titles))
            f.write(datanames)
            f.write('\n')
            # NOTE(review): header=None relies on pandas treating None as
            # "no header"; newer pandas prefer header=False — confirm.
            df.to_csv(
                f, sep=' ', header=None, index=False, float_format=self.get_format()
            )
        return 1
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/gslib/sgems.py",
"copies": "1",
"size": "6547",
"license": "bsd-3-clause",
"hash": -1227308266124074500,
"line_mean": 35.1712707182,
"line_max": 88,
"alpha_frac": 0.5904994654,
"autogenerated": false,
"ratio": 3.6822272215973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47727266869973,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'SignalQueue',
'uninstall_handlers',
]
import contextlib
import errno
import logging
import os
import signal
import struct
import threading
from garage.assertions import ASSERT
from garage.collections import SingletonMeta
from . import queues
from . import utils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def uninstall_handlers(*signums):
    """Temporarily replace the handlers of ``signums`` with ``null_handler``.

    On exit (or on a failure part-way through installation) every saved
    handler is restored in reverse order of installation.
    """
    saved = []
    try:
        for signum in signums:
            saved.append((signum, signal.signal(signum, null_handler)))
        yield
    finally:
        while saved:
            signum, previous = saved.pop()
            signal.signal(signum, previous)
# You can't install SIG_IGN - that will even disable signal delivery to
# the wakeup fd. Instead, you need a null handler.
def null_handler(signum, frame):
    """Do-nothing handler (SIG_IGN would suppress wakeup-fd delivery)."""
    return None
class SignalQueue(metaclass=SingletonMeta):
    """Singleton queue of signals delivered through a wakeup fd.

    A daemon thread (``receive_signals``) reads signal numbers from the
    read end of a pipe registered with ``signal.set_wakeup_fd`` and
    pushes them onto an internal queue.
    """

    # Name given to the background reader thread.
    THREAD_NAME = 'signals'
    # Default queue capacity when none is given.
    CAPACITY = 64

    def __init__(self, capacity=None):
        current_thread = threading.current_thread()
        # ``signal.set_wakeup_fd`` below may only be called from the main
        # thread, so fail fast otherwise.
        ASSERT(
            current_thread.ident == threading.main_thread().ident,
            'expect signal queue be initialized in the main thread, not %r',
            current_thread,
        )
        if capacity is None:
            capacity = self.CAPACITY
        # The stack undoes partial setup on error here, and the full
        # setup later in close().
        stack = contextlib.ExitStack()
        try:
            self._queue = queues.Queue(capacity=capacity)
            stack.callback(self._queue.close)
            rfd, wfd = os.pipe2(os.O_CLOEXEC)
            stack.callback(os.close, rfd)
            stack.callback(os.close, wfd)
            # Non-blocking write end so signal delivery never blocks.
            os.set_blocking(wfd, False)
            last_fd = signal.set_wakeup_fd(wfd)
            # Callbacks run LIFO: the previous wakeup fd is restored
            # before the pipe ends are closed.
            stack.callback(restore_wakeup_fd, last_fd, wfd)
            ASSERT(
                last_fd == -1,
                'expect no signal wakeup fd being set: %d', last_fd,
            )
            thread = threading.Thread(
                target=receive_signals,
                name=self.THREAD_NAME,
                args=(rfd, self._queue),
                daemon=True,
            )
            thread.start()
            utils.set_pthread_name(thread, self.THREAD_NAME)
        except Exception:
            stack.close()
            raise
        self._stack = stack

    def __bool__(self):
        return bool(self._queue)

    def __len__(self):
        """Return the number of queued signals."""
        return len(self._queue)

    def is_full(self):
        return self._queue.is_full()

    def is_closed(self):
        return self._queue.is_closed()

    def close(self, graceful=True):
        """Close the queue and undo all setup (restore wakeup fd, close pipe)."""
        items = self._queue.close(graceful=graceful)
        self._stack.close()
        return items

    def get(self, block=True, timeout=None):
        """Pop the next received signal from the queue."""
        return self._queue.get(block=block, timeout=timeout)
def restore_wakeup_fd(restore_fd, expect_fd):
    """Restore the signal wakeup fd, logging (never raising) on anomalies."""
    if threading.current_thread() is not threading.main_thread():
        # set_wakeup_fd only works from the main thread.
        LOG.error(
            'cannot restore signal wakeup fd in non-main thread: fd=%d',
            restore_fd,
        )
        return
    previous_fd = signal.set_wakeup_fd(restore_fd)
    if previous_fd != expect_fd:
        LOG.error(
            'expect last signal wakeup fd to be %d, not %d',
            expect_fd, previous_fd,
        )
def receive_signals(rfd, queue):
    """Daemon-thread loop: read signal numbers from *rfd* into *queue*.

    Runs until the queue is closed or the pipe's read end is closed
    (which surfaces as EBADF).
    """
    LOG.info('start receiving signals')
    try:
        while not queue.is_closed():
            try:
                data = os.read(rfd, 64)
            except OSError as e:
                # EBADF is the expected exit: close() closes rfd.
                if e.errno != errno.EBADF:
                    LOG.exception('cannot read signals: fd=%d', rfd)
                break
            # Each byte is one signal number (matches the wakeup-fd
            # write format used by the interpreter).
            signums = struct.unpack('%uB' % len(data), data)
            for signum in signums:
                try:
                    signum = signal.Signals(signum)
                except ValueError:
                    LOG.error('unrecognizable signum: %d', signum)
                    # NOTE(review): falls through and still enqueues the
                    # raw int -- confirm whether a ``continue`` was meant.
                try:
                    queue.put(signum, block=False)
                except queues.Full:
                    LOG.error('drop signal: %s', signum)
                except queues.Closed:
                    LOG.warning('drop signal and all the rest: %s', signum)
                    break
    except Exception:
        LOG.exception('encounter unexpected error')
    finally:
        # To notify the other side that I am dead.
        queue.close()
        LOG.info('exit')
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/threads/signals.py",
"copies": "1",
"size": "4283",
"license": "mit",
"hash": -7653047591992971000,
"line_mean": 24.494047619,
"line_max": 76,
"alpha_frac": 0.5605883726,
"autogenerated": false,
"ratio": 4.0790476190476195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 168
} |
__all__ = [
'SignalSource',
]
import signal
import socket
import threading
from g1.bases.assertions import ASSERT
from g1.bases.classes import SingletonMeta
from . import adapters
class SignalSource(metaclass=SingletonMeta):
    """Signal queue.

    Python runtime implements a UNIX signal handler that writes signal
    number to a file descriptor (which is globally unique, by the way).
    ``SignalSource`` wraps this feature.

    NOTE: This class is a singleton (calling ``SignalSource()`` returns
    the same instance).  We make this design choice because UNIX signal
    handling is always strange and global.
    """

    def __init__(self):
        # Socket pair standing in for the wakeup pipe; ``_wakeup_fd``
        # remembers the fd that was installed before ``__enter__``.
        self._sock_r = self._sock_w = self._wakeup_fd = None
        # signum -> previous handler, so ``disable`` can restore it.
        self._handlers = {}

    def __enter__(self):
        """Install this source as the process-wide signal wakeup fd."""
        # ``set_wakeup_fd`` can only be called from the main thread.
        ASSERT.is_(threading.current_thread(), threading.main_thread())
        # Disallow nested use; ``SignalSource`` is a singleton and is
        # intended to be used as such.
        ASSERT.none(self._wakeup_fd)
        sock_r, self._sock_w = socket.socketpair()
        self._sock_r = adapters.SocketAdapter(sock_r)
        # Non-blocking write end so signal delivery never blocks.
        self._sock_w.setblocking(False)
        self._wakeup_fd = signal.set_wakeup_fd(self._sock_w.fileno())
        return self

    def __exit__(self, *_):
        """Restore every handler and the previous wakeup fd."""
        for signum in tuple(self._handlers):
            self.disable(signum)
        signal.set_wakeup_fd(self._wakeup_fd)
        self._sock_r.close()
        self._sock_w.close()
        self._sock_r = self._sock_w = self._wakeup_fd = None
        self._handlers.clear()

    def enable(self, signum):
        """Enable receiving signal ``signum``."""
        ASSERT.not_none(self._wakeup_fd)
        # Disallow repeated enable; ``SignalSource`` is a singleton and
        # is intended to be used as such.
        ASSERT.not_in(signum, self._handlers)
        # Register a dummy signal handler to ask Python to write the
        # signal number to the wakeup file descriptor.
        self._handlers[signum] = signal.signal(signum, _noop)
        # Set SA_RESTART to limit EINTR occurrences.
        signal.siginterrupt(signum, False)

    def disable(self, signum):
        """Disable receiving signal ``signum``."""
        ASSERT.not_none(self._wakeup_fd)
        ASSERT.in_(signum, self._handlers)
        signal.signal(signum, self._handlers.pop(signum))
        # Should we also restore ``signal.siginterrupt``?  But how?

    async def get(self):
        """Wait for and return the next signal as a ``signal.Signals``."""
        one_byte = (await self._sock_r.recv(1))[0]
        return signal.Signals(one_byte)  # pylint: disable=no-member
def _noop(*_):
pass
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/bases/g1/asyncs/bases/signals.py",
"copies": "1",
"size": "2630",
"license": "mit",
"hash": 1999074594452012000,
"line_mean": 33.1558441558,
"line_max": 71,
"alpha_frac": 0.6342205323,
"autogenerated": false,
"ratio": 3.7733142037302727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49075347360302723,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'SlackHookHandler',
'EmailHandler',
'get_logger',
'log_exception',
'get_timed_rotating_logger',
'get_timed_rotating_file_handler',
'get_slack_hook_handler',
'get_email_handler',
'set_root_logger',
]
import logging
import functools
import logging.handlers
import autos.notification.slack as slack
import autos.notification.email as email
# Format applied by ``set_root_logger`` when no format is supplied.
DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# Handlers default to NOTSET so the logger's own level decides filtering.
DEFAULT_HANDLER_LEVEL = 'NOTSET'
class SlackHookHandler(logging.Handler):
    """Logging handler that posts each formatted record to a Slack webhook."""

    def __init__(self, url, username=None, channel=None):
        super().__init__()
        self.url = url
        self.username = username
        self.channel = channel
        self.hook = slack.IncomingWebhook(url=url)

    def emit(self, record):
        """Format *record* and deliver it through the incoming webhook."""
        self.hook.send(
            text=self.format(record),
            username=self.username,
            channel=self.channel,
        )
class EmailHandler(logging.Handler):
    """Logging handler that emails each formatted record."""

    def __init__(
        self,
        send_from,
        send_to,
        subject,
        username,
        password,
        **opts
    ):
        super().__init__()
        # Bind every delivery parameter up front; emit() only adds text.
        self.send = functools.partial(
            email.send_email,
            send_from=send_from,
            send_to=send_to,
            subject=subject,
            username=username,
            password=password,
            **opts
        )

    def emit(self, record):
        """Format *record* and send it as the email body."""
        self.send(text=self.format(record))
def get_logger(name=None):
    """Return the logger registered under *name* (the root logger if None)."""
    return logging.getLogger(name)
def log_exception(logger):
    """Decorator factory: log any exception escaping the wrapped function.

    The exception is recorded via ``logger.exception`` and then re-raised
    unchanged, so callers still observe the original failure.

    :param logger: object exposing an ``exception(msg)`` method
        (typically a ``logging.Logger``).
    :return: a decorator.
    """
    def actual_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            # Was a bare ``except:``; ``BaseException`` keeps the same
            # coverage (incl. KeyboardInterrupt) but is explicit.
            except BaseException:
                logger.exception('EXCEPTION_OCCURED')
                raise
        return wrapper
    return actual_decorator
def get_timed_rotating_logger(
    name,
    filename,
    level='INFO',
    when='D',
    backup_count=7,
    log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
):
    """Create a non-propagating logger backed by a timed rotating file.

    :type name: str
    :param name: Logger name.

    :type filename: str
    :param filename: Log file name.

    :rtype: logging.Logger
    :return: Logger instance.
    """
    # Build the handler first, then attach it to the configured logger.
    handler = logging.handlers.TimedRotatingFileHandler(
        filename=filename,
        when=when,
        backupCount=backup_count,
    )
    handler.setFormatter(logging.Formatter(log_format))

    logger = get_logger(name)
    logger.propagate = False
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger
def get_timed_rotating_file_handler(
    filename,
    level=DEFAULT_HANDLER_LEVEL,
    when='D',
    backup_count=7,
    **opts
):
    """Return a ``TimedRotatingFileHandler`` for *filename* at *level*.

    Extra keyword arguments are forwarded to the handler constructor.
    """
    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename,
        when=when,
        backupCount=backup_count,
        **opts,
    )
    file_handler.setLevel(level)
    return file_handler
def get_slack_hook_handler(
    url,
    level=DEFAULT_HANDLER_LEVEL,
    **opts
):
    """Return a ``SlackHookHandler`` for *url* with its level set."""
    hook_handler = SlackHookHandler(url=url, **opts)
    hook_handler.setLevel(level)
    return hook_handler
def get_email_handler(
    send_from,
    username,
    password,
    level=DEFAULT_HANDLER_LEVEL,
    **opts
):
    """Return an ``EmailHandler`` configured with the given credentials."""
    mail_handler = EmailHandler(
        send_from=send_from,
        username=username,
        password=password,
        **opts
    )
    mail_handler.setLevel(level)
    return mail_handler
def set_root_logger(
    *handlers,
    level='INFO',
    format=DEFAULT_LOG_FORMAT
):
    """Configure the root logger with *handlers* and return it."""
    logging.basicConfig(level=level, format=format, handlers=handlers)
    return get_logger()
| {
"repo_name": "hans-t/autos",
"path": "autos/utils/logging.py",
"copies": "1",
"size": "3895",
"license": "mit",
"hash": 4417282178819656700,
"line_mean": 20.7597765363,
"line_max": 82,
"alpha_frac": 0.5833119384,
"autogenerated": false,
"ratio": 4.027921406411582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5111233344811582,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Socket',
'device',
'terminate',
]
import curio
import curio.traps
from . import (
SocketBase,
errors,
terminate as _terminate,
)
from .constants import (
AF_SP,
AF_SP_RAW,
NN_DONTWAIT,
)
async def device(sock1, sock2=None):
    """Re-implement nn_device without threads.

    Forwards messages between two raw sockets (or loops ``sock1`` back
    onto itself when ``sock2`` is None) until a socket is closed, which
    surfaces as EBADF.
    """

    def test_fd(sock, fd_name):
        # A protocol exposes nn_sndfd/nn_rcvfd only for directions it
        # supports; ENOPROTOOPT means "not this direction".
        try:
            getattr(sock.options, fd_name)
        except errors.ENOPROTOOPT:
            return False
        else:
            return True

    async def forward(s1, s2):
        # Pump messages until either socket is closed.
        while True:
            try:
                with await s1.recvmsg() as message:
                    await s2.sendmsg(message)
            except errors.EBADF:
                break

    errors.asserts(
        sock1.options.nn_domain == AF_SP_RAW,
        'expect raw socket: %r', sock1,
    )
    errors.asserts(
        sock2 is None or sock2.options.nn_domain == AF_SP_RAW,
        'expect raw socket: %r', sock2,
    )

    if sock2 is None:
        # Loopback device.
        await forward(sock1, sock1)
        return

    async with curio.TaskGroup() as group:
        okay = False
        if test_fd(sock1, 'nn_rcvfd') and test_fd(sock2, 'nn_sndfd'):
            await group.spawn(forward(sock1, sock2))
            okay = True
        if test_fd(sock2, 'nn_rcvfd') and test_fd(sock1, 'nn_sndfd'):
            await group.spawn(forward(sock2, sock1))
            okay = True
        if not okay:
            # BUG FIX: the sockets were previously passed as extra
            # AssertionError args and never interpolated into the string.
            raise AssertionError(
                'incorrect direction: %r, %r' % (sock1, sock2))
        await group.join()
#
# Note about the hack:
#
# After a file descriptor (specifically, nn_sndfd, and nn_rcvfd) is
# added to curio's event loop, it can't to detect when file descriptor
# is closed. As a result, __transmit will be blocked forever on waiting
# the file descriptor becoming readable.
#
# To address this issue, before we close the socket, we will get the
# curio kernel object, and mark the blocked tasks as ready manually.
#
async def terminate():
    """Terminate nanomsg globally after unblocking curio tasks.

    Per the hack note above: curio cannot detect a closed fd, so any
    task blocked on one of the event fds registered by this module
    (wrapped in ``Fd``) is manually marked ready first.
    """
    # HACK: Mark tasks as ready before close sockets.
    kernel = await curio.traps._get_kernel()
    # Make a copy before modify it.
    items = tuple(kernel._selector.get_map().items())
    for fd, key in items:
        # Only touch fds registered by this module (see ``Fd`` wrapper).
        if isinstance(fd, Fd):
            rtask, wtask = key.data
            _mark_ready(kernel, rtask)
            _mark_ready(kernel, wtask)
            kernel._selector.unregister(fd)
    # Now we may close sockets.
    _terminate()
class Socket(SocketBase):
    """Curio-friendly nanomsg socket.

    Wraps the non-blocking ``SocketBase`` transmit calls and sleeps on
    the socket's sndfd/rcvfd between retries; ``close`` manually wakes
    blocked tasks (see the hack note above).
    """

    def __init__(self, *, domain=AF_SP, protocol=None, socket_fd=None):
        super().__init__(domain=domain, protocol=protocol, socket_fd=socket_fd)
        # Fields for tracking info for the close-socket hack.
        self.__kernels_fds = []  # Allow duplications.

    async def __aenter__(self):
        return super().__enter__()

    async def __aexit__(self, *exc_info):
        return super().__exit__(*exc_info)  # XXX: Would this block?

    def close(self):
        # HACK: Mark tasks as ready before close the socket.
        for kernel, fd in self.__kernels_fds:
            try:
                key = kernel._selector.get_key(fd)
            except KeyError:
                # fd not (or no longer) registered with this kernel.
                continue
            rtask, wtask = key.data
            _mark_ready(kernel, rtask)
            _mark_ready(kernel, wtask)
            kernel._selector.unregister(fd)
        # Now we may close the socket.
        super().close()

    async def send(self, message, size=None, flags=0):
        """Non-blocking send, retried whenever nn_sndfd signals readiness."""
        return await self.__transmit(
            self.options.nn_sndfd,
            self._send,
            (message, size, flags | NN_DONTWAIT),
        )

    async def recv(self, message=None, size=None, flags=0):
        """Non-blocking recv, retried whenever nn_rcvfd signals readiness."""
        return await self.__transmit(
            self.options.nn_rcvfd,
            self._recv,
            (message, size, flags | NN_DONTWAIT),
        )

    async def sendmsg(self, message, flags=0):
        """Non-blocking sendmsg, retried whenever nn_sndfd signals readiness."""
        return await self.__transmit(
            self.options.nn_sndfd,
            self._sendmsg,
            (message, flags | NN_DONTWAIT),
        )

    async def recvmsg(self, message=None, flags=0):
        """Non-blocking recvmsg, retried whenever nn_rcvfd signals readiness."""
        return await self.__transmit(
            self.options.nn_rcvfd,
            self._recvmsg,
            (message, flags | NN_DONTWAIT),
        )

    async def __transmit(self, eventfd, transmit, args):
        """Retry *transmit* until it succeeds, sleeping on *eventfd* on EAGAIN."""
        while True:
            # It's closed while we were blocked.
            if self.fd is None:
                raise errors.EBADF
            try:
                return transmit(*args)
            except errors.EAGAIN:
                pass
            # Wrap eventfd so that terminate() may find it.
            eventfd = Fd(eventfd)
            pair = (await curio.traps._get_kernel(), eventfd)
            self.__kernels_fds.append(pair)
            try:
                await curio.traps._read_wait(eventfd)
            finally:
                self.__kernels_fds.remove(pair)
# A wrapper class for separating out "our" file descriptors.
class Fd(int):
    """Marker ``int`` subclass tagging event fds registered by this module."""
def _mark_ready(kernel, task):
if task is None:
return
kernel._ready.append(task)
task.next_value = None
task.next_exc = None
task.state = 'READY'
task.cancel_func = None
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/curio.py",
"copies": "1",
"size": "5176",
"license": "mit",
"hash": -28893090306039348,
"line_mean": 26.0994764398,
"line_max": 79,
"alpha_frac": 0.5654945904,
"autogenerated": false,
"ratio": 3.7808619430241053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9846356533424105,
"avg_score": 0,
"num_lines": 191
} |
__all__ = [
'Socket',
]
import asyncio
from . import SocketBase
from . import errors
from .constants import AF_SP, NN_DONTWAIT
class FileDescriptorManager:
    """Reference-count watcher registration for one file descriptor.

    The watcher is added on the first (outermost) ``__enter__`` and
    removed on the last (outermost) ``__exit__``.
    """

    def __init__(self, fd, cb, add_watcher, remove_watcher):
        self.fd = fd
        self.cb = cb
        self.add_watcher = add_watcher
        self.remove_watcher = remove_watcher
        self.num_waiters = 0

    def __enter__(self):
        if not self.num_waiters:
            self.add_watcher(self.fd, self.cb)
        self.num_waiters += 1

    def __exit__(self, *_):
        self.num_waiters -= 1
        if not self.num_waiters:
            self.remove_watcher(self.fd)
class Socket(SocketBase):
    """Asyncio-friendly nanomsg socket.

    Transmits non-blockingly and waits on readiness events driven by the
    loop watching the socket's sndfd/rcvfd.
    """

    def __init__(self, *, domain=AF_SP, protocol=None, socket_fd=None,
                 loop=None):
        super().__init__(domain=domain, protocol=protocol, socket_fd=socket_fd)
        # NOTE(review): ``asyncio.Event(loop=...)`` was removed in Python
        # 3.10 -- this code assumes an older runtime; verify before upgrading.
        self.__sndfd_ready = asyncio.Event(loop=loop)
        self.__rcvfd_ready = asyncio.Event(loop=loop)
        # Get sndfd/rcvfd lazily since not all protocols support both.
        self.__sndfd_manager = None
        self.__rcvfd_manager = None
        self.__loop = loop or asyncio.get_event_loop()

    def close(self):
        if self.fd is None:
            return
        super().close()
        # Wake up all waiters in send() and recv().
        self.__sndfd_ready.set()
        self.__rcvfd_ready.set()

    async def __aenter__(self):
        return super().__enter__()

    async def __aexit__(self, *exc_info):
        return super().__exit__(*exc_info)  # XXX: Would this block?

    async def send(self, message, size=None, flags=0):
        """Send *message*, yielding to the loop until the socket is writable."""
        if self.__sndfd_manager is None:
            self.__sndfd_manager = FileDescriptorManager(
                self.options.nn_sndfd,
                self.__sndfd_ready.set,
                self.__loop.add_reader,
                self.__loop.remove_reader,
            )
        with self.__sndfd_manager:
            return await self.__transmit(
                self.__sndfd_ready,
                self._send,
                (message, size, flags | NN_DONTWAIT),
            )

    async def recv(self, message=None, size=None, flags=0):
        """Receive, yielding to the loop until the socket is readable."""
        if self.__rcvfd_manager is None:
            self.__rcvfd_manager = FileDescriptorManager(
                self.options.nn_rcvfd,
                self.__rcvfd_ready.set,
                self.__loop.add_reader,
                self.__loop.remove_reader,
            )
        with self.__rcvfd_manager:
            return await self.__transmit(
                self.__rcvfd_ready,
                self._recv,
                (message, size, flags | NN_DONTWAIT),
            )

    async def __transmit(self, ready, transmit, args):
        """Retry non-blocking *transmit* until it stops raising EAGAIN."""
        while True:
            await ready.wait()  # Many waiters could be waiting at this point.
            if self.fd is None:
                # It's closed while we were blocked.
                raise errors.EBADF
            try:
                return transmit(*args)
            except errors.EAGAIN:
                pass
            ready.clear()  # Wait for the next readiness event.
| {
"repo_name": "clchiou/garage",
"path": "py/nanomsg/nanomsg/asyncio.py",
"copies": "1",
"size": "3092",
"license": "mit",
"hash": 7563371100689004000,
"line_mean": 30.5510204082,
"line_max": 79,
"alpha_frac": 0.5352522639,
"autogenerated": false,
"ratio": 3.8601747815230962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48954270454230964,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'SocketServer',
]
import errno
import logging
from g1.asyncs.bases import servers
from g1.asyncs.bases import tasks
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class SocketServer:
    """Accept connections on *socket* and spawn *handler* per connection.

    ``handler`` is an async callable taking ``(sock, addr)``.  When
    ``max_connections`` is positive, accepting pauses while that many
    handler tasks are already in flight.
    """

    def __init__(self, socket, handler, max_connections=0):
        self._socket = socket
        self._handler = handler
        self._max_connections = max_connections

    async def serve(self):
        """Run the accept loop until the listen socket is shut down."""
        LOG.info('start server: %r', self._socket)
        with self._socket:
            if self._max_connections <= 0:
                # Non-positive capacity presumably means "unbounded" --
                # see CompletionQueue for the exact semantics.
                capacity = self._max_connections
            else:
                # +1 for the `_accept` task.
                capacity = self._max_connections + 1
            async with tasks.CompletionQueue(capacity) as queue:
                await servers.supervise_server(
                    queue,
                    (queue.spawn(self._accept(queue)), ),
                )
        LOG.info('stop server: %r', self._socket)

    async def _accept(self, queue):
        """Accept sockets forever; exits when the listen socket is closed."""
        while True:
            if queue.is_full():
                LOG.warning(
                    'handler task queue is full; '
                    'we cannot accept any new connections'
                )
                await queue.puttable()
            try:
                sock, addr = await self._socket.accept()
            except OSError as exc:
                if exc.errno == errno.EBADF:
                    # EBADF: shutdown() closed the socket; exit cleanly.
                    LOG.info('server socket close: %r', self._socket)
                    break
                raise
            LOG.debug('serve client: %r', addr)
            queue.spawn(self._handler(sock, addr))

    def shutdown(self):
        """Close the listen socket, unblocking the accept loop."""
        self._socket.close()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/networks/servers/g1/networks/servers/__init__.py",
"copies": "1",
"size": "1686",
"license": "mit",
"hash": 3545811214640131600,
"line_mean": 29.1071428571,
"line_max": 69,
"alpha_frac": 0.5225385528,
"autogenerated": false,
"ratio": 4.4136125654450264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5436151118245026,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'split',
'mkNonce',
'checkTimestamp',
]
from openid import cryptutil
from time import strptime, strftime, gmtime, time
from calendar import timegm
import string
NONCE_CHARS = string.ascii_letters + string.digits
# Keep nonces for five hours (allow five hours for the combination of
# request time and clock skew). This is probably way more than is
# necessary, but there is not much overhead in storing nonces.
SKEW = 60 * 60 * 5
time_fmt = '%Y-%m-%dT%H:%M:%SZ'
time_str_len = len('0000-00-00T00:00:00Z')
def split(nonce_string):
    """Split a nonce into its Unix timestamp and salt parts.

    @param nonce_string: the nonce from which to extract the timestamp
    @type nonce_string: str

    @returns: A pair of a Unix timestamp and the salt characters
    @returntype: (int, str)

    @raises ValueError: if the nonce does not start with a correctly
        formatted time string
    """
    stamp_part = nonce_string[:time_str_len]
    salt_part = nonce_string[time_str_len:]
    try:
        stamp = timegm(strptime(stamp_part, time_fmt))
    except AssertionError:  # Python 2.2
        stamp = -1
    if stamp < 0:
        raise ValueError('time out of range')
    return stamp, salt_part
def checkTimestamp(nonce_string, allowed_skew=SKEW, now=None):
    """Is the timestamp that is part of the specified nonce string
    within the allowed clock-skew of the current time?

    @param nonce_string: The nonce that is being checked
    @type nonce_string: str

    @param allowed_skew: How many seconds should be allowed for
        completing the request, allowing for clock skew.
    @type allowed_skew: int

    @param now: The current time, as a Unix timestamp
    @type now: int

    @returntype: bool
    @returns: Whether the timestamp is correctly formatted and within
        the allowed skew of the current time.
    """
    try:
        stamp, _ = split(nonce_string)
    except ValueError:
        # Malformed timestamp prefix: not acceptable.
        return False
    if now is None:
        now = time()
    # Accept stamps within [now - skew, now + skew].
    return now - allowed_skew <= stamp <= now + allowed_skew
def mkNonce(when=None):
    """Generate a nonce with the current timestamp

    @param when: Unix timestamp representing the issue time of the
        nonce. Defaults to the current time.
    @type when: int

    @returntype: str
    @returns: A string that should be usable as a one-way nonce

    @see: time
    """
    t = gmtime() if when is None else gmtime(when)
    return strftime(time_fmt, t) + cryptutil.randomString(6, NONCE_CHARS)
| {
"repo_name": "harshita-gupta/Harvard-FRSEM-Catalog-2016-17",
"path": "flask/lib/python2.7/site-packages/openid/store/nonce.py",
"copies": "180",
"size": "2843",
"license": "mit",
"hash": 5846635125210840000,
"line_mean": 28.0102040816,
"line_max": 70,
"alpha_frac": 0.6581076328,
"autogenerated": false,
"ratio": 3.943134535367545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012504049238743116,
"num_lines": 98
} |
__all__ = [
'split',
'mkNonce',
'checkTimestamp',
]
from openid import cryptutil
from time import strptime, strftime, gmtime, time
from calendar import timegm
import string
NONCE_CHARS = string.ascii_letters + string.digits
# Keep nonces for five hours (allow five hours for the combination of
# request time and clock skew). This is probably way more than is
# necessary, but there is not much overhead in storing nonces.
SKEW = 60 * 60 * 5
time_fmt = '%Y-%m-%dT%H:%M:%SZ'
time_str_len = len('0000-00-00T00:00:00Z')
def split(nonce_string):
    """Split a nonce into its Unix timestamp and salt parts.

    @param nonce_string: the nonce from which to extract the timestamp
    @type nonce_string: str

    @returns: A pair of a Unix timestamp and the salt characters
    @returntype: (int, str)

    @raises ValueError: if the nonce does not start with a correctly
        formatted time string
    """
    stamp_part = nonce_string[:time_str_len]
    salt_part = nonce_string[time_str_len:]
    try:
        stamp = timegm(strptime(stamp_part, time_fmt))
    except AssertionError:  # Python 2.2
        stamp = -1
    if stamp < 0:
        raise ValueError('time out of range')
    return stamp, salt_part
def checkTimestamp(nonce_string, allowed_skew=SKEW, now=None):
    """Is the timestamp that is part of the specified nonce string
    within the allowed clock-skew of the current time?

    @param nonce_string: The nonce that is being checked
    @type nonce_string: str

    @param allowed_skew: How many seconds should be allowed for
        completing the request, allowing for clock skew.
    @type allowed_skew: int

    @param now: The current time, as a Unix timestamp
    @type now: int

    @returntype: bool
    @returns: Whether the timestamp is correctly formatted and within
        the allowed skew of the current time.
    """
    try:
        stamp, _ = split(nonce_string)
    except ValueError:
        # Malformed timestamp prefix: not acceptable.
        return False
    if now is None:
        now = time()
    # Accept stamps within [now - skew, now + skew].
    return now - allowed_skew <= stamp <= now + allowed_skew
def mkNonce(when=None):
    """Generate a nonce with the current timestamp

    @param when: Unix timestamp representing the issue time of the
        nonce. Defaults to the current time.
    @type when: int

    @returntype: str
    @returns: A string that should be usable as a one-way nonce

    @see: time
    """
    t = gmtime() if when is None else gmtime(when)
    return strftime(time_fmt, t) + cryptutil.randomString(6, NONCE_CHARS)
| {
"repo_name": "arantebillywilson/python-snippets",
"path": "microblog/flask/lib/python3.5/site-packages/openid/store/nonce.py",
"copies": "5",
"size": "2843",
"license": "mit",
"hash": 1085158578616329100,
"line_mean": 27.1485148515,
"line_max": 70,
"alpha_frac": 0.6581076328,
"autogenerated": false,
"ratio": 3.9376731301939056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7095780762993906,
"avg_score": null,
"num_lines": null
} |
__all__ = (
"StatusLine",
)
def prepare_text(text, max_len, elide_string, elide_point):
    """Collapse *text* into one sanitized line of at most *max_len* chars.

    Only the first line is kept and tabs become spaces.  Overlong text is
    elided around *elide_point* (a 0..1 fraction of *max_len*) using
    *elide_string*.
    """
    line = str(text).partition("\n")[0].replace("\t", " ")
    if len(line) <= max_len:
        return line
    lead_len = int(0.5 + elide_point * max_len - len(elide_string))
    tail_len = max_len - lead_len - len(elide_string)
    return "%s%s%s" % (line[:lead_len], elide_string, line[-tail_len:])
class StatusLine(object):
    """Render a single, repeatedly-overwritten status line on *stream*."""

    def __init__(self, stream, line_width=78, elide_string="...", elide_point=0.33):
        self._stream = stream
        self._line_width = line_width
        self._last_len = 0
        self._elide_string = str(elide_string)
        self._elide_point = elide_point

    @property
    def line_width(self):
        """Maximum number of characters rendered per update."""
        return self._line_width

    def clear(self):
        """Blank the line (written twice so the padding itself is erased)."""
        for _ in range(2):
            self.set_text("")

    def set_text(self, new_text):
        """Overwrite the current line, padding over any leftover characters."""
        rendered = prepare_text(
            new_text, self._line_width, self._elide_string, self._elide_point)
        self._stream.write("\r" + rendered)
        padding = self._last_len - len(rendered)
        if padding > 0:
            self._stream.write(" " * padding)
        self._stream.flush()
        self._last_len = len(rendered)
| {
"repo_name": "yellcorp/dupescan",
"path": "dupescan/console.py",
"copies": "1",
"size": "1267",
"license": "mit",
"hash": -7575415903441616000,
"line_mean": 27.1555555556,
"line_max": 94,
"alpha_frac": 0.5414364641,
"autogenerated": false,
"ratio": 3.1994949494949494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42409314135949494,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'StreamReader', 'StreamWriter', 'StreamReaderProtocol',
'open_connection', 'start_server')
import socket
import sys
import warnings
import weakref
if hasattr(socket, 'AF_UNIX'):
__all__ += ('open_unix_connection', 'start_unix_server')
from . import coroutines
from . import events
from . import exceptions
from . import format_helpers
from . import protocols
from .log import logger
from .tasks import sleep
_DEFAULT_LIMIT = 2 ** 16 # 64 KiB
async def open_connection(host=None, port=None, *,
                          loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """A wrapper for create_connection() returning a (reader, writer) pair.

    The reader returned is a StreamReader instance; the writer is a
    StreamWriter instance.

    The arguments are all the usual arguments to create_connection()
    except protocol_factory; most common are positional host and port,
    with various optional keyword arguments following.

    Additional optional keyword arguments are loop (to set the event loop
    instance to use) and limit (to set the buffer limit passed to the
    StreamReader).

    (If you want to customize the StreamReader and/or
    StreamReaderProtocol classes, just copy the code -- there's
    really nothing special here except some convenience.)
    """
    if loop is None:
        loop = events.get_event_loop()
    else:
        # Explicit loop is still honored for backward compatibility.
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)
    reader = StreamReader(limit=limit, loop=loop)
    protocol = StreamReaderProtocol(reader, loop=loop)
    # The protocol feeds received data into the reader; the writer wraps
    # the transport for the outgoing direction.
    transport, _ = await loop.create_connection(
        lambda: protocol, host, port, **kwds)
    writer = StreamWriter(transport, protocol, reader, loop)
    return reader, writer
async def start_server(client_connected_cb, host=None, port=None, *,
                       loop=None, limit=_DEFAULT_LIMIT, **kwds):
    """Start a socket server, call back for each client connected.

    The first parameter, `client_connected_cb`, takes two parameters:
    client_reader, client_writer.  client_reader is a StreamReader
    object, while client_writer is a StreamWriter object.  This
    parameter can either be a plain callback function or a coroutine;
    if it is a coroutine, it will be automatically converted into a
    Task.

    The rest of the arguments are all the usual arguments to
    loop.create_server() except protocol_factory; most common are
    positional host and port, with various optional keyword arguments
    following.  The return value is the same as loop.create_server().

    Additional optional keyword arguments are loop (to set the event loop
    instance to use) and limit (to set the buffer limit passed to the
    StreamReader).

    The return value is the same as loop.create_server(), i.e. a
    Server object which can be used to stop the service.
    """
    if loop is None:
        loop = events.get_event_loop()
    else:
        # Explicit loop is still honored for backward compatibility.
        warnings.warn("The loop argument is deprecated since Python 3.8, "
                      "and scheduled for removal in Python 3.10.",
                      DeprecationWarning, stacklevel=2)

    def factory():
        # Each accepted connection gets its own reader/protocol pair.
        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, client_connected_cb,
                                        loop=loop)
        return protocol

    return await loop.create_server(factory, host, port, **kwds)
if hasattr(socket, 'AF_UNIX'):
    # UNIX Domain Sockets are supported on this platform

    async def open_unix_connection(path=None, *,
                                   loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `open_connection` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()
        else:
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning, stacklevel=2)
        reader = StreamReader(limit=limit, loop=loop)
        protocol = StreamReaderProtocol(reader, loop=loop)
        transport, _ = await loop.create_unix_connection(
            lambda: protocol, path, **kwds)
        writer = StreamWriter(transport, protocol, reader, loop)
        return reader, writer

    async def start_unix_server(client_connected_cb, path=None, *,
                                loop=None, limit=_DEFAULT_LIMIT, **kwds):
        """Similar to `start_server` but works with UNIX Domain Sockets."""
        if loop is None:
            loop = events.get_event_loop()
        else:
            warnings.warn("The loop argument is deprecated since Python 3.8, "
                          "and scheduled for removal in Python 3.10.",
                          DeprecationWarning, stacklevel=2)

        def factory():
            # Each accepted connection gets its own reader/protocol pair.
            reader = StreamReader(limit=limit, loop=loop)
            protocol = StreamReaderProtocol(reader, client_connected_cb,
                                            loop=loop)
            return protocol

        return await loop.create_unix_server(factory, path, **kwds)
class FlowControlMixin(protocols.Protocol):
    """Reusable flow control logic for StreamWriter.drain().

    This implements the protocol methods pause_writing(),
    resume_writing() and connection_lost().  If the subclass overrides
    these it must call the super methods.

    StreamWriter.drain() must wait for _drain_helper() coroutine.
    """

    def __init__(self, loop=None):
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._paused = False
        # Future resolved by resume_writing()/connection_lost() to wake
        # a drain() caller blocked in _drain_helper().
        self._drain_waiter = None
        self._connection_lost = False

    def pause_writing(self):
        """Transport callback: the write buffer passed the high-water mark."""
        assert not self._paused
        self._paused = True
        if self._loop.get_debug():
            logger.debug("%r pauses writing", self)

    def resume_writing(self):
        """Transport callback: writing may resume; wakes the drain waiter."""
        assert self._paused
        self._paused = False
        if self._loop.get_debug():
            logger.debug("%r resumes writing", self)
        waiter = self._drain_waiter
        if waiter is not None:
            self._drain_waiter = None
            if not waiter.done():
                waiter.set_result(None)

    def connection_lost(self, exc):
        """Wake any paused drain waiter with *exc* (or success) on disconnect."""
        self._connection_lost = True
        # Wake up the writer if currently paused.
        if not self._paused:
            return
        waiter = self._drain_waiter
        if waiter is None:
            return
        self._drain_waiter = None
        if waiter.done():
            return
        if exc is None:
            waiter.set_result(None)
        else:
            waiter.set_exception(exc)

    async def _drain_helper(self):
        """Block until writing is resumed; raise if the connection was lost."""
        if self._connection_lost:
            raise ConnectionResetError('Connection lost')
        if not self._paused:
            return
        waiter = self._drain_waiter
        assert waiter is None or waiter.cancelled()
        waiter = self._loop.create_future()
        self._drain_waiter = waiter
        await waiter

    def _get_close_waiter(self, stream):
        # Subclasses must provide the future awaited by wait_closed().
        raise NotImplementedError
class StreamReaderProtocol(FlowControlMixin, protocols.Protocol):
    """Helper class to adapt between Protocol and StreamReader.
    (This is a helper class instead of making StreamReader itself a
    Protocol subclass, because the StreamReader has other potential
    uses, and to prevent the user of the StreamReader to accidentally
    call inappropriate methods of the protocol.)
    """
    _source_traceback = None
    def __init__(self, stream_reader, client_connected_cb=None, loop=None):
        super().__init__(loop=loop)
        if stream_reader is not None:
            # Hold the reader only weakly so an abandoned stream can be
            # garbage collected; _on_reader_gc then aborts the transport.
            self._stream_reader_wr = weakref.ref(stream_reader,
                                                 self._on_reader_gc)
            self._source_traceback = stream_reader._source_traceback
        else:
            self._stream_reader_wr = None
        if client_connected_cb is not None:
            # This is a stream created by the `create_server()` function.
            # Keep a strong reference to the reader until a connection
            # is established.
            self._strong_reader = stream_reader
        self._reject_connection = False
        self._stream_writer = None
        self._transport = None
        self._client_connected_cb = client_connected_cb
        self._over_ssl = False
        self._closed = self._loop.create_future()
    def _on_reader_gc(self, wr):
        # Weakref callback: the StreamReader was garbage collected while
        # this protocol is still alive.
        transport = self._transport
        if transport is not None:
            # connection_made was called
            context = {
                'message': ('An open stream object is being garbage '
                            'collected; call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
        else:
            # Not connected yet: refuse the connection when it arrives.
            self._reject_connection = True
        self._stream_reader_wr = None
    @property
    def _stream_reader(self):
        # Dereference the weakref; None once the reader was collected.
        if self._stream_reader_wr is None:
            return None
        return self._stream_reader_wr()
    def connection_made(self, transport):
        if self._reject_connection:
            # The reader was collected before the connection came up.
            context = {
                'message': ('An open stream was garbage collected prior to '
                            'establishing network connection; '
                            'call "stream.close()" explicitly.')
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
            transport.abort()
            return
        self._transport = transport
        reader = self._stream_reader
        if reader is not None:
            reader.set_transport(transport)
        self._over_ssl = transport.get_extra_info('sslcontext') is not None
        if self._client_connected_cb is not None:
            self._stream_writer = StreamWriter(transport, self,
                                               reader,
                                               self._loop)
            # The callback may be a plain function or a coroutine function;
            # schedule the latter as a task.
            res = self._client_connected_cb(reader,
                                            self._stream_writer)
            if coroutines.iscoroutine(res):
                self._loop.create_task(res)
            # Connection established: the reader's lifetime is now in the
            # hands of user code again, so drop the strong reference.
            self._strong_reader = None
    def connection_lost(self, exc):
        reader = self._stream_reader
        if reader is not None:
            if exc is None:
                reader.feed_eof()
            else:
                reader.set_exception(exc)
        if not self._closed.done():
            if exc is None:
                self._closed.set_result(None)
            else:
                self._closed.set_exception(exc)
        super().connection_lost(exc)
        # Break reference cycles with the transport and writer.
        self._stream_reader_wr = None
        self._stream_writer = None
        self._transport = None
    def data_received(self, data):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_data(data)
    def eof_received(self):
        reader = self._stream_reader
        if reader is not None:
            reader.feed_eof()
        if self._over_ssl:
            # Prevent a warning in SSLProtocol.eof_received:
            # "returning true from eof_received()
            # has no effect when using ssl"
            return False
        return True
    def _get_close_waiter(self, stream):
        return self._closed
    def __del__(self):
        # Prevent reports about unhandled exceptions.
        # Better than self._closed._log_traceback = False hack
        closed = self._closed
        if closed.done() and not closed.cancelled():
            closed.exception()
class StreamWriter:
    """Thin wrapper around a Transport for the streams API.

    Exposes write(), writelines(), [can_]write_eof(), get_extra_info()
    and close(); adds drain(), which lets callers cooperate with flow
    control, and a ``transport`` property referencing the underlying
    Transport directly.
    """

    def __init__(self, transport, protocol, reader, loop):
        self._transport = transport
        self._protocol = protocol
        # drain() expects that the reader has an exception() method
        assert reader is None or isinstance(reader, StreamReader)
        self._reader = reader
        self._loop = loop
        self._complete_fut = self._loop.create_future()
        self._complete_fut.set_result(None)

    def __repr__(self):
        parts = [self.__class__.__name__, f'transport={self._transport!r}']
        if self._reader is not None:
            parts.append(f'reader={self._reader!r}')
        return f"<{' '.join(parts)}>"

    @property
    def transport(self):
        return self._transport

    def write(self, data):
        self._transport.write(data)

    def writelines(self, data):
        self._transport.writelines(data)

    def can_write_eof(self):
        return self._transport.can_write_eof()

    def write_eof(self):
        return self._transport.write_eof()

    def close(self):
        return self._transport.close()

    def is_closing(self):
        return self._transport.is_closing()

    async def wait_closed(self):
        # Blocks until the protocol reports connection_lost().
        await self._protocol._get_close_waiter(self)

    def get_extra_info(self, name, default=None):
        return self._transport.get_extra_info(name, default)

    async def drain(self):
        """Flush the write buffer.
        The intended use is to write
        w.write(data)
        await w.drain()
        """
        if self._reader is not None:
            pending_exc = self._reader.exception()
            if pending_exc is not None:
                raise pending_exc
        if self._transport.is_closing():
            # Wait for protocol.connection_lost() call
            # Raise connection closing error if any,
            # ConnectionResetError otherwise
            # Yield to the event loop so connection_lost() may be
            # called. Without this, _drain_helper() would return
            # immediately, and code that calls
            #     write(...); await drain()
            # in a loop would never call connection_lost(), so it
            # would not see an error when the socket is closed.
            await sleep(0)
        await self._protocol._drain_helper()
class StreamReader:
    """Buffered byte-stream reader, fed by a protocol via feed_data()."""
    _source_traceback = None
    def __init__(self, limit=_DEFAULT_LIMIT, loop=None):
        # The line length limit is a security feature;
        # it also doubles as half the buffer limit.
        if limit <= 0:
            raise ValueError('Limit cannot be <= 0')
        self._limit = limit
        if loop is None:
            self._loop = events.get_event_loop()
        else:
            self._loop = loop
        self._buffer = bytearray()
        self._eof = False  # Whether we're done.
        self._waiter = None  # A future used by _wait_for_data()
        self._exception = None
        self._transport = None
        self._paused = False
        if self._loop.get_debug():
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
    def __repr__(self):
        info = ['StreamReader']
        if self._buffer:
            info.append(f'{len(self._buffer)} bytes')
        if self._eof:
            info.append('eof')
        if self._limit != _DEFAULT_LIMIT:
            info.append(f'limit={self._limit}')
        if self._waiter:
            info.append(f'waiter={self._waiter!r}')
        if self._exception:
            info.append(f'exception={self._exception!r}')
        if self._transport:
            info.append(f'transport={self._transport!r}')
        if self._paused:
            info.append('paused')
        return '<{}>'.format(' '.join(info))
    def exception(self):
        return self._exception
    def set_exception(self, exc):
        # Store the exception and wake the pending read, which will
        # re-raise it.
        self._exception = exc
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_exception(exc)
    def _wakeup_waiter(self):
        """Wakeup read*() functions waiting for data or EOF."""
        waiter = self._waiter
        if waiter is not None:
            self._waiter = None
            if not waiter.cancelled():
                waiter.set_result(None)
    def set_transport(self, transport):
        assert self._transport is None, 'Transport already set'
        self._transport = transport
    def _maybe_resume_transport(self):
        # Resume reading once the buffer has drained back under the limit.
        if self._paused and len(self._buffer) <= self._limit:
            self._paused = False
            self._transport.resume_reading()
    def feed_eof(self):
        self._eof = True
        self._wakeup_waiter()
    def at_eof(self):
        """Return True if the buffer is empty and 'feed_eof' was called."""
        return self._eof and not self._buffer
    def feed_data(self, data):
        assert not self._eof, 'feed_data after feed_eof'
        if not data:
            return
        self._buffer.extend(data)
        self._wakeup_waiter()
        # Apply backpressure: pause the transport once the buffer grows
        # past twice the limit.
        if (self._transport is not None and
                not self._paused and
                len(self._buffer) > 2 * self._limit):
            try:
                self._transport.pause_reading()
            except NotImplementedError:
                # The transport can't be paused.
                # We'll just have to buffer all data.
                # Forget the transport so we don't keep trying.
                self._transport = None
            else:
                self._paused = True
    async def _wait_for_data(self, func_name):
        """Wait until feed_data() or feed_eof() is called.
        If stream was paused, automatically resume it.
        """
        # StreamReader uses a future to link the protocol feed_data() method
        # to a read coroutine. Running two read coroutines at the same time
        # would have an unexpected behaviour. It would not be possible to know
        # which coroutine would get the next data.
        if self._waiter is not None:
            raise RuntimeError(
                f'{func_name}() called while another coroutine is '
                f'already waiting for incoming data')
        assert not self._eof, '_wait_for_data after EOF'
        # Waiting for data while paused would deadlock, so prevent it.
        # This is essential for readexactly(n) for case when n > self._limit.
        if self._paused:
            self._paused = False
            self._transport.resume_reading()
        self._waiter = self._loop.create_future()
        try:
            await self._waiter
        finally:
            self._waiter = None
    async def readline(self):
        """Read chunk of data from the stream until newline (b'\n') is found.
        On success, return chunk that ends with newline. If only partial
        line can be read due to EOF, return incomplete line without
        terminating newline. When EOF was reached while no bytes read, empty
        bytes object is returned.
        If limit is reached, ValueError will be raised. In that case, if
        newline was found, complete line including newline will be removed
        from internal buffer. Else, internal buffer will be cleared. Limit is
        compared against part of the line without newline.
        If stream was paused, this function will automatically resume it if
        needed.
        """
        sep = b'\n'
        seplen = len(sep)
        try:
            line = await self.readuntil(sep)
        except exceptions.IncompleteReadError as e:
            # EOF before a newline: return whatever was read.
            return e.partial
        except exceptions.LimitOverrunError as e:
            # Discard the over-long line (through the newline if present)
            # before reporting the overrun as a ValueError.
            if self._buffer.startswith(sep, e.consumed):
                del self._buffer[:e.consumed + seplen]
            else:
                self._buffer.clear()
            self._maybe_resume_transport()
            raise ValueError(e.args[0])
        return line
    async def readuntil(self, separator=b'\n'):
        """Read data from the stream until ``separator`` is found.
        On success, the data and separator will be removed from the
        internal buffer (consumed). Returned data will include the
        separator at the end.
        Configured stream limit is used to check result. Limit sets the
        maximal length of data that can be returned, not counting the
        separator.
        If an EOF occurs and the complete separator is still not found,
        an IncompleteReadError exception will be raised, and the internal
        buffer will be reset. The IncompleteReadError.partial attribute
        may contain the separator partially.
        If the data cannot be read because of over limit, a
        LimitOverrunError exception will be raised, and the data
        will be left in the internal buffer, so it can be read again.
        """
        seplen = len(separator)
        if seplen == 0:
            raise ValueError('Separator should be at least one-byte string')
        if self._exception is not None:
            raise self._exception
        # Consume whole buffer except last bytes, which length is
        # one less than seplen. Let's check corner cases with
        # separator='SEPARATOR':
        # * we have received almost complete separator (without last
        #   byte). i.e buffer='some textSEPARATO'. In this case we
        #   can safely consume len(separator) - 1 bytes.
        # * last byte of buffer is first byte of separator, i.e.
        #   buffer='abcdefghijklmnopqrS'. We may safely consume
        #   everything except that last byte, but this require to
        #   analyze bytes of buffer that match partial separator.
        #   This is slow and/or require FSM. For this case our
        #   implementation is not optimal, since require rescanning
        #   of data that is known to not belong to separator. In
        #   real world, separator will not be so long to notice
        #   performance problems. Even when reading MIME-encoded
        #   messages :)
        # `offset` is the number of bytes from the beginning of the buffer
        # where there is no occurrence of `separator`.
        offset = 0
        # Loop until we find `separator` in the buffer, exceed the buffer size,
        # or an EOF has happened.
        while True:
            buflen = len(self._buffer)
            # Check if we now have enough data in the buffer for `separator` to
            # fit.
            if buflen - offset >= seplen:
                isep = self._buffer.find(separator, offset)
                if isep != -1:
                    # `separator` is in the buffer. `isep` will be used later
                    # to retrieve the data.
                    break
                # see upper comment for explanation.
                offset = buflen + 1 - seplen
                if offset > self._limit:
                    raise exceptions.LimitOverrunError(
                        'Separator is not found, and chunk exceed the limit',
                        offset)
            # Complete message (with full separator) may be present in buffer
            # even when EOF flag is set. This may happen when the last chunk
            # adds data which makes separator be found. That's why we check for
            # EOF *after* inspecting the buffer.
            if self._eof:
                chunk = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(chunk, None)
            # _wait_for_data() will resume reading if stream was paused.
            await self._wait_for_data('readuntil')
        if isep > self._limit:
            raise exceptions.LimitOverrunError(
                'Separator is found, but chunk is longer than limit', isep)
        chunk = self._buffer[:isep + seplen]
        del self._buffer[:isep + seplen]
        self._maybe_resume_transport()
        return bytes(chunk)
    async def read(self, n=-1):
        """Read up to `n` bytes from the stream.
        If n is not provided, or set to -1, read until EOF and return all read
        bytes. If the EOF was received and the internal buffer is empty, return
        an empty bytes object.
        If n is zero, return empty bytes object immediately.
        If n is positive, this function try to read `n` bytes, and may return
        less or equal bytes than requested, but at least one byte. If EOF was
        received before any byte is read, this function returns empty byte
        object.
        Returned value is not limited with limit, configured at stream
        creation.
        If stream was paused, this function will automatically resume it if
        needed.
        """
        if self._exception is not None:
            raise self._exception
        if n == 0:
            return b''
        if n < 0:
            # This used to just loop creating a new waiter hoping to
            # collect everything in self._buffer, but that would
            # deadlock if the subprocess sends more than self.limit
            # bytes. So just call self.read(self._limit) until EOF.
            blocks = []
            while True:
                block = await self.read(self._limit)
                if not block:
                    break
                blocks.append(block)
            return b''.join(blocks)
        if not self._buffer and not self._eof:
            await self._wait_for_data('read')
        # This will work right even if buffer is less than n bytes
        data = bytes(self._buffer[:n])
        del self._buffer[:n]
        self._maybe_resume_transport()
        return data
    async def readexactly(self, n):
        """Read exactly `n` bytes.
        Raise an IncompleteReadError if EOF is reached before `n` bytes can be
        read. The IncompleteReadError.partial attribute of the exception will
        contain the partial read bytes.
        if n is zero, return empty bytes object.
        Returned value is not limited with limit, configured at stream
        creation.
        If stream was paused, this function will automatically resume it if
        needed.
        """
        if n < 0:
            raise ValueError('readexactly size can not be less than zero')
        if self._exception is not None:
            raise self._exception
        if n == 0:
            return b''
        while len(self._buffer) < n:
            if self._eof:
                incomplete = bytes(self._buffer)
                self._buffer.clear()
                raise exceptions.IncompleteReadError(incomplete, n)
            await self._wait_for_data('readexactly')
        # Fast path: hand over the whole buffer when it is exactly n bytes.
        if len(self._buffer) == n:
            data = bytes(self._buffer)
            self._buffer.clear()
        else:
            data = bytes(self._buffer[:n])
            del self._buffer[:n]
        self._maybe_resume_transport()
        return data
    def __aiter__(self):
        return self
    async def __anext__(self):
        # Async iteration yields one line at a time until EOF.
        val = await self.readline()
        if val == b'':
            raise StopAsyncIteration
        return val
| {
"repo_name": "batermj/algorithm-challenger",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/asyncio/streams.py",
"copies": "1",
"size": "27342",
"license": "apache-2.0",
"hash": -1643118778630089000,
"line_mean": 35.0712401055,
"line_max": 79,
"alpha_frac": 0.5889839807,
"autogenerated": false,
"ratio": 4.582202111613876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 758
} |
__all__ = [
'Subscriber',
]
import logging
import nng
import nng.asyncs
from g1.asyncs.bases import queues
from g1.bases import classes
LOG = logging.getLogger(__name__)
class Subscriber:
    """Receive raw messages from a SUB socket, decode them, and push them
    onto a queue.
    """

    def __init__(self, message_type, queue, wiredata, *, drop_when_full=True):
        self._message_type = message_type
        self._queue = queue
        self._wiredata = wiredata
        self._drop_when_full = drop_when_full
        # For convenience, create socket before ``__enter__``.
        self.socket = nng.asyncs.Socket(nng.Protocols.SUB0)
        # For now we subscribe to empty topic.
        self.socket.subscribe(b'')

    __repr__ = classes.make_repr('{self.socket!r}')

    def __enter__(self):
        self.socket.__enter__()
        return self

    def __exit__(self, exc_type, *args):
        # Close the queue first so undelivered messages can be reported.
        undelivered = self._queue.close(graceful=not exc_type)
        if undelivered:
            LOG.warning('drop %d messages', len(undelivered))
        return self.socket.__exit__(exc_type, *args)

    async def serve(self):
        """Pump messages from the socket into the queue until either side
        is closed.
        """
        LOG.info('start subscriber: %r', self)
        try:
            while True:
                try:
                    raw = await self.socket.recv()
                except nng.Errors.ETIMEDOUT:
                    LOG.warning('recv timeout')
                    continue
                try:
                    message = self._wiredata.to_upper(self._message_type, raw)
                except Exception:
                    # Malformed wire data: log it and keep serving.
                    LOG.warning(
                        'to_upper error: %r', raw, exc_info=True
                    )
                    continue
                if not self._drop_when_full:
                    await self._queue.put(message)
                else:
                    try:
                        self._queue.put_nonblocking(message)
                    except queues.Full:
                        LOG.warning('queue full; drop message: %r', message)
        except (queues.Closed, nng.Errors.ECLOSED):
            # Normal shutdown path: the queue or the socket was closed.
            pass
        self._queue.close()
        LOG.info('stop subscriber: %r', self)

    def shutdown(self):
        self.socket.close()
| {
"repo_name": "clchiou/garage",
"path": "py/g1/messaging/g1/messaging/pubsub/subscribers.py",
"copies": "1",
"size": "2171",
"license": "mit",
"hash": 4826260640066627000,
"line_mean": 29.5774647887,
"line_max": 78,
"alpha_frac": 0.513127591,
"autogenerated": false,
"ratio": 4.215533980582524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228661571582525,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'supervisor',
]
import logging
from concurrent import futures
from garage.assertions import ASSERT
from garage.threads import actors
LOG = logging.getLogger(__name__)
@actors.OneShotActor.from_func
def supervisor(num_actors, start_new_actor):
    """A supervisor will always keep num_actors long-running actors
    alive at any time; however, if half of actors died, it dies, too.
    """
    # TODO: Implement more re-start/exit strategy.
    ASSERT.greater(num_actors, 0)
    LOG.info('start')
    stubs = {}  # Maps each actor's future to its stub.
    target = num_actors
    threshold = max(1, num_actors // 2)
    num_crashed = 0
    while target > 0 and num_crashed < threshold:
        # Top up the pool until `target` actors are being supervised.
        while len(stubs) < target:
            stub = start_new_actor()
            stubs[stub._get_future()] = stub
            LOG.info('supervise actor %s', stub._name)
        completed = futures.wait(
            stubs,
            return_when=futures.FIRST_COMPLETED,
        ).done
        for future in completed:
            stub = stubs.pop(future)
            try:
                future.result()
            except Exception:
                # If actor raises, say, SystemExit, supervisor will not
                # capture it (and will exit).
                LOG.warning(
                    'actor has crashed: %s',
                    stub._name, exc_info=True,
                )
                num_crashed += 1
            else:
                # A clean exit shrinks the target rather than re-spawning.
                LOG.debug('actor exited normally: %s', stub._name)
                target -= 1
    if num_crashed >= threshold:
        raise RuntimeError(
            'actors have crashed: %d >= %d' % (num_crashed, threshold))
    LOG.info('exit')
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/threads/supervisors.py",
"copies": "1",
"size": "1909",
"license": "mit",
"hash": -3202491229331243000,
"line_mean": 30.2950819672,
"line_max": 78,
"alpha_frac": 0.5683603981,
"autogenerated": false,
"ratio": 4.0020964360587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50704568341587,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'TableToTimeGrid',
'ReverseImageDataAxii',
'TranslateGridOrigin',
]
__displayname__ = 'Transform'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .. import _helpers, interface
from ..base import FilterBase
###############################################################################
class TableToTimeGrid(FilterBase):
    """A filter to convert a static (no time variance) table to a time varying
    grid. This effectively reshapes a table full of data arrays as a 4D array
    that is placed onto the CellData of a ``vtkImageData`` object.
    """

    __displayname__ = 'Table To Time Grid'
    __category__ = 'filter'

    def __init__(
        self,
        extent=(10, 10, 10, 1),
        order='C',
        spacing=(1.0, 1.0, 1.0),
        origin=(0.0, 0.0, 0.0),
        dims=(0, 1, 2, 3),
        dt=1.0,
        points=False,
        **kwargs
    ):
        """Initialize the filter.

        Args:
            extent (tuple): 4D shape ``(nx, ny, nz, nt)`` used to reshape
                each input table column.
            order (str): NumPy unpacking order, ``'C'`` or ``'F'``.
            spacing (tuple): output image data spacing ``(dx, dy, dz)``.
            origin (tuple): output image data origin ``(x0, y0, z0)``.
            dims (tuple): axis indices mapping (x, y, z, time) onto the
                axes of the reshaped 4D array.
            dt (float): time step in seconds between grid snapshots.
            points (bool): place arrays on PointData when True, otherwise
                on CellData.
        """
        FilterBase.__init__(
            self,
            nInputPorts=1,
            nOutputPorts=1,
            inputType='vtkTable',
            outputType='vtkImageData',
            **kwargs
        )
        if len(extent) != 4:
            raise _helpers.PVGeoError('`extent` must be of length 4.')
        self.__extent = list(extent)
        # These are indexes for the filter to use on the reshape.
        # NOTE: self.__dims[0] is the x axis index, etc., self.__dims[3]
        # is the time axis.
        self.__dims = list(dims)
        self.__spacing = list(spacing)  # image data spacing
        self.__origin = list(origin)  # image data origin
        # BUG FIX: keep the unpacking order as a plain string ('C' or 'F').
        # Previously this was ``list(order)``, which produced e.g. ``['C']``;
        # ``np.reshape`` rejects a list for its ``order`` argument, and the
        # inequality check in ``set_order`` could never settle.
        self.__order = order  # unpacking order: 'C' or 'F'
        # Cache of the reshaped arrays so the entire filter does not execute
        # on every time step. ``__data`` maps array name to a 4D array in
        # (nx, ny, nz, nt) shape.
        self.__data = None
        self.__needToRun = True
        self.__timesteps = None
        self.__dt = dt
        # Optional parameter to switch between cell and point data
        self.__usePointData = points
        self.__needToUpdateOutput = True

    def _set_data(self, table):
        """Internal helper to restructure the input table arrays into the
        cached dictionary of 4D arrays."""
        self.__data = dict()
        dims = np.array(self.__dims)
        sd = dims.argsort()
        df = interface.table_to_data_frame(table)
        keys = df.keys().tolist()
        for k in keys:
            # Perform the reshape properly, using the user given extent.
            arr = np.reshape(df[k].values, self.__extent, order=self.__order)
            # Now order correctly for the image data spatial reference;
            # this uses the user specified dimension definitions.
            for i in range(4):
                arr = np.moveaxis(arr, sd[i], dims[i])
            # Now add to the dictionary
            self.__data[k] = arr
        self.__needToRun = False
        return

    def _build_image_data(self, img):
        """Internal helper to construct the output ``vtkImageData``."""
        if self.__needToUpdateOutput:
            # Clean out the output data object
            img.DeepCopy(vtk.vtkImageData())
            self.__needToUpdateOutput = False
        ext = self.__extent
        dims = self.__dims
        nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
        if not self.__usePointData:
            # Cell data needs one extra node along each axis.
            nx += 1
            ny += 1
            nz += 1
        sx, sy, sz = self.__spacing[0], self.__spacing[1], self.__spacing[2]
        ox, oy, oz = self.__origin[0], self.__origin[1], self.__origin[2]
        img.SetDimensions(nx, ny, nz)
        img.SetSpacing(sx, sy, sz)
        img.SetOrigin(ox, oy, oz)
        return img

    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        nt = self.__extent[self.__dims[3]]
        if nt > 1:
            self.__timesteps = _helpers.update_time_steps(self, nt, self.__dt)
        return 1

    #### Algorithm Methods ####

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        table = self.GetInputData(inInfo, 0, 0)
        img = self.GetOutputData(outInfo, 0)
        self._build_image_data(img)
        # Perform task: (re)build the cached 4D arrays only when needed.
        if self.__needToRun:
            self._set_data(table)
        # Get requested time index
        i = _helpers.get_requested_time(self, outInfo)
        for k, arr in self.__data.items():
            # NOTE: Keep order='F' because of the way the grid is already reshaped
            # the 3D array has XYZ structure so VTK requires F ordering
            narr = interface.convert_array(arr[:, :, :, i].flatten(order='F'), name=k)
            if self.__usePointData:
                img.GetPointData().AddArray(narr)
            else:
                img.GetCellData().AddArray(narr)
        return 1

    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to set whole output extent."""
        # Setup the ImageData
        ext = self.__extent
        dims = self.__dims
        nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
        if self.__usePointData:
            ext = [0, nx - 1, 0, ny - 1, 0, nz - 1]
        else:
            ext = [0, nx, 0, ny, 0, nz]
        info = outInfo.GetInformationObject(0)
        # Set WHOLE_EXTENT: This is absolutely necessary
        info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
        # Now set the number of timesteps:
        self._update_time_steps()
        return 1

    #### Setters / Getters ####

    def Modified(self, run_again=True):
        """Call modified if the filter needs to run again"""
        if run_again:
            self.__needToRun = run_again
            self.__needToUpdateOutput = True
        FilterBase.Modified(self)

    def modified(self, run_again=True):
        """Call modified if the filter needs to run again"""
        return self.Modified(run_again=run_again)

    def set_extent(self, nx, ny, nz, nt):
        """Set the extent of the output grid"""
        if self.__extent != [nx, ny, nz, nt]:
            self.__extent = [nx, ny, nz, nt]
            self.Modified()

    def set_dimensions(self, x, y, z, t):
        """Set the dimensions of the output grid"""
        if self.__dims != [x, y, z, t]:
            self.__dims = [x, y, z, t]
            self.Modified()

    def set_spacing(self, dx, dy, dz):
        """Set the spacing for the points along each axial direction"""
        if self.__spacing != [dx, dy, dz]:
            self.__spacing = [dx, dy, dz]
            self.Modified()

    def set_origin(self, x0, y0, z0):
        """Set the origin of the output `vtkImageData`"""
        if self.__origin != [x0, y0, z0]:
            self.__origin = [x0, y0, z0]
            self.Modified()

    def set_order(self, order):
        """Set the reshape order (`'C'` or `'F'`)"""
        if self.__order != order:
            self.__order = order
            self.Modified(run_again=True)

    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps on the pipeline."""
        return self.__timesteps.tolist() if self.__timesteps is not None else None

    def set_time_delta(self, dt):
        """An advanced property to set the time step in seconds."""
        if dt != self.__dt:
            self.__dt = dt
            self.Modified()

    def set_use_points(self, flag):
        """Set whether or not to place the data on the nodes/cells of the grid.
        True places data on nodes, false places data at cell centers (CellData).
        In ParaView, switching can be a bit buggy: be sure to turn the visibility
        of this data object OFF on the pipeline when changing between nodes/cells.
        """
        if self.__usePointData != flag:
            self.__usePointData = flag
            self.Modified(run_again=True)
###############################################################################
class ReverseImageDataAxii(FilterBase):
    """This filter will flip ``vtkImageData`` on any of the three cartesian axii.
    A checkbox is provided for each axis on which you may desire to flip the data.
    """

    __displayname__ = 'Reverse Image Data Axii'
    __category__ = 'filter'

    def __init__(self, axes=(True, True, True)):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkImageData',
            nOutputPorts=1,
            outputType='vtkImageData',
        )
        self.__axes = list(axes[::-1])  # Z Y X (FORTRAN)

    def _reverse_grid_axes(self, idi, ido):
        """Internal helper to reverse data along the requested axes."""
        # Copy the geometry from input to output (the input itself is
        # never modified).
        ido.SetOrigin(*idi.GetOrigin())
        ido.SetSpacing(*idi.GetSpacing())
        ext = idi.GetExtent()
        nx, ny, nz = ext[1] + 1, ext[3] + 1, ext[5] + 1
        ido.SetDimensions(nx, ny, nz)
        widi = dsa.WrapDataObject(idi)

        def flipped(values, shape):
            # Reshape to (z, y, x), then flip every enabled axis.
            values = np.reshape(values, shape)
            for axis, enabled in enumerate(self.__axes):
                if enabled:
                    values = np.flip(values, axis=axis)
            return values.flatten()

        # Flip every array living on the points.
        for j in range(idi.GetPointData().GetNumberOfArrays()):
            arr = flipped(widi.PointData[j], (nz, ny, nx))
            data = interface.convert_array(
                arr, name=idi.GetPointData().GetArrayName(j)
            )
            ido.GetPointData().AddArray(data)
        # Flip every array living on the cells (one fewer along each axis).
        for j in range(idi.GetCellData().GetNumberOfArrays()):
            arr = flipped(widi.CellData[j], (nz - 1, ny - 1, nx - 1))
            data = interface.convert_array(
                arr, name=idi.GetCellData().GetArrayName(j)
            )
            ido.GetCellData().AddArray(data)
        return ido

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output."""
        source = self.GetInputData(inInfo, 0, 0)
        output = self.GetOutputData(outInfo, 0)
        self._reverse_grid_axes(source, output)
        return 1

    #### Setters and Getters ####

    def _set_axis_flag(self, index, flag):
        # ``__axes`` is stored Z, Y, X (FORTRAN ordering).
        if self.__axes[index] != flag:
            self.__axes[index] = flag
            self.Modified()

    def set_flip_x(self, flag):
        """Set the filter to flip the input data along the X-axis"""
        self._set_axis_flag(2, flag)

    def set_flip_y(self, flag):
        """Set the filter to flip the input data along the Y-axis"""
        self._set_axis_flag(1, flag)

    def set_flip_z(self, flag):
        """Set the filter to flip the input data along the Z-axis"""
        self._set_axis_flag(0, flag)
###############################################################################
# ---- Translate Grid Origin ----#
class TranslateGridOrigin(FilterBase):
    """This filter will translate the origin of `vtkImageData` to any specified
    Corner of the data set assuming it is currently in the South West Bottom
    Corner (will not work if Corner was moved prior).
    """

    __displayname__ = 'Translate Grid Origin'
    __category__ = 'filter'

    # Maps a corner id to which of the (x, y, z) origin components get
    # shifted by the full grid extent along that axis.
    _CORNER_SHIFTS = {
        1: (True, False, False),   # South East Bottom
        2: (False, True, False),   # North West Bottom
        3: (True, True, False),    # North East Bottom
        4: (False, False, True),   # South West Top
        5: (True, False, True),    # South East Top
        6: (False, True, True),    # North West Top
        7: (True, True, True),     # North East Top
    }

    def __init__(self, corner=1):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkImageData',
            nOutputPorts=1,
            outputType='vtkImageData',
        )
        self.__corner = corner

    def _translate(self, pdi, pdo):
        """Internal helper to translate the input's origin."""
        if pdo is None:
            pdo = vtk.vtkImageData()
        nx, ny, nz = pdi.GetDimensions()
        sx, sy, sz = pdi.GetSpacing()
        ox, oy, oz = pdi.GetOrigin()
        pdo.DeepCopy(pdi)
        shifts = self._CORNER_SHIFTS.get(self.__corner)
        if shifts is None:
            # Unrecognized corner id: preserve the historical fall-through,
            # which left the origin at (0, 0, 0).
            pdo.SetOrigin(0.0, 0.0, 0.0)
            return pdo
        shift_x, shift_y, shift_z = shifts
        xx = ox - (nx - 1) * sx if shift_x else ox
        yy = oy - (ny - 1) * sy if shift_y else oy
        zz = oz - (nz - 1) * sz if shift_z else oz
        pdo.SetOrigin(xx, yy, zz)
        return pdo

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output."""
        source = self.GetInputData(inInfo, 0, 0)
        output = self.GetOutputData(outInfo, 0)
        self._translate(source, output)
        return 1

    #### Setters and Getters ####

    def set_corner(self, corner):
        """Set the corner to use
        Args:
            corner (int) : corner location; see note.
        Note:
            * 1: South East Bottom
            * 2: North West Bottom
            * 3: North East Bottom
            * 4: South West Top
            * 5: South East Top
            * 6: North West Top
            * 7: North East Top
        """
        if corner != self.__corner:
            self.__corner = corner
            self.Modified()
###############################################################################
| {
"repo_name": "banesullivan/ParaViewGeophysics",
"path": "PVGeo/grids/transform.py",
"copies": "1",
"size": "14373",
"license": "bsd-3-clause",
"hash": -4742312895279404000,
"line_mean": 33.3031026253,
"line_max": 89,
"alpha_frac": 0.5238989772,
"autogenerated": false,
"ratio": 3.8256587702954485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9846351516772169,
"avg_score": 0.0006412461446559955,
"num_lines": 419
} |
__all__ = [
'Task',
]
import inspect
import logging
import sys
from g1.bases import classes
from g1.bases.assertions import ASSERT
from . import errors
from . import traps
LOG = logging.getLogger(__name__)
# Python 3.4 implements PEP 442 for safe ``__del__``.
ASSERT.greater_or_equal(sys.version_info, (3, 4))
class Task:
    """Task object.

    A ``Task`` object wraps a coroutine object, and is the basic unit
    of scheduling.  It is modelled after the ``Future`` object, which is
    commonly used for wrapping a ``Thread`` object.  There are a few
    notable differences between ``Task`` and ``Future``:

    * ``Task`` is cancellable due to its cooperative nature, but
      ``Future`` is not because threads in general are not cancellable.

    * ``get_result`` and ``get_exception`` do not take a ``timeout``
      argument.  While it is possible to add a ``timeout`` argument, as
      a convention we would prefer not to.

    NOTE: Although task is cancellable, this should be the last resort
    because a cancel only takes effect on the task's next blocking trap,
    and this may take much longer than desired; for example, if a task
    is sending through a socket and the socket's buffer is somehow never
    full, this task may never be blocked and stay running forever.
    """

    @staticmethod
    def is_coroutine(coro):
        # ``types.coroutine`` returns a generator function.
        return inspect.iscoroutine(coro) or inspect.isgenerator(coro)

    def __init__(self, kernel, coroutine):
        # In case ``__init__`` raises.
        self._coroutine = None
        self._kernel = kernel
        self._coroutine = ASSERT.predicate(coroutine, self.is_coroutine)
        self._num_ticks = 0        # times the coroutine has been resumed
        self._completed = False    # True once the coroutine has finished
        self._result = None        # return value on success
        self._exception = None     # exception on failure/cancellation
        self._callbacks = []       # completion callbacks; None after firing
        self._joined = False       # True once someone retrieved the outcome

    def __del__(self):
        # You have to check whether ``__init__`` raises.
        if self._coroutine is None:
            return
        if not self._joined:
            # Call ``repr`` to force formatting ``self`` here to avoid
            # resurrecting ``self``.
            LOG.warning(
                'task is garbage-collected but never joined: %s', repr(self)
            )

    __repr__ = classes.make_repr(
        '{self._coroutine!r} ticks={self._num_ticks} '
        '{state} {self._result!r} {self._exception!r}',
        state=lambda self: 'completed' if self._completed else 'uncompleted',
    )

    def is_completed(self):
        return self._completed

    def cancel(self):
        # Add ``Task.cancel`` for convenience.
        self._kernel.cancel(self)

    async def join(self):
        # Mark joined *before* blocking so ``__del__`` will not warn even
        # if the joiner itself is cancelled mid-wait.
        self._joined = True
        await traps.join(self)

    async def get_result(self):
        await self.join()
        return self.get_result_nonblocking()

    async def get_exception(self):
        await self.join()
        return self.get_exception_nonblocking()

    def get_result_nonblocking(self):
        """Return the result (or re-raise the failure) of a completed task."""
        ASSERT.true(self.is_completed())
        self._joined = True
        if self._exception:
            raise self._exception
        return self._result

    def get_exception_nonblocking(self):
        """Return (not raise) the exception of a completed task, or None."""
        ASSERT.true(self.is_completed())
        self._joined = True
        return self._exception

    #
    # Package-private interface.
    #

    def tick(self, trap_result, trap_exception):
        """Run coroutine through the next trap point.

        NOTE: ``tick`` catches ``BaseException`` raised from the
        coroutine.  As a result, ``SystemExit`` does not bubble up to
        the kernel event loop.  I believe this behavior is similar to
        Python threading library and thus more expected (``SystemExit``
        raised in non-main thread does not cause CPython process to
        exit).  If you want raising ``SystemExit`` in a task to be
        effective, you have to call ``Task.get_result_nonblocking`` in
        the main thread (or implicitly through ``Kernel.run``).
        """
        ASSERT.false(self._completed)
        if trap_exception:
            trap = self._tick(self._coroutine.throw, trap_exception)
        else:
            trap = self._tick(self._coroutine.send, trap_result)
        if trap is not None:
            return trap
        # No trap returned means the coroutine ran to completion (or died).
        ASSERT.true(self._completed)
        self._call_callbacks()
        return None

    def abort(self):
        """Close the running coroutine.

        This is the last resort for releasing resources acquired by the
        coroutine, not a part of normal task cleanup.  One good place to
        call ``abort`` is when kernel is closing.
        """
        if self._completed:
            return
        LOG.warning('abort task: %r', self)
        # ``close`` returns None on success, and raises RuntimeError
        # when the coroutine cannot be aborted.
        ASSERT.none(self._tick(self._coroutine.close))
        if self._completed:
            if (
                isinstance(self._exception, RuntimeError)
                and str(self._exception) == 'coroutine ignored GeneratorExit'
            ):
                # The coroutine refused to die: roll back the completion
                # state so it may keep running.
                LOG.warning('task cannot be aborted: %r', self)
                self._completed = False
                self._exception = None
            else:
                self._call_callbacks()
        else:
            # ``close`` succeeded without the coroutine raising; record the
            # abort as a cancellation.
            self._completed = True
            self._exception = errors.Cancelled('task abort')
            self._call_callbacks()

    def _tick(self, func, *args):
        # Resume the coroutine once; translate its terminal conditions into
        # the (_completed, _result, _exception) triple.
        try:
            self._num_ticks += 1
            return func(*args)
        except errors.TaskCancellation as exc:
            self._completed = True
            self._exception = errors.Cancelled()
            self._exception.__cause__ = exc
        except StopIteration as exc:
            # Normal return: the coroutine's return value rides on the
            # StopIteration.
            self._completed = True
            self._result = exc.value
        except BaseException as exc:
            self._completed = True
            self._exception = exc
        return None

    def _call_callbacks(self):
        ASSERT.true(self._completed)
        # Swap in None so late ``add_callback`` calls go through the
        # completed branch instead of appending to a dead list.
        callbacks, self._callbacks = self._callbacks, None
        for callback in callbacks:
            self._call_callback(callback)

    def add_callback(self, callback):
        if self._completed:
            self._call_callback(callback)
        else:
            self._callbacks.append(callback)

    def _call_callback(self, callback):
        try:
            callback(self)
        except Exception:
            # Callbacks must not break task completion; log and move on.
            LOG.exception('callback err: %r, %r', self, callback)
| {
"repo_name": "clchiou/garage",
"path": "py/g1/asyncs/kernels/g1/asyncs/kernels/tasks.py",
"copies": "1",
"size": "6511",
"license": "mit",
"hash": -5474747346646265000,
"line_mean": 32.0507614213,
"line_max": 77,
"alpha_frac": 0.5986791583,
"autogenerated": false,
"ratio": 4.343562374916611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5442241533216611,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'TaskQueue',
'tasklet',
]
import logging
from garage.assertions import ASSERT
from garage.threads import actors
from garage.threads import queues
LOG = logging.getLogger(__name__)
class TaskQueue(queues.ForwardingQueue):
    """A self-closing queue for workloads of unknown total size.

    Unlike an executor, a task queue does not know up front how many tasks
    will ever be submitted — a task may spawn further tasks based on its
    outcome.  To decide when it is safe to close, the queue tracks how many
    tasklets are currently busy: once no tasklet is running AND the queue is
    empty, no new task can ever appear, so the queue closes itself.

    NOTE: this only holds if producers stop enqueuing once the tasklets have
    started; after that point the queue may already be closed, so external
    ``put`` calls are unsupported.  The auto-close also doubles as a way to
    wait for all tasks to finish.
    """

    def __init__(self, queue):
        super().__init__(queue)
        self.__busy_count = 0

    def get_task(self):
        """Hand out the next task and mark one more tasklet as busy."""
        ASSERT.greater_or_equal(self.__busy_count, 0)
        with self.lock:
            next_task = self.get()
            self.__busy_count += 1
            return next_task

    # idle = not running
    def notify_tasklet_idle(self):
        """Mark one tasklet idle; close the queue when fully drained."""
        ASSERT.greater(self.__busy_count, 0)
        with self.lock:
            self.__busy_count -= 1
            ASSERT.greater_or_equal(self.__busy_count, 0)
            # Both conditions met (nothing running, nothing queued):
            # no further tasks can arrive, so shut down.
            if not self.__busy_count and not self:
                self.close()
@actors.OneShotActor.from_func
def tasklet(task_queue):
    """Consume tasks from ``task_queue`` until the queue is closed.

    After running each task the tasklet reports itself idle back to the
    queue — whether or not the task raised — so the queue's auto-close
    accounting stays correct.
    """
    LOG.info('start')
    running = True
    while running:
        try:
            task = task_queue.get_task()
        except queues.Closed:
            running = False
        else:
            try:
                task()
            finally:
                task_queue.notify_tasklet_idle()
            # Drop the reference promptly so the task object may be
            # collected while we block on the next get_task().
            del task
    LOG.info('exit')
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/threads/tasklets.py",
"copies": "1",
"size": "2796",
"license": "mit",
"hash": -6043468516729608000,
"line_mean": 32.686746988,
"line_max": 72,
"alpha_frac": 0.6512875536,
"autogenerated": false,
"ratio": 4.1117647058823525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 83
} |
# Public submodules re-exported by this package.
__all__ = [
    'ballsave',
    'ballsearch',
    # A missing comma after 'drops' used to fuse it with the next entry
    # into the single bogus name 'dropsreplay', silently dropping both
    # 'drops' and 'replay' from the public API.
    'drops',
    'replay',
    'scoredisplay',
    'trough',
    'osc',
]
from ballsave import *
from ballsearch import *
from drops import *
from replay import *
from scoredisplay import *
from trough import *
from osc import *
from ..game import Mode
class TransitionOutHelperMode(Mode):
    """One-shot mode that plays a layer's 'out' transition and removes
    itself from the mode queue once the transition completes."""

    def __init__(self, game, priority, transition, layer):
        super(TransitionOutHelperMode, self).__init__(game=game, priority=priority)
        # Configure the transition for the outgoing direction and route its
        # completion back to us before wiring it onto the layer.
        transition.in_out = 'out'
        transition.completed_handler = self.transition_completed
        layer.transition = transition
        self.layer = layer

    def mode_started(self):
        self.layer.transition.start()

    def transition_completed(self):
        self.game.modes.remove(self)
class SwitchSequenceRecognizer(Mode):
    """Listens to switch events to detect and act upon sequences.

    The former class-level ``switches = {}`` / ``switch_log = []`` mutable
    defaults were removed: they were shadowed by ``__init__`` but would have
    been shared across instances if ``__init__`` were ever skipped — a
    classic shared-mutable-state hazard.
    """

    def __init__(self, game, priority):
        super(SwitchSequenceRecognizer, self).__init__(game=game, priority=priority)
        # Maps tuple-of-switch-numbers -> handler callable.
        self.switches = {}
        # Rolling log of switch numbers observed so far.
        self.switch_log = []

    def add_sequence(self, sequence, handler):
        """Register ``handler`` to fire whenever ``sequence`` is observed.

        ``sequence`` is an ordered iterable of switch objects, each with
        ``name`` and ``number`` attributes.
        """
        unique_switch_names = list(set(sw.name for sw in sequence))
        sequence_switch_nums = tuple(sw.number for sw in sequence)
        self.switches[sequence_switch_nums] = handler
        for sw in unique_switch_names:
            # No concern about duplicate switch handlers, as
            # add_switch_handler() protects against this.
            self.add_switch_handler(name=sw, event_type='active', delay=None,
                                    handler=self.switch_active)

    def reset(self):
        """Resets the remembered sequence."""
        self.switch_log = []

    def switch_active(self, sw):
        self.switch_log.append(sw.number)
        log_tuple = tuple(self.switch_log)
        for sequence, handler in self.switches.items():
            # A registered sequence matches when it is a suffix of the log.
            if log_tuple[-len(sequence):] == sequence:
                handler()
| {
"repo_name": "Curbfeeler/PinbotFromES",
"path": "procgame/modes/__init__.py",
"copies": "1",
"size": "1868",
"license": "mit",
"hash": 7331265064062237000,
"line_mean": 29.1290322581,
"line_max": 96,
"alpha_frac": 0.7184154176,
"autogenerated": false,
"ratio": 3.2318339100346023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8795731861440113,
"avg_score": 0.13090349323889786,
"num_lines": 62
} |
__all__ = [
"CLOEXEC",
"NONBLOCK",
"TIMER_ABSTIME",
"CLOCK_REALTIME",
"CLOCK_MONOTONIC",
"bufsize",
"timespec",
"itimerspec",
"create",
"settime",
"gettime",
"unpack",
]
import ctypes
import ctypes.util
import math
import os
import struct
CLOEXEC = 0o02000000
NONBLOCK = 0o00004000
TIMER_ABSTIME = 0x00000001
CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 1
bufsize = 8
libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
class timespec(ctypes.Structure):
    """ctypes mirror of the kernel's ``struct timespec``.

    Stores a time as whole seconds plus nanoseconds, convertible to and
    from a Python float via ``set_time``/``get_time``.
    """

    _fields_ = [
        ("tv_sec", libc.time.restype),
        ("tv_nsec", ctypes.c_long),
    ]

    def __init__(self, time=None):
        ctypes.Structure.__init__(self)
        if time is not None:
            self.set_time(time)

    def __repr__(self):
        return "timerfd.timespec(%s)" % self.get_time()

    def set_time(self, time):
        """Split a float time into (tv_sec, tv_nsec)."""
        frac_part, whole_part = math.modf(time)
        self.tv_sec = int(whole_part)
        self.tv_nsec = int(frac_part * 1000000000)

    def get_time(self):
        """Reassemble the stored time; an exact second comes back as int."""
        if not self.tv_nsec:
            return self.tv_sec
        return self.tv_sec + self.tv_nsec / 1000000000.0
class itimerspec(ctypes.Structure):
    """ctypes mirror of ``struct itimerspec`` (repeat interval + first fire)."""

    _fields_ = [
        ("it_interval", timespec),
        ("it_value", timespec),
    ]

    def __init__(self, interval=None, value=None):
        ctypes.Structure.__init__(self)
        if interval is not None:
            self.it_interval.set_time(interval)
        if value is not None:
            self.it_value.set_time(value)

    def __repr__(self):
        pairs = (("interval", self.it_interval), ("value", self.it_value))
        body = ", ".join("%s=%s" % (label, ts.get_time()) for label, ts in pairs)
        return "timerfd.itimerspec(%s)" % body

    def set_interval(self, time):
        self.it_interval.set_time(time)

    def get_interval(self):
        return self.it_interval.get_time()

    def set_value(self, time):
        self.it_value.set_time(time)

    def get_value(self):
        return self.it_value.get_time()
def errcheck(result, func, arguments):
    """ctypes ``errcheck`` hook: raise OSError for negative return values.

    Non-negative results pass through unchanged; negative ones are turned
    into an ``OSError`` carrying the thread's current ``errno``.
    """
    if result >= 0:
        return result
    code = ctypes.get_errno()
    raise OSError(code, os.strerror(code))
# Declare argument types for the timerfd syscalls and attach the errcheck
# hook, so any negative return value is converted into an OSError with the
# thread's errno automatically.
libc.timerfd_create.argtypes = [ctypes.c_int, ctypes.c_int]
libc.timerfd_create.errcheck = errcheck
libc.timerfd_settime.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(itimerspec), ctypes.POINTER(itimerspec)]
libc.timerfd_settime.errcheck = errcheck
libc.timerfd_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(itimerspec)]
libc.timerfd_gettime.errcheck = errcheck
def create(clock_id, flags=0):
    """Create a timerfd file descriptor via ``timerfd_create(2)``.

    Errors raise ``OSError`` through the ``errcheck`` hook installed on
    ``libc.timerfd_create``.  The former manual ``ret == -1`` branch was
    unreachable (errcheck raises first) and, worse, referenced the
    unimported ``errno`` module — it would have raised NameError.
    """
    return libc.timerfd_create(clock_id, flags)
def settime(ufd, flags, new_value):
    """Arm or disarm the timer via ``timerfd_settime(2)``.

    Returns the previous setting as an ``itimerspec``.  Errors raise
    ``OSError`` through the installed ``errcheck`` hook; the old manual
    ``ret == -1`` check was dead code referencing the unimported ``errno``
    module (NameError if ever reached).
    """
    old_value = itimerspec()
    libc.timerfd_settime(ufd, flags, ctypes.pointer(new_value), ctypes.pointer(old_value))
    return old_value
def gettime(ufd):
    """Read the timer's current setting via ``timerfd_gettime(2)``.

    Errors raise ``OSError`` through the installed ``errcheck`` hook; the
    old manual ``ret == -1`` check was dead code referencing the unimported
    ``errno`` module (NameError if ever reached).
    """
    curr_value = itimerspec()
    libc.timerfd_gettime(ufd, ctypes.pointer(curr_value))
    return curr_value
def unpack(buf):
    """Decode the 8-byte expiration counter read from a timerfd.

    Uses ``struct.unpack_from`` so no copy of ``buf[:8]`` is made and the
    function no longer depends on the module-level ``bufsize`` constant;
    longer buffers are tolerated exactly as before.
    """
    count, = struct.unpack_from("Q", buf)
    return count
| {
"repo_name": "tsavola/pytimerfd",
"path": "timerfd/__init__.py",
"copies": "1",
"size": "3092",
"license": "mit",
"hash": -3192443325448086500,
"line_mean": 21.4057971014,
"line_max": 116,
"alpha_frac": 0.680465718,
"autogenerated": false,
"ratio": 2.753339269813001,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3933804987813001,
"avg_score": null,
"num_lines": null
} |
__all__ = (
'ConnectionLostError',
'RemoteAmpError',
'TooLongError',
'UnhandledCommandError',
'UnknownRemoteError',
)
class AmpError(Exception):
    """Base class for all AMP protocol errors in this package."""
    pass
class ConnectionLostError(Exception):
    """The transport connection was lost.

    The originating low-level exception is retained on the ``exception``
    attribute for inspection by the caller.
    """

    def __init__(self, exc):
        self.exception = exc
class RemoteAmpError(AmpError):
    """An error reported by the remote peer.

    Carries the AMP ``error_code`` string and a human-readable
    ``error_description``.
    """
    def __init__(self, error_code, error_description):
        self.error_code = error_code
        self.error_description = error_description
# Wire-level error-code sentinels used when mapping remote failures onto
# the exception classes below.
UNKNOWN_ERROR_CODE = 'UNKNOWN'
UNHANDLED_ERROR_CODE = 'UNHANDLED'
class UnknownRemoteError(RemoteAmpError):
    """An error whose type we can't identify was raised on the other side."""

    def __init__(self, description):
        # Tag with the generic UNKNOWN code; only the description varies.
        RemoteAmpError.__init__(self, UNKNOWN_ERROR_CODE, description)
class UnhandledCommandError(RemoteAmpError):
    """A command received via AMP could not be dispatched to a handler."""

    def __init__(self, description):
        # Tag with the UNHANDLED code; only the description varies.
        RemoteAmpError.__init__(self, UNHANDLED_ERROR_CODE, description)
class TooLongError(RemoteAmpError):
    # NOTE(review): this subclass skips RemoteAmpError.__init__, so instances
    # carry no error_code/error_description attributes — confirm no caller
    # reads those fields on TooLongError before "fixing" this.
    def __init__(self):
        pass
| {
"repo_name": "jonathanslenders/asyncio-amp",
"path": "asyncio_amp/exceptions.py",
"copies": "1",
"size": "1147",
"license": "bsd-2-clause",
"hash": 6254469819584584000,
"line_mean": 21.94,
"line_max": 77,
"alpha_frac": 0.6748038361,
"autogenerated": false,
"ratio": 3.688102893890675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48629067299906753,
"avg_score": null,
"num_lines": null
} |
__all__ = [
"Connection",
"ProtocolError",
"receive",
"send",
]
import asyncio
import gc
import struct
from . import core
class ProtocolError(Exception):
    """Raised when an incoming frame violates the wire format (e.g. a
    declared frame size smaller than its own 4-byte length prefix)."""
    pass
class Connection:
    """Framed-message connection over an asyncio stream reader/writer pair.

    Owns a ``core.Peer`` holding per-connection marshalling state, and
    closes the writer when used as a context manager.
    """

    def __init__(self, reader, writer):
        self._peer = core.Peer()
        self._reader = reader
        self._writer = writer

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        self.close()

    def close(self):
        # Only the writer is closed; asyncio stream readers have no close().
        self._writer.close()

    # NOTE(review): ``@asyncio.coroutine`` is the legacy (pre-async/await)
    # coroutine style, removed in Python 3.11 — fine for the Python version
    # this module targets.
    @asyncio.coroutine
    def receive(self):
        # Delegate to the module-level receive() with our peer and reader.
        obj = yield from receive(self._peer, self._reader)
        return obj

    @asyncio.coroutine
    def send(self, obj):
        yield from send(self._peer, self._writer, obj)
@asyncio.coroutine
def receive(peer, reader):
    """Read frames from ``reader`` until one unmarshals to an object.

    Frame format: a 4-byte little-endian size prefix that counts itself,
    followed by ``size - 4`` payload bytes.  Returns ``None`` on a clean
    EOF between frames; raises ``asyncio.IncompleteReadError`` if EOF cuts
    a frame short, and ``ProtocolError`` on an impossible size field.
    """
    while True:
        try:
            data = yield from reader.readexactly(4)
        except asyncio.IncompleteReadError as e:
            if e.partial:
                # EOF in the middle of a size prefix: corrupt stream.
                raise
            else:
                # Clean EOF on a frame boundary.
                return None
        size, = struct.unpack(b"<I", data)
        if size < 4:
            # The size field includes its own 4 bytes, so < 4 is invalid.
            raise ProtocolError()
        data = yield from reader.readexactly(size - 4)
        obj = core.unmarshal(peer, data)
        # unmarshal may return None for frames that only update peer state;
        # keep reading until a real object arrives.
        if obj is not None:
            return obj
@asyncio.coroutine
def send(peer, writer, obj):
    """Marshal ``obj`` and write it as one length-prefixed frame."""
    # NOTE(review): presumably run so that dead objects do not leak into the
    # marshalled peer state — confirm against core.marshal's contract.
    gc.collect()
    buf = bytearray(4)  # reserve room for the size prefix up front
    core.marshal(peer, buf, obj)
    # The prefix counts itself, matching what receive() expects.
    buf[:4] = struct.pack(b"<I", len(buf))
    writer.write(buf)
    yield from writer.drain()
| {
"repo_name": "tsavola/tap",
"path": "tap/io.py",
"copies": "1",
"size": "1226",
"license": "bsd-2-clause",
"hash": 1131255553685374000,
"line_mean": 16.0277777778,
"line_max": 52,
"alpha_frac": 0.6688417618,
"autogenerated": false,
"ratio": 2.9400479616306954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41088897234306954,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'Terminated',
'Unavailable',
'client',
'server',
]
import logging
import time
import curio
import nanomsg as nn
from garage import asyncs
from garage.assertions import ASSERT
from garage.asyncs import futures
from garage.asyncs import queues
LOG = logging.getLogger(__name__)
class Terminated(Exception):
    """Client agent is terminated.

    Raised to requesters whose request can no longer be serviced because
    the underlying socket or queue has been closed.
    """
class Unavailable(Exception):
    """Service is unavailable.

    Raised when a request times out before the peer responds.
    """
def _transform_error(exc):
    """Map low-level failures onto this module's public exceptions.

    Timeouts become ``Unavailable``; closed sockets/queues become
    ``Terminated``; any other exception is returned untouched.  The
    original exception is chained via ``__cause__``.
    """
    if isinstance(exc, curio.TaskTimeout):
        wrapped = Unavailable()
    elif isinstance(exc, (nn.EBADF, queues.Closed)):
        wrapped = Terminated()
    else:
        return exc
    wrapped.__cause__ = exc
    return wrapped
async def client(graceful_exit, sockets, request_queue, timeout=None):
    """Act as client-side in the reqrep protocol.

    NOTE: Because we want end-to-end functionality (non-raw sockets), a
    socket can only handle one request at a time; to overcome this, we
    use a pool of sockets.

    In addition to handling requests, this waits for the graceful exit
    event and then cleans up itself.

    When cleaning up, it:
    * Close socket so that pump_requests will not send any further
      requests.
    * Close the queue so that upstream will not enqueue any further
      requests.

    The requests still in the queue will be "processed", with their
    result being set to EBADF, since the socket is closed.  This signals
    and unblocks all blocked upstream tasks.
    """
    for socket in sockets:
        ASSERT.equal(socket.options.nn_domain, nn.AF_SP)
        ASSERT.equal(socket.options.nn_protocol, nn.NN_REQ)

    async def pump_requests(socket):
        # One pump per socket: a non-raw NN_REQ socket handles strictly one
        # in-flight request at a time.
        LOG.info('client: start sending requests to: %s', socket)
        while True:
            try:
                request, response_promise = await request_queue.get()
            except queues.Closed:
                break
            if not response_promise.set_running_or_notify_cancel():
                LOG.debug('client: drop request: %r', request)
                continue
            try:
                # The timeout covers the whole send/recv round trip.
                async with curio.timeout_after(timeout):
                    await socket.send(request)
                    with await socket.recv() as message:
                        # Copy out of the nanomsg buffer before it is freed.
                        response = bytes(message.as_memoryview())
            except Exception as exc:
                if response_promise.cancelled():
                    LOG.exception(
                        'client: err but request is cancelled: %r',
                        request,
                    )
                else:
                    response_promise.set_exception(_transform_error(exc))
            else:
                response_promise.set_result(response)
        LOG.info('client: stop sending requests to: %s', socket)

    async with asyncs.TaskStack() as stack:
        # LIFO teardown: sockets close first, then the queue, so queued
        # requests fail fast with EBADF rather than hanging.
        for socket in sockets:
            await stack.spawn(pump_requests(socket))
        stack.sync_callback(request_queue.close)
        for socket in sockets:
            stack.sync_callback(socket.close)
        await stack.spawn(graceful_exit.wait())
        await (await stack.wait_any()).join()
async def server(
        graceful_exit,
        socket,
        request_queue,
        timeout=None,
        error_handler=None):
    """Act as server-side in the reqrep protocol.

    NOTE: error_handler is not asynchronous because you should probably
    send back error messages without being blocked indefinitely.

    In addition to handling requests, this waits for the graceful exit
    event and then cleans up itself.

    When cleaning up, it:
    * Close socket so that the pump_requests will not recv new requests
      and will exit.
    * Close the queue so that downstream will not dequeue any request.

    The requests still in the queue will be dropped (since socket is
    closed, their response cannot be sent back to the client).
    """
    ASSERT.equal(socket.options.nn_domain, nn.AF_SP_RAW)
    ASSERT.equal(socket.options.nn_protocol, nn.NN_REP)
    if error_handler is None:
        # Default: swallow errors silently (no error response is sent).
        error_handler = lambda *_: None

    async def pump_requests(handlers):
        LOG.info('server: start receiving requests from: %s', socket)
        while True:
            try:
                message = await socket.recvmsg()
            except nn.EBADF:
                # Socket was closed during cleanup; exit the pump.
                break
            with message:
                response_message = nn.Message()
                # NOTE: It is important to set control header in the
                # response message from the request so that response can
                # be correctly routed back to the right sender.
                response_message.adopt_control(*message.disown_control())
                request = bytes(message.as_memoryview())
            # Enqueue request here rather than in handle_request so that
            # pump_requests may apply back pressure to socket.
            begin_time = time.perf_counter()
            try:
                response_future = futures.Future()
                async with curio.timeout_after(timeout):
                    await request_queue.put((
                        request,
                        response_future.promise(),
                    ))
            except Exception as exc:
                await on_error(exc, request, response_message)
                continue
            await handlers.spawn(handle_request(
                begin_time,
                request,
                response_future,
                response_message,
            ))
        LOG.info('server: stop receiving requests from: %s', socket)

    async def handle_request(
            begin_time, request, response_future, response_message):
        if timeout is not None:
            # Budget what remains after the time spent queueing.
            remaining_time = timeout - (time.perf_counter() - begin_time)
            if remaining_time <= 0:
                response_future.cancel()
                await on_error(
                    Unavailable(), request, response_message,
                    exc_info=False,
                )
                return
        else:
            remaining_time = None
        try:
            async with curio.timeout_after(remaining_time), response_future:
                response = await response_future.result()
        except Exception as exc:
            await on_error(exc, request, response_message)
        else:
            await send_response(request, response, response_message)

    async def on_error(exc, request, response_message, *, exc_info=True):
        if isinstance(exc, curio.TaskTimeout):
            # Timeout is very common in distributed systems; whether it
            # is an error should be decided at application level, and we
            # will just log a warning here.
            log = LOG.warning
        else:
            log = LOG.error
        log(
            'server: err when processing request: %r',
            request, exc_info=exc_info,
        )
        error_response = error_handler(request, _transform_error(exc))
        if error_response is not None:
            await send_response(request, error_response, response_message)

    async def send_response(request, response, response_message):
        response_message.adopt_message(response, len(response), False)
        try:
            await socket.sendmsg(response_message)
        except nn.EBADF:
            # Socket already closed; the client will never see this reply.
            LOG.debug('server: drop response: %r, %r', request, response)

    async def join_handlers(handlers):
        # Reap completed handler tasks so failures are logged, not lost.
        async for handler in handlers:
            if handler.exception:
                LOG.error(
                    'server: err in request handler',
                    exc_info=handler.exception,
                )

    def close_queue():
        num_dropped = len(request_queue.close(graceful=False))
        if num_dropped:
            LOG.info('server: drop %d requests', num_dropped)

    async with asyncs.TaskSet() as handlers, asyncs.TaskStack() as stack:
        # LIFO teardown: socket closes first, then the queue is drained.
        await stack.spawn(join_handlers(handlers))
        await stack.spawn(pump_requests(handlers))
        stack.sync_callback(close_queue)
        stack.sync_callback(socket.close)
        await stack.spawn(graceful_exit.wait())
        await (await stack.wait_any()).join()
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/asyncs/messaging/reqrep.py",
"copies": "1",
"size": "8257",
"license": "mit",
"hash": 4210575404394658300,
"line_mean": 32.4291497976,
"line_max": 76,
"alpha_frac": 0.5917403415,
"autogenerated": false,
"ratio": 4.480195333695062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010121457489878543,
"num_lines": 247
} |
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    # NOTE(review): this module is Python 2 code (``unicode``, ``u''``
    # literals below); it will not run unmodified on Python 3.

    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        # html5lib does its own encoding detection, so no transformation
        # of the markup happens here.
        return markup, None, None, False

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)
        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        # Keep a handle on the builder so feed()/BS4 can reach it later.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """Adapter that exposes a BeautifulSoup tree through html5lib's
    TreeBuilder interface (documentClass/elementClass/commentClass etc.)."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # Reuse the soup object itself as the document root.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # NOTE(review): ``BeautifulSoup`` is not imported anywhere in this
        # module, so calling fragmentClass() raises NameError — confirm
        # whether fragment parsing is ever exercised through this path.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Mapping-style view over a tag's attributes, for html5lib.

    NOTE: ``self.attrs`` is a snapshot taken at construction time;
    ``__setitem__`` writes through to the underlying element but does NOT
    refresh the snapshot, so reads after writes can be stale.
    """

    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return list(self.attrs.items()).__iter__()

    def __setitem__(self, name, value):
        # A stray no-op expression statement — ``"set attr", name, value`` —
        # was removed here; it merely built and discarded a tuple per call.
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib tree node wrapping a Beautiful Soup ``Tag``."""

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # Concatenate new text onto old text node
            # XXX This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + node.element)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            self.soup.object_was_parsed(node.element, parent=self.element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # NOTE(review): ``converted_attributes`` is assigned but never
            # used — apparently leftover scaffolding.
            converted_attributes = []
            # Rewrite (namespace, name) tuple keys into NamespacedAttribute
            # keys in place.
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value
            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value
            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, newParent):
        # Move every child of this element under newParent, re-wrapping each
        # one in the appropriate html5lib node type.
        while self.element.contents:
            child = self.element.contents[0]
            child.extract()
            if isinstance(child, Tag):
                newParent.appendChild(
                    Element(child, self.soup, namespaces["html"]))
            else:
                newParent.appendChild(
                    TextNode(child, self.soup))

    def cloneNode(self):
        # Shallow clone: same name/namespace and attributes, no children.
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
    nameTuple = property(getNameTuple)
class TextNode(Element):
    """html5lib node wrapper for text/comment leaves.

    Text nodes have no tag name and no attributes; cloning is unsupported.
    """

    def __init__(self, element, soup):
        # Bypass Element.__init__: there is no ``element.name`` to pass up.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.element = element
        self.soup = soup

    def cloneNode(self):
        raise NotImplementedError
| {
"repo_name": "colobas/gerador-horarios",
"path": "bs4/builder/_html5lib.py",
"copies": "1",
"size": "6730",
"license": "mit",
"hash": -246184191534504060,
"line_mean": 29.3153153153,
"line_max": 150,
"alpha_frac": 0.7230312036,
"autogenerated": false,
"ratio": 3.2829268292682925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45059580328682924,
"avg_score": null,
"num_lines": null
} |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
TreeBuilder,
XML)
from bs4.dammit import UnicodeDammit
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """Tree builder that drives lxml's XML parser through its target API.

    lxml invokes the callback methods (``start``, ``end``, ``data``,
    ``comment``, ``doctype``, ``pi``) as it parses, and each callback
    forwards the event to the BeautifulSoup object in ``self.soup``.
    """
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    # Number of bytes/characters handed to lxml per feed() call.
    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    @property
    def default_parser(self):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        return etree.XMLParser(target=self, strip_cdata=False, recover=True)

    def __init__(self, parser=None, empty_element_tags=None):
        """
        :param parser: An lxml parser object, or a callable that
            produces one when invoked with default arguments.
        :param empty_element_tags: Iterable of tag names to treat as
            empty-element (self-closing) tags.
        """
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        if parser is None:
            # Use the default parser.
            parser = self.default_parser
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False)
        self.parser = parser
        self.soup = None
        # Stack of inverted namespace maps. A ``None`` entry marks a
        # tag that introduced no new namespaces (see start()/end()).
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """Decode markup for parsing.

        :return: A 4-tuple (markup, original encoding, encoding
            declared within markup, whether any characters had to be
            replaced during decoding).  The original docstring said
            3-tuple, but both return paths yield four values.
        """
        if isinstance(markup, unicode):
            # Already decoded text: no encoding detection necessary.
            return markup, None, None, False

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
        return (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters)

    def feed(self, markup):
        """Feed markup to the lxml parser in CHUNK_SIZE pieces."""
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)
        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        self.parser.feed(data)
        while data != '':
            # Now call feed() on the rest of the data, chunk by chunk.
            data = markup.read(self.CHUNK_SIZE)
            if data != '':
                self.parser.feed(data)
        self.parser.close()

    def close(self):
        # Reset namespace state so the builder can be reused.
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # NOTE: the mutable default ``nsmap={}`` is safe here because it
        # is never mutated; lxml supplies a fresh mapping per call.
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        # BUG FIX: the original condition was only ``len(self.nsmaps) > 1``,
        # which (per the comment below) was meant for tags that declare NO
        # new namespaces; without the ``len(nsmap) == 0`` guard, namespace
        # declarations on nested tags were silently dropped once any
        # non-default namespace was already in play.
        if len(nsmap) == 0 and len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn them into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        self.soup.endData()
        namespace, name = self._getNsTag(name)
        # Consistency: reuse _prefix_for_namespace instead of the
        # duplicated inline scan the original carried here. Also drops
        # the unused ``completed_tag = self.soup.tagStack[-1]`` read,
        # which could raise IndexError on an empty tag stack.
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are deliberately ignored.
        pass

    def data(self, content):
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """lxml-backed builder for HTML (non-XML) documents."""
    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False
    @property
    def default_parser(self):
        # Returned as a class, not an instance; the XML base class
        # instantiates it with default arguments in __init__.
        return etree.HTMLParser
    def feed(self, markup):
        # HTML parsing is not chunked: hand the whole markup to lxml.
        self.parser.feed(markup)
        self.parser.close()
    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| {
"repo_name": "colobas/gerador-horarios",
"path": "bs4/builder/_lxml.py",
"copies": "1",
"size": "6043",
"license": "mit",
"hash": 1043003929548628500,
"line_mean": 29.3668341709,
"line_max": 76,
"alpha_frac": 0.7099122952,
"autogenerated": false,
"ratio": 3.2212153518123667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4431127647012366,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.