_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q15900 | observer | train | def observer(names_or_instance, names=None, func=None, change_only=False):
"""Specify a callback function that will fire on Property value change
Observer functions on a HasProperties class fire after the observed
Property or Properties have been changed (unlike validator functions
that fire on set before the value is changed).
You can use this method as a decorator inside a HasProperties class
.. code::
@properties.observer('variable_name')
def callback_function(self, change):
print(change)
or you can use it to register a function to a single HasProperties
instance
.. code::
properties.observer(my_has_props, 'variable_name', callback_function)
The variable name must refer to a Property name on the HasProperties
class. A list of Property names may also be used; the same
callback function will fire when any of these Properties change. Also,
:class:`properties.everything <properties.utils.Sentinel>` may | python | {
"resource": ""
} |
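A minimal, hedged usage sketch of the decorator form described in this docstring; the class and Property names below are made up for illustration and assume the `properties` package shown above.

```python
import properties

class Model(properties.HasProperties):
    value = properties.Float('a numeric value')

    @properties.observer('value')
    def _on_value_change(self, change):
        # Fires after 'value' has been changed, as the docstring describes.
        print(change)

m = Model()
m.value = 3.0  # triggers _on_value_change with the change notification dict
```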
q15901 | validator | train | def validator(names_or_instance, names=None, func=None):
"""Specify a callback function to fire on class validation OR property set
This function has two modes of operation:
1. Registering callback functions that validate Property values when
they are set, before the change is saved to the HasProperties instance.
This mode is very similar to the :code:`observer` function.
2. Registering callback functions that fire only when the HasProperties
:code:`validate` method is called. This allows for cross-validation
of Properties that should only fire when all required Properties are
set.
**Mode 1:**
Validator functions on a HasProperties class fire on set but before the
observed Property or Properties have been changed (unlike observer
functions that fire after the value has been changed).
You can use this method as a decorator inside a HasProperties class
.. code::
@properties.validator('variable_name')
def callback_function(self, change):
print(change)
or you can use it to register a function to a single HasProperties
instance
.. code::
properties.validator(my_has_props, 'variable_name', callback_function)
The variable name must refer to a Property name on the HasProperties
class. A list of Property names may also be used; the same
callback function will fire when any of these Properties change. Also,
:class:`properties.everything <properties.utils.Sentinel>` may be
specified instead of the variable name. In that case, the callback
function will fire when any Property changes.
The callback function must take two arguments. The first is the
HasProperties instance; the second is the change notification dictionary.
This dictionary contains:
* 'name' - the name of the changed Property
* 'previous' - the value of the Property | python | {
"resource": ""
} |
q15902 | build_from_bases | train | def build_from_bases(bases, classdict, attr, attr_dict):
"""Helper function to build private HasProperties attributes"""
output = OrderedDict()
output_keys = set()
all_bases = []
# Go through the bases from furthest to nearest ancestor
for base in reversed(bases):
# Only keep the items that are still defined on the bases
if base is not object and isinstance(base, PropertyMetaclass):
output_keys = output_keys.union(getattr(base, attr))
# Collect all bases so we ensure overridden items are assigned
# in the correct order | python | {
"resource": ""
} |
q15903 | HasProperties._reset | train | def _reset(self, name=None):
"""Revert specified property to default value
If no property is specified, all properties are returned to default.
"""
if name is None:
for key in self._props:
if isinstance(self._props[key], basic.Property):
self._reset(key)
return
if name not in self._props:
raise AttributeError("Input name '{}' is not a known "
"property or attribute".format(name))
if not isinstance(self._props[name], basic.Property):
| python | {
"resource": ""
} |
q15904 | HasProperties.validate | train | def validate(self):
"""Call all registered class validator methods
These are all methods decorated with :code:`@properties.validator`.
Validator methods are expected to raise a ValidationError if they
fail.
"""
if getattr(self, '_getting_validated', False):
return True
self._getting_validated = True
self._validation_error_tuples = []
self._non_validation_error = None
try:
for val in itervalues(self._class_validators):
try:
if isinstance(val.func, string_types):
valid = getattr(self, val.func)()
else:
valid = val.func(self)
if valid is False:
raise utils.ValidationError(
'Validation failed', None, None, self
)
except utils.ValidationError as val_err:
self._validation_error_tuples += val_err.error_tuples
except GENERIC_ERRORS as err:
| python | {
"resource": ""
} |
q15905 | HasProperties._deserialize_class | train | def _deserialize_class(cls, input_cls_name, trusted, strict):
"""Returns the HasProperties class to use for deserialization"""
if not input_cls_name or input_cls_name == cls.__name__:
return cls
| python | {
"resource": ""
} |
q15906 | BaseTask.report_status | train | def report_status(self, status):
"""Hook for reporting the task status towards completion"""
status = Instance('', TaskStatus).validate(None, status)
| python | {
"resource": ""
} |
q15907 | HasUID.serialize | train | def serialize(self, include_class=True, save_dynamic=False, **kwargs):
"""Serialize nested HasUID instances to a flat dictionary
**Parameters**:
* **include_class** - If True (the default), the name of the class
will also be saved to the serialized dictionary under key
:code:`'__class__'`
* **save_dynamic** - If True, dynamic properties are written to
the serialized dict (default: False).
* You may also specify a **registry** - This is the flat dictionary
where UID/HasUID pairs are stored. By default, no registry need
be provided; a new dictionary will be created.
* Any other keyword arguments will be passed through to the Property
serializers.
"""
registry = kwargs.pop('registry', None)
if registry is None:
registry = dict()
if not registry:
root = True
registry.update({'__root__': self.uid})
| python | {
"resource": ""
} |
q15908 | HasUID.deserialize | train | def deserialize(cls, value, trusted=False, strict=False,
assert_valid=False, **kwargs):
"""Deserialize nested HasUID instance from flat pointer dictionary
**Parameters**
* **value** - Flat pointer dictionary produced by :code:`serialize`
with UID/HasUID key/value pairs. It also includes a
:code:`__root__` key to specify the root HasUID instance.
* **trusted** - If True (and if the input dictionaries have
:code:`'__class__'` keyword and this class is in the registry), the
new **HasProperties** class will come from the dictionary.
If False (the default), only the **HasProperties** class this
method is called on will be constructed.
* **strict** - Requires :code:`'__class__'`, if present on the input
dictionary, to match the deserialized instance's class. Also
disallows unused properties in the input dictionary. Default
is False.
* **assert_valid** - Require deserialized instance to be valid.
Default is False.
* You may also specify an alternative **root** - This allows a different
HasUID root instance to be specified. It overrides :code:`__root__`
in the input dictionary.
* Any other keyword arguments will be passed through to the Property
deserializers.
.. note::
HasUID instances are constructed with no input arguments
(ie :code:`cls()` is called). This means deserialization will
fail if the init method has been overridden to require
input parameters.
"""
registry = kwargs.pop('registry', None)
if registry is None:
if not isinstance(value, dict):
raise ValueError('HasUID must deserialize from dictionary')
registry = value.copy()
uid = kwargs.get('root', registry.get('__root__'))
else:
uid = value
if uid in cls._INSTANCES and uid not in registry:
| python | {
"resource": ""
} |
q15909 | Pointer.deserialize | train | def deserialize(self, value, **kwargs):
"""Deserialize instance from JSON value
If a deserializer is registered, that is used. Otherwise, if the
instance_class is a HasProperties subclass, an instance can be
deserialized from a dictionary.
"""
kwargs.update({'trusted': kwargs.get('trusted', False)})
| python | {
"resource": ""
} |
q15910 | ImagePNG.validate | train | def validate(self, instance, value):
"""Checks if value is an open PNG file, valid filename, or png.Image
Returns an open bytestream of the image
"""
# Pass if already validated
if getattr(value, '__valid__', False):
return value
# Validate that value is PNG
if isinstance(value, png.Image):
pass
else:
value = super(ImagePNG, self).validate(instance, value)
try:
png.Reader(value).validate_signature()
except png.FormatError:
self.error(instance, value, extra='Open file is not PNG.')
| python | {
"resource": ""
} |
q15911 | ImagePNG.to_json | train | def to_json(value, **kwargs):
"""Convert a PNG Image to base64-encoded JSON
to_json assumes that value has passed validation.
| python | {
"resource": ""
} |
q15912 | ImagePNG.from_json | train | def from_json(value, **kwargs):
"""Convert a PNG Image from base64-encoded JSON"""
if not value.startswith(PNG_PREAMBLE):
raise ValueError('Not a valid base64-encoded PNG image')
infile = BytesIO()
| python | {
"resource": ""
} |
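The check above implies the JSON form is a base64 payload behind a fixed preamble; a hedged sketch of the decoding step, where the preamble value is an assumption since the `PNG_PREAMBLE` constant is defined elsewhere:

```python
import base64
from io import BytesIO

PNG_PREAMBLE = 'data:image/png;base64,'  # assumed value; the real constant is not shown here

def png_stream_from_json(value):
    """Decode a preamble-prefixed base64 string into an open bytestream."""
    if not value.startswith(PNG_PREAMBLE):
        raise ValueError('Not a valid base64-encoded PNG image')
    return BytesIO(base64.b64decode(value[len(PNG_PREAMBLE):]))
```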
q15913 | validate | train | def validate(schema, value, noun='value'):
"""
Checks the value against the schema, and raises ValidationError if validation
fails.
"""
errors = schema.errors(value)
if errors:
error_details = ''
for error in errors:
if error.pointer:
| python | {
"resource": ""
} |
q15914 | validate_call | train | def validate_call(kwargs, returns, is_method=False):
"""
Decorator which runs validation on a callable's arguments and its return
value. Pass a schema for the kwargs and for the return value. Positional
arguments are not supported.
"""
def decorator(func):
@wraps(func)
def inner(*passed_args, **passed_kwargs):
# Enforce no positional args
# first argument of an instance method or class method is always positional, so we need
# to make an exception for them. Static methods are still validated according to standard rules
# this check happens before methods are bound, so instance method is still a regular function | python | {
"resource": ""
} |
q15915 | PattonResults.dump | train | def dump(self):
"""Dump to file"""
# NO Dump file selected -> DO NOTHING
if self.running_config.output_file:
# Determinate file format
_, extension = op.splitext(self.running_config.output_file)
extension = extension.replace(".", "")
if extension not in self.ALLOWED_DUMP_FORMATS:
raise PCException(
f"Extension of dump file is not available. "
f"Allowed extensions are: "
f"{', '.join(self.ALLOWED_DUMP_FORMATS)}")
with open(self.running_config.output_file, "w") as f:
if extension == "csv":
csv_writer = csv.writer(f)
csv_writer.writerow(("# Name",
"CPE",
| python | {
"resource": ""
} |
q15916 | on_init | train | def on_init(app): # pylint: disable=unused-argument
"""
Run sphinx-apidoc and swg2rst after Sphinx initialization.
Read the Docs won't run tox or custom shell commands, so we need this to
avoid checking in the generated reStructuredText files.
"""
docs_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.abspath(os.path.join(docs_path, '..'))
apidoc_path = 'sphinx-apidoc'
swg2rst_path = 'swg2rst'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
bin_path = os.path.abspath(os.path.join(sys.prefix, 'bin'))
apidoc_path = os.path.join(bin_path, apidoc_path)
swg2rst_path = os.path.join(bin_path, swg2rst_path)
| python | {
"resource": ""
} |
q15917 | _get_or_create_group_parent | train | def _get_or_create_group_parent(message_body, user_id):
"""
Determine if the given task belongs to a group or not, and if so, get or create a status record for the group.
Arguments:
message_body (dict): The body of the before_task_publish signal for the task in question
user_id (int): The primary key of the user model record for the user who triggered the task.
(If using a custom user model, this may not be an integer.)
Returns
-------
UserTaskStatus: The status record for the containing group, or `None` if there isn't one
"""
parent_id = message_body.get('taskset', None)
if not parent_id:
# Not part of a group
return None
| python | {
"resource": ""
} |
q15918 | _get_user_id | train | def _get_user_id(arguments_dict):
"""
Get and validate the `user_id` argument to a task derived from `UserTaskMixin`.
Arguments:
arguments_dict (dict): The parsed positional and keyword arguments to the task
Returns
-------
int: The primary key of a user record (may not be an int if using a custom user model)
"""
if 'user_id' not in arguments_dict:
raise | python | {
"resource": ""
} |
q15919 | colorbar | train | def colorbar(height, length, colormap):
"""Return the channels of a colorbar.
"""
cbar = np.tile(np.arange(length) * 1.0 / (length - 1), (height, 1))
cbar = (cbar * (colormap.values.max() - | python | {
"resource": ""
} |
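The tiling step above builds a horizontal 0-to-1 ramp repeated over every row; a small standalone check of that expression:

```python
import numpy as np

height, length = 2, 5
ramp = np.tile(np.arange(length) * 1.0 / (length - 1), (height, 1))
print(ramp)
# [[0.   0.25 0.5  0.75 1.  ]
#  [0.   0.25 0.5  0.75 1.  ]]
```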
q15920 | palettebar | train | def palettebar(height, length, colormap):
"""Return the channels of a palettebar.
"""
cbar = np.tile(np.arange(length) * 1.0 / (length - 1), (height, 1))
cbar = (cbar * (colormap.values.max() + | python | {
"resource": ""
} |
q15921 | Colormap.to_rio | train | def to_rio(self):
"""Converts the colormap to a rasterio colormap.
"""
self.colors = (((self.colors * 1.0 | python | {
"resource": ""
} |
q15922 | StatusViewSet.cancel | train | def cancel(self, request, *args, **kwargs): # pylint: disable=unused-argument
"""
Cancel the task associated with the specified status record.
Arguments:
request (Request): A POST including a task status record | python | {
"resource": ""
} |
q15923 | swagger | train | def swagger(request): # pylint: disable=unused-argument
"""
Render Swagger UI and the underlying Open API | python | {
"resource": ""
} |
q15924 | ConditionalOpenAPIRenderer.render | train | def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the appropriate Open API JSON file.
"""
if 'SWAGGER_JSON_PATH' in os.environ:
with io.open(os.environ['SWAGGER_JSON_PATH'], 'rb') as f:
return | python | {
"resource": ""
} |
q15925 | add_rules | train | def add_rules():
"""
Use the rules provided in this module to implement authorization checks for the ``django-user-tasks`` models.
These rules allow only superusers and the user who triggered a task to view its status or artifacts, cancel the
task, or delete the status information and all its related artifacts. Only superusers are allowed to directly
modify or delete an artifact (or to | python | {
"resource": ""
} |
q15926 | ArtifactFilterBackend.filter_queryset | train | def filter_queryset(self, request, queryset, view):
"""
Filter out any artifacts which the requesting user does not have permission to view.
"""
| python | {
"resource": ""
} |
q15927 | _image2array | train | def _image2array(filepath):
'''
Utility function that converts an image file into 3 np arrays
that can be fed into geo_image.GeoImage in order to generate
a PyTROLL GeoImage object.
'''
im = Pimage.open(filepath).convert('RGB')
(width, height) = im.size
_r = np.array(list(im.getdata(0)))/255.0
| python | {
"resource": ""
} |
q15928 | ycbcr2rgb | train | def ycbcr2rgb(y__, cb_, cr_):
"""Convert the three YCbCr channels to RGB channels.
"""
kb_ = 0.114
| python | {
"resource": ""
} |
q15929 | Image._add_channel | train | def _add_channel(self, chn, color_min, color_max):
"""Adds a channel to the image object
"""
if isinstance(chn, np.ma.core.MaskedArray):
chn_data = chn.data
| python | {
"resource": ""
} |
q15930 | Image.is_empty | train | def is_empty(self):
"""Checks for an empty image.
"""
if(((self.channels == []) and (not self.shape == (0, 0))) or | python | {
"resource": ""
} |
q15931 | Image._pngmeta | train | def _pngmeta(self):
"""It will return GeoImage.tags as a PNG metadata object.
Inspired by:
public domain, Nick Galbreath
http://blog.modp.com/2007/08/python-pil-and-png-metadata-take-2.html
"""
reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')
try:
tags = self.tags
except AttributeError:
tags = {}
# Undocumented class
| python | {
"resource": ""
} |
q15932 | Image._rgb2ycbcr | train | def _rgb2ycbcr(self, mode):
"""Convert the image from RGB mode to YCbCr."""
self._check_modes(("RGB", "RGBA"))
(self.channels[0], self.channels[1], self.channels[2]) = \
rgb2ycbcr(self.channels[0],
self.channels[1],
self.channels[2])
if self.fill_value is not None:
self.fill_value[0:3] | python | {
"resource": ""
} |
q15933 | Image._ycbcr2rgb | train | def _ycbcr2rgb(self, mode):
"""Convert the image from YCbCr mode to RGB.
"""
self._check_modes(("YCbCr", "YCbCrA"))
(self.channels[0], self.channels[1], self.channels[2]) = \
ycbcr2rgb(self.channels[0],
self.channels[1],
self.channels[2])
if self.fill_value is not None:
self.fill_value[0:3] | python | {
"resource": ""
} |
q15934 | Image._to_p | train | def _to_p(self, mode):
"""Convert the image to P or PA mode.
"""
if self.mode.endswith("A"):
chans = self.channels[:-1]
alpha = self.channels[-1]
self._secondary_mode = self.mode[:-1]
else:
chans = self.channels
alpha = None
self._secondary_mode = self.mode
palette = []
selfmask = chans[0].mask
for chn in chans[1:]:
selfmask = np.ma.mask_or(selfmask, chn.mask)
new_chn = np.ma.zeros(self.shape, dtype=int)
color_nb = 0
for i in range(self.height):
for j in range(self.width):
current_col = tuple([chn[i, j] for chn in chans])
try:
next(idx
| python | {
"resource": ""
} |
q15935 | Image._from_p | train | def _from_p(self, mode):
"""Convert the image from P or PA mode.
"""
self._check_modes(("P", "PA"))
if self.mode.endswith("A"):
alpha = self.channels[-1]
else:
alpha = None
chans = []
cdfs = []
color_chan = self.channels[0]
for i in range(len(self.palette[0])):
cdfs.append(np.zeros(len(self.palette)))
for j in range(len(self.palette)):
cdfs[i][j] = self.palette[j][i]
new_chn = np.ma.array(np.interp(color_chan,
np.arange(len(self.palette)),
cdfs[i]),
mask=color_chan.mask)
chans.append(new_chn)
| python | {
"resource": ""
} |
q15936 | Image._rgb2l | train | def _rgb2l(self, mode):
"""Convert from RGB to monochrome L.
"""
self._check_modes(("RGB", "RGBA"))
kb_ = 0.114
kr_ = 0.299
r__ = self.channels[0]
g__ = self.channels[1]
b__ = self.channels[2]
y__ = kr_ * r__ + (1 - kr_ - kb_) * g__ + kb_ * b__
if self.fill_value is not None:
self.fill_value = ([rgb2ycbcr(self.fill_value[0],
| python | {
"resource": ""
} |
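The luma line above uses kr = 0.299 and kb = 0.114, i.e. the familiar BT.601 weighting Y = 0.299 R + 0.587 G + 0.114 B; a quick standalone check with made-up channel values:

```python
import numpy as np

kb, kr = 0.114, 0.299
r, g, b = np.array([255.0]), np.array([128.0]), np.array([0.0])
y = kr * r + (1 - kr - kb) * g + kb * b
print(y)  # ~[151.4]
```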
q15937 | Image._ycbcr2l | train | def _ycbcr2l(self, mode):
"""Convert from YCbCr to L.
"""
self._check_modes(("YCbCr", "YCbCrA"))
self.channels = [self.channels[0]] + self.channels[3:]
if self.fill_value is not | python | {
"resource": ""
} |
q15938 | Image._l2ycbcr | train | def _l2ycbcr(self, mode):
"""Convert from L to YCbCr.
"""
self._check_modes(("L", "LA"))
luma = self.channels[0]
zeros = np.ma.zeros(luma.shape)
zeros.mask = luma.mask
self.channels = [luma, zeros, zeros] + self.channels[1:]
| python | {
"resource": ""
} |
q15939 | rgb2xyz | train | def rgb2xyz(r__, g__, b__):
"""RGB to XYZ
"""
r2_ = r__ / 255.0
g2_ = g__ / 255.0
b2_ = b__ / 255.0
def f__(arr):
"""Forward
"""
return np.where(arr > 0.04045,
((arr + 0.055) / 1.055) ** 2.4,
arr / 12.92)
r2_ = f__(r2_) * 100
g2_ = f__(g2_) * 100
b2_ = f__(b2_) * 100
| python | {
"resource": ""
} |
q15940 | xyz2rgb | train | def xyz2rgb(x__, y__, z__):
"""XYZ colorspace to RGB
"""
x2_ = x__ / 100.0
y2_ = y__ / 100.0
z2_ = z__ / 100.0
r__ = x2_ * 3.2406 + y2_ * -1.5372 + z2_ * -0.4986
g__ = x2_ * -0.9689 + y2_ * 1.8758 + z2_ * 0.0415
b__ = x2_ * 0.0557 + y2_ * -0.2040 + z2_ * 1.0570
def finv(arr):
| python | {
"resource": ""
} |
q15941 | purge_old_user_tasks | train | def purge_old_user_tasks():
"""
Delete any UserTaskStatus and UserTaskArtifact records older than ``settings.USER_TASKS_MAX_AGE``.
Intended to be run as a scheduled task.
"""
limit = | python | {
"resource": ""
} |
q15942 | color_interp | train | def color_interp(data):
"""Get the color interpretation for this image."""
from rasterio.enums import ColorInterp as ci
modes = {'L': [ci.gray],
'LA': [ci.gray, ci.alpha],
'YCbCr': [ci.Y, ci.Cb, ci.Cr],
'YCbCrA': [ci.Y, ci.Cb, ci.Cr, ci.alpha]}
try:
mode = ''.join(data['bands'].values)
| python | {
"resource": ""
} |
q15943 | XRImage._correct_dims | train | def _correct_dims(data):
"""Standardize dimensions to bands, y, and x."""
if not hasattr(data, 'dims'):
raise TypeError("Data must have a 'dims' attribute.")
# doesn't actually copy the data underneath
# we don't want our operations to change the user's data
data = data.copy()
if 'y' not in data.dims or 'x' not in data.dims:
if data.ndim != 2:
raise ValueError("Data must have a 'y' and 'x' dimension")
# rename dimensions so we can use them
# don't rename 'x' or 'y' if they already exist
if 'y' not in data.dims:
# find a dimension that isn't 'x'
old_dim = [d for d in data.dims if d != 'x'][0]
data = data.rename({old_dim: 'y'})
if 'x' not in data.dims:
| python | {
"resource": ""
} |
q15944 | XRImage._create_alpha | train | def _create_alpha(self, data, fill_value=None):
"""Create an alpha band DataArray object.
If `fill_value` is provided and input data is an integer type
then it is used to determine invalid "null" pixels instead of
xarray's `isnull` and `notnull` methods.
The returned array is 1 where data is valid, 0 where invalid.
"""
not_alpha = [b for b in data.coords['bands'].values if b != 'A']
null_mask = data.sel(bands=not_alpha)
if np.issubdtype(data.dtype, np.integer) and fill_value is not None:
null_mask = null_mask != fill_value
else:
| python | {
"resource": ""
} |
q15945 | XRImage._add_alpha | train | def _add_alpha(self, data, alpha=None):
"""Create an alpha channel and concatenate it to the provided data.
If ``data`` is an integer type then the alpha band will be scaled
to use the smallest (min) value as fully transparent and the largest
(max) value as fully opaque. For float types the alpha band spans
0 to 1.
"""
null_mask = alpha if alpha is not None else self._create_alpha(data)
# if we are using integer data, then alpha needs to be min-int to max-int
# otherwise for floats we want | python | {
"resource": ""
} |
q15946 | XRImage._scale_to_dtype | train | def _scale_to_dtype(self, data, dtype):
"""Scale provided data to dtype range assuming a 0-1 range.
Float input data is assumed to be normalized to a 0 to 1 range.
Integer input data is not scaled, only clipped. A float output
type is not scaled since both outputs and inputs are assumed to
be in the 0-1 range already.
"""
if np.issubdtype(dtype, np.integer):
if np.issubdtype(data, np.integer):
# preserve integer data type
| python | {
"resource": ""
} |
q15947 | XRImage._from_p | train | def _from_p(self, mode):
"""Convert the image from P or PA to RGB or RGBA."""
self._check_modes(("P", "PA"))
if not self.palette:
raise RuntimeError("Can't convert palettized image, missing palette.")
pal = np.array(self.palette)
pal = da.from_array(pal, chunks=pal.shape)
if pal.shape[1] == 4:
# colormap's alpha overrides data alpha
mode = "RGBA"
alpha = None
elif self.mode.endswith("A"):
# add a new/fake 'bands' dimension to the end
alpha = self.data.sel(bands="A").data[..., None]
mode = mode + "A" if not mode.endswith("A") else mode
else:
alpha = None
flat_indexes = self.data.sel(bands='P').data.ravel().astype('int64')
dim_sizes = ((key, val) for key, | python | {
"resource": ""
} |
q15948 | XRImage._finalize | train | def _finalize(self, fill_value=None, dtype=np.uint8, keep_palette=False, cmap=None):
"""Wrapper around 'finalize' method for backwards compatibility."""
import warnings
| python | {
"resource": ""
} |
q15949 | XRImage.finalize | train | def finalize(self, fill_value=None, dtype=np.uint8, keep_palette=False, cmap=None):
"""Finalize the image to be written to an output file.
This adds an alpha band or fills data with a fill_value (if specified).
It also scales float data to the output range of the data type (0-255
for uint8, default). For integer input data this method assumes the
data is already scaled to the proper desired range. It will still fill
in invalid values and add an alpha band if needed. Integer input
data's fill value is determined by a special ``_FillValue`` attribute
in the ``DataArray`` ``.attrs`` dictionary.
"""
if keep_palette and not self.mode.startswith('P'):
keep_palette = False
if not keep_palette:
if self.mode == "P":
return self.convert("RGB").finalize(fill_value=fill_value, dtype=dtype,
keep_palette=keep_palette, cmap=cmap)
if self.mode == "PA":
return self.convert("RGBA").finalize(fill_value=fill_value, dtype=dtype,
keep_palette=keep_palette, cmap=cmap)
if np.issubdtype(dtype, np.floating) and fill_value is None:
| python | {
"resource": ""
} |
q15950 | XRImage.xrify_tuples | train | def xrify_tuples(self, tup):
"""Make xarray.DataArray from tuple."""
return xr.DataArray(tup,
| python | {
"resource": ""
} |
q15951 | XRImage.gamma | train | def gamma(self, gamma=1.0):
"""Apply gamma correction to the channels of the image.
If *gamma* is a
tuple, then it should have as many elements as the channels of the
image, and the gamma correction is applied elementwise. If *gamma* is a
number, the same gamma correction is applied on every channel, if there
are several channels in the image. The behaviour of :func:`gamma` is
undefined outside the normal [0,1] range of the channels.
"""
if isinstance(gamma, (list, tuple)):
gamma = | python | {
"resource": ""
} |
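The truncated body above is not shown, but gamma correction on a [0, 1] channel is conventionally `out = in ** (1 / gamma)`; the sketch below assumes that convention rather than asserting it is what this method does.

```python
import numpy as np

data = np.linspace(0.0, 1.0, 5)    # a channel already normalized to [0, 1]
gamma = 2.0
corrected = data ** (1.0 / gamma)  # assumed convention; brightens midtones for gamma > 1
print(corrected)                   # [0.  0.5  0.70710678  0.8660254  1.]
```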
q15952 | XRImage.stretch | train | def stretch(self, stretch="crude", **kwargs):
"""Apply stretching to the current image.
The value of *stretch* sets the type of stretching applied. The values
"histogram", "linear", "crude" (or "crude-stretch") perform respectively
histogram equalization, contrast stretching (with 5% cutoff on both
sides), and contrast stretching without cutoff. The value "logarithmic"
or "log" will do a logarithmic enhancement towards white. If a tuple or
a list of two values is given as input, then a contrast stretching is
performed with the values as cutoff. These values should be normalized
in the range [0.0,1.0].
"""
logger.debug("Applying stretch %s with parameters %s",
stretch, str(kwargs))
# FIXME: do not apply stretch to alpha channel
if isinstance(stretch, (tuple, list)):
if len(stretch) == 2:
self.stretch_linear(cutoffs=stretch)
else:
raise ValueError(
| python | {
"resource": ""
} |
q15953 | XRImage._compute_quantile | train | def _compute_quantile(data, dims, cutoffs):
"""Helper method for stretch_linear.
Dask delayed functions need to be non-internal functions (created
inside a function) to be serializable on a multi-process scheduler.
Quantile requires the data to be loaded since it not supported on
dask arrays yet. | python | {
"resource": ""
} |
q15954 | XRImage.stretch_linear | train | def stretch_linear(self, cutoffs=(0.005, 0.005)):
"""Stretch linearly the contrast of the current image.
Use *cutoffs* for left and right trimming.
"""
logger.debug("Perform a linear contrast stretch.")
logger.debug("Calculate the histogram quantiles: ")
logger.debug("Left and right quantiles: " +
str(cutoffs[0]) + " " + str(cutoffs[1]))
cutoff_type = np.float64
# numpy percentile (which quantile calls) returns 64-bit floats
# unless the value is a higher order float
if np.issubdtype(self.data.dtype, np.floating) and \
np.dtype(self.data.dtype).itemsize > 8:
cutoff_type = self.data.dtype
left, right = dask.delayed(self._compute_quantile, nout=2)(self.data.data, self.data.dims, cutoffs)
left_data = da.from_delayed(left,
shape=(self.data.sizes['bands'],),
dtype=cutoff_type)
left | python | {
"resource": ""
} |
q15955 | XRImage.crude_stretch | train | def crude_stretch(self, min_stretch=None, max_stretch=None):
"""Perform simple linear stretching.
This is done without any cutoff on the current image and normalizes to
the [0,1] range.
"""
if min_stretch is None:
non_band_dims = tuple(x for x in self.data.dims if x != 'bands')
min_stretch = self.data.min(dim=non_band_dims)
if max_stretch is None:
non_band_dims = tuple(x for x in self.data.dims if x != 'bands')
max_stretch = self.data.max(dim=non_band_dims)
if isinstance(min_stretch, (list, tuple)):
min_stretch = self.xrify_tuples(min_stretch)
if isinstance(max_stretch, (list, tuple)):
| python | {
"resource": ""
} |
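A minimal numpy sketch of the normalization this docstring describes, mapping [min, max] onto [0, 1] with no cutoff, with the per-band and xarray bookkeeping omitted:

```python
import numpy as np

data = np.array([[10.0, 20.0],
                 [30.0, 40.0]])
stretched = (data - data.min()) / (data.max() - data.min())
print(stretched)
# [[0.         0.33333333]
#  [0.66666667 1.        ]]
```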
q15956 | XRImage.stretch_hist_equalize | train | def stretch_hist_equalize(self, approximate=False):
"""Stretch the current image's colors through histogram equalization.
Args:
approximate (bool): Use a faster less-accurate percentile
calculation. At the time of writing the dask
version of `percentile` is not as accurate as
the numpy version. This will likely change in
the future. Current dask version 0.17.
"""
logger.info("Perform a histogram equalized contrast stretch.")
nwidth = 2048.
logger.debug("Make histogram bins having equal amount of data, " +
"using numpy percentile function:")
def _band_hist(band_data):
cdf = da.arange(0., 1., 1. / nwidth, chunks=nwidth)
if approximate:
# need a 1D array
flat_data = band_data.ravel()
# replace with nanpercentile in the future, if available
# dask < 0.17 returns all NaNs for this
bins = da.percentile(flat_data[da.notnull(flat_data)],
| python | {
"resource": ""
} |
q15957 | XRImage.stretch_weber_fechner | train | def stretch_weber_fechner(self, k, s0):
"""Stretch according to the Weber-Fechner law.
p = k.ln(S/S0)
p is perception, S is the stimulus, S0 | python | {
"resource": ""
} |
q15958 | XRImage.colorize | train | def colorize(self, colormap):
"""Colorize the current image using `colormap`.
.. note::
Works only on "L" or "LA" images.
"""
if self.mode not in ("L", "LA"):
raise ValueError("Image should be grayscale to colorize")
if self.mode == "LA":
alpha = self.data.sel(bands=['A'])
else:
alpha = None
l_data = self.data.sel(bands=['L'])
def _colorize(l_data, colormap):
# 'l_data' is (1, rows, cols)
# 'channels' will be a list of 3 (RGB) or 4 (RGBA) arrays
channels = colormap.colorize(l_data)
return np.concatenate(channels, axis=0)
new_data = l_data.data.map_blocks(_colorize, colormap,
chunks=(colormap.colors.shape[1],) + l_data.data.chunks[1:],
dtype=np.float64)
if colormap.colors.shape[1] == | python | {
"resource": ""
} |
q15959 | XRImage.palettize | train | def palettize(self, colormap):
"""Palettize the current image using `colormap`.
.. note::
Works only on "L" or "LA" images.
"""
if self.mode not in ("L", "LA"):
raise ValueError("Image should be grayscale to colorize")
l_data = self.data.sel(bands=['L'])
def _palettize(data):
| python | {
"resource": ""
} |
q15960 | _path_from_env | train | def _path_from_env(variable: str, default: Path) -> Path:
"""Read an environment variable as a path.
The environment variable with the specified name is read, and its
value returned as a path. If the environment variable is not set, or
set to the empty string, the default value is returned.
Parameters
----------
variable : str
Name of the environment variable.
default : Path
Default value.
Returns
-------
Path
| python | {
"resource": ""
} |
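One way the documented behaviour could be implemented, treating an unset or empty variable as missing and falling back to the default, shown here as a hedged sketch rather than the module's actual body:

```python
import os
from pathlib import Path

def path_from_env(variable: str, default: Path) -> Path:
    value = os.environ.get(variable)
    # An empty string counts as "not set", per the docstring above.
    return Path(value) if value else default

# Hypothetical usage: fall back to ~/.cache when XDG_CACHE_HOME is unset or empty.
cache_dir = path_from_env('XDG_CACHE_HOME', Path.home() / '.cache')
```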
q15961 | _paths_from_env | train | def _paths_from_env(variable: str, default: List[Path]) -> List[Path]:
"""Read an environment variable as a list of paths.
The environment variable with the specified name is read, and its
value split on colons and returned as a list of paths. If the
environment variable is not set, or set to the empty string, the
default value is returned.
Parameters
----------
variable : str
Name of the environment variable.
default : List[Path]
Default value.
Returns
-------
| python | {
"resource": ""
} |
q15962 | Basic.add | train | def add(self, user, password):
""" Adds a user with password """
if self.__contains__(user):
raise UserExists
| python | {
"resource": ""
} |
q15963 | Basic.pop | train | def pop(self, user):
""" Deletes a user """
if not | python | {
"resource": ""
} |
q15964 | Basic.change_password | train | def change_password(self, user, password):
""" Changes user password """
if not self.__contains__(user):
raise UserNotExists
| python | {
"resource": ""
} |
q15965 | Basic._encrypt_password | train | def _encrypt_password(self, password):
"""encrypt the password for given mode """
if self.encryption_mode.lower() == 'crypt':
return self._crypt_password(password)
elif self.encryption_mode.lower() == 'md5':
return self._md5_password(password)
elif self.encryption_mode.lower() | python | {
"resource": ""
} |
q15966 | Group.add_user | train | def add_user(self, user, group):
""" Adds user to a group """
if self.is_user_in(user, group):
raise | python | {
"resource": ""
} |
q15967 | Group.delete_user | train | def delete_user(self, user, group):
""" Deletes user from group """
if not self.__contains__(group):
raise GroupNotExists
| python | {
"resource": ""
} |
q15968 | Swauth._get_concealed_token | train | def _get_concealed_token(self, token):
"""Returns hashed token to be used as object name in Swift.
Tokens are stored in auth account but object names are visible in Swift
logs. Object names are hashed from token.
"""
| python | {
"resource": ""
} |
q15969 | Swauth.authorize | train | def authorize(self, req):
"""Returns None if the request is authorized to continue or a standard
WSGI response callable if not.
"""
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if not account or not account.startswith(self.reseller_prefix):
return self.denied_response(req)
user_groups = (req.remote_user or '').split(',')
if '.reseller_admin' in user_groups and \
account != self.reseller_prefix and \
account[len(self.reseller_prefix)] != '.':
req.environ['swift_owner'] = True
return None
if account in user_groups and \
(req.method not in ('DELETE', 'PUT') or container):
# If the user is admin for the account and is not trying to do an
# account DELETE or PUT...
req.environ['swift_owner'] = True
return None
if (req.environ.get('swift_sync_key') and
req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', | python | {
"resource": ""
} |
q15970 | Swauth.denied_response | train | def denied_response(self, req):
"""Returns a standard WSGI response callable with the status of 403 or 401
depending on whether the REMOTE_USER is set or not.
"""
if not hasattr(req, 'credentials_valid'):
req.credentials_valid = None
| python | {
"resource": ""
} |
q15971 | Swauth.is_user_reseller_admin | train | def is_user_reseller_admin(self, req, account, user):
"""Returns True if the user is a .reseller_admin.
:param account: account user is part of
:param user: the user
:returns: True if user .reseller_admin, False
if user is not a reseller_admin and | python | {
"resource": ""
} |
q15972 | Swauth.get_itoken | train | def get_itoken(self, env):
"""Returns the current internal token to use for the auth system's own
actions with other services. Each process will create its own
itoken and the token will be deleted and recreated based on the
token_life configuration value. The itoken information is stored in
memcache because the auth process that is asked by Swift to validate
the token may not be the same as the auth process that created the
token.
"""
if not self.itoken or self.itoken_expires < time() or \
env.get('HTTP_X_AUTH_NEW_TOKEN', 'false').lower() in \
TRUE_VALUES:
self.itoken = '%sitk%s' % (self.reseller_prefix, uuid4().hex)
| python | {
"resource": ""
} |
q15973 | Swauth.get_admin_detail | train | def get_admin_detail(self, req):
"""Returns the dict for the user specified as the admin in the request
with the addition of an `account` key set to the admin user's account.
:param req: The swob request to retrieve X-Auth-Admin-User and
X-Auth-Admin-Key from.
:returns: The dict for the admin user with the addition of the
`account` key.
"""
if ':' not in req.headers.get('x-auth-admin-user', ''):
return None
admin_account, admin_user | python | {
"resource": ""
} |
q15974 | Swauth.get_user_detail | train | def get_user_detail(self, req, account, user):
"""Returns the response body of a GET request for the specified user
The body is in JSON format and contains all user information.
:param req: The swob request
:param account: the account the user is a member of
:param user: the user
:returns: A JSON response with the user detail information, None
if the user doesn't exist
"""
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
| python | {
"resource": ""
} |
q15975 | Swauth.credentials_match | train | def credentials_match(self, user_detail, key):
"""Returns True if the key is valid for the user_detail.
It will use auth_encoder type the password was encoded with,
to check for a key match.
:param user_detail: The dict for the user.
:param key: The key to validate for the user.
:returns: True if the key is valid for the user, False if not.
"""
if user_detail:
creds = user_detail.get('auth')
try:
| python | {
"resource": ""
} |
q15976 | Swauth.is_user_changing_own_key | train | def is_user_changing_own_key(self, req, user):
"""Check if the user is changing his own key.
:param req: The swob.Request to check. This contains x-auth-admin-user
and x-auth-admin-key headers which are credentials of the
user sending the request.
:param user: User whose password is to be changed.
:returns: True if user is changing his own key, False if not.
"""
admin_detail = self.get_admin_detail(req)
if not admin_detail:
# The user does not exist
return False
# If user is not admin/reseller_admin and x-auth-user-admin or
# x-auth-user-reseller-admin headers are present in request, he may be
# attempting to escalate himself as admin/reseller_admin!
if '.admin' not in (g['name'] for g in admin_detail['groups']):
| python | {
"resource": ""
} |
q15977 | Swauth.is_super_admin | train | def is_super_admin(self, req):
"""Returns True if the admin specified in the request represents the
.super_admin.
:param req: The swob.Request to check.
| python | {
"resource": ""
} |
q15978 | Swauth.is_reseller_admin | train | def is_reseller_admin(self, req, admin_detail=None):
"""Returns True if the admin specified in the request represents a
.reseller_admin.
:param req: The swob.Request to check.
:param admin_detail: The previously retrieved dict from
:func:`get_admin_detail` or None for this function
to retrieve the admin_detail itself.
:returns: True if .reseller_admin.
"""
req.credentials_valid = False
if self.is_super_admin(req):
return True
| python | {
"resource": ""
} |
q15979 | validate_creds | train | def validate_creds(creds):
"""Parse and validate user credentials whether format is right
:param creds: User credentials
:returns: Auth_type class instance and parsed user credentials in dict
:raises ValueError: If credential format is wrong (eg: bad auth_type)
"""
try:
auth_type, auth_rest = creds.split(':', 1)
except ValueError:
raise ValueError("Missing ':' in %s" % creds)
authtypes = sys.modules[__name__]
auth_encoder | python | {
"resource": ""
} |
q15980 | Sha1.encode_w_salt | train | def encode_w_salt(self, salt, key):
"""Encodes a user key with salt into a particular format. The result of
this method will be used internally.
:param salt: Salt for hashing
:param key: User's secret key
:returns: A string representing user credentials
| python | {
"resource": ""
} |
q15981 | Sha1.encode | train | def encode(self, key):
"""Encodes a user key into a particular format. The result of this method
will be used by swauth for storing user credentials.
If salt is not manually set in conf file, a random salt will be
generated and used.
:param key: User's secret key
| python | {
"resource": ""
} |
q15982 | Sha1.match | train | def match(self, key, creds, salt, **kwargs):
"""Checks whether the user-provided key matches the user's credentials
:param key: User-supplied key
:param creds: User's stored credentials
:param salt: Salt for hashing
:param kwargs: Extra keyword args for compatibility reason with
| python | {
"resource": ""
} |
q15983 | Sha1.validate | train | def validate(self, auth_rest):
"""Validate user credentials whether format is right for Sha1
:param auth_rest: User credentials' part without auth_type
:return: Dict with a hash and a salt part of user credentials
:raises ValueError: If credentials' part doesn't contain delimiter
between a salt and a hash.
"""
try:
auth_salt, auth_hash = auth_rest.split('$')
except ValueError:
raise ValueError("Missing '$' in %s" % auth_rest)
| python | {
"resource": ""
} |
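validate() splits the credential remainder on '$' into a salt and a hash; the sketch below builds a matching string, assuming the hash is the SHA-1 hex digest of the salt concatenated with the key — the exact concatenation used by encode_w_salt is not shown above.

```python
import hashlib

salt, key = 'abc123', 's3cr3t'  # made-up values
digest = hashlib.sha1((salt + key).encode('utf-8')).hexdigest()  # assumed layout
auth_rest = '%s$%s' % (salt, digest)

auth_salt, auth_hash = auth_rest.split('$')  # the same split validate() performs
print(auth_salt, auth_hash)
```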
q15984 | ElecSlp.get_profile | train | def get_profile(self, ann_el_demand_per_sector):
""" Get the profiles for the given annual demand
Parameters
----------
ann_el_demand_per_sector : dictionary
Key: sector, value: annual value
Returns
------- | python | {
"resource": ""
} |
q15985 | HeatBuilding.weighted_temperature | train | def weighted_temperature(self, how='geometric_series'):
r"""
A new temperature vector is generated containing a multi-day
average temperature as needed in the load profile function.
Parameters
----------
how : string
string which type to return ("geometric_series" or "mean")
Notes
-----
Equation for the mathematical series of the average
temperature [1]_:
.. math::
T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+
0.125\cdot T_{D-3}}{1+0.5+0.25+0.125}
with :math:`T_D` = Average temperature on the present day
:math:`T_{D-i}` = Average temperature on the day - i
References
----------
.. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
BDEW | python | {
"resource": ""
} |
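The geometric-series average in this docstring can be reproduced directly with pandas; the daily temperatures below are made up for illustration.

```python
import pandas as pd

temperature = pd.Series([2.0, 0.5, -1.0, 3.5])  # hypothetical daily means
weights = [1.0, 0.5, 0.25, 0.125]               # present day and three preceding days

weighted = sum(temperature.shift(i) * w for i, w in enumerate(weights)) / sum(weights)
print(weighted)  # NaN for the first three days, 1.8 for the fourth
```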
q15986 | HeatBuilding.get_temperature_interval | train | def get_temperature_interval(self):
"""Appoints the corresponding temperature interval to each temperature
in the temperature vector.
"""
intervals = ({
-20: 1, -19: 1, -18: 1, -17: 1, -16: 1, -15: 1, -14: 2,
-13: 2, -12: 2, -11: 2, -10: 2, -9: 3, -8: 3, -7: 3, -6: 3, -5: 3,
-4: 4, -3: 4, -2: 4, -1: 4, 0: 4, 1: 5, 2: 5, 3: 5, 4: 5, 5: 5,
6: 6, 7: 6, 8: 6, 9: 6, 10: 6, 11: 7, 12: 7, 13: 7, 14: 7, 15: 7,
16: 8, 17: 8, 18: 8, 19: 8, 20: 8, 21: 9, 22: 9, 23: 9, 24: | python | {
"resource": ""
} |
q15987 | HeatBuilding.get_sf_values | train | def get_sf_values(self, filename='shlp_hour_factors.csv'):
""" Determine the h-values
Parameters
----------
filename : string
name of file where sigmoid factors are stored
"""
file = os.path.join(self.datapath, filename)
hour_factors = pd.read_csv(file, index_col=0)
hour_factors = hour_factors.query(
'building_class=={0} and shlp_type=="{1}"'.format(
self.building_class, self.shlp_type))
# Join the two DataFrames on the columns 'hour' and 'hour_of_the_day'
# or ['hour' 'weekday'] and ['hour_of_the_day', 'weekday'] if it is
# not a residential slp.
| python | {
"resource": ""
} |
q15988 | HeatBuilding.get_sigmoid_parameters | train | def get_sigmoid_parameters(self, filename='shlp_sigmoid_factors.csv'):
""" Retrieve the sigmoid parameters from csv-files
Parameters
----------
filename : string
name of file where sigmoid factors are stored
"""
file = os.path.join(self.datapath, filename)
sigmoid = pd.read_csv(file, index_col=0)
sigmoid = sigmoid.query(
'building_class=={0} and '.format(self.building_class) +
| python | {
"resource": ""
} |
q15989 | HeatBuilding.get_weekday_parameters | train | def get_weekday_parameters(self, filename='shlp_weekday_factors.csv'):
""" Retrieve the weekday parameter from csv-file
Parameters
----------
filename : string
name of file where sigmoid factors are stored
"""
file = os.path.join(self.datapath, filename)
f_df = pd.read_csv(file, index_col=0)
tmp_df = f_df.query('shlp_type=="{0}"'.format(self.shlp_type)).drop(
| python | {
"resource": ""
} |
q15990 | HeatBuilding.get_normalized_bdew_profile | train | def get_normalized_bdew_profile(self):
""" Calculation of the normalized hourly heat demand
"""
self.df['temperature'] = self.temperature.values
self.df['temperature_geo'] = self.weighted_temperature(
how='geometric_series')
sf = self.get_sf_values()
[a, b, c, d] = self.get_sigmoid_parameters()
| python | {
"resource": ""
} |
q15991 | rename | train | def rename(oldPath, newPath, **kwargs):
"""rename the file oldPath to newPath"""
import os | python | {
"resource": ""
} |
q15992 | chown | train | def chown(path, user=None, group=None):
"""change ownership of path"""
import os
import pwd
import grp
uid = pwd.getpwnam(user).pw_uid if user else -1
| python | {
"resource": ""
} |
q15993 | chmod | train | def chmod(path, mode):
"""change pernmissions of path"""
import os, stat
| python | {
"resource": ""
} |
q15994 | exists | train | def exists(path, **kwargs):
"""Check if file or directory exists"""
import os.path
| python | {
"resource": ""
} |
q15995 | get | train | def get(path):
"""Read an object from file"""
try:
import cPickle as pickle
except:
import pickle
| python | {
"resource": ""
} |
q15996 | put | train | def put(path, obj):
"""Write an object to file"""
try:
import cPickle as pickle
except:
import pickle
| python | {
"resource": ""
} |
q15997 | join | train | def join(*args, **kwargs):
"""Join parts of a path together"""
import os.path | python | {
"resource": ""
} |
q15998 | add_weekdays2df | train | def add_weekdays2df(time_df, holidays=None, holiday_is_sunday=False):
r"""Giving back a DataFrame containing weekdays and optionally holidays for
the given year.
Parameters
----------
time_df : pandas DataFrame
DataFrame to which the weekdays should be added
Optional Parameters
-------------------
holidays : array with information for every hour of the year, if holiday or
not (0: holiday, 1: no holiday)
holiday_is_sunday : boolean
If set to True, all holidays (0) will be set to sundays (7).
Returns
-------
pandas.DataFrame : DataFrame with weekdays
Notes
-----
Using Pandas > 0.16
"""
time_df['weekday'] | python | {
"resource": ""
} |
q15999 | IndustrialLoadProfile.simple_profile | train | def simple_profile(self, annual_demand, **kwargs):
"""
Create industrial load profile
Parameters
----------
annual_demand : float
Total demand.
Other Parameters
----------------
am : datetime.time
beginning of workday
pm : datetime.time
end of workday
week : list
list of weekdays
weekend : list
list of weekend days
profile_factors : dictionary
dictionary with scaling factors for night and day of weekdays and
weekend days
"""
# Day(am to pm), night (pm to am), week day (week),
# weekend day (weekend)
am = kwargs.get('am', settime(7, 00, 0))
pm = kwargs.get('pm', settime(23, 30, 0))
week = kwargs.get('week', [1, 2, 3, 4, 5])
weekend = kwargs.get('weekend', [0, 6, 7])
default_factors = {'week': {'day': 0.8, 'night': 0.6},
'weekend': {'day': 0.9, 'night': 0.7}}
profile_factors = kwargs.get('profile_factors', default_factors)
self.dataframe['ind'] = 0
self.dataframe['ind'].mask(
self.dataframe['weekday'].between_time(am, pm).isin(week),
profile_factors['week']['day'], True)
self.dataframe['ind'].mask(
| python | {
"resource": ""
} |