code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def raster_erosion(rasterfile):
"""Erode the raster image.
Find the min pixel's value in 8-neighborhood. Then change the compute
pixel's value into the min pixel's value.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray.
"""
if is_string(rasterfile):
origin_raster = RasterUtilClass.read_raster(str(rasterfile))
elif isinstance(rasterfile, Raster):
origin_raster = rasterfile.data
elif isinstance(rasterfile, numpy.ndarray):
origin_raster = rasterfile
else:
return "Your rasterfile has a wrong type. Type must be string or " \
"numpy.array or class Raster in pygeoc."
max_value_raster = origin_raster.max()
erosion_raster = numpy.zeros((origin_raster.shape[0], origin_raster.shape[1]))
# In order to compute the raster edges, we need to expand the original
# raster's rows and cols. We need to add the edges whose pixels' value is
# the max pixel's value in raster.
add_row = numpy.full((1, origin_raster.shape[1]), max_value_raster)
temp_origin_raster = numpy.vstack((numpy.vstack((add_row, origin_raster)), add_row))
add_col = numpy.full((origin_raster.shape[0] + 2, 1), max_value_raster)
expand_origin_raster = numpy.hstack((numpy.hstack((add_col, temp_origin_raster)), add_col))
# Erode the raster.
for i in range(origin_raster.shape[0]):
for j in range(origin_raster.shape[1]):
min_pixel_value = max_value_raster
# Find the min pixel value in the 8-neighborhood.
for k in range(3):
for l in range(3):
if expand_origin_raster[i + k, j + l] <= min_pixel_value:
min_pixel_value = expand_origin_raster[i + k, j + l]
# After this loop, we get the min pixel's value of the
# 8-neighborhood. Then we change the compute pixel's value into
# the min pixel's value.
erosion_raster[i, j] = min_pixel_value
# Return the result.
return erosion_raster | Erode the raster image.
Find the min pixel's value in 8-neighborhood. Then change the compute
pixel's value into the min pixel's value.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
        erosion_raster: raster image after erosion, type is numpy.ndarray. | Below is the instruction that describes the task:
### Input:
Erode the raster image.
Find the min pixel's value in 8-neighborhood. Then change the compute
pixel's value into the min pixel's value.
Args:
rasterfile: input original raster image, type can be filename(string,
like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.
Returns:
erosion_raster: raster image after erosion, type is numpy.ndarray.
### Response:
def raster_erosion(rasterfile):
    """Erode the raster image.

    For every cell, take the minimum value within its 3x3 (8-connected)
    neighborhood and write that minimum into the output cell.

    Args:
        rasterfile: input original raster image, type can be filename(string,
            like "test1.tif"), rasterfile(class Raster) or numpy.ndarray.

    Returns:
        erosion_raster: raster image after erosion, type is numpy.ndarray.
        On a bad input type, an error message string is returned instead
        (kept for backward compatibility with existing callers).
    """
    if is_string(rasterfile):
        # NOTE(review): read_raster presumably returns an object exposing
        # ndarray semantics (.max(), .shape, slicing) — confirm whether
        # ``.data`` should be taken here as in the Raster branch below.
        origin_raster = RasterUtilClass.read_raster(str(rasterfile))
    elif isinstance(rasterfile, Raster):
        origin_raster = rasterfile.data
    elif isinstance(rasterfile, numpy.ndarray):
        origin_raster = rasterfile
    else:
        return "Your rasterfile has a wrong type. Type must be string or " \
               "numpy.array or class Raster in pygeoc."
    max_value_raster = origin_raster.max()
    nrows = origin_raster.shape[0]
    ncols = origin_raster.shape[1]
    erosion_raster = numpy.zeros((nrows, ncols))
    # Pad the raster with a one-cell border filled with the maximum value so
    # that edge cells have a full 3x3 neighborhood (the border can never win
    # the min comparison except when it ties the true minimum).
    expand_origin_raster = numpy.full((nrows + 2, ncols + 2), max_value_raster)
    expand_origin_raster[1:nrows + 1, 1:ncols + 1] = origin_raster
    # Erode: each output cell is the minimum of its 3x3 window in the
    # padded raster (window (i:i+3, j:j+3) is centered on original (i, j)).
    for i in range(nrows):
        for j in range(ncols):
            erosion_raster[i, j] = expand_origin_raster[i:i + 3, j:j + 3].min()
    return erosion_raster
def render_q(q, qn, connection):
"""
Renders the Q object into SQL for the WHEN clause.
Uses as much as possible the Django ORM machinery for SQL generation,
handling table aliases, field quoting, parameter escaping etc.
:param q: Q object representing the filter condition
:param qn: db specific 'quote names' function that was passed into
SQLAggregate.as_sql method by Django
:param connection: Django db connection object that was passed into
SQLAggregate.as_sql method by Django
:returns: (SQL template str, params list) tuple
"""
joinstr = u' {} '.format(q.connector)
conditions = []
params = []
if DJANGO_MAJOR == 1 and DJANGO_MINOR == 7:
# in Django 1.7 WhereNode.as_sql expects `qn` to have a `compile`
# method (i.e not really expecting a quote names function any more
# they are expecting a django.db.models.sql.compiler.SQLCompiler)
try:
qn = qn.__self__
except AttributeError:
pass
for child in q.children:
if isinstance(child, Q):
# recurse
condition, child_params = render_q(child, qn, connection)
conditions.append(u'({})'.format(condition))
params.extend(child_params)
else:
try:
# Django 1.7
child, joins_used = child
except TypeError:
# Django 1.6
pass
# we expect child to be a WhereNode (see transform_q)
condition, child_params = child.as_sql(qn, connection)
params.extend(child_params)
conditions.append(condition)
rendered = u'({})'.format(joinstr.join(conditions))
if q.negated:
rendered = u'NOT {}'.format(rendered)
return rendered, params | Renders the Q object into SQL for the WHEN clause.
Uses as much as possible the Django ORM machinery for SQL generation,
handling table aliases, field quoting, parameter escaping etc.
:param q: Q object representing the filter condition
:param qn: db specific 'quote names' function that was passed into
SQLAggregate.as_sql method by Django
:param connection: Django db connection object that was passed into
SQLAggregate.as_sql method by Django
    :returns: (SQL template str, params list) tuple | Below is the instruction that describes the task:
### Input:
Renders the Q object into SQL for the WHEN clause.
Uses as much as possible the Django ORM machinery for SQL generation,
handling table aliases, field quoting, parameter escaping etc.
:param q: Q object representing the filter condition
:param qn: db specific 'quote names' function that was passed into
SQLAggregate.as_sql method by Django
:param connection: Django db connection object that was passed into
SQLAggregate.as_sql method by Django
:returns: (SQL template str, params list) tuple
### Response:
def render_q(q, qn, connection):
    """
    Renders the Q object into SQL for the WHEN clause.

    Uses as much as possible the Django ORM machinery for SQL generation,
    handling table aliases, field quoting, parameter escaping etc.

    :param q: Q object representing the filter condition
    :param qn: db specific 'quote names' function that was passed into
        SQLAggregate.as_sql method by Django
    :param connection: Django db connection object that was passed into
        SQLAggregate.as_sql method by Django
    :returns: (SQL template str, params list) tuple
    """
    if DJANGO_MAJOR == 1 and DJANGO_MINOR == 7:
        # Django 1.7's WhereNode.as_sql expects a compiler object (with a
        # `compile` method) instead of a plain quote-names function, so
        # recover the bound compiler when `qn` is a bound method.
        qn = getattr(qn, '__self__', qn)
    sql_parts = []
    sql_params = []
    for node in q.children:
        if isinstance(node, Q):
            # Nested Q object: render recursively and parenthesize.
            nested_sql, nested_params = render_q(node, qn, connection)
            sql_parts.append(u'({})'.format(nested_sql))
            sql_params.extend(nested_params)
            continue
        try:
            # Django 1.7 hands us a (node, joins_used) pair.
            node, _joins_used = node
        except TypeError:
            # Django 1.6 hands us the node directly.
            pass
        # node is expected to be a WhereNode (see transform_q)
        node_sql, node_params = node.as_sql(qn, connection)
        sql_parts.append(node_sql)
        sql_params.extend(node_params)
    joinstr = u' {} '.format(q.connector)
    rendered = u'({})'.format(joinstr.join(sql_parts))
    if q.negated:
        rendered = u'NOT {}'.format(rendered)
    return rendered, sql_params
def plot_metadata(
self,
rank="auto",
haxis="Label",
vaxis="simpson",
title=None,
xlabel=None,
ylabel=None,
return_chart=False,
plot_type="auto",
label=None,
):
"""Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'chao1', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
individuals are allergy to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
"""
if rank is None:
raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
if plot_type not in ("auto", "boxplot", "scatter"):
raise OneCodexException("Plot type must be one of: auto, boxplot, scatter")
# alpha diversity is only allowed on vertical axis--horizontal can be magically mapped
df, magic_fields = self._metadata_fetch([haxis, "Label"], label=label)
if vaxis in ("simpson", "chao1", "shannon"):
df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
magic_fields[vaxis] = vaxis
else:
# if it's not alpha diversity, vertical axis can also be magically mapped
vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
# we require the vertical axis to be numerical otherwise plots get weird
if (
pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]])
or pd.api.types.is_categorical_dtype(vert_df[vert_magic_fields[vaxis]])
or pd.api.types.is_object_dtype(vert_df[vert_magic_fields[vaxis]])
or not pd.api.types.is_numeric_dtype(vert_df[vert_magic_fields[vaxis]])
): # noqa
raise OneCodexException("Metadata field on vertical axis must be numerical")
df = pd.concat([df, vert_df], axis=1).dropna(subset=[vert_magic_fields[vaxis]])
magic_fields.update(vert_magic_fields)
# plots can look different depending on what the horizontal axis contains
if pd.api.types.is_datetime64_any_dtype(df[magic_fields[haxis]]):
category_type = "T"
if plot_type == "auto":
plot_type = "boxplot"
elif "date" in magic_fields[haxis].split("_"):
df.loc[:, magic_fields[haxis]] = df.loc[:, magic_fields[haxis]].apply(
pd.to_datetime, utc=True
)
category_type = "T"
if plot_type == "auto":
plot_type = "boxplot"
elif (
pd.api.types.is_bool_dtype(df[magic_fields[haxis]])
or pd.api.types.is_categorical_dtype(df[magic_fields[haxis]])
or pd.api.types.is_object_dtype(df[magic_fields[haxis]])
): # noqa
df = df.fillna({field: "N/A" for field in df.columns})
category_type = "N"
if plot_type == "auto":
# if data is categorical but there is only one value per sample, scatter plot instead
if len(df[magic_fields[haxis]].unique()) == len(df[magic_fields[haxis]]):
plot_type = "scatter"
else:
plot_type = "boxplot"
elif pd.api.types.is_numeric_dtype(df[magic_fields[haxis]]):
df = df.dropna(subset=[magic_fields[vaxis]])
category_type = "O"
if plot_type == "auto":
plot_type = "scatter"
else:
raise OneCodexException(
"Unplottable column type for horizontal axis ({})".format(haxis)
)
if xlabel is None:
xlabel = magic_fields[haxis]
if ylabel is None:
ylabel = magic_fields[vaxis]
if plot_type == "scatter":
df = df.reset_index()
alt_kwargs = dict(
x=alt.X(magic_fields[haxis], axis=alt.Axis(title=xlabel)),
y=alt.Y(magic_fields[vaxis], axis=alt.Axis(title=ylabel)),
tooltip=["Label", "{}:Q".format(vaxis)],
href="url:N",
url="https://app.onecodex.com/classification/" + alt.datum.classification_id,
)
chart = (
alt.Chart(df)
.transform_calculate(url=alt_kwargs.pop("url"))
.mark_circle()
.encode(**alt_kwargs)
)
if title:
chart = chart.properties(title=title)
elif plot_type == "boxplot":
chart = boxplot(
df,
magic_fields[haxis],
magic_fields[vaxis],
category_type=category_type,
title=title,
xlabel=xlabel,
ylabel=ylabel,
)
if return_chart:
return chart
else:
chart.interactive().display() | Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'chao1', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
individuals are allergy to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides') | Below is the the instruction that describes the task:
### Input:
Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
haxis : `string`, optional
The metadata field (or tuple containing multiple categorical fields) to be plotted on
the horizontal axis.
vaxis : `string`, optional
Data to be plotted on the vertical axis. Can be any one of the following:
- A metadata field: the name of a metadata field containing numerical data
- {'simpson', 'chao1', 'shannon'}: an alpha diversity statistic to calculate for each sample
- A taxon name: the name of a taxon in the analysis
- A taxon ID: the ID of a taxon in the analysis
title : `string`, optional
Text label at the top of the plot.
xlabel : `string`, optional
Text label along the horizontal axis.
ylabel : `string`, optional
Text label along the vertical axis.
plot_type : {'auto', 'boxplot', 'scatter'}
By default, will determine plot type automatically based on the data. Otherwise, specify
one of 'boxplot' or 'scatter' to set the type of plot manually.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
Examples
--------
Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
individuals are allergy to dogs, cats, both, or neither.
>>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
### Response:
def plot_metadata(
    self,
    rank="auto",
    haxis="Label",
    vaxis="simpson",
    title=None,
    xlabel=None,
    ylabel=None,
    return_chart=False,
    plot_type="auto",
    label=None,
):
    """Plot an arbitrary metadata field versus an arbitrary quantity as a boxplot or scatter plot.

    Parameters
    ----------
    rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified level.
    haxis : `string`, optional
        The metadata field (or tuple containing multiple categorical fields) to be plotted on
        the horizontal axis.
    vaxis : `string`, optional
        Data to be plotted on the vertical axis. Can be any one of the following:
        - A metadata field: the name of a metadata field containing numerical data
        - {'simpson', 'chao1', 'shannon'}: an alpha diversity statistic to calculate for each sample
        - A taxon name: the name of a taxon in the analysis
        - A taxon ID: the ID of a taxon in the analysis
    title : `string`, optional
        Text label at the top of the plot.
    xlabel : `string`, optional
        Text label along the horizontal axis.
    ylabel : `string`, optional
        Text label along the vertical axis.
    return_chart : `bool`, optional
        When True, return the chart object instead of displaying it inline.
    plot_type : {'auto', 'boxplot', 'scatter'}
        By default, will determine plot type automatically based on the data. Otherwise, specify
        one of 'boxplot' or 'scatter' to set the type of plot manually.
    label : `string` or `callable`, optional
        A metadata field (or function) used to label each analysis. If passing a function, a
        dict containing the metadata for each analysis is passed as the first and only
        positional argument. The callable function must return a string.

    Returns
    -------
    The altair chart object when `return_chart` is True; otherwise the chart
    is displayed and nothing is returned.

    Examples
    --------
    Generate a boxplot of the abundance of Bacteroides (genus) of samples grouped by whether the
    individuals are allergy to dogs, cats, both, or neither.
    >>> plot_metadata(haxis=('allergy_dogs', 'allergy_cats'), vaxis='Bacteroides')
    """
    if rank is None:
        raise OneCodexException("Please specify a rank or 'auto' to choose automatically")
    if plot_type not in ("auto", "boxplot", "scatter"):
        raise OneCodexException("Plot type must be one of: auto, boxplot, scatter")
    # alpha diversity is only allowed on vertical axis--horizontal can be magically mapped
    df, magic_fields = self._metadata_fetch([haxis, "Label"], label=label)
    if vaxis in ("simpson", "chao1", "shannon"):
        df.loc[:, vaxis] = self.alpha_diversity(vaxis, rank=rank)
        magic_fields[vaxis] = vaxis
    else:
        # if it's not alpha diversity, vertical axis can also be magically mapped
        vert_df, vert_magic_fields = self._metadata_fetch([vaxis])
        # we require the vertical axis to be numerical otherwise plots get weird
        if (
            pd.api.types.is_bool_dtype(vert_df[vert_magic_fields[vaxis]])
            or pd.api.types.is_categorical_dtype(vert_df[vert_magic_fields[vaxis]])
            or pd.api.types.is_object_dtype(vert_df[vert_magic_fields[vaxis]])
            or not pd.api.types.is_numeric_dtype(vert_df[vert_magic_fields[vaxis]])
        ): # noqa
            raise OneCodexException("Metadata field on vertical axis must be numerical")
        df = pd.concat([df, vert_df], axis=1).dropna(subset=[vert_magic_fields[vaxis]])
        magic_fields.update(vert_magic_fields)
    # plots can look different depending on what the horizontal axis contains
    # category_type is later handed to boxplot(); the values look like
    # Vega-Lite type codes ("T" temporal, "N" nominal, "O" ordinal) —
    # NOTE(review): confirm against boxplot()'s expectations.
    if pd.api.types.is_datetime64_any_dtype(df[magic_fields[haxis]]):
        category_type = "T"
        if plot_type == "auto":
            plot_type = "boxplot"
    elif "date" in magic_fields[haxis].split("_"):
        df.loc[:, magic_fields[haxis]] = df.loc[:, magic_fields[haxis]].apply(
            pd.to_datetime, utc=True
        )
        category_type = "T"
        if plot_type == "auto":
            plot_type = "boxplot"
    elif (
        pd.api.types.is_bool_dtype(df[magic_fields[haxis]])
        or pd.api.types.is_categorical_dtype(df[magic_fields[haxis]])
        or pd.api.types.is_object_dtype(df[magic_fields[haxis]])
    ): # noqa
        df = df.fillna({field: "N/A" for field in df.columns})
        category_type = "N"
        if plot_type == "auto":
            # if data is categorical but there is only one value per sample, scatter plot instead
            if len(df[magic_fields[haxis]].unique()) == len(df[magic_fields[haxis]]):
                plot_type = "scatter"
            else:
                plot_type = "boxplot"
    elif pd.api.types.is_numeric_dtype(df[magic_fields[haxis]]):
        df = df.dropna(subset=[magic_fields[vaxis]])
        category_type = "O"
        if plot_type == "auto":
            plot_type = "scatter"
    else:
        raise OneCodexException(
            "Unplottable column type for horizontal axis ({})".format(haxis)
        )
    if xlabel is None:
        xlabel = magic_fields[haxis]
    if ylabel is None:
        ylabel = magic_fields[vaxis]
    if plot_type == "scatter":
        df = df.reset_index()
        alt_kwargs = dict(
            x=alt.X(magic_fields[haxis], axis=alt.Axis(title=xlabel)),
            y=alt.Y(magic_fields[vaxis], axis=alt.Axis(title=ylabel)),
            tooltip=["Label", "{}:Q".format(vaxis)],
            href="url:N",
            url="https://app.onecodex.com/classification/" + alt.datum.classification_id,
        )
        chart = (
            alt.Chart(df)
            .transform_calculate(url=alt_kwargs.pop("url"))
            .mark_circle()
            .encode(**alt_kwargs)
        )
        if title:
            chart = chart.properties(title=title)
    elif plot_type == "boxplot":
        chart = boxplot(
            df,
            magic_fields[haxis],
            magic_fields[vaxis],
            category_type=category_type,
            title=title,
            xlabel=xlabel,
            ylabel=ylabel,
        )
    # Either hand the chart back to the caller or render it inline.
    if return_chart:
        return chart
    else:
        chart.interactive().display() |
def deliverTextResults(self):
"""
Deliver the results in a pretty text output.
@return: Pretty text output!
"""
output = "=======================\ntxctools Hotspot Report\n"\
"=======================\n\n"
fileResults = sorted(self.fileCounts.items(),
key=lambda x: x[1]["warning_count"], reverse=True)
output += "Warnings per File\n=================\n"
count = 0
for item in fileResults:
count += 1
output += "#%s - %s - %s\n" % (count, item[0],
item[1]["warning_count"])
output += "\nWarnings Breakdown\n==================\n"
count = 0
warningCount = 0
warningResults = sorted(self.warningCounts.items(),
key=lambda x: x[1]["count"], reverse=True)
for item in warningResults:
warningCount += item[1]["count"]
for warning, winfo in warningResults:
count += 1
output += "#%s - %s - %s (%s%%) - %s\n" % (count, warning,
winfo["count"], int(winfo["count"] / warningCount * 100),
tools.cleanupMessage(warning, winfo))
return output | Deliver the results in a pretty text output.
        @return: Pretty text output! | Below is the instruction that describes the task:
### Input:
Deliver the results in a pretty text output.
@return: Pretty text output!
### Response:
def deliverTextResults(self):
        """
        Deliver the results in a pretty text output.

        Builds a report with two sections: files ranked by warning count,
        then warning types ranked by occurrence (with percentages).

        @return: Pretty text output!
        """
        output = "=======================\ntxctools Hotspot Report\n"\
                 "=======================\n\n"
        fileResults = sorted(self.fileCounts.items(),
            key=lambda x: x[1]["warning_count"], reverse=True)
        output += "Warnings per File\n=================\n"
        for count, item in enumerate(fileResults, start=1):
            output += "#%s - %s - %s\n" % (count, item[0],
                item[1]["warning_count"])
        output += "\nWarnings Breakdown\n==================\n"
        warningResults = sorted(self.warningCounts.items(),
            key=lambda x: x[1]["count"], reverse=True)
        # Total occurrences across all warning types, used for percentages.
        warningCount = sum(winfo["count"] for _, winfo in warningResults)
        for count, (warning, winfo) in enumerate(warningResults, start=1):
            # Multiply before dividing: the original int(c / total * 100)
            # truncates to 0%% under Python 2 integer division. Also guard
            # against a zero total (all counts zero) to avoid ZeroDivisionError.
            if warningCount:
                percent = winfo["count"] * 100 // warningCount
            else:
                percent = 0
            output += "#%s - %s - %s (%s%%) - %s\n" % (count, warning,
                winfo["count"], percent,
                tools.cleanupMessage(warning, winfo))
        return output
def get_logical_drives(self):
"""Get all the RAID logical drives in the Server.
This method returns all the RAID logical drives on the server
by examining all the controllers.
:returns: a list of LogicalDrive objects.
"""
logical_drives = []
for controller in self.controllers:
for array in controller.raid_arrays:
for logical_drive in array.logical_drives:
logical_drives.append(logical_drive)
return logical_drives | Get all the RAID logical drives in the Server.
This method returns all the RAID logical drives on the server
by examining all the controllers.
    :returns: a list of LogicalDrive objects. | Below is the instruction that describes the task:
### Input:
Get all the RAID logical drives in the Server.
This method returns all the RAID logical drives on the server
by examining all the controllers.
:returns: a list of LogicalDrive objects.
### Response:
def get_logical_drives(self):
        """Get all the RAID logical drives in the Server.

        Walks every controller and every RAID array on it, collecting the
        logical drives into one flat list.

        :returns: a list of LogicalDrive objects.
        """
        return [logical_drive
                for controller in self.controllers
                for array in controller.raid_arrays
                for logical_drive in array.logical_drives]
def delete(self, user):
"""Delete a sub-resource"""
if user:
can_delete = yield self.can_delete(user)
else:
can_delete = False
if not can_delete:
raise exceptions.Unauthorized('User may not delete the resource')
try:
parent = yield self.get_parent()
except couch.NotFound:
msg = '{}_id {} not found'.format(
self.parent_resource.resource_type,
self.parent_id)
raise exceptions.ValidationError(msg)
        yield parent.delete_subresource(self) | Delete a sub-resource | Below is the instruction that describes the task:
### Input:
Delete a sub-resource
### Response:
def delete(self, user):
        """Delete a sub-resource"""
        # Anonymous callers can never delete; otherwise ask the permission
        # check (a coroutine) whether this user may.
        can_delete = (yield self.can_delete(user)) if user else False
        if not can_delete:
            raise exceptions.Unauthorized('User may not delete the resource')
        try:
            parent = yield self.get_parent()
        except couch.NotFound:
            # Surface a missing parent as a validation problem for the caller.
            raise exceptions.ValidationError('{}_id {} not found'.format(
                self.parent_resource.resource_type,
                self.parent_id))
        yield parent.delete_subresource(self)
def _canon_decode_tag(self, value, mn_tags):
"""
Decode Canon MakerNote tag based on offset within tag.
See http://www.burren.cx/david/canon.html by David Burren
"""
for i in range(1, len(value)):
tag = mn_tags.get(i, ('Unknown', ))
name = tag[0]
if len(tag) > 1:
val = tag[1].get(value[i], 'Unknown')
else:
val = value[i]
try:
logger.debug(" %s %s %s", i, name, hex(value[i]))
except TypeError:
logger.debug(" %s %s %s", i, name, value[i])
# it's not a real IFD Tag but we fake one to make everybody
# happy. this will have a "proprietary" type
self.tags['MakerNote ' + name] = IfdTag(str(val), None, 0, None,
None, None) | Decode Canon MakerNote tag based on offset within tag.
See http://www.burren.cx/david/canon.html by David Burren | Below is the the instruction that describes the task:
### Input:
Decode Canon MakerNote tag based on offset within tag.
See http://www.burren.cx/david/canon.html by David Burren
### Response:
def _canon_decode_tag(self, value, mn_tags):
        """
        Decode Canon MakerNote tag based on offset within tag.

        See http://www.burren.cx/david/canon.html by David Burren
        """
        # Offsets within the tag start at 1; entry 0 is skipped.
        for offset in range(1, len(value)):
            entry = mn_tags.get(offset, ('Unknown', ))
            name = entry[0]
            # A second element, when present, maps raw values to labels.
            if len(entry) > 1:
                val = entry[1].get(value[offset], 'Unknown')
            else:
                val = value[offset]
            try:
                logger.debug(" %s %s %s", offset, name, hex(value[offset]))
            except TypeError:
                # Non-integer values cannot be rendered as hex.
                logger.debug(" %s %s %s", offset, name, value[offset])
            # it's not a real IFD Tag but we fake one to make everybody
            # happy. this will have a "proprietary" type
            self.tags['MakerNote ' + name] = IfdTag(str(val), None, 0, None,
                                                    None, None)
def make_param_dict_from_file(self,path_to_params):
"""
make param dict from a file on disk
"""
# then we were given a path to a parameter file
param_list = list(csv.reader(open(path_to_params,"rb")))
# delete empty elements (if any)
param_file = [x for x in param_list if x != []]
# make dict of [wavenames] = raw_params
name_list = []
param_list = []
# get header names for each param (names of param_list columns)
param_colnames = param_file[0][1:] # 0th element is "Name" or "Wavename"
# start from 1. (row 0 is the header)
for i in np.arange(1, len(param_file)):
name_list.append(param_file[i][0])
param_list.append(param_file[i][1:])
# remove ' ' blank spaces from param_list
param_list = [[x.strip() for x in y] for y in param_list]
param_dict = {}
# i loops through param_colnames, j loops thru param values per wave
for i in np.arange(0, len(param_colnames)):
param_dict[param_colnames[i]] = []
for j in np.arange(0,len(name_list)):
param_dict[param_colnames[i]].append(param_list[j][i])
# now we have param_dict, and name_list
self._param_dict = param_dict
        self._row_names = name_list | make param dict from a file on disk | Below is the instruction that describes the task:
### Input:
make param dict from a file on disk
### Response:
def make_param_dict_from_file(self,path_to_params):
        """
        Make param dict from a parameter file on disk.

        The file is CSV: a header row ("Name"/"Wavename" followed by one
        column per parameter), then one row per wave. Sets:

        * ``self._param_dict``: {param column name: [value per wave, ...]}
        * ``self._row_names``: wave names (first column of each data row)
        """
        # Use a context manager so the handle is closed (the original leaked
        # it), and open in text mode: the csv module on Python 3 cannot read
        # a binary-mode file object.
        with open(path_to_params, "r") as param_fh:
            param_list = list(csv.reader(param_fh))
        # delete empty elements (if any)
        param_file = [row for row in param_list if row != []]
        # get header names for each param (0th element is "Name"/"Wavename")
        param_colnames = param_file[0][1:]
        # first column of every data row is the wave name
        name_list = [row[0] for row in param_file[1:]]
        # remaining columns are the raw values; strip surrounding blanks
        param_list = [[cell.strip() for cell in row[1:]] for row in param_file[1:]]
        # param_dict[colname] = values of that column, one per wave
        param_dict = {}
        for col_idx, colname in enumerate(param_colnames):
            param_dict[colname] = [row[col_idx] for row in param_list]
        self._param_dict = param_dict
        self._row_names = name_list
def serialize(self, user=None):
"""
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
"""
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
} | Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
        Dict. JSON serialization ready dictionary object | Below is the instruction that describes the task:
### Input:
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
### Response:
def serialize(self, user=None):
"""
Serializes message for given user.
Note:
Should be called before first save(). Otherwise "is_update" will get wrong value.
Args:
user: User object
Returns:
Dict. JSON serialization ready dictionary object
"""
return {
'content': self.body,
'type': self.typ,
'updated_at': self.updated_at,
'timestamp': self.updated_at,
'is_update': not hasattr(self, 'unsaved'),
'attachments': [attachment.serialize() for attachment in self.attachment_set],
'title': self.msg_title,
'url': self.url,
'sender_name': self.sender.full_name,
'sender_key': self.sender.key,
'channel_key': self.channel.key,
'cmd': 'message',
'avatar_url': self.sender.avatar,
'key': self.key,
} |
def notify_contact(mail, owner, graas_url, repos, first_repo=False):
""" Send an email to the contact with the details to access
the Kibana dashboard """
footer = """
--
Bitergia Cauldron Team
http://bitergia.com
"""
twitter_txt = "Check Cauldron.io dashboard for %s at %s/dashboards/%s" % (owner, graas_url, owner)
twitter_url = "https://twitter.com/intent/tweet?text=" + quote_plus(twitter_txt)
twitter_url += "&via=bitergia"
if first_repo:
logging.info("Sending first email to %s" % (mail))
subject = "First repository for %s already in the Cauldron" % (owner)
else:
logging.info("Sending last email to %s" % (mail))
subject = "Your Cauldron %s dashboard is ready!" % (owner)
if first_repo:
# body = "%s/dashboards/%s\n\n" % (graas_url, owner)
# body += "First repository analized: %s\n" % (repos[0]['html_url'])
body = """
First repository has been analyzed and it's already in the Cauldron. Be patient, we have just started, step by step.
We will notify you when everything is ready.
Meanwhile, check latest dashboards in %s
Thanks,
%s
""" % (graas_url, footer)
else:
body = """
Check it at: %s/dashboards/%s
Play with it, and send us feedback:
https://github.com/Bitergia/cauldron.io/issues/new
Share it on Twitter:
%s
Thank you very much,
%s
""" % (graas_url, owner, twitter_url, footer)
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = 'info@bitergia.com'
msg['To'] = mail
try:
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
except ConnectionRefusedError:
logging.error("Can not notify user. Can not connect to email server.") | Send an email to the contact with the details to access
the Kibana dashboard | Below is the the instruction that describes the task:
### Input:
Send an email to the contact with the details to access
the Kibana dashboard
### Response:
def notify_contact(mail, owner, graas_url, repos, first_repo=False):
""" Send an email to the contact with the details to access
the Kibana dashboard """
footer = """
--
Bitergia Cauldron Team
http://bitergia.com
"""
twitter_txt = "Check Cauldron.io dashboard for %s at %s/dashboards/%s" % (owner, graas_url, owner)
twitter_url = "https://twitter.com/intent/tweet?text=" + quote_plus(twitter_txt)
twitter_url += "&via=bitergia"
if first_repo:
logging.info("Sending first email to %s" % (mail))
subject = "First repository for %s already in the Cauldron" % (owner)
else:
logging.info("Sending last email to %s" % (mail))
subject = "Your Cauldron %s dashboard is ready!" % (owner)
if first_repo:
# body = "%s/dashboards/%s\n\n" % (graas_url, owner)
# body += "First repository analized: %s\n" % (repos[0]['html_url'])
body = """
First repository has been analyzed and it's already in the Cauldron. Be patient, we have just started, step by step.
We will notify you when everything is ready.
Meanwhile, check latest dashboards in %s
Thanks,
%s
""" % (graas_url, footer)
else:
body = """
Check it at: %s/dashboards/%s
Play with it, and send us feedback:
https://github.com/Bitergia/cauldron.io/issues/new
Share it on Twitter:
%s
Thank you very much,
%s
""" % (graas_url, owner, twitter_url, footer)
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = 'info@bitergia.com'
msg['To'] = mail
try:
s = smtplib.SMTP('localhost')
s.send_message(msg)
s.quit()
except ConnectionRefusedError:
logging.error("Can not notify user. Can not connect to email server.") |
def real(self):
"""Real part"""
return self.__class__.create(self.term.real, *self.ranges) | Real part | Below is the the instruction that describes the task:
### Input:
Real part
### Response:
def real(self):
"""Real part"""
return self.__class__.create(self.term.real, *self.ranges) |
def handle(self, client, subhooks=()):
"""Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
"""
new_data = self.fetch(client)
# Holds the list of updated fields.
updated = {}
if not subhooks:
# We always want to compare to previous values.
subhooks = [self.name]
for subhook in subhooks:
new_key = self.extract_key(new_data, subhook)
if new_key != self.previous_keys.get(subhook):
updated[subhook] = new_key
if updated:
logger.debug("Hook %s: data changed from %r to %r", self.name, self.previous_keys, updated)
self.previous_keys.update(updated)
return (True, new_data)
return (False, None) | Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value. | Below is the the instruction that describes the task:
### Input:
Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
### Response:
def handle(self, client, subhooks=()):
"""Handle a new update.
Fetches new data from the client, then compares it to the previous
lookup.
Returns:
(bool, new_data): whether changes occurred, and the new value.
"""
new_data = self.fetch(client)
# Holds the list of updated fields.
updated = {}
if not subhooks:
# We always want to compare to previous values.
subhooks = [self.name]
for subhook in subhooks:
new_key = self.extract_key(new_data, subhook)
if new_key != self.previous_keys.get(subhook):
updated[subhook] = new_key
if updated:
logger.debug("Hook %s: data changed from %r to %r", self.name, self.previous_keys, updated)
self.previous_keys.update(updated)
return (True, new_data)
return (False, None) |
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
"""
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
"""
if len(x.shape) is not 1:
raise ValueError("x has to be 1 dimentional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32) | Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f | Below is the the instruction that describes the task:
### Input:
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
### Response:
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
"""
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
"""
if len(x.shape) is not 1:
raise ValueError("x has to be 1 dimentional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32) |
def draw_pdf(buffer, invoice):
""" Draws the invoice """
canvas = Canvas(buffer, pagesize=A4)
canvas.translate(0, 29.7 * cm)
canvas.setFont('Helvetica', 10)
canvas.saveState()
header_func(canvas)
canvas.restoreState()
canvas.saveState()
footer_func(canvas)
canvas.restoreState()
canvas.saveState()
address_func(canvas)
canvas.restoreState()
# Client address
textobject = canvas.beginText(1.5 * cm, -2.5 * cm)
try:
if invoice.address.invoice_contact_name:
textobject.textLine(invoice.address.invoice_contact_name)
textobject.textLine(invoice.address.invoice_address_one)
if invoice.address.invoice_address_two:
textobject.textLine(invoice.address.invoice_address_two)
textobject.textLine(invoice.address.invoice_town)
if invoice.address.invoice_county:
textobject.textLine(invoice.address.invoice_county)
textobject.textLine(invoice.address.invoice_postcode)
textobject.textLine(invoice.address.country.invoice_name)
except:
pass
canvas.drawText(textobject)
# Info
textobject = canvas.beginText(1.5 * cm, -6.75 * cm)
textobject.textLine(u'Invoice ID: %s' % invoice.invoice_id)
textobject.textLine(u'Invoice Date: %s' % invoice.invoice_date.strftime(
'%d %b %Y'))
canvas.drawText(textobject)
# Items
data = [[u'Quantity', u'Description', u'Amount', u'Total'], ]
for item in invoice.items.all():
data.append([
item.quantity,
item.description,
format_currency(item.unit_price, invoice.currency),
format_currency(item.total(), invoice.currency)
])
data.append([u'', u'', u'Total:', format_currency(invoice.total(),
invoice.currency)])
table = Table(data, colWidths=[2 * cm, 11 * cm, 3 * cm, 3 * cm])
table.setStyle([
('FONT', (0, 0), (-1, -1), 'Helvetica'),
('FONTSIZE', (0, 0), (-1, -1), 10),
('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),
('GRID', (0, 0), (-1, -2), 1, (0.7, 0.7, 0.7)),
('GRID', (-2, -1), (-1, -1), 1, (0.7, 0.7, 0.7)),
('ALIGN', (-2, 0), (-1, -1), 'RIGHT'),
('BACKGROUND', (0, 0), (-1, 0), (0.8, 0.8, 0.8)),
])
tw, th, = table.wrapOn(canvas, 15 * cm, 19 * cm)
table.drawOn(canvas, 1 * cm, -8 * cm - th)
canvas.showPage()
canvas.save()
return canvas | Draws the invoice | Below is the the instruction that describes the task:
### Input:
Draws the invoice
### Response:
def draw_pdf(buffer, invoice):
""" Draws the invoice """
canvas = Canvas(buffer, pagesize=A4)
canvas.translate(0, 29.7 * cm)
canvas.setFont('Helvetica', 10)
canvas.saveState()
header_func(canvas)
canvas.restoreState()
canvas.saveState()
footer_func(canvas)
canvas.restoreState()
canvas.saveState()
address_func(canvas)
canvas.restoreState()
# Client address
textobject = canvas.beginText(1.5 * cm, -2.5 * cm)
try:
if invoice.address.invoice_contact_name:
textobject.textLine(invoice.address.invoice_contact_name)
textobject.textLine(invoice.address.invoice_address_one)
if invoice.address.invoice_address_two:
textobject.textLine(invoice.address.invoice_address_two)
textobject.textLine(invoice.address.invoice_town)
if invoice.address.invoice_county:
textobject.textLine(invoice.address.invoice_county)
textobject.textLine(invoice.address.invoice_postcode)
textobject.textLine(invoice.address.country.invoice_name)
except:
pass
canvas.drawText(textobject)
# Info
textobject = canvas.beginText(1.5 * cm, -6.75 * cm)
textobject.textLine(u'Invoice ID: %s' % invoice.invoice_id)
textobject.textLine(u'Invoice Date: %s' % invoice.invoice_date.strftime(
'%d %b %Y'))
canvas.drawText(textobject)
# Items
data = [[u'Quantity', u'Description', u'Amount', u'Total'], ]
for item in invoice.items.all():
data.append([
item.quantity,
item.description,
format_currency(item.unit_price, invoice.currency),
format_currency(item.total(), invoice.currency)
])
data.append([u'', u'', u'Total:', format_currency(invoice.total(),
invoice.currency)])
table = Table(data, colWidths=[2 * cm, 11 * cm, 3 * cm, 3 * cm])
table.setStyle([
('FONT', (0, 0), (-1, -1), 'Helvetica'),
('FONTSIZE', (0, 0), (-1, -1), 10),
('TEXTCOLOR', (0, 0), (-1, -1), (0.2, 0.2, 0.2)),
('GRID', (0, 0), (-1, -2), 1, (0.7, 0.7, 0.7)),
('GRID', (-2, -1), (-1, -1), 1, (0.7, 0.7, 0.7)),
('ALIGN', (-2, 0), (-1, -1), 'RIGHT'),
('BACKGROUND', (0, 0), (-1, 0), (0.8, 0.8, 0.8)),
])
tw, th, = table.wrapOn(canvas, 15 * cm, 19 * cm)
table.drawOn(canvas, 1 * cm, -8 * cm - th)
canvas.showPage()
canvas.save()
return canvas |
def pop():
"""Remove instance from instance list"""
pid = os.getpid()
thread = threading.current_thread()
Wdb._instances.pop((pid, thread)) | Remove instance from instance list | Below is the the instruction that describes the task:
### Input:
Remove instance from instance list
### Response:
def pop():
"""Remove instance from instance list"""
pid = os.getpid()
thread = threading.current_thread()
Wdb._instances.pop((pid, thread)) |
def from_api_repr(cls, resource):
"""Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
"""
from google.cloud.bigquery.dataset import DatasetReference
project = resource["projectId"]
dataset_id = resource["datasetId"]
table_id = resource["tableId"]
return cls(DatasetReference(project, dataset_id), table_id) | Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``. | Below is the the instruction that describes the task:
### Input:
Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
### Response:
def from_api_repr(cls, resource):
"""Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
"""
from google.cloud.bigquery.dataset import DatasetReference
project = resource["projectId"]
dataset_id = resource["datasetId"]
table_id = resource["tableId"]
return cls(DatasetReference(project, dataset_id), table_id) |
def _send_command_wrapper(self, cmd):
"""
Send command to the remote device with a caching feature to avoid sending the same command
twice based on the SSH_MAPPER_BASE dict cmd key.
Parameters
----------
cmd : str
The command to send to the remote device after checking cache.
Returns
-------
response : str
The response from the remote device.
"""
cached_results = self._results_cache.get(cmd)
if not cached_results:
response = self._send_command(cmd)
self._results_cache[cmd] = response
return response
else:
return cached_results | Send command to the remote device with a caching feature to avoid sending the same command
twice based on the SSH_MAPPER_BASE dict cmd key.
Parameters
----------
cmd : str
The command to send to the remote device after checking cache.
Returns
-------
response : str
The response from the remote device. | Below is the the instruction that describes the task:
### Input:
Send command to the remote device with a caching feature to avoid sending the same command
twice based on the SSH_MAPPER_BASE dict cmd key.
Parameters
----------
cmd : str
The command to send to the remote device after checking cache.
Returns
-------
response : str
The response from the remote device.
### Response:
def _send_command_wrapper(self, cmd):
"""
Send command to the remote device with a caching feature to avoid sending the same command
twice based on the SSH_MAPPER_BASE dict cmd key.
Parameters
----------
cmd : str
The command to send to the remote device after checking cache.
Returns
-------
response : str
The response from the remote device.
"""
cached_results = self._results_cache.get(cmd)
if not cached_results:
response = self._send_command(cmd)
self._results_cache[cmd] = response
return response
else:
return cached_results |
def netconf_config_change_edit_operation(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
edit = ET.SubElement(netconf_config_change, "edit")
operation = ET.SubElement(edit, "operation")
operation.text = kwargs.pop('operation')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def netconf_config_change_edit_operation(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_config_change = ET.SubElement(config, "netconf-config-change", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-notifications")
edit = ET.SubElement(netconf_config_change, "edit")
operation = ET.SubElement(edit, "operation")
operation.text = kwargs.pop('operation')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e)) | Create a new link pointing to link_destination in remote_path position. | Below is the the instruction that describes the task:
### Input:
Create a new link pointing to link_destination in remote_path position.
### Response:
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e)) |
def center(self):
'''Obtain the center from the average of all points'''
points = np.array(self._points)
return np.mean(points[:, COLS.XYZ], axis=0) | Obtain the center from the average of all points | Below is the the instruction that describes the task:
### Input:
Obtain the center from the average of all points
### Response:
def center(self):
'''Obtain the center from the average of all points'''
points = np.array(self._points)
return np.mean(points[:, COLS.XYZ], axis=0) |
def send_credit_note_email(self, credit_note_it, email_dict):
"""
Sends an credit note by email
If you want to send your email to more than one persons do:
'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']}}
:param credit_note_it: the credit note id
:param email_dict: the email dict
:return dict
"""
return self._create_post_request(
resource=CREDIT_NOTES,
billomat_id=credit_note_it,
send_data=email_dict,
command=EMAIL,
) | Sends an credit note by email
If you want to send your email to more than one persons do:
'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']}}
:param credit_note_it: the credit note id
:param email_dict: the email dict
:return dict | Below is the the instruction that describes the task:
### Input:
Sends an credit note by email
If you want to send your email to more than one persons do:
'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']}}
:param credit_note_it: the credit note id
:param email_dict: the email dict
:return dict
### Response:
def send_credit_note_email(self, credit_note_it, email_dict):
"""
Sends an credit note by email
If you want to send your email to more than one persons do:
'recipients': {'to': ['bykof@me.com', 'mbykovski@seibert-media.net']}}
:param credit_note_it: the credit note id
:param email_dict: the email dict
:return dict
"""
return self._create_post_request(
resource=CREDIT_NOTES,
billomat_id=credit_note_it,
send_data=email_dict,
command=EMAIL,
) |
def legal_status(self):
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
'''
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, Method='COMBINED')
return self.__legal_status | r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'} | Below is the the instruction that describes the task:
### Input:
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
### Response:
def legal_status(self):
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
'''
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, Method='COMBINED')
return self.__legal_status |
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
"""
if isinstance(string, basestring):
print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)]))
if isinstance(string, Text):
print("\n\n".join([table(sentence, fill=column) for sentence in string]))
if isinstance(string, Sentence):
print(table(string, fill=column)) | Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object. | Below is the the instruction that describes the task:
### Input:
Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
### Response:
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
"""
if isinstance(string, basestring):
print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)]))
if isinstance(string, Text):
print("\n\n".join([table(sentence, fill=column) for sentence in string]))
if isinstance(string, Sentence):
print(table(string, fill=column)) |
def get_comments_are_moderated(instance):
"""
Check if comments are moderated for the instance
"""
if not IS_INSTALLED:
return False
try:
# Get the moderator which is installed for this model.
mod = moderator._registry[instance.__class__]
except KeyError:
# No moderator = no moderation
return False
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
return CommentModerator.moderate(mod, None, instance, None) | Check if comments are moderated for the instance | Below is the the instruction that describes the task:
### Input:
Check if comments are moderated for the instance
### Response:
def get_comments_are_moderated(instance):
"""
Check if comments are moderated for the instance
"""
if not IS_INSTALLED:
return False
try:
# Get the moderator which is installed for this model.
mod = moderator._registry[instance.__class__]
except KeyError:
# No moderator = no moderation
return False
# Check the 'auto_moderate_field', 'moderate_after',
# by reusing the basic Django policies.
return CommentModerator.moderate(mod, None, instance, None) |
def replication_group_exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a replication group exists.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.replication_group_exists myelasticache
'''
return bool(describe_replication_groups(name=name, region=region, key=key, keyid=keyid,
profile=profile)) | Check to see if a replication group exists.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.replication_group_exists myelasticache | Below is the the instruction that describes the task:
### Input:
Check to see if a replication group exists.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.replication_group_exists myelasticache
### Response:
def replication_group_exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a replication group exists.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.replication_group_exists myelasticache
'''
return bool(describe_replication_groups(name=name, region=region, key=key, keyid=keyid,
profile=profile)) |
def convolve_comb_lines(lines_wave, lines_flux, sigma,
crpix1, crval1, cdelt1, naxis1):
"""Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel.
"""
# generate wavelengths for output spectrum
xwave = crval1 + (np.arange(naxis1) + 1 - crpix1) * cdelt1
# initialize output spectrum
spectrum = np.zeros(naxis1)
# convolve each line
for wave, flux in zip(lines_wave, lines_flux):
sp_tmp = gauss_box_model(x=xwave, amplitude=flux, mean=wave,
stddev=sigma)
spectrum += sp_tmp
return xwave, spectrum | Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel. | Below is the the instruction that describes the task:
### Input:
Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel.
### Response:
def convolve_comb_lines(lines_wave, lines_flux, sigma,
crpix1, crval1, cdelt1, naxis1):
"""Convolve a set of lines of known wavelengths and flux.
Parameters
----------
lines_wave : array like
Input array with wavelengths
lines_flux : array like
Input array with fluxes
sigma : float
Sigma of the broadening gaussian to be applied.
crpix1 : float
CRPIX1 of the desired wavelength calibration.
crval1 : float
CRVAL1 of the desired wavelength calibration.
cdelt1 : float
CDELT1 of the desired wavelength calibration.
naxis1 : integer
NAXIS1 of the output spectrum.
Returns
-------
xwave : array like
Array with wavelengths for the output spectrum.
spectrum : array like
Array with the expected fluxes at each pixel.
"""
# generate wavelengths for output spectrum
xwave = crval1 + (np.arange(naxis1) + 1 - crpix1) * cdelt1
# initialize output spectrum
spectrum = np.zeros(naxis1)
# convolve each line
for wave, flux in zip(lines_wave, lines_flux):
sp_tmp = gauss_box_model(x=xwave, amplitude=flux, mean=wave,
stddev=sigma)
spectrum += sp_tmp
return xwave, spectrum |
def count_by(records: Sequence[Dict], field_name: str) -> defaultdict:
"""
Frequency each value occurs in a record sequence for a given field name.
"""
counter = defaultdict(int)
for record in records:
name = record[field_name]
counter[name] += 1
return counter | Frequency each value occurs in a record sequence for a given field name. | Below is the the instruction that describes the task:
### Input:
Frequency each value occurs in a record sequence for a given field name.
### Response:
def count_by(records: Sequence[Dict], field_name: str) -> defaultdict:
"""
Frequency each value occurs in a record sequence for a given field name.
"""
counter = defaultdict(int)
for record in records:
name = record[field_name]
counter[name] += 1
return counter |
def get_cipher(key=None, keyfile=None):
"""
Get cipher object, and then you can invoke:
des = get_cipher()
d = des.encrpy('Hello')
print des.descrpy(d)
"""
des_func = import_attr(settings.SECRETKEY.CIPHER_CLS)
kwargs = settings.SECRETKEY.CIPHER_ARGS
if not key:
key = functions.get_cipher_key(keyfile)
cipher = des_func(key, **kwargs)
return cipher | Get cipher object, and then you can invoke:
des = get_cipher()
d = des.encrpy('Hello')
print des.descrpy(d) | Below is the the instruction that describes the task:
### Input:
Get cipher object, and then you can invoke:
des = get_cipher()
d = des.encrpy('Hello')
print des.descrpy(d)
### Response:
def get_cipher(key=None, keyfile=None):
"""
Get cipher object, and then you can invoke:
des = get_cipher()
d = des.encrpy('Hello')
print des.descrpy(d)
"""
des_func = import_attr(settings.SECRETKEY.CIPHER_CLS)
kwargs = settings.SECRETKEY.CIPHER_ARGS
if not key:
key = functions.get_cipher_key(keyfile)
cipher = des_func(key, **kwargs)
return cipher |
def create_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, AutomaticFailoverEnabled=None, NumCacheClusters=None, PreferredCacheClusterAZs=None, NumNodeGroups=None, ReplicasPerNodeGroup=None, NodeGroupConfiguration=None, CacheNodeType=None, Engine=None, EngineVersion=None, CacheParameterGroupName=None, CacheSubnetGroupName=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, Tags=None, SnapshotArns=None, SnapshotName=None, PreferredMaintenanceWindow=None, Port=None, NotificationTopicArn=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, AuthToken=None):
"""
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
A Redis (cluster mode disabled) replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide .
See also: AWS API Documentation
:example: response = client.create_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
AutomaticFailoverEnabled=True|False,
NumCacheClusters=123,
PreferredCacheClusterAZs=[
'string',
],
NumNodeGroups=123,
ReplicasPerNodeGroup=123,
NodeGroupConfiguration=[
{
'Slots': 'string',
'ReplicaCount': 123,
'PrimaryAvailabilityZone': 'string',
'ReplicaAvailabilityZones': [
'string',
]
},
],
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The replication group identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: [REQUIRED]
A user-created description for the replication group.
:type PrimaryClusterId: string
:param PrimaryClusterId: The identifier of the cache cluster that serves as the primary for this replication group. This cache cluster must already exist and have a status of available .
This parameter is not required if NumCacheClusters , NumNodeGroups , or ReplicasPerNodeGroup is specified.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.
If true , Multi-AZ is enabled for this replication group. If false , Multi-AZ is disabled for this replication group.
AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.
Default: false
Note
ElastiCache Multi-AZ replication groups is not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 node types. Redis (cluster mode enabled): T2 node types.
:type NumCacheClusters: integer
:param NumCacheClusters: The number of clusters this replication group initially has.
This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.
If AutomaticFailoverEnabled is true , the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.
The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
:type PreferredCacheClusterAZs: list
:param PreferredCacheClusterAZs: A list of EC2 Availability Zones in which the replication group's cache clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.
This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead.
Note
If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in Availability Zones associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheClusters .
Default: system chosen Availability Zones.
(string) --
:type NumNodeGroups: integer
:param NumNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.
Default: 1
:type ReplicasPerNodeGroup: integer
:param ReplicasPerNodeGroup: An optional parameter that specifies the number of replica nodes in each node group (shard). Valid values are 0 to 5.
:type NodeGroupConfiguration: list
:param NodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.
If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter.
(dict) --node group (shard) configuration options. Each node group (shard) configuration has the following: Slots , PrimaryAvailabilityZone , ReplicaAvailabilityZones , ReplicaCount .
Slots (string) --A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey .
Example: '0-3999'
ReplicaCount (integer) --The number of read replica nodes in this node group (shard).
PrimaryAvailabilityZone (string) --The Availability Zone where the primary node of this node group (shard) is launched.
ReplicaAvailabilityZones (list) --A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of ReplicaCount or ReplicasPerNodeGroup if not specified.
(string) --
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for the cache clusters in this replication group.
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ) in the ElastiCache User Guide , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.
To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2 .
To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on .
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the cache subnet group to be used for the replication group.
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to associate with this replication group.
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more Amazon VPC security groups associated with this replication group.
Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each member of the replication group accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note
This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
"""
pass | Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
A Redis (cluster mode disabled) replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide .
See also: AWS API Documentation
:example: response = client.create_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
AutomaticFailoverEnabled=True|False,
NumCacheClusters=123,
PreferredCacheClusterAZs=[
'string',
],
NumNodeGroups=123,
ReplicasPerNodeGroup=123,
NodeGroupConfiguration=[
{
'Slots': 'string',
'ReplicaCount': 123,
'PrimaryAvailabilityZone': 'string',
'ReplicaAvailabilityZones': [
'string',
]
},
],
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The replication group identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: [REQUIRED]
A user-created description for the replication group.
:type PrimaryClusterId: string
:param PrimaryClusterId: The identifier of the cache cluster that serves as the primary for this replication group. This cache cluster must already exist and have a status of available .
This parameter is not required if NumCacheClusters , NumNodeGroups , or ReplicasPerNodeGroup is specified.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.
If true , Multi-AZ is enabled for this replication group. If false , Multi-AZ is disabled for this replication group.
AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.
Default: false
Note
ElastiCache Multi-AZ replication groups is not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 node types. Redis (cluster mode enabled): T2 node types.
:type NumCacheClusters: integer
:param NumCacheClusters: The number of clusters this replication group initially has.
This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.
If AutomaticFailoverEnabled is true , the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.
The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
:type PreferredCacheClusterAZs: list
:param PreferredCacheClusterAZs: A list of EC2 Availability Zones in which the replication group's cache clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.
This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead.
Note
If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in Availability Zones associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheClusters .
Default: system chosen Availability Zones.
(string) --
:type NumNodeGroups: integer
:param NumNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.
Default: 1
:type ReplicasPerNodeGroup: integer
:param ReplicasPerNodeGroup: An optional parameter that specifies the number of replica nodes in each node group (shard). Valid values are 0 to 5.
:type NodeGroupConfiguration: list
:param NodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.
If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter.
(dict) --node group (shard) configuration options. Each node group (shard) configuration has the following: Slots , PrimaryAvailabilityZone , ReplicaAvailabilityZones , ReplicaCount .
Slots (string) --A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey .
Example: '0-3999'
ReplicaCount (integer) --The number of read replica nodes in this node group (shard).
PrimaryAvailabilityZone (string) --The Availability Zone where the primary node of this node group (shard) is launched.
ReplicaAvailabilityZones (list) --A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of ReplicaCount or ReplicasPerNodeGroup if not specified.
(string) --
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for the cache clusters in this replication group.
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ) in the ElastiCache User Guide , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.
To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2 .
To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on .
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the cache subnet group to be used for the replication group.
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to associate with this replication group.
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more Amazon VPC security groups associated with this replication group.
Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each member of the replication group accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note
This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types. | Below is the instruction that describes the task:
### Input:
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
A Redis (cluster mode disabled) replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide .
See also: AWS API Documentation
:example: response = client.create_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
AutomaticFailoverEnabled=True|False,
NumCacheClusters=123,
PreferredCacheClusterAZs=[
'string',
],
NumNodeGroups=123,
ReplicasPerNodeGroup=123,
NodeGroupConfiguration=[
{
'Slots': 'string',
'ReplicaCount': 123,
'PrimaryAvailabilityZone': 'string',
'ReplicaAvailabilityZones': [
'string',
]
},
],
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The replication group identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: [REQUIRED]
A user-created description for the replication group.
:type PrimaryClusterId: string
:param PrimaryClusterId: The identifier of the cache cluster that serves as the primary for this replication group. This cache cluster must already exist and have a status of available .
This parameter is not required if NumCacheClusters , NumNodeGroups , or ReplicasPerNodeGroup is specified.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.
If true , Multi-AZ is enabled for this replication group. If false , Multi-AZ is disabled for this replication group.
AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.
Default: false
Note
ElastiCache Multi-AZ replication groups is not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 node types. Redis (cluster mode enabled): T2 node types.
:type NumCacheClusters: integer
:param NumCacheClusters: The number of clusters this replication group initially has.
This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.
If AutomaticFailoverEnabled is true , the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.
The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
:type PreferredCacheClusterAZs: list
:param PreferredCacheClusterAZs: A list of EC2 Availability Zones in which the replication group's cache clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.
This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead.
Note
If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in Availability Zones associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheClusters .
Default: system chosen Availability Zones.
(string) --
:type NumNodeGroups: integer
:param NumNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.
Default: 1
:type ReplicasPerNodeGroup: integer
:param ReplicasPerNodeGroup: An optional parameter that specifies the number of replica nodes in each node group (shard). Valid values are 0 to 5.
:type NodeGroupConfiguration: list
:param NodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.
If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter.
(dict) --node group (shard) configuration options. Each node group (shard) configuration has the following: Slots , PrimaryAvailabilityZone , ReplicaAvailabilityZones , ReplicaCount .
Slots (string) --A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey .
Example: '0-3999'
ReplicaCount (integer) --The number of read replica nodes in this node group (shard).
PrimaryAvailabilityZone (string) --The Availability Zone where the primary node of this node group (shard) is launched.
ReplicaAvailabilityZones (list) --A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of ReplicaCount or ReplicasPerNodeGroup if not specified.
(string) --
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for the cache clusters in this replication group.
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ) in the ElastiCache User Guide , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.
To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2 .
To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on .
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the cache subnet group to be used for the replication group.
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to associate with this replication group.
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more Amazon VPC security groups associated with this replication group.
Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each member of the replication group accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note
This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
### Response:
def create_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, AutomaticFailoverEnabled=None, NumCacheClusters=None, PreferredCacheClusterAZs=None, NumNodeGroups=None, ReplicasPerNodeGroup=None, NodeGroupConfiguration=None, CacheNodeType=None, Engine=None, EngineVersion=None, CacheParameterGroupName=None, CacheSubnetGroupName=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, Tags=None, SnapshotArns=None, SnapshotName=None, PreferredMaintenanceWindow=None, Port=None, NotificationTopicArn=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, AuthToken=None):
"""
Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group.
A Redis (cluster mode disabled) replication group is a collection of cache clusters, where one of the cache clusters is a read/write primary and the others are read-only replicas. Writes to the primary are asynchronously propagated to the replicas.
A Redis (cluster mode enabled) replication group is a collection of 1 to 15 node groups (shards). Each node group (shard) has one read/write primary node and up to 5 read-only replica nodes. Writes to the primary are asynchronously propagated to the replicas. Redis (cluster mode enabled) replication groups partition the data across node groups (shards).
When a Redis (cluster mode disabled) replication group has been successfully created, you can add one or more read replicas to it, up to a total of 5 read replicas. You cannot alter a Redis (cluster mode enabled) replication group after it has been created. However, if you need to increase or decrease the number of node groups (console: shards), you can avail yourself of ElastiCache for Redis' enhanced backup and restore. For more information, see Restoring From a Backup with Cluster Resizing in the ElastiCache User Guide .
See also: AWS API Documentation
:example: response = client.create_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
AutomaticFailoverEnabled=True|False,
NumCacheClusters=123,
PreferredCacheClusterAZs=[
'string',
],
NumNodeGroups=123,
ReplicasPerNodeGroup=123,
NodeGroupConfiguration=[
{
'Slots': 'string',
'ReplicaCount': 123,
'PrimaryAvailabilityZone': 'string',
'ReplicaAvailabilityZones': [
'string',
]
},
],
CacheNodeType='string',
Engine='string',
EngineVersion='string',
CacheParameterGroupName='string',
CacheSubnetGroupName='string',
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
SnapshotArns=[
'string',
],
SnapshotName='string',
PreferredMaintenanceWindow='string',
Port=123,
NotificationTopicArn='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
AuthToken='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The replication group identifier. This parameter is stored as a lowercase string.
Constraints:
A name must contain from 1 to 20 alphanumeric characters or hyphens.
The first character must be a letter.
A name cannot end with a hyphen or contain two consecutive hyphens.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: [REQUIRED]
A user-created description for the replication group.
:type PrimaryClusterId: string
:param PrimaryClusterId: The identifier of the cache cluster that serves as the primary for this replication group. This cache cluster must already exist and have a status of available .
This parameter is not required if NumCacheClusters , NumNodeGroups , or ReplicasPerNodeGroup is specified.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.
If true , Multi-AZ is enabled for this replication group. If false , Multi-AZ is disabled for this replication group.
AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) replication groups.
Default: false
Note
ElastiCache Multi-AZ replication groups is not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 node types. Redis (cluster mode enabled): T2 node types.
:type NumCacheClusters: integer
:param NumCacheClusters: The number of clusters this replication group initially has.
This parameter is not used if there is more than one node group (shard). You should use ReplicasPerNodeGroup instead.
If AutomaticFailoverEnabled is true , the value of this parameter must be at least 2. If AutomaticFailoverEnabled is false you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.
The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
:type PreferredCacheClusterAZs: list
:param PreferredCacheClusterAZs: A list of EC2 Availability Zones in which the replication group's cache clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.
This parameter is not used if there is more than one node group (shard). You should use NodeGroupConfiguration instead.
Note
If you are creating your replication group in an Amazon VPC (recommended), you can only locate cache clusters in Availability Zones associated with the subnets in the selected subnet group.
The number of Availability Zones listed must equal the value of NumCacheClusters .
Default: system chosen Availability Zones.
(string) --
:type NumNodeGroups: integer
:param NumNodeGroups: An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.
Default: 1
:type ReplicasPerNodeGroup: integer
:param ReplicasPerNodeGroup: An optional parameter that specifies the number of replica nodes in each node group (shard). Valid values are 0 to 5.
:type NodeGroupConfiguration: list
:param NodeGroupConfiguration: A list of node group (shard) configuration options. Each node group (shard) configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones, ReplicaCount.
If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter to individually configure each node group (shard), or you can omit this parameter.
(dict) --node group (shard) configuration options. Each node group (shard) configuration has the following: Slots , PrimaryAvailabilityZone , ReplicaAvailabilityZones , ReplicaCount .
Slots (string) --A string that specifies the keyspace for a particular node group. Keyspaces range from 0 to 16,383. The string is in the format startkey-endkey .
Example: '0-3999'
ReplicaCount (integer) --The number of read replica nodes in this node group (shard).
PrimaryAvailabilityZone (string) --The Availability Zone where the primary node of this node group (shard) is launched.
ReplicaAvailabilityZones (list) --A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of ReplicaCount or ReplicasPerNodeGroup if not specified.
(string) --
:type CacheNodeType: string
:param CacheNodeType: The compute and memory capacity of the nodes in the node group (shard).
Valid node types are as follows:
General purpose:
Current generation: cache.t2.micro , cache.t2.small , cache.t2.medium , cache.m3.medium , cache.m3.large , cache.m3.xlarge , cache.m3.2xlarge , cache.m4.large , cache.m4.xlarge , cache.m4.2xlarge , cache.m4.4xlarge , cache.m4.10xlarge
Previous generation: cache.t1.micro , cache.m1.small , cache.m1.medium , cache.m1.large , cache.m1.xlarge
Compute optimized: cache.c1.xlarge
Memory optimized:
Current generation: cache.r3.large , cache.r3.xlarge , cache.r3.2xlarge , cache.r3.4xlarge , cache.r3.8xlarge
Previous generation: cache.m2.xlarge , cache.m2.2xlarge , cache.m2.4xlarge
Notes:
All T2 instances are created in an Amazon Virtual Private Cloud (Amazon VPC).
Redis backup/restore is not supported for Redis (cluster mode disabled) T1 and T2 instances. Backup/restore is supported on Redis (cluster mode enabled) T2 instances.
Redis Append-only files (AOF) functionality is not supported for T1 or T2 instances.
For a complete listing of node types and specifications, see Amazon ElastiCache Product Features and Details and either Cache Node Type-Specific Parameters for Memcached or Cache Node Type-Specific Parameters for Redis .
:type Engine: string
:param Engine: The name of the cache engine to be used for the cache clusters in this replication group.
:type EngineVersion: string
:param EngineVersion: The version number of the cache engine to be used for the cache clusters in this replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ) in the ElastiCache User Guide , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cache cluster or replication group and create it anew with the earlier engine version.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
If you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.
To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2 .
To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on .
:type CacheSubnetGroupName: string
:param CacheSubnetGroupName: The name of the cache subnet group to be used for the replication group.
Warning
If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups .
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to associate with this replication group.
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: One or more Amazon VPC security groups associated with this replication group.
Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type Tags: list
:param Tags: A list of cost allocation tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value.
(dict) --A cost allocation Tag that can be added to an ElastiCache cluster or replication group. Tags are composed of a Key/Value pair. A tag with a null Value is permitted.
Key (string) --The key for the tag. May not be null.
Value (string) --The tag's value. May be null.
:type SnapshotArns: list
:param SnapshotArns: A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration regardless of the number of ARNs specified here.
Note
This parameter is only valid if the Engine parameter is redis .
Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
(string) --
:type SnapshotName: string
:param SnapshotName: The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to restoring while the new replication group is being created.
Note
This parameter is only valid if the Engine parameter is redis .
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cache cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for ddd are:
Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type Port: integer
:param Port: The port number on which each member of the replication group accepts connections.
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.
Note
The Amazon SNS topic owner must be the same as the cache cluster owner.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Note
This parameter is only valid if the Engine parameter is redis .
Default: 0 (i.e., automatic backups are disabled for this cache cluster).
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
Note
This parameter is only valid if the Engine parameter is redis .
:type AuthToken: string
:param AuthToken:
Reserved parameter. The password used to access a password protected server.
Password constraints:
Must be only printable ASCII characters.
Must be at least 16 characters and no more than 128 characters in length.
Cannot contain any of the following characters: '/', ''', or '@'.
For more information, see AUTH password at Redis.
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
"""
pass |
def nexson_tree_preorder_iter(tree_proxy, node_id=None, node=None, edge_id=None, edge=None):
    """Takes a tree in "By ID" NexSON (v1.2) and provides an iterator over:
        NexsonNodeProxy object
    where the edge of the object is the edge connecting the node to its parent.
    The first object yielded corresponds to the traversal root and has None as
    its edge (unless an entry `edge_id`/`edge` is supplied, in which case that
    edge is attached to the first yielded node).

    `node_id`, `node`, `edge_id`, `edge` may optionally restrict the traversal
    to the subtree rooted at that node/edge; redundant arguments are
    cross-checked with assertions.
    """
    tree = tree_proxy._nexson_tree
    ebsid = tree['edgeBySourceId']
    nbid = tree['nodeById']
    if edge_id is not None:
        # Caller supplied the edge leading into the subtree root.
        assert edge is not None
        if node_id is None:
            node_id = edge['@target']
        else:
            assert node_id == edge['@target']
        if node is None:
            node = nbid[node_id]
        else:
            assert node == nbid[node_id]
        yield tree_proxy._create_node_proxy_from_edge(edge_id, edge, node_id=node_id, node=node)
        root_id = node_id
    elif node_id is not None:
        # Caller supplied only a node: treat it as the traversal root (no edge).
        if node is None:
            node = nbid[node_id]
        else:
            assert node == nbid[node_id]
        yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=node_id, node=node)
        root_id = node_id
    else:
        # Default: start at the tree's designated root node.
        root_id = tree['^ot:rootNodeId']
        root = nbid[root_id]
        yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=root_id, node=root)
    # Iterative preorder traversal: seed the stack with the root's out-edges,
    # then repeatedly pop an edge, yield its target node, and push that node's
    # own out-edges. Using .get for the seed (the original indexed ebsid
    # directly) avoids a KeyError when the traversal root is a leaf.
    root_out_edges = ebsid.get(root_id)
    if root_out_edges is None:
        return
    stack = [(e['@target'], eid, e) for eid, e in root_out_edges.items()]
    while stack:
        target_node_id, cur_edge_id, cur_edge = stack.pop()
        node = nbid[target_node_id]
        # Pass the already-fetched node along (the original looked it up but
        # discarded it), matching the other _create_node_proxy_from_edge calls.
        yield tree_proxy._create_node_proxy_from_edge(edge_id=cur_edge_id, edge=cur_edge,
                                                      node_id=target_node_id, node=node)
        daughter_edges = ebsid.get(target_node_id)
        if daughter_edges is not None:
            stack.extend((e['@target'], eid, e) for eid, e in daughter_edges.items())
NexsonNodeProxy object
where the edge of the object is the edge connectin the node to the parent.
The first node will be the root and will have None as it's edge | Below is the instruction that describes the task:
### Input:
Takes a tree in "By ID" NexSON (v1.2). provides and iterator over:
NexsonNodeProxy object
where the edge of the object is the edge connectin the node to the parent.
The first node will be the root and will have None as it's edge
### Response:
def nexson_tree_preorder_iter(tree_proxy, node_id=None, node=None, edge_id=None, edge=None):
"""Takes a tree in "By ID" NexSON (v1.2). provides and iterator over:
NexsonNodeProxy object
where the edge of the object is the edge connectin the node to the parent.
The first node will be the root and will have None as it's edge
"""
tree = tree_proxy._nexson_tree
ebsid = tree['edgeBySourceId']
nbid = tree['nodeById']
if edge_id is not None:
assert edge is not None
if node_id is None:
node_id = edge['@target']
else:
assert node_id == edge['@target']
if node is None:
node = nbid[node_id]
else:
assert node == nbid[node_id]
yield tree_proxy._create_node_proxy_from_edge(edge_id, edge, node_id=node_id, node=node)
root_id = node_id
elif node_id is not None:
if node is None:
node = nbid[node_id]
else:
assert node == nbid[node_id]
yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=node_id, node=node)
root_id = node_id
else:
root_id = tree['^ot:rootNodeId']
root = nbid[root_id]
yield tree_proxy._create_node_proxy_from_edge(None, None, node_id=root_id, node=root)
stack = []
new_stack = [(i['@target'], edge_id, i) for edge_id, i in ebsid[root_id].items()]
stack.extend(new_stack)
while stack:
target_node_id, edge_id, edge = stack.pop()
node = nbid[target_node_id]
yield tree_proxy._create_node_proxy_from_edge(edge_id=edge_id, edge=edge, node_id=target_node_id)
daughter_edges = ebsid.get(target_node_id)
if daughter_edges is not None:
new_stack = [(i['@target'], edge_id, i) for edge_id, i in daughter_edges.items()]
stack.extend(new_stack) |
def set_parent(self, new_parent, init=False):
"Store the gui/wx object parent for this component"
# set init=True if this is called from the constructor
self._parent = get(new_parent, init) | Store the gui/wx object parent for this component | Below is the the instruction that describes the task:
### Input:
Store the gui/wx object parent for this component
### Response:
def set_parent(self, new_parent, init=False):
"Store the gui/wx object parent for this component"
# set init=True if this is called from the constructor
self._parent = get(new_parent, init) |
def accept(self): # type: () -> str
"""The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable.
"""
accept = self.headers.get('Accept')
if not accept or accept == _content_types.ANY:
return self._default_accept
else:
return accept | The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable. | Below is the the instruction that describes the task:
### Input:
The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable.
### Response:
def accept(self): # type: () -> str
"""The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable.
"""
accept = self.headers.get('Accept')
if not accept or accept == _content_types.ANY:
return self._default_accept
else:
return accept |
def define_unit(
symbol, value, tex_repr=None, offset=None, prefixable=False, registry=None
):
"""
Define a new unit and add it to the specified unit registry.
Parameters
----------
symbol : string
The symbol for the new unit.
value : tuple or :class:`unyt.array.unyt_quantity`
The definition of the new unit in terms of some other units. For
example, one would define a new "mph" unit with ``(1.0, "mile/hr")``
or with ``1.0*unyt.mile/unyt.hr``
tex_repr : string, optional
The LaTeX representation of the new unit. If one is not supplied, it
will be generated automatically based on the symbol string.
offset : float, optional
The default offset for the unit. If not set, an offset of 0 is assumed.
prefixable : boolean, optional
Whether or not the new unit can use SI prefixes. Default: False
registry : :class:`unyt.unit_registry.UnitRegistry` or None
The unit registry to add the unit to. If None, then defaults to the
global default unit registry. If registry is set to None then the
unit object will be added as an attribute to the top-level :mod:`unyt`
namespace to ease working with the newly defined unit. See the example
below.
Examples
--------
>>> from unyt import day
>>> two_weeks = 14.0*day
>>> one_day = 1.0*day
>>> define_unit("two_weeks", two_weeks)
>>> from unyt import two_weeks
>>> print((3*two_weeks)/one_day)
42.0 dimensionless
"""
from unyt.array import unyt_quantity, _iterable
import unyt
if registry is None:
registry = default_unit_registry
if symbol in registry:
raise RuntimeError(
"Unit symbol '%s' already exists in the provided " "registry" % symbol
)
if not isinstance(value, unyt_quantity):
if _iterable(value) and len(value) == 2:
value = unyt_quantity(value[0], value[1], registry=registry)
else:
raise RuntimeError(
'"value" needs to be a quantity or ' "(value, unit) tuple!"
)
base_value = float(value.in_base(unit_system="mks"))
dimensions = value.units.dimensions
registry.add(
symbol,
base_value,
dimensions,
prefixable=prefixable,
tex_repr=tex_repr,
offset=offset,
)
if registry is default_unit_registry:
u = Unit(symbol, registry=registry)
setattr(unyt, symbol, u) | Define a new unit and add it to the specified unit registry.
Parameters
----------
symbol : string
The symbol for the new unit.
value : tuple or :class:`unyt.array.unyt_quantity`
The definition of the new unit in terms of some other units. For
example, one would define a new "mph" unit with ``(1.0, "mile/hr")``
or with ``1.0*unyt.mile/unyt.hr``
tex_repr : string, optional
The LaTeX representation of the new unit. If one is not supplied, it
will be generated automatically based on the symbol string.
offset : float, optional
The default offset for the unit. If not set, an offset of 0 is assumed.
prefixable : boolean, optional
Whether or not the new unit can use SI prefixes. Default: False
registry : :class:`unyt.unit_registry.UnitRegistry` or None
The unit registry to add the unit to. If None, then defaults to the
global default unit registry. If registry is set to None then the
unit object will be added as an attribute to the top-level :mod:`unyt`
namespace to ease working with the newly defined unit. See the example
below.
Examples
--------
>>> from unyt import day
>>> two_weeks = 14.0*day
>>> one_day = 1.0*day
>>> define_unit("two_weeks", two_weeks)
>>> from unyt import two_weeks
>>> print((3*two_weeks)/one_day)
42.0 dimensionless | Below is the the instruction that describes the task:
### Input:
Define a new unit and add it to the specified unit registry.
Parameters
----------
symbol : string
The symbol for the new unit.
value : tuple or :class:`unyt.array.unyt_quantity`
The definition of the new unit in terms of some other units. For
example, one would define a new "mph" unit with ``(1.0, "mile/hr")``
or with ``1.0*unyt.mile/unyt.hr``
tex_repr : string, optional
The LaTeX representation of the new unit. If one is not supplied, it
will be generated automatically based on the symbol string.
offset : float, optional
The default offset for the unit. If not set, an offset of 0 is assumed.
prefixable : boolean, optional
Whether or not the new unit can use SI prefixes. Default: False
registry : :class:`unyt.unit_registry.UnitRegistry` or None
The unit registry to add the unit to. If None, then defaults to the
global default unit registry. If registry is set to None then the
unit object will be added as an attribute to the top-level :mod:`unyt`
namespace to ease working with the newly defined unit. See the example
below.
Examples
--------
>>> from unyt import day
>>> two_weeks = 14.0*day
>>> one_day = 1.0*day
>>> define_unit("two_weeks", two_weeks)
>>> from unyt import two_weeks
>>> print((3*two_weeks)/one_day)
42.0 dimensionless
### Response:
def define_unit(
symbol, value, tex_repr=None, offset=None, prefixable=False, registry=None
):
"""
Define a new unit and add it to the specified unit registry.
Parameters
----------
symbol : string
The symbol for the new unit.
value : tuple or :class:`unyt.array.unyt_quantity`
The definition of the new unit in terms of some other units. For
example, one would define a new "mph" unit with ``(1.0, "mile/hr")``
or with ``1.0*unyt.mile/unyt.hr``
tex_repr : string, optional
The LaTeX representation of the new unit. If one is not supplied, it
will be generated automatically based on the symbol string.
offset : float, optional
The default offset for the unit. If not set, an offset of 0 is assumed.
prefixable : boolean, optional
Whether or not the new unit can use SI prefixes. Default: False
registry : :class:`unyt.unit_registry.UnitRegistry` or None
The unit registry to add the unit to. If None, then defaults to the
global default unit registry. If registry is set to None then the
unit object will be added as an attribute to the top-level :mod:`unyt`
namespace to ease working with the newly defined unit. See the example
below.
Examples
--------
>>> from unyt import day
>>> two_weeks = 14.0*day
>>> one_day = 1.0*day
>>> define_unit("two_weeks", two_weeks)
>>> from unyt import two_weeks
>>> print((3*two_weeks)/one_day)
42.0 dimensionless
"""
from unyt.array import unyt_quantity, _iterable
import unyt
if registry is None:
registry = default_unit_registry
if symbol in registry:
raise RuntimeError(
"Unit symbol '%s' already exists in the provided " "registry" % symbol
)
if not isinstance(value, unyt_quantity):
if _iterable(value) and len(value) == 2:
value = unyt_quantity(value[0], value[1], registry=registry)
else:
raise RuntimeError(
'"value" needs to be a quantity or ' "(value, unit) tuple!"
)
base_value = float(value.in_base(unit_system="mks"))
dimensions = value.units.dimensions
registry.add(
symbol,
base_value,
dimensions,
prefixable=prefixable,
tex_repr=tex_repr,
offset=offset,
)
if registry is default_unit_registry:
u = Unit(symbol, registry=registry)
setattr(unyt, symbol, u) |
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals) | Return a MATCH query where all eligible locations are valid as query start locations. | Below is the the instruction that describes the task:
### Input:
Return a MATCH query where all eligible locations are valid as query start locations.
### Response:
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals) |
def get_gzh_article_by_hot(text):
"""从 首页热门搜索 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
首页热门搜索 页 中 某一页 的文本
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
page = etree.HTML(text)
lis = page.xpath('/html/body/li')
gzh_article_list = []
for li in lis:
url = get_first_of_element(li, 'div[1]/h4/a/@href')
title = get_first_of_element(li, 'div[1]/h4/a/div/text()')
abstract = get_first_of_element(li, 'div[1]/p[1]/text()')
xpath_time = get_first_of_element(li, 'div[1]/p[2]')
open_id = get_first_of_element(xpath_time, 'span/@data-openid')
headimage = get_first_of_element(xpath_time, 'span/@data-headimage')
gzh_name = get_first_of_element(xpath_time, 'span/text()')
send_time = xpath_time.xpath('a/span/@data-lastmodified')
main_img = get_first_of_element(li, 'div[2]/a/img/@src')
try:
send_time = int(send_time[0])
except ValueError:
send_time = send_time[0]
gzh_article_list.append({
'gzh': {
'headimage': headimage,
'wechat_name': gzh_name,
},
'article': {
'url': url,
'title': title,
'abstract': abstract,
'time': send_time,
'open_id': open_id,
'main_img': main_img
}
})
return gzh_article_list | 从 首页热门搜索 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
首页热门搜索 页 中 某一页 的文本
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
} | Below is the the instruction that describes the task:
### Input:
从 首页热门搜索 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
首页热门搜索 页 中 某一页 的文本
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
### Response:
def get_gzh_article_by_hot(text):
"""从 首页热门搜索 提取公众号信息 和 文章列表信息
Parameters
----------
text : str or unicode
首页热门搜索 页 中 某一页 的文本
Returns
-------
list[dict]
{
'gzh': {
'headimage': str, # 公众号头像
'wechat_name': str, # 公众号名称
},
'article': {
'url': str, # 文章临时链接
'title': str, # 文章标题
'abstract': str, # 文章摘要
'time': int, # 推送时间,10位时间戳
'open_id': str, # open id
'main_img': str # 封面图片
}
}
"""
page = etree.HTML(text)
lis = page.xpath('/html/body/li')
gzh_article_list = []
for li in lis:
url = get_first_of_element(li, 'div[1]/h4/a/@href')
title = get_first_of_element(li, 'div[1]/h4/a/div/text()')
abstract = get_first_of_element(li, 'div[1]/p[1]/text()')
xpath_time = get_first_of_element(li, 'div[1]/p[2]')
open_id = get_first_of_element(xpath_time, 'span/@data-openid')
headimage = get_first_of_element(xpath_time, 'span/@data-headimage')
gzh_name = get_first_of_element(xpath_time, 'span/text()')
send_time = xpath_time.xpath('a/span/@data-lastmodified')
main_img = get_first_of_element(li, 'div[2]/a/img/@src')
try:
send_time = int(send_time[0])
except ValueError:
send_time = send_time[0]
gzh_article_list.append({
'gzh': {
'headimage': headimage,
'wechat_name': gzh_name,
},
'article': {
'url': url,
'title': title,
'abstract': abstract,
'time': send_time,
'open_id': open_id,
'main_img': main_img
}
})
return gzh_article_list |
def get(self):
""" :return valid :mq::flopsy::Publisher instance """
if len(self.publishers) == 0:
return Publisher(name=self.name, parent_pool=self)
else:
return self.publishers.pop() | :return valid :mq::flopsy::Publisher instance | Below is the the instruction that describes the task:
### Input:
:return valid :mq::flopsy::Publisher instance
### Response:
def get(self):
""" :return valid :mq::flopsy::Publisher instance """
if len(self.publishers) == 0:
return Publisher(name=self.name, parent_pool=self)
else:
return self.publishers.pop() |
def idle_task(self):
'''called in idle time'''
try:
datagram = self.port.recvfrom(self.BUFFER_SIZE)
data = json.loads(datagram[0])
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return
raise
for key in data.keys():
self.data[key] = data[key]
try:
self.master.mav.gps_input_send(
self.data['time_usec'],
self.data['gps_id'],
self.data['ignore_flags'],
self.data['time_week_ms'],
self.data['time_week'],
self.data['fix_type'],
self.data['lat'],
self.data['lon'],
self.data['alt'],
self.data['hdop'],
self.data['vdop'],
self.data['vn'],
self.data['ve'],
self.data['vd'],
self.data['speed_accuracy'],
self.data['horiz_accuracy'],
self.data['vert_accuracy'],
self.data['satellites_visible'])
except Exception as e:
print("GPS Input Failed:", e) | called in idle time | Below is the the instruction that describes the task:
### Input:
called in idle time
### Response:
def idle_task(self):
'''called in idle time'''
try:
datagram = self.port.recvfrom(self.BUFFER_SIZE)
data = json.loads(datagram[0])
except socket.error as e:
if e.errno in [ errno.EAGAIN, errno.EWOULDBLOCK ]:
return
raise
for key in data.keys():
self.data[key] = data[key]
try:
self.master.mav.gps_input_send(
self.data['time_usec'],
self.data['gps_id'],
self.data['ignore_flags'],
self.data['time_week_ms'],
self.data['time_week'],
self.data['fix_type'],
self.data['lat'],
self.data['lon'],
self.data['alt'],
self.data['hdop'],
self.data['vdop'],
self.data['vn'],
self.data['ve'],
self.data['vd'],
self.data['speed_accuracy'],
self.data['horiz_accuracy'],
self.data['vert_accuracy'],
self.data['satellites_visible'])
except Exception as e:
print("GPS Input Failed:", e) |
def create_folder_courses(self, name, course_id, hidden=None, lock_at=None, locked=None, parent_folder_id=None, parent_folder_path=None, position=None, unlock_at=None):
"""
Create folder.
Creates a folder in the specified context
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - name
"""The name of the folder"""
data["name"] = name
# OPTIONAL - parent_folder_id
"""The id of the folder to store the file in. If this and parent_folder_path are sent an error will be returned. If neither is given, a default folder will be used."""
if parent_folder_id is not None:
data["parent_folder_id"] = parent_folder_id
# OPTIONAL - parent_folder_path
"""The path of the folder to store the new folder in. The path separator is the forward slash `/`, never a back slash. The parent folder will be created if it does not already exist. This parameter only applies to new folders in a context that has folders, such as a user, a course, or a group. If this and parent_folder_id are sent an error will be returned. If neither is given, a default folder will be used."""
if parent_folder_path is not None:
data["parent_folder_path"] = parent_folder_path
# OPTIONAL - lock_at
"""The datetime to lock the folder at"""
if lock_at is not None:
data["lock_at"] = lock_at
# OPTIONAL - unlock_at
"""The datetime to unlock the folder at"""
if unlock_at is not None:
data["unlock_at"] = unlock_at
# OPTIONAL - locked
"""Flag the folder as locked"""
if locked is not None:
data["locked"] = locked
# OPTIONAL - hidden
"""Flag the folder as hidden"""
if hidden is not None:
data["hidden"] = hidden
# OPTIONAL - position
"""Set an explicit sort position for the folder"""
if position is not None:
data["position"] = position
self.logger.debug("POST /api/v1/courses/{course_id}/folders with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/folders".format(**path), data=data, params=params, single_item=True) | Create folder.
Creates a folder in the specified context | Below is the the instruction that describes the task:
### Input:
Create folder.
Creates a folder in the specified context
### Response:
def create_folder_courses(self, name, course_id, hidden=None, lock_at=None, locked=None, parent_folder_id=None, parent_folder_path=None, position=None, unlock_at=None):
"""
Create folder.
Creates a folder in the specified context
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - name
"""The name of the folder"""
data["name"] = name
# OPTIONAL - parent_folder_id
"""The id of the folder to store the file in. If this and parent_folder_path are sent an error will be returned. If neither is given, a default folder will be used."""
if parent_folder_id is not None:
data["parent_folder_id"] = parent_folder_id
# OPTIONAL - parent_folder_path
"""The path of the folder to store the new folder in. The path separator is the forward slash `/`, never a back slash. The parent folder will be created if it does not already exist. This parameter only applies to new folders in a context that has folders, such as a user, a course, or a group. If this and parent_folder_id are sent an error will be returned. If neither is given, a default folder will be used."""
if parent_folder_path is not None:
data["parent_folder_path"] = parent_folder_path
# OPTIONAL - lock_at
"""The datetime to lock the folder at"""
if lock_at is not None:
data["lock_at"] = lock_at
# OPTIONAL - unlock_at
"""The datetime to unlock the folder at"""
if unlock_at is not None:
data["unlock_at"] = unlock_at
# OPTIONAL - locked
"""Flag the folder as locked"""
if locked is not None:
data["locked"] = locked
# OPTIONAL - hidden
"""Flag the folder as hidden"""
if hidden is not None:
data["hidden"] = hidden
# OPTIONAL - position
"""Set an explicit sort position for the folder"""
if position is not None:
data["position"] = position
self.logger.debug("POST /api/v1/courses/{course_id}/folders with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/courses/{course_id}/folders".format(**path), data=data, params=params, single_item=True) |
def check_csrf_token():
"""Checks that token is correct, aborting if not"""
if request.method in ("GET",): # not exhaustive list
return
token = request.form.get("csrf_token")
if token is None:
app.logger.warning("Expected CSRF Token: not present")
abort(400)
if not safe_str_cmp(token, csrf_token()):
app.logger.warning("CSRF Token incorrect")
abort(400) | Checks that token is correct, aborting if not | Below is the the instruction that describes the task:
### Input:
Checks that token is correct, aborting if not
### Response:
def check_csrf_token():
"""Checks that token is correct, aborting if not"""
if request.method in ("GET",): # not exhaustive list
return
token = request.form.get("csrf_token")
if token is None:
app.logger.warning("Expected CSRF Token: not present")
abort(400)
if not safe_str_cmp(token, csrf_token()):
app.logger.warning("CSRF Token incorrect")
abort(400) |
def emitRecordMiddleDoubleClicked(self, item):
"""
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem>
"""
# emit that the record has been double clicked
if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
self.recordMiddleDoubleClicked.emit(item.record()) | Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem> | Below is the the instruction that describes the task:
### Input:
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem>
### Response:
def emitRecordMiddleDoubleClicked(self, item):
"""
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem>
"""
# emit that the record has been double clicked
if isinstance(item, XOrbRecordItem) and not self.signalsBlocked():
self.recordMiddleDoubleClicked.emit(item.record()) |
def warn(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'WARN'."""
if six.PY3:
warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
DeprecationWarning, 2)
self.log(logging.WARN, msg, *args, **kwargs) | Logs 'msg % args' with severity 'WARN'. | Below is the the instruction that describes the task:
### Input:
Logs 'msg % args' with severity 'WARN'.
### Response:
def warn(self, msg, *args, **kwargs):
"""Logs 'msg % args' with severity 'WARN'."""
if six.PY3:
warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
DeprecationWarning, 2)
self.log(logging.WARN, msg, *args, **kwargs) |
def GetFileObject(self, data_stream_name=''):
"""Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
TSKFileIO: file-like object or None.
"""
data_stream_names = [
data_stream.name for data_stream in self._GetDataStreams()]
if data_stream_name and data_stream_name not in data_stream_names:
return None
path_spec = copy.deepcopy(self.path_spec)
if data_stream_name:
# For HFS DECOMP fork name is exposed however libtsk 4.6.0 seems to handle
# these differently when opened and the correct behavior seems to be
# treating this as the default (nameless) fork instead. For context libtsk
# 4.5.0 is unable to read the data steam and yields an error.
if self._file_system.IsHFS() and data_stream_name == 'DECOMP':
data_stream_name = ''
setattr(path_spec, 'data_stream', data_stream_name)
return resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context) | Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
TSKFileIO: file-like object or None. | Below is the the instruction that describes the task:
### Input:
Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
TSKFileIO: file-like object or None.
### Response:
def GetFileObject(self, data_stream_name=''):
"""Retrieves the file-like object.
Args:
data_stream_name (Optional[str]): data stream name, where an empty
string represents the default data stream.
Returns:
TSKFileIO: file-like object or None.
"""
data_stream_names = [
data_stream.name for data_stream in self._GetDataStreams()]
if data_stream_name and data_stream_name not in data_stream_names:
return None
path_spec = copy.deepcopy(self.path_spec)
if data_stream_name:
# For HFS DECOMP fork name is exposed however libtsk 4.6.0 seems to handle
# these differently when opened and the correct behavior seems to be
# treating this as the default (nameless) fork instead. For context libtsk
# 4.5.0 is unable to read the data steam and yields an error.
if self._file_system.IsHFS() and data_stream_name == 'DECOMP':
data_stream_name = ''
setattr(path_spec, 'data_stream', data_stream_name)
return resolver.Resolver.OpenFileObject(
path_spec, resolver_context=self._resolver_context) |
def runner(self, load):
'''
Send a master control function back to the runner system
'''
# All runner opts pass through eauth
auth_type, err_name, key = self._prep_auth_info(load)
# Authenticate
auth_check = self.loadauth.check_authentication(load, auth_type)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
load['fun'],
load['kwarg']
)
username = auth_check.get('username')
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred '
'for user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# Authorized. Do the job!
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
load.get('kwarg', {}),
username)
except Exception as exc:
log.exception('Exception occurred while introspecting %s')
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}} | Send a master control function back to the runner system | Below is the the instruction that describes the task:
### Input:
Send a master control function back to the runner system
### Response:
def runner(self, load):
'''
Send a master control function back to the runner system
'''
# All runner opts pass through eauth
auth_type, err_name, key = self._prep_auth_info(load)
# Authenticate
auth_check = self.loadauth.check_authentication(load, auth_type)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
load['fun'],
load['kwarg']
)
username = auth_check.get('username')
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred '
'for user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# Authorized. Do the job!
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
load.get('kwarg', {}),
username)
except Exception as exc:
log.exception('Exception occurred while introspecting %s')
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}} |
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
x-coordinate of the top left corner of the text.
x : int
y- coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
img = PIL_Image.fromarray(img)
font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
context = PIL_ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
# PIL/asarray returns read only array
if not img_np.flags["WRITEABLE"]:
try:
# this seems to no longer work with np 1.16 (or was pillow updated?)
img_np.setflags(write=True)
except ValueError as ex:
if "cannot set WRITEABLE flag to True of this array" in str(ex):
img_np = np.copy(img_np)
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np | Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
x-coordinate of the top left corner of the text.
x : int
y- coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it. | Below is the the instruction that describes the task:
### Input:
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
x-coordinate of the top left corner of the text.
x : int
y- coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
### Response:
def draw_text(img, y, x, text, color=(0, 255, 0), size=25):
"""
Draw text on an image.
This uses by default DejaVuSans as its font, which is included in this library.
dtype support::
* ``uint8``: yes; fully tested
* ``uint16``: no
* ``uint32``: no
* ``uint64``: no
* ``int8``: no
* ``int16``: no
* ``int32``: no
* ``int64``: no
* ``float16``: no
* ``float32``: yes; not tested
* ``float64``: no
* ``float128``: no
* ``bool``: no
TODO check if other dtypes could be enabled
Parameters
----------
img : (H,W,3) ndarray
The image array to draw text on.
Expected to be of dtype uint8 or float32 (value range 0.0 to 255.0).
y : int
x-coordinate of the top left corner of the text.
x : int
y- coordinate of the top left corner of the text.
text : str
The text to draw.
color : iterable of int, optional
Color of the text to draw. For RGB-images this is expected to be an RGB color.
size : int, optional
Font size of the text to draw.
Returns
-------
img_np : (H,W,3) ndarray
Input image with text drawn on it.
"""
do_assert(img.dtype in [np.uint8, np.float32])
input_dtype = img.dtype
if img.dtype == np.float32:
img = img.astype(np.uint8)
img = PIL_Image.fromarray(img)
font = PIL_ImageFont.truetype(DEFAULT_FONT_FP, size)
context = PIL_ImageDraw.Draw(img)
context.text((x, y), text, fill=tuple(color), font=font)
img_np = np.asarray(img)
# PIL/asarray returns read only array
if not img_np.flags["WRITEABLE"]:
try:
# this seems to no longer work with np 1.16 (or was pillow updated?)
img_np.setflags(write=True)
except ValueError as ex:
if "cannot set WRITEABLE flag to True of this array" in str(ex):
img_np = np.copy(img_np)
if img_np.dtype != input_dtype:
img_np = img_np.astype(input_dtype)
return img_np |
def content_type(self, content_type):
"""Sets the content_type of this Notificant.
The value of the Content-Type header of the webhook POST request. # noqa: E501
:param content_type: The content_type of this Notificant. # noqa: E501
:type: str
"""
allowed_values = ["application/json", "text/html", "text/plain", "application/x-www-form-urlencoded", ""] # noqa: E501
if content_type not in allowed_values:
raise ValueError(
"Invalid value for `content_type` ({0}), must be one of {1}" # noqa: E501
.format(content_type, allowed_values)
)
self._content_type = content_type | Sets the content_type of this Notificant.
The value of the Content-Type header of the webhook POST request. # noqa: E501
:param content_type: The content_type of this Notificant. # noqa: E501
:type: str | Below is the the instruction that describes the task:
### Input:
Sets the content_type of this Notificant.
The value of the Content-Type header of the webhook POST request. # noqa: E501
:param content_type: The content_type of this Notificant. # noqa: E501
:type: str
### Response:
def content_type(self, content_type):
"""Sets the content_type of this Notificant.
The value of the Content-Type header of the webhook POST request. # noqa: E501
:param content_type: The content_type of this Notificant. # noqa: E501
:type: str
"""
allowed_values = ["application/json", "text/html", "text/plain", "application/x-www-form-urlencoded", ""] # noqa: E501
if content_type not in allowed_values:
raise ValueError(
"Invalid value for `content_type` ({0}), must be one of {1}" # noqa: E501
.format(content_type, allowed_values)
)
self._content_type = content_type |
def _rsaes_oaep_decrypt(self, C, h=None, mgf=None, L=None):
"""
Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
7.1.2 of RFC 3447. Not intended to be used directly. Please, see
encrypt() method for type "OAEP".
Input:
C : ciphertext to be decrypted, an octet string of length k, where
k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
and hLen the length in octets of the hash function output)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). 'sha1' is used if none is provided.
mgf: the mask generation function f : seed, maskLen -> mask
L : optional label whose association with the message is to be
verified; the default value for L, if not provided is the empty
string.
Output:
message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
On error, None is returned.
"""
# The steps below are the one described in Sect. 7.1.2 of RFC 3447.
# 1) Length Checking
# 1.a) is not done
if h is None:
h = "sha1"
if not h in _hashFuncParams:
warning("Key._rsaes_oaep_decrypt(): unknown hash function %s.", h)
return None
hLen = _hashFuncParams[h][0]
hFun = _hashFuncParams[h][1]
k = self.modulusLen / 8
cLen = len(C)
if cLen != k: # 1.b)
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(cLen != k)")
return None
if k < 2*hLen + 2:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(k < 2*hLen + 2)")
return None
# 2) RSA decryption
c = pkcs_os2ip(C) # 2.a)
m = self._rsadp(c) # 2.b)
EM = pkcs_i2osp(m, k) # 2.c)
# 3) EME-OAEP decoding
if L is None: # 3.a)
L = ""
lHash = hFun(L)
Y = EM[:1] # 3.b)
if Y != '\x00':
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(Y is not zero)")
return None
maskedSeed = EM[1:1+hLen]
maskedDB = EM[1+hLen:]
if mgf is None:
mgf = lambda x,y: pkcs_mgf1(x, y, h)
seedMask = mgf(maskedDB, hLen) # 3.c)
seed = strxor(maskedSeed, seedMask) # 3.d)
dbMask = mgf(seed, k - hLen - 1) # 3.e)
DB = strxor(maskedDB, dbMask) # 3.f)
# I am aware of the note at the end of 7.1.2 regarding error
# conditions reporting but the one provided below are for _local_
# debugging purposes. --arno
lHashPrime = DB[:hLen] # 3.g)
tmp = DB[hLen:].split('\x01', 1)
if len(tmp) != 2:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(0x01 separator not found)")
return None
PS, M = tmp
if PS != '\x00'*len(PS):
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(invalid padding string)")
return None
if lHash != lHashPrime:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(invalid hash)")
return None
return M | Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
7.1.2 of RFC 3447. Not intended to be used directly. Please, see
encrypt() method for type "OAEP".
Input:
C : ciphertext to be decrypted, an octet string of length k, where
k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
and hLen the length in octets of the hash function output)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). 'sha1' is used if none is provided.
mgf: the mask generation function f : seed, maskLen -> mask
L : optional label whose association with the message is to be
verified; the default value for L, if not provided is the empty
string.
Output:
message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
On error, None is returned. | Below is the the instruction that describes the task:
### Input:
Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
7.1.2 of RFC 3447. Not intended to be used directly. Please, see
encrypt() method for type "OAEP".
Input:
C : ciphertext to be decrypted, an octet string of length k, where
k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
and hLen the length in octets of the hash function output)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). 'sha1' is used if none is provided.
mgf: the mask generation function f : seed, maskLen -> mask
L : optional label whose association with the message is to be
verified; the default value for L, if not provided is the empty
string.
Output:
message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
On error, None is returned.
### Response:
def _rsaes_oaep_decrypt(self, C, h=None, mgf=None, L=None):
"""
Internal method providing RSAES-OAEP-DECRYPT as defined in Sect.
7.1.2 of RFC 3447. Not intended to be used directly. Please, see
encrypt() method for type "OAEP".
Input:
C : ciphertext to be decrypted, an octet string of length k, where
k = 2*hLen + 2 (k denotes the length in octets of the RSA modulus
and hLen the length in octets of the hash function output)
h : hash function name (in 'md2', 'md4', 'md5', 'sha1', 'tls',
'sha256', 'sha384'). 'sha1' is used if none is provided.
mgf: the mask generation function f : seed, maskLen -> mask
L : optional label whose association with the message is to be
verified; the default value for L, if not provided is the empty
string.
Output:
message, an octet string of length k mLen, where mLen <= k - 2*hLen - 2
On error, None is returned.
"""
# The steps below are the one described in Sect. 7.1.2 of RFC 3447.
# 1) Length Checking
# 1.a) is not done
if h is None:
h = "sha1"
if not h in _hashFuncParams:
warning("Key._rsaes_oaep_decrypt(): unknown hash function %s.", h)
return None
hLen = _hashFuncParams[h][0]
hFun = _hashFuncParams[h][1]
k = self.modulusLen / 8
cLen = len(C)
if cLen != k: # 1.b)
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(cLen != k)")
return None
if k < 2*hLen + 2:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(k < 2*hLen + 2)")
return None
# 2) RSA decryption
c = pkcs_os2ip(C) # 2.a)
m = self._rsadp(c) # 2.b)
EM = pkcs_i2osp(m, k) # 2.c)
# 3) EME-OAEP decoding
if L is None: # 3.a)
L = ""
lHash = hFun(L)
Y = EM[:1] # 3.b)
if Y != '\x00':
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(Y is not zero)")
return None
maskedSeed = EM[1:1+hLen]
maskedDB = EM[1+hLen:]
if mgf is None:
mgf = lambda x,y: pkcs_mgf1(x, y, h)
seedMask = mgf(maskedDB, hLen) # 3.c)
seed = strxor(maskedSeed, seedMask) # 3.d)
dbMask = mgf(seed, k - hLen - 1) # 3.e)
DB = strxor(maskedDB, dbMask) # 3.f)
# I am aware of the note at the end of 7.1.2 regarding error
# conditions reporting but the one provided below are for _local_
# debugging purposes. --arno
lHashPrime = DB[:hLen] # 3.g)
tmp = DB[hLen:].split('\x01', 1)
if len(tmp) != 2:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(0x01 separator not found)")
return None
PS, M = tmp
if PS != '\x00'*len(PS):
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(invalid padding string)")
return None
if lHash != lHashPrime:
warning("Key._rsaes_oaep_decrypt(): decryption error. "
"(invalid hash)")
return None
return M |
def chain(first_converter, second_converter, strict: bool):
"""
Utility method to chain two converters. If any of them is already a ConversionChain, this method "unpacks" it
first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different
from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict'
chain, to produce a 'non-strict' chain.
:param first_converter:
:param second_converter:
:param strict:
:return:
"""
if isinstance(first_converter, ConversionChain):
if isinstance(second_converter, ConversionChain):
# BOTH are chains
if (first_converter.strict == strict) and (second_converter.strict == strict):
return first_converter.add_conversion_steps(second_converter._converters_list)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=first_converter._converters_list,
strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError('Trying to chain conversion chains with different strict modes than expected')
else:
# FIRST is a chain
if strict == first_converter.strict:
return first_converter.add_conversion_step(second_converter)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=[second_converter], strict_chaining=False) \
.insert_conversion_steps_at_beginning(first_converter._converters_list)
else:
raise ValueError('Trying to chain after a conversion chain that has different strict mode than '
'expected')
else:
if isinstance(second_converter, ConversionChain):
# SECOND is a chain
if strict == second_converter.strict:
return second_converter.insert_conversion_step_at_beginning(first_converter)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=[first_converter], strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError(
'Trying to chain before a conversion chain that has different strict mode than '
'expected')
else:
# Neither is a chain
return ConversionChain([first_converter, second_converter], strict) | Utility method to chain two converters. If any of them is already a ConversionChain, this method "unpacks" it
first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different
from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict'
chain, to produce a 'non-strict' chain.
:param first_converter:
:param second_converter:
:param strict:
:return: | Below is the the instruction that describes the task:
### Input:
Utility method to chain two converters. If any of them is already a ConversionChain, this method "unpacks" it
first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different
from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict'
chain, to produce a 'non-strict' chain.
:param first_converter:
:param second_converter:
:param strict:
:return:
### Response:
def chain(first_converter, second_converter, strict: bool):
"""
Utility method to chain two converters. If any of them is already a ConversionChain, this method "unpacks" it
first. Note: the created conversion chain is created with the provided 'strict' flag, that may be different
from the ones of the converters (if compliant). For example you may chain a 'strict' chain with a 'non-strict'
chain, to produce a 'non-strict' chain.
:param first_converter:
:param second_converter:
:param strict:
:return:
"""
if isinstance(first_converter, ConversionChain):
if isinstance(second_converter, ConversionChain):
# BOTH are chains
if (first_converter.strict == strict) and (second_converter.strict == strict):
return first_converter.add_conversion_steps(second_converter._converters_list)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=first_converter._converters_list,
strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError('Trying to chain conversion chains with different strict modes than expected')
else:
# FIRST is a chain
if strict == first_converter.strict:
return first_converter.add_conversion_step(second_converter)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=[second_converter], strict_chaining=False) \
.insert_conversion_steps_at_beginning(first_converter._converters_list)
else:
raise ValueError('Trying to chain after a conversion chain that has different strict mode than '
'expected')
else:
if isinstance(second_converter, ConversionChain):
# SECOND is a chain
if strict == second_converter.strict:
return second_converter.insert_conversion_step_at_beginning(first_converter)
else:
if not strict:
# create a non-strict chain
return ConversionChain(initial_converters=[first_converter], strict_chaining=False) \
.add_conversion_steps(second_converter._converters_list)
else:
raise ValueError(
'Trying to chain before a conversion chain that has different strict mode than '
'expected')
else:
# Neither is a chain
return ConversionChain([first_converter, second_converter], strict) |
def loads_json(p_str, custom=None, meta=False, verbose=0):
"""
Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
"""
data = json.loads(p_str)
if meta:
md = {}
for item in data:
if item != "models":
md[item] = data[item]
return ecp_dict_to_objects(data, custom, verbose=verbose), md
else:
return ecp_dict_to_objects(data, custom, verbose=verbose) | Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict | Below is the the instruction that describes the task:
### Input:
Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
### Response:
def loads_json(p_str, custom=None, meta=False, verbose=0):
"""
Given a json string it creates a dictionary of sfsi objects
:param ffp: str, Full file path to json file
:param custom: dict, used to load custom objects, {model type: custom object}
:param meta: bool, if true then also return all ecp meta data in separate dict
:param verbose: int, console output
:return: dict
"""
data = json.loads(p_str)
if meta:
md = {}
for item in data:
if item != "models":
md[item] = data[item]
return ecp_dict_to_objects(data, custom, verbose=verbose), md
else:
return ecp_dict_to_objects(data, custom, verbose=verbose) |
def correct(text: str, matches: [Match]) -> str:
"""Automatically apply suggestions to the text."""
ltext = list(text)
matches = [match for match in matches if match.replacements]
errors = [ltext[match.offset:match.offset + match.errorlength]
for match in matches]
correct_offset = 0
for n, match in enumerate(matches):
frompos, topos = (correct_offset + match.offset,
correct_offset + match.offset + match.errorlength)
if ltext[frompos:topos] != errors[n]:
continue
repl = match.replacements[0]
ltext[frompos:topos] = list(repl)
correct_offset += len(repl) - len(errors[n])
return ''.join(ltext) | Automatically apply suggestions to the text. | Below is the the instruction that describes the task:
### Input:
Automatically apply suggestions to the text.
### Response:
def correct(text: str, matches: [Match]) -> str:
"""Automatically apply suggestions to the text."""
ltext = list(text)
matches = [match for match in matches if match.replacements]
errors = [ltext[match.offset:match.offset + match.errorlength]
for match in matches]
correct_offset = 0
for n, match in enumerate(matches):
frompos, topos = (correct_offset + match.offset,
correct_offset + match.offset + match.errorlength)
if ltext[frompos:topos] != errors[n]:
continue
repl = match.replacements[0]
ltext[frompos:topos] = list(repl)
correct_offset += len(repl) - len(errors[n])
return ''.join(ltext) |
def create_key_filter(properties: Dict[str, list]) -> List[Tuple]:
"""Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own', 'fi'}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
"""
combinations = (product([k], v) for k, v in properties.items())
return chain.from_iterable(combinations) | Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own', 'fi'}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')] | Below is the the instruction that describes the task:
### Input:
Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own', 'fi'}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
### Response:
def create_key_filter(properties: Dict[str, list]) -> List[Tuple]:
"""Generate combinations of key, value pairs for each key in properties.
Examples
--------
properties = {'ent': ['geo_rev', 'supply_chain'], 'own', 'fi'}
>> create_key_filter(properties)
--> [('ent', 'geo_rev'), ('ent', 'supply_chain'), ('own', 'fi')]
"""
combinations = (product([k], v) for k, v in properties.items())
return chain.from_iterable(combinations) |
def new(self, name, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, contains_phi=None, tags=None,
properties=None, bill_to=None, **kwargs):
"""
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type name: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
"""
input_hash = {}
input_hash["name"] = name
if summary is not None:
input_hash["summary"] = summary
if description is not None:
input_hash["description"] = description
if protected is not None:
input_hash["protected"] = protected
if restricted is not None:
input_hash["restricted"] = restricted
if download_restricted is not None:
input_hash["downloadRestricted"] = download_restricted
if contains_phi is not None:
input_hash["containsPHI"] = contains_phi
if bill_to is not None:
input_hash["billTo"] = bill_to
if tags is not None:
input_hash["tags"] = tags
if properties is not None:
input_hash["properties"] = properties
self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"])
self._desc = {}
return self._dxid | :param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type name: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info. | Below is the the instruction that describes the task:
### Input:
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type name: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
### Response:
def new(self, name, summary=None, description=None, protected=None,
restricted=None, download_restricted=None, contains_phi=None, tags=None,
properties=None, bill_to=None, **kwargs):
"""
:param name: The name of the project
:type name: string
:param summary: If provided, a short summary of what the project contains
:type summary: string
:param description: If provided, the new project description
:type name: string
:param protected: If provided, whether the project should be protected
:type protected: boolean
:param restricted: If provided, whether the project should be restricted
:type restricted: boolean
:param download_restricted: If provided, whether external downloads should be restricted
:type download_restricted: boolean
:param contains_phi: If provided, whether the project should be marked as containing protected health information (PHI)
:type contains_phi: boolean
:param tags: If provided, tags to associate with the project
:type tags: list of strings
:param properties: If provided, properties to associate with the project
:type properties: dict
:param bill_to: If provided, ID of the entity to which any costs associated with this project will be billed; must be the ID of the requesting user or an org of which the requesting user is a member with allowBillableActivities permission
:type bill_to: string
Creates a new project. Initially only the user performing this action
will be in the permissions/member list, with ADMINISTER access.
See the API documentation for the `/project/new
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject%2Fnew>`_
method for more info.
"""
input_hash = {}
input_hash["name"] = name
if summary is not None:
input_hash["summary"] = summary
if description is not None:
input_hash["description"] = description
if protected is not None:
input_hash["protected"] = protected
if restricted is not None:
input_hash["restricted"] = restricted
if download_restricted is not None:
input_hash["downloadRestricted"] = download_restricted
if contains_phi is not None:
input_hash["containsPHI"] = contains_phi
if bill_to is not None:
input_hash["billTo"] = bill_to
if tags is not None:
input_hash["tags"] = tags
if properties is not None:
input_hash["properties"] = properties
self.set_id(dxpy.api.project_new(input_hash, **kwargs)["id"])
self._desc = {}
return self._dxid |
def add_specimen(self, spec_name, samp_name=None, er_data=None, pmag_data=None):
"""
Create a Specimen object and add it to self.specimens.
If a sample name is provided, add the specimen to sample.specimens as well.
"""
if samp_name:
sample = self.find_by_name(samp_name, self.samples)
if not sample:
print("""-W- {} is not a currently existing sample.
Creating a new sample named: {} """.format(samp_name, samp_name))
sample = self.add_sample(samp_name)
else:
sample = None
specimen = Specimen(spec_name, sample, self.data_model, er_data, pmag_data)
self.specimens.append(specimen)
if sample:
sample.specimens.append(specimen)
return specimen | Create a Specimen object and add it to self.specimens.
If a sample name is provided, add the specimen to sample.specimens as well. | Below is the the instruction that describes the task:
### Input:
Create a Specimen object and add it to self.specimens.
If a sample name is provided, add the specimen to sample.specimens as well.
### Response:
def add_specimen(self, spec_name, samp_name=None, er_data=None, pmag_data=None):
"""
Create a Specimen object and add it to self.specimens.
If a sample name is provided, add the specimen to sample.specimens as well.
"""
if samp_name:
sample = self.find_by_name(samp_name, self.samples)
if not sample:
print("""-W- {} is not a currently existing sample.
Creating a new sample named: {} """.format(samp_name, samp_name))
sample = self.add_sample(samp_name)
else:
sample = None
specimen = Specimen(spec_name, sample, self.data_model, er_data, pmag_data)
self.specimens.append(specimen)
if sample:
sample.specimens.append(specimen)
return specimen |
def create_class_from_xml_string(target_class, xml_string):
"""Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class.
"""
if not isinstance(xml_string, six.binary_type):
xml_string = xml_string.encode('utf-8')
tree = defusedxml.ElementTree.fromstring(xml_string)
return create_class_from_element_tree(target_class, tree) | Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class. | Below is the the instruction that describes the task:
### Input:
Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class.
### Response:
def create_class_from_xml_string(target_class, xml_string):
"""Creates an instance of the target class from a string.
:param target_class: The class which will be instantiated and populated
with the contents of the XML. This class must have a c_tag and a
c_namespace class variable.
:param xml_string: A string which contains valid XML. The root element
of the XML string should match the tag and namespace of the desired
class.
:return: An instance of the target class with members assigned according to
the contents of the XML - or None if the root XML tag and namespace did
not match those of the target class.
"""
if not isinstance(xml_string, six.binary_type):
xml_string = xml_string.encode('utf-8')
tree = defusedxml.ElementTree.fromstring(xml_string)
return create_class_from_element_tree(target_class, tree) |
def get_status(self, hosts, services):
"""Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str
"""
if self.got_business_rule:
mapping = {
0: "UP",
1: "DOWN",
4: "UNREACHABLE",
}
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return self.state | Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str | Below is the the instruction that describes the task:
### Input:
Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str
### Response:
def get_status(self, hosts, services):
"""Get the status of this host
:return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
:rtype: str
"""
if self.got_business_rule:
mapping = {
0: "UP",
1: "DOWN",
4: "UNREACHABLE",
}
return mapping.get(self.business_rule.get_state(hosts, services), "n/a")
return self.state |
def google_app_engine_ndb_delete_expired_sessions(dormant_for=86400, limit=500):
"""
Deletes expired sessions
A session is expired if it expires date is set and has passed or
if it has not been accessed for a given period of time.
:param dormant_for: seconds since last access to delete sessions, defaults to 24 hours.
:type dormant_for: int
:param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500
:type limit: int
"""
from vishnu.backend.client.google_app_engine_ndb import VishnuSession
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
now = datetime.utcnow()
last_accessed = now - timedelta(seconds=dormant_for)
query = VishnuSession.query(ndb.OR(
ndb.AND(VishnuSession.expires <= now, VishnuSession.expires != None),
VishnuSession.last_accessed <= last_accessed
))
results = query.fetch(keys_only=True, limit=limit)
ndb.delete_multi(results)
return len(results) < limit | Deletes expired sessions
A session is expired if it expires date is set and has passed or
if it has not been accessed for a given period of time.
:param dormant_for: seconds since last access to delete sessions, defaults to 24 hours.
:type dormant_for: int
:param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500
:type limit: int | Below is the the instruction that describes the task:
### Input:
Deletes expired sessions
A session is expired if it expires date is set and has passed or
if it has not been accessed for a given period of time.
:param dormant_for: seconds since last access to delete sessions, defaults to 24 hours.
:type dormant_for: int
:param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500
:type limit: int
### Response:
def google_app_engine_ndb_delete_expired_sessions(dormant_for=86400, limit=500):
"""
Deletes expired sessions
A session is expired if it expires date is set and has passed or
if it has not been accessed for a given period of time.
:param dormant_for: seconds since last access to delete sessions, defaults to 24 hours.
:type dormant_for: int
:param limit: amount to delete in one call of the method, the maximum and default for this is the NDB fetch limit of 500
:type limit: int
"""
from vishnu.backend.client.google_app_engine_ndb import VishnuSession
from google.appengine.ext import ndb
from datetime import datetime
from datetime import timedelta
now = datetime.utcnow()
last_accessed = now - timedelta(seconds=dormant_for)
query = VishnuSession.query(ndb.OR(
ndb.AND(VishnuSession.expires <= now, VishnuSession.expires != None),
VishnuSession.last_accessed <= last_accessed
))
results = query.fetch(keys_only=True, limit=limit)
ndb.delete_multi(results)
return len(results) < limit |
def pinyin_syllable_to_ipa(s):
"""Convert Pinyin syllable *s* to an IPA syllable."""
pinyin_syllable, tone = _parse_pinyin_syllable(s)
try:
ipa_syllable = _PINYIN_MAP[pinyin_syllable.lower()]['IPA']
except KeyError:
raise ValueError('Not a valid syllable: %s' % s)
return ipa_syllable + _IPA_TONES[tone] | Convert Pinyin syllable *s* to an IPA syllable. | Below is the the instruction that describes the task:
### Input:
Convert Pinyin syllable *s* to an IPA syllable.
### Response:
def pinyin_syllable_to_ipa(s):
"""Convert Pinyin syllable *s* to an IPA syllable."""
pinyin_syllable, tone = _parse_pinyin_syllable(s)
try:
ipa_syllable = _PINYIN_MAP[pinyin_syllable.lower()]['IPA']
except KeyError:
raise ValueError('Not a valid syllable: %s' % s)
return ipa_syllable + _IPA_TONES[tone] |
def set_matrix(self, X, input_type):
"""Set the data matrix given the (string) input_type"""
if input_type == 'data':
self.set_data_matrix(X)
elif input_type == 'adjacency':
self.set_adjacency_matrix(X)
elif input_type == 'affinity':
self.set_affinity_matrix(X)
else:
raise ValueError("Unrecognized input_type: {0}".format(input_type)) | Set the data matrix given the (string) input_type | Below is the the instruction that describes the task:
### Input:
Set the data matrix given the (string) input_type
### Response:
def set_matrix(self, X, input_type):
"""Set the data matrix given the (string) input_type"""
if input_type == 'data':
self.set_data_matrix(X)
elif input_type == 'adjacency':
self.set_adjacency_matrix(X)
elif input_type == 'affinity':
self.set_affinity_matrix(X)
else:
raise ValueError("Unrecognized input_type: {0}".format(input_type)) |
def bleu_advanced(y_true: List[Any], y_predicted: List[Any],
weights: Tuple=(1,), smoothing_function=SMOOTH.method1,
auto_reweigh=False, penalty=True) -> float:
"""Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: either enable brevity penalty or not
Return:
BLEU score
"""
bleu_measure = sentence_bleu([y_true], y_predicted, weights, smoothing_function, auto_reweigh)
hyp_len = len(y_predicted)
hyp_lengths = hyp_len
ref_lengths = closest_ref_length([y_true], hyp_len)
bpenalty = brevity_penalty(ref_lengths, hyp_lengths)
if penalty is True or bpenalty == 0:
return bleu_measure
return bleu_measure/bpenalty | Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: either enable brevity penalty or not
Return:
BLEU score | Below is the the instruction that describes the task:
### Input:
Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: either enable brevity penalty or not
Return:
BLEU score
### Response:
def bleu_advanced(y_true: List[Any], y_predicted: List[Any],
weights: Tuple=(1,), smoothing_function=SMOOTH.method1,
auto_reweigh=False, penalty=True) -> float:
"""Calculate BLEU score
Parameters:
y_true: list of reference tokens
y_predicted: list of query tokens
weights: n-gram weights
smoothing_function: SmoothingFunction
auto_reweigh: Option to re-normalize the weights uniformly
penalty: either enable brevity penalty or not
Return:
BLEU score
"""
bleu_measure = sentence_bleu([y_true], y_predicted, weights, smoothing_function, auto_reweigh)
hyp_len = len(y_predicted)
hyp_lengths = hyp_len
ref_lengths = closest_ref_length([y_true], hyp_len)
bpenalty = brevity_penalty(ref_lengths, hyp_lengths)
if penalty is True or bpenalty == 0:
return bleu_measure
return bleu_measure/bpenalty |
def group_nodes_by_annotation(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation."""
result = defaultdict(set)
for u, v, d in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue
result[d[ANNOTATIONS][annotation]].add(u)
result[d[ANNOTATIONS][annotation]].add(v)
return dict(result) | Group the nodes occurring in edges by the given annotation. | Below is the the instruction that describes the task:
### Input:
Group the nodes occurring in edges by the given annotation.
### Response:
def group_nodes_by_annotation(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation."""
result = defaultdict(set)
for u, v, d in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue
result[d[ANNOTATIONS][annotation]].add(u)
result[d[ANNOTATIONS][annotation]].add(v)
return dict(result) |
def get_statements(self):
"""Return a list of all Statements in a single list.
Returns
-------
stmts : list[indra.statements.Statement]
A list of all the INDRA Statements in the model.
"""
stmt_lists = [v for k, v in self.stmts.items()]
stmts = []
for s in stmt_lists:
stmts += s
return stmts | Return a list of all Statements in a single list.
Returns
-------
stmts : list[indra.statements.Statement]
A list of all the INDRA Statements in the model. | Below is the the instruction that describes the task:
### Input:
Return a list of all Statements in a single list.
Returns
-------
stmts : list[indra.statements.Statement]
A list of all the INDRA Statements in the model.
### Response:
def get_statements(self):
"""Return a list of all Statements in a single list.
Returns
-------
stmts : list[indra.statements.Statement]
A list of all the INDRA Statements in the model.
"""
stmt_lists = [v for k, v in self.stmts.items()]
stmts = []
for s in stmt_lists:
stmts += s
return stmts |
def get_streams(self, game=None, channels=None, limit=25, offset=0):
"""Return a list of streams queried by a number of parameters
sorted by number of viewers descending
:param game: the game or name of the game
:type game: :class:`str` | :class:`models.Game`
:param channels: list of models.Channels or channel names (can be mixed)
:type channels: :class:`list` of :class:`models.Channel` or :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream`
:raises: None
"""
if isinstance(game, models.Game):
game = game.name
channelnames = []
cparam = None
if channels:
for c in channels:
if isinstance(c, models.Channel):
c = c.name
channelnames.append(c)
cparam = ','.join(channelnames)
params = {'limit': limit,
'offset': offset,
'game': game,
'channel': cparam}
r = self.kraken_request('GET', 'streams', params=params)
return models.Stream.wrap_search(r) | Return a list of streams queried by a number of parameters
sorted by number of viewers descending
:param game: the game or name of the game
:type game: :class:`str` | :class:`models.Game`
:param channels: list of models.Channels or channel names (can be mixed)
:type channels: :class:`list` of :class:`models.Channel` or :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream`
:raises: None | Below is the the instruction that describes the task:
### Input:
Return a list of streams queried by a number of parameters
sorted by number of viewers descending
:param game: the game or name of the game
:type game: :class:`str` | :class:`models.Game`
:param channels: list of models.Channels or channel names (can be mixed)
:type channels: :class:`list` of :class:`models.Channel` or :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream`
:raises: None
### Response:
def get_streams(self, game=None, channels=None, limit=25, offset=0):
"""Return a list of streams queried by a number of parameters
sorted by number of viewers descending
:param game: the game or name of the game
:type game: :class:`str` | :class:`models.Game`
:param channels: list of models.Channels or channel names (can be mixed)
:type channels: :class:`list` of :class:`models.Channel` or :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of streams
:rtype: :class:`list` of :class:`models.Stream`
:raises: None
"""
if isinstance(game, models.Game):
game = game.name
channelnames = []
cparam = None
if channels:
for c in channels:
if isinstance(c, models.Channel):
c = c.name
channelnames.append(c)
cparam = ','.join(channelnames)
params = {'limit': limit,
'offset': offset,
'game': game,
'channel': cparam}
r = self.kraken_request('GET', 'streams', params=params)
return models.Stream.wrap_search(r) |
def set_uservar(self, user, name, value):
"""Set a variable for a user.
This is like the ``<set>`` tag in RiveScript code.
:param str user: The user ID to set a variable for.
:param str name: The name of the variable to set.
:param str value: The value to set there.
"""
self._session.set(user, {name: value}) | Set a variable for a user.
This is like the ``<set>`` tag in RiveScript code.
:param str user: The user ID to set a variable for.
:param str name: The name of the variable to set.
:param str value: The value to set there. | Below is the the instruction that describes the task:
### Input:
Set a variable for a user.
This is like the ``<set>`` tag in RiveScript code.
:param str user: The user ID to set a variable for.
:param str name: The name of the variable to set.
:param str value: The value to set there.
### Response:
def set_uservar(self, user, name, value):
"""Set a variable for a user.
This is like the ``<set>`` tag in RiveScript code.
:param str user: The user ID to set a variable for.
:param str name: The name of the variable to set.
:param str value: The value to set there.
"""
self._session.set(user, {name: value}) |
def ekuced(handle, segno, recno, column, nvals, dvals, isnull):
"""
Update a double precision column entry in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekuced_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record to which data is to be updated.
:type recno: int
:param column: Column name.
:type column: str
:param nvals: Number of values in new column entry.
:type nvals: int
:param dvals: Double precision values comprising new column entry.
:type dvals: Array of floats
:param isnull: Flag indicating whether column entry is null.
:type isnull: bool
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
recno = ctypes.c_int(recno)
column = stypes.stringToCharP(column)
nvals = ctypes.c_int(nvals)
dvals = stypes.toDoubleVector(dvals)
isnull = ctypes.c_int(isnull)
libspice.ekaced_c(handle, segno, recno, column, nvals, dvals, isnull) | Update a double precision column entry in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekuced_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record to which data is to be updated.
:type recno: int
:param column: Column name.
:type column: str
:param nvals: Number of values in new column entry.
:type nvals: int
:param dvals: Double precision values comprising new column entry.
:type dvals: Array of floats
:param isnull: Flag indicating whether column entry is null.
:type isnull: bool | Below is the the instruction that describes the task:
### Input:
Update a double precision column entry in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekuced_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record to which data is to be updated.
:type recno: int
:param column: Column name.
:type column: str
:param nvals: Number of values in new column entry.
:type nvals: int
:param dvals: Double precision values comprising new column entry.
:type dvals: Array of floats
:param isnull: Flag indicating whether column entry is null.
:type isnull: bool
### Response:
def ekuced(handle, segno, recno, column, nvals, dvals, isnull):
"""
Update a double precision column entry in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekuced_c.html
:param handle: EK file handle.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record to which data is to be updated.
:type recno: int
:param column: Column name.
:type column: str
:param nvals: Number of values in new column entry.
:type nvals: int
:param dvals: Double precision values comprising new column entry.
:type dvals: Array of floats
:param isnull: Flag indicating whether column entry is null.
:type isnull: bool
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
recno = ctypes.c_int(recno)
column = stypes.stringToCharP(column)
nvals = ctypes.c_int(nvals)
dvals = stypes.toDoubleVector(dvals)
isnull = ctypes.c_int(isnull)
libspice.ekaced_c(handle, segno, recno, column, nvals, dvals, isnull) |
def production_variant(model_name, instance_type, initial_instance_count=1, variant_name='AllTraffic',
initial_weight=1, accelerator_type=None):
"""Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a
``CreateEndpointConfig`` request.
Args:
model_name (str): The name of the SageMaker model this production variant references.
instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'.
initial_instance_count (int): The initial instance count for this production variant (default: 1).
variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic').
initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1).
accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
dict[str, str]: An SageMaker ``ProductionVariant`` description
"""
production_variant_configuration = {
'ModelName': model_name,
'InstanceType': instance_type,
'InitialInstanceCount': initial_instance_count,
'VariantName': variant_name,
'InitialVariantWeight': initial_weight
}
if accelerator_type:
production_variant_configuration['AcceleratorType'] = accelerator_type
return production_variant_configuration | Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a
``CreateEndpointConfig`` request.
Args:
model_name (str): The name of the SageMaker model this production variant references.
instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'.
initial_instance_count (int): The initial instance count for this production variant (default: 1).
variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic').
initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1).
accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
dict[str, str]: An SageMaker ``ProductionVariant`` description | Below is the the instruction that describes the task:
### Input:
Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a
``CreateEndpointConfig`` request.
Args:
model_name (str): The name of the SageMaker model this production variant references.
instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'.
initial_instance_count (int): The initial instance count for this production variant (default: 1).
variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic').
initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1).
accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
dict[str, str]: An SageMaker ``ProductionVariant`` description
### Response:
def production_variant(model_name, instance_type, initial_instance_count=1, variant_name='AllTraffic',
initial_weight=1, accelerator_type=None):
"""Create a production variant description suitable for use in a ``ProductionVariant`` list as part of a
``CreateEndpointConfig`` request.
Args:
model_name (str): The name of the SageMaker model this production variant references.
instance_type (str): The EC2 instance type for this production variant. For example, 'ml.c4.8xlarge'.
initial_instance_count (int): The initial instance count for this production variant (default: 1).
variant_name (string): The ``VariantName`` of this production variant (default: 'AllTraffic').
initial_weight (int): The relative ``InitialVariantWeight`` of this production variant (default: 1).
accelerator_type (str): Type of Elastic Inference accelerator for this production variant. For example,
'ml.eia1.medium'. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
Returns:
dict[str, str]: An SageMaker ``ProductionVariant`` description
"""
production_variant_configuration = {
'ModelName': model_name,
'InstanceType': instance_type,
'InitialInstanceCount': initial_instance_count,
'VariantName': variant_name,
'InitialVariantWeight': initial_weight
}
if accelerator_type:
production_variant_configuration['AcceleratorType'] = accelerator_type
return production_variant_configuration |
def check_subdomain(fqn):
"""
Verify that the given fqn is a subdomain
>>> check_subdomain('a.b.c')
True
>>> check_subdomain(123)
False
>>> check_subdomain('a.b.c.d')
False
>>> check_subdomain('A.b.c')
False
>>> check_subdomain('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a.b')
True
>>> check_subdomain('a.abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a')
False
>>> check_subdomain('a.b.cdabcdabcdabcdabcdabcdabcdabcdabcd')
False
>>> check_subdomain('a.b')
False
"""
if type(fqn) not in [str, unicode]:
return False
if not is_subdomain(fqn):
return False
return True | Verify that the given fqn is a subdomain
>>> check_subdomain('a.b.c')
True
>>> check_subdomain(123)
False
>>> check_subdomain('a.b.c.d')
False
>>> check_subdomain('A.b.c')
False
>>> check_subdomain('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a.b')
True
>>> check_subdomain('a.abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a')
False
>>> check_subdomain('a.b.cdabcdabcdabcdabcdabcdabcdabcdabcd')
False
>>> check_subdomain('a.b')
False | Below is the the instruction that describes the task:
### Input:
Verify that the given fqn is a subdomain
>>> check_subdomain('a.b.c')
True
>>> check_subdomain(123)
False
>>> check_subdomain('a.b.c.d')
False
>>> check_subdomain('A.b.c')
False
>>> check_subdomain('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a.b')
True
>>> check_subdomain('a.abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a')
False
>>> check_subdomain('a.b.cdabcdabcdabcdabcdabcdabcdabcdabcd')
False
>>> check_subdomain('a.b')
False
### Response:
def check_subdomain(fqn):
"""
Verify that the given fqn is a subdomain
>>> check_subdomain('a.b.c')
True
>>> check_subdomain(123)
False
>>> check_subdomain('a.b.c.d')
False
>>> check_subdomain('A.b.c')
False
>>> check_subdomain('abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a.b')
True
>>> check_subdomain('a.abcdabcdabcdabcdabcdabcdabcdabcdabcdabcd.a')
False
>>> check_subdomain('a.b.cdabcdabcdabcdabcdabcdabcdabcdabcd')
False
>>> check_subdomain('a.b')
False
"""
if type(fqn) not in [str, unicode]:
return False
if not is_subdomain(fqn):
return False
return True |
def rollback(self, label, plane):
"""Rollback config."""
cm_label = 'condoor-{}'.format(int(time.time()))
self.device.send(self.rollback_cmd.format(label), timeout=120)
return cm_label | Rollback config. | Below is the the instruction that describes the task:
### Input:
Rollback config.
### Response:
def rollback(self, label, plane):
"""Rollback config."""
cm_label = 'condoor-{}'.format(int(time.time()))
self.device.send(self.rollback_cmd.format(label), timeout=120)
return cm_label |
async def _receive_chunk(self, chunk):
"""
Handle an incoming chunk.
"""
self.__log_debug('< %s', chunk)
# common
if isinstance(chunk, DataChunk):
await self._receive_data_chunk(chunk)
elif isinstance(chunk, SackChunk):
await self._receive_sack_chunk(chunk)
elif isinstance(chunk, ForwardTsnChunk):
await self._receive_forward_tsn_chunk(chunk)
elif isinstance(chunk, HeartbeatChunk):
ack = HeartbeatAckChunk()
ack.params = chunk.params
await self._send_chunk(ack)
elif isinstance(chunk, AbortChunk):
self.__log_debug('x Association was aborted by remote party')
self._set_state(self.State.CLOSED)
elif isinstance(chunk, ShutdownChunk):
self._t2_cancel()
self._set_state(self.State.SHUTDOWN_RECEIVED)
ack = ShutdownAckChunk()
await self._send_chunk(ack)
self._t2_start(ack)
self._set_state(self.State.SHUTDOWN_ACK_SENT)
elif (isinstance(chunk, ShutdownCompleteChunk) and
self._association_state == self.State.SHUTDOWN_ACK_SENT):
self._t2_cancel()
self._set_state(self.State.CLOSED)
elif (isinstance(chunk, ReconfigChunk) and
self._association_state == self.State.ESTABLISHED):
for param in chunk.params:
cls = RECONFIG_PARAM_TYPES.get(param[0])
if cls:
await self._receive_reconfig_param(cls.parse(param[1]))
# server
elif isinstance(chunk, InitChunk) and self.is_server:
self._last_received_tsn = tsn_minus_one(chunk.initial_tsn)
self._reconfig_response_seq = tsn_minus_one(chunk.initial_tsn)
self._remote_verification_tag = chunk.initiate_tag
self._ssthresh = chunk.advertised_rwnd
self._get_extensions(chunk.params)
self.__log_debug('- Peer supports %d outbound streams, %d max inbound streams',
chunk.outbound_streams, chunk.inbound_streams)
self._inbound_streams_count = min(chunk.outbound_streams, self._inbound_streams_max)
self._outbound_streams_count = min(self._outbound_streams_count, chunk.inbound_streams)
ack = InitAckChunk()
ack.initiate_tag = self._local_verification_tag
ack.advertised_rwnd = self._advertised_rwnd
ack.outbound_streams = self._outbound_streams_count
ack.inbound_streams = self._inbound_streams_max
ack.initial_tsn = self._local_tsn
self._set_extensions(ack.params)
# generate state cookie
cookie = pack('!L', self._get_timestamp())
cookie += hmac.new(self._hmac_key, cookie, 'sha1').digest()
ack.params.append((SCTP_STATE_COOKIE, cookie))
await self._send_chunk(ack)
elif isinstance(chunk, CookieEchoChunk) and self.is_server:
# check state cookie MAC
cookie = chunk.body
if (len(cookie) != COOKIE_LENGTH or
hmac.new(self._hmac_key, cookie[0:4], 'sha1').digest() != cookie[4:]):
self.__log_debug('x State cookie is invalid')
return
# check state cookie lifetime
now = self._get_timestamp()
stamp = unpack_from('!L', cookie)[0]
if stamp < now - COOKIE_LIFETIME or stamp > now:
self.__log_debug('x State cookie has expired')
error = ErrorChunk()
error.params.append((SCTP_CAUSE_STALE_COOKIE, b'\x00' * 8))
await self._send_chunk(error)
return
ack = CookieAckChunk()
await self._send_chunk(ack)
self._set_state(self.State.ESTABLISHED)
# client
elif isinstance(chunk, InitAckChunk) and self._association_state == self.State.COOKIE_WAIT:
# cancel T1 timer and process chunk
self._t1_cancel()
self._last_received_tsn = tsn_minus_one(chunk.initial_tsn)
self._reconfig_response_seq = tsn_minus_one(chunk.initial_tsn)
self._remote_verification_tag = chunk.initiate_tag
self._ssthresh = chunk.advertised_rwnd
self._get_extensions(chunk.params)
self.__log_debug('- Peer supports %d outbound streams, %d max inbound streams',
chunk.outbound_streams, chunk.inbound_streams)
self._inbound_streams_count = min(chunk.outbound_streams, self._inbound_streams_max)
self._outbound_streams_count = min(self._outbound_streams_count, chunk.inbound_streams)
echo = CookieEchoChunk()
for k, v in chunk.params:
if k == SCTP_STATE_COOKIE:
echo.body = v
break
await self._send_chunk(echo)
# start T1 timer and enter COOKIE-ECHOED state
self._t1_start(echo)
self._set_state(self.State.COOKIE_ECHOED)
elif (isinstance(chunk, CookieAckChunk) and
self._association_state == self.State.COOKIE_ECHOED):
# cancel T1 timer and enter ESTABLISHED state
self._t1_cancel()
self._set_state(self.State.ESTABLISHED)
elif (isinstance(chunk, ErrorChunk) and
self._association_state in [self.State.COOKIE_WAIT, self.State.COOKIE_ECHOED]):
self._t1_cancel()
self._set_state(self.State.CLOSED)
self.__log_debug('x Could not establish association')
return | Handle an incoming chunk. | Below is the the instruction that describes the task:
### Input:
Handle an incoming chunk.
### Response:
async def _receive_chunk(self, chunk):
"""
Handle an incoming chunk.
"""
self.__log_debug('< %s', chunk)
# common
if isinstance(chunk, DataChunk):
await self._receive_data_chunk(chunk)
elif isinstance(chunk, SackChunk):
await self._receive_sack_chunk(chunk)
elif isinstance(chunk, ForwardTsnChunk):
await self._receive_forward_tsn_chunk(chunk)
elif isinstance(chunk, HeartbeatChunk):
ack = HeartbeatAckChunk()
ack.params = chunk.params
await self._send_chunk(ack)
elif isinstance(chunk, AbortChunk):
self.__log_debug('x Association was aborted by remote party')
self._set_state(self.State.CLOSED)
elif isinstance(chunk, ShutdownChunk):
self._t2_cancel()
self._set_state(self.State.SHUTDOWN_RECEIVED)
ack = ShutdownAckChunk()
await self._send_chunk(ack)
self._t2_start(ack)
self._set_state(self.State.SHUTDOWN_ACK_SENT)
elif (isinstance(chunk, ShutdownCompleteChunk) and
self._association_state == self.State.SHUTDOWN_ACK_SENT):
self._t2_cancel()
self._set_state(self.State.CLOSED)
elif (isinstance(chunk, ReconfigChunk) and
self._association_state == self.State.ESTABLISHED):
for param in chunk.params:
cls = RECONFIG_PARAM_TYPES.get(param[0])
if cls:
await self._receive_reconfig_param(cls.parse(param[1]))
# server
elif isinstance(chunk, InitChunk) and self.is_server:
self._last_received_tsn = tsn_minus_one(chunk.initial_tsn)
self._reconfig_response_seq = tsn_minus_one(chunk.initial_tsn)
self._remote_verification_tag = chunk.initiate_tag
self._ssthresh = chunk.advertised_rwnd
self._get_extensions(chunk.params)
self.__log_debug('- Peer supports %d outbound streams, %d max inbound streams',
chunk.outbound_streams, chunk.inbound_streams)
self._inbound_streams_count = min(chunk.outbound_streams, self._inbound_streams_max)
self._outbound_streams_count = min(self._outbound_streams_count, chunk.inbound_streams)
ack = InitAckChunk()
ack.initiate_tag = self._local_verification_tag
ack.advertised_rwnd = self._advertised_rwnd
ack.outbound_streams = self._outbound_streams_count
ack.inbound_streams = self._inbound_streams_max
ack.initial_tsn = self._local_tsn
self._set_extensions(ack.params)
# generate state cookie
cookie = pack('!L', self._get_timestamp())
cookie += hmac.new(self._hmac_key, cookie, 'sha1').digest()
ack.params.append((SCTP_STATE_COOKIE, cookie))
await self._send_chunk(ack)
elif isinstance(chunk, CookieEchoChunk) and self.is_server:
# check state cookie MAC
cookie = chunk.body
if (len(cookie) != COOKIE_LENGTH or
hmac.new(self._hmac_key, cookie[0:4], 'sha1').digest() != cookie[4:]):
self.__log_debug('x State cookie is invalid')
return
# check state cookie lifetime
now = self._get_timestamp()
stamp = unpack_from('!L', cookie)[0]
if stamp < now - COOKIE_LIFETIME or stamp > now:
self.__log_debug('x State cookie has expired')
error = ErrorChunk()
error.params.append((SCTP_CAUSE_STALE_COOKIE, b'\x00' * 8))
await self._send_chunk(error)
return
ack = CookieAckChunk()
await self._send_chunk(ack)
self._set_state(self.State.ESTABLISHED)
# client
elif isinstance(chunk, InitAckChunk) and self._association_state == self.State.COOKIE_WAIT:
# cancel T1 timer and process chunk
self._t1_cancel()
self._last_received_tsn = tsn_minus_one(chunk.initial_tsn)
self._reconfig_response_seq = tsn_minus_one(chunk.initial_tsn)
self._remote_verification_tag = chunk.initiate_tag
self._ssthresh = chunk.advertised_rwnd
self._get_extensions(chunk.params)
self.__log_debug('- Peer supports %d outbound streams, %d max inbound streams',
chunk.outbound_streams, chunk.inbound_streams)
self._inbound_streams_count = min(chunk.outbound_streams, self._inbound_streams_max)
self._outbound_streams_count = min(self._outbound_streams_count, chunk.inbound_streams)
echo = CookieEchoChunk()
for k, v in chunk.params:
if k == SCTP_STATE_COOKIE:
echo.body = v
break
await self._send_chunk(echo)
# start T1 timer and enter COOKIE-ECHOED state
self._t1_start(echo)
self._set_state(self.State.COOKIE_ECHOED)
elif (isinstance(chunk, CookieAckChunk) and
self._association_state == self.State.COOKIE_ECHOED):
# cancel T1 timer and enter ESTABLISHED state
self._t1_cancel()
self._set_state(self.State.ESTABLISHED)
elif (isinstance(chunk, ErrorChunk) and
self._association_state in [self.State.COOKIE_WAIT, self.State.COOKIE_ECHOED]):
self._t1_cancel()
self._set_state(self.State.CLOSED)
self.__log_debug('x Could not establish association')
return |
def convert_name(s):
"""
Converts a line of axplorer format into androguard method signature + permission
:param s:
:return:
"""
m = re.compile(r"^(.*)\.(.*)\((.*)\)(.*) :: (.*)$")
res = m.search(s)
if res:
clname, methodname, all_args, ret, perm = res.groups()
args = " ".join(map(name_to_androguard, all_args.split(",")))
clname = name_to_androguard(clname)
ret = name_to_androguard(ret)
# perm is actually a comma separated list of permissions
return "{}-{}-({}){}".format(clname, methodname, args, ret), perm.split(", ")
else:
raise ValueError("what?") | Converts a line of axplorer format into androguard method signature + permission
:param s:
:return: | Below is the the instruction that describes the task:
### Input:
Converts a line of axplorer format into androguard method signature + permission
:param s:
:return:
### Response:
def convert_name(s):
"""
Converts a line of axplorer format into androguard method signature + permission
:param s:
:return:
"""
m = re.compile(r"^(.*)\.(.*)\((.*)\)(.*) :: (.*)$")
res = m.search(s)
if res:
clname, methodname, all_args, ret, perm = res.groups()
args = " ".join(map(name_to_androguard, all_args.split(",")))
clname = name_to_androguard(clname)
ret = name_to_androguard(ret)
# perm is actually a comma separated list of permissions
return "{}-{}-({}){}".format(clname, methodname, args, ret), perm.split(", ")
else:
raise ValueError("what?") |
def mtime(self, key):
"""Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents."""
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
return node.mtime | Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents. | Below is the the instruction that describes the task:
### Input:
Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents.
### Response:
def mtime(self, key):
"""Return the last modification time for the cache record with key.
May be useful for cache instances where the stored values can get
'stale', such as caching file or network resource contents."""
if key not in self.__dict:
raise CacheKeyError(key)
else:
node = self.__dict[key]
return node.mtime |
def parse_env_zones(self):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]] | returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call | Below is the the instruction that describes the task:
### Input:
returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call
### Response:
def parse_env_zones(self):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]] |
def validate(source, scheme=None, format=None):
'''Check if tabulator is able to load the source.
Args:
source (Union[str, IO]): The source path or IO object.
scheme (str, optional): The source scheme. Auto-detect by default.
format (str, optional): The source file format. Auto-detect by default.
Returns:
bool: Whether tabulator is able to load the source file.
Raises:
`tabulator.exceptions.SchemeError`: The file scheme is not supported.
`tabulator.exceptions.FormatError`: The file format is not supported.
'''
# Get scheme and format
detected_scheme, detected_format = helpers.detect_scheme_and_format(source)
scheme = scheme or detected_scheme
format = format or detected_format
# Validate scheme and format
if scheme is not None:
if scheme not in config.LOADERS:
raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme)
if format not in config.PARSERS:
raise exceptions.FormatError('Format "%s" is not supported' % format)
return True | Check if tabulator is able to load the source.
Args:
source (Union[str, IO]): The source path or IO object.
scheme (str, optional): The source scheme. Auto-detect by default.
format (str, optional): The source file format. Auto-detect by default.
Returns:
bool: Whether tabulator is able to load the source file.
Raises:
`tabulator.exceptions.SchemeError`: The file scheme is not supported.
`tabulator.exceptions.FormatError`: The file format is not supported. | Below is the the instruction that describes the task:
### Input:
Check if tabulator is able to load the source.
Args:
source (Union[str, IO]): The source path or IO object.
scheme (str, optional): The source scheme. Auto-detect by default.
format (str, optional): The source file format. Auto-detect by default.
Returns:
bool: Whether tabulator is able to load the source file.
Raises:
`tabulator.exceptions.SchemeError`: The file scheme is not supported.
`tabulator.exceptions.FormatError`: The file format is not supported.
### Response:
def validate(source, scheme=None, format=None):
'''Check if tabulator is able to load the source.
Args:
source (Union[str, IO]): The source path or IO object.
scheme (str, optional): The source scheme. Auto-detect by default.
format (str, optional): The source file format. Auto-detect by default.
Returns:
bool: Whether tabulator is able to load the source file.
Raises:
`tabulator.exceptions.SchemeError`: The file scheme is not supported.
`tabulator.exceptions.FormatError`: The file format is not supported.
'''
# Get scheme and format
detected_scheme, detected_format = helpers.detect_scheme_and_format(source)
scheme = scheme or detected_scheme
format = format or detected_format
# Validate scheme and format
if scheme is not None:
if scheme not in config.LOADERS:
raise exceptions.SchemeError('Scheme "%s" is not supported' % scheme)
if format not in config.PARSERS:
raise exceptions.FormatError('Format "%s" is not supported' % format)
return True |
def get_omim_panel_genes(genemap2_lines, mim2gene_lines, alias_genes):
"""Return all genes that should be included in the OMIM-AUTO panel
Return the hgnc symbols
Genes that have at least one 'established' or 'provisional' phenotype connection
are included in the gene panel
Args:
genemap2_lines(iterable)
mim2gene_lines(iterable)
alias_genes(dict): A dictionary that maps hgnc_symbol to hgnc_id
Yields:
hgnc_symbol(str)
"""
parsed_genes = get_mim_genes(genemap2_lines, mim2gene_lines)
STATUS_TO_ADD = set(['established', 'provisional'])
for hgnc_symbol in parsed_genes:
try:
gene = parsed_genes[hgnc_symbol]
keep = False
for phenotype_info in gene.get('phenotypes',[]):
if phenotype_info['status'] in STATUS_TO_ADD:
keep = True
break
if keep:
hgnc_id_info = alias_genes.get(hgnc_symbol)
if not hgnc_id_info:
for symbol in gene.get('hgnc_symbols', []):
if symbol in alias_genes:
hgnc_id_info = alias_genes[symbol]
break
if hgnc_id_info:
yield {
'hgnc_id': hgnc_id_info['true'],
'hgnc_symbol': hgnc_symbol,
}
else:
LOG.warning("Gene symbol %s does not exist", hgnc_symbol)
except KeyError:
pass | Return all genes that should be included in the OMIM-AUTO panel
Return the hgnc symbols
Genes that have at least one 'established' or 'provisional' phenotype connection
are included in the gene panel
Args:
genemap2_lines(iterable)
mim2gene_lines(iterable)
alias_genes(dict): A dictionary that maps hgnc_symbol to hgnc_id
Yields:
hgnc_symbol(str) | Below is the the instruction that describes the task:
### Input:
Return all genes that should be included in the OMIM-AUTO panel
Return the hgnc symbols
Genes that have at least one 'established' or 'provisional' phenotype connection
are included in the gene panel
Args:
genemap2_lines(iterable)
mim2gene_lines(iterable)
alias_genes(dict): A dictionary that maps hgnc_symbol to hgnc_id
Yields:
hgnc_symbol(str)
### Response:
def get_omim_panel_genes(genemap2_lines, mim2gene_lines, alias_genes):
"""Return all genes that should be included in the OMIM-AUTO panel
Return the hgnc symbols
Genes that have at least one 'established' or 'provisional' phenotype connection
are included in the gene panel
Args:
genemap2_lines(iterable)
mim2gene_lines(iterable)
alias_genes(dict): A dictionary that maps hgnc_symbol to hgnc_id
Yields:
hgnc_symbol(str)
"""
parsed_genes = get_mim_genes(genemap2_lines, mim2gene_lines)
STATUS_TO_ADD = set(['established', 'provisional'])
for hgnc_symbol in parsed_genes:
try:
gene = parsed_genes[hgnc_symbol]
keep = False
for phenotype_info in gene.get('phenotypes',[]):
if phenotype_info['status'] in STATUS_TO_ADD:
keep = True
break
if keep:
hgnc_id_info = alias_genes.get(hgnc_symbol)
if not hgnc_id_info:
for symbol in gene.get('hgnc_symbols', []):
if symbol in alias_genes:
hgnc_id_info = alias_genes[symbol]
break
if hgnc_id_info:
yield {
'hgnc_id': hgnc_id_info['true'],
'hgnc_symbol': hgnc_symbol,
}
else:
LOG.warning("Gene symbol %s does not exist", hgnc_symbol)
except KeyError:
pass |
def consume_normals(self):
"""Consumes all consecutive texture coordinate lines"""
# The first iteration processes the current/first vn statement.
# The loop continues until there are no more vn-statements or StopIteration is raised by generator
while True:
yield (
float(self.values[1]),
float(self.values[2]),
float(self.values[3]),
)
try:
self.next_line()
except StopIteration:
break
if not self.values:
break
if self.values[0] != "vn":
break | Consumes all consecutive texture coordinate lines | Below is the the instruction that describes the task:
### Input:
Consumes all consecutive texture coordinate lines
### Response:
def consume_normals(self):
"""Consumes all consecutive texture coordinate lines"""
# The first iteration processes the current/first vn statement.
# The loop continues until there are no more vn-statements or StopIteration is raised by generator
while True:
yield (
float(self.values[1]),
float(self.values[2]),
float(self.values[3]),
)
try:
self.next_line()
except StopIteration:
break
if not self.values:
break
if self.values[0] != "vn":
break |
async def _notify_observers(self, delta, old_obj, new_obj):
"""Call observing callbacks, notifying them of a change in model state
:param delta: The raw change from the watcher
(:class:`juju.client.overrides.Delta`)
:param old_obj: The object in the model that this delta updates.
May be None.
:param new_obj: The object in the model that is created or updated
by applying this delta.
"""
if new_obj and not old_obj:
delta.type = 'add'
log.debug(
'Model changed: %s %s %s',
delta.entity, delta.type, delta.get_id())
for o in self._observers:
if o.cares_about(delta):
asyncio.ensure_future(o(delta, old_obj, new_obj, self),
loop=self._connector.loop) | Call observing callbacks, notifying them of a change in model state
:param delta: The raw change from the watcher
(:class:`juju.client.overrides.Delta`)
:param old_obj: The object in the model that this delta updates.
May be None.
:param new_obj: The object in the model that is created or updated
by applying this delta. | Below is the the instruction that describes the task:
### Input:
Call observing callbacks, notifying them of a change in model state
:param delta: The raw change from the watcher
(:class:`juju.client.overrides.Delta`)
:param old_obj: The object in the model that this delta updates.
May be None.
:param new_obj: The object in the model that is created or updated
by applying this delta.
### Response:
async def _notify_observers(self, delta, old_obj, new_obj):
"""Call observing callbacks, notifying them of a change in model state
:param delta: The raw change from the watcher
(:class:`juju.client.overrides.Delta`)
:param old_obj: The object in the model that this delta updates.
May be None.
:param new_obj: The object in the model that is created or updated
by applying this delta.
"""
if new_obj and not old_obj:
delta.type = 'add'
log.debug(
'Model changed: %s %s %s',
delta.entity, delta.type, delta.get_id())
for o in self._observers:
if o.cares_about(delta):
asyncio.ensure_future(o(delta, old_obj, new_obj, self),
loop=self._connector.loop) |
def unitcube_overlap(self, ndraws=10000, rstate=None):
"""Using `ndraws` Monte Carlo draws, estimate the fraction of
overlap between the ellipsoid and the unit cube."""
if rstate is None:
rstate = np.random
samples = [self.sample(rstate=rstate) for i in range(ndraws)]
nin = sum([unitcheck(x) for x in samples])
return 1. * nin / ndraws | Using `ndraws` Monte Carlo draws, estimate the fraction of
overlap between the ellipsoid and the unit cube. | Below is the the instruction that describes the task:
### Input:
Using `ndraws` Monte Carlo draws, estimate the fraction of
overlap between the ellipsoid and the unit cube.
### Response:
def unitcube_overlap(self, ndraws=10000, rstate=None):
"""Using `ndraws` Monte Carlo draws, estimate the fraction of
overlap between the ellipsoid and the unit cube."""
if rstate is None:
rstate = np.random
samples = [self.sample(rstate=rstate) for i in range(ndraws)]
nin = sum([unitcheck(x) for x in samples])
return 1. * nin / ndraws |
def _jit_predict_fun(model_predict, num_devices):
"""Use jit on model_predict if required."""
def predict(x, params=(), rng=None):
"""Predict function jited and parallelized as requested."""
# On one device, jit and run.
if num_devices == 1:
return backend.jit(model_predict)(x, params, rng=rng)
# Multi-devices, pmap and run.
@functools.partial(backend.pmap, axis_name="batch")
def mapped_predict(x, params, rng):
return model_predict(x, params, rng=rng)
pred = mapped_predict(
reshape_by_device(x, num_devices),
params,
jax_random.split(rng, num_devices))
# Need to reduce the [device, per-device-batch, ...] tensors back to
# a [batch, ...] tensor. The tensors may be nested.
if not isinstance(x, (list, tuple)): # Not nested.
batch_size = x.shape[0]
return np.reshape(pred, [batch_size] + list(pred.shape[2:]))
batch_size = x[0].shape[0]
return [np.reshape(p, [batch_size] + list(p.shape[2:])) for p in pred]
return predict | Use jit on model_predict if required. | Below is the the instruction that describes the task:
### Input:
Use jit on model_predict if required.
### Response:
def _jit_predict_fun(model_predict, num_devices):
"""Use jit on model_predict if required."""
def predict(x, params=(), rng=None):
"""Predict function jited and parallelized as requested."""
# On one device, jit and run.
if num_devices == 1:
return backend.jit(model_predict)(x, params, rng=rng)
# Multi-devices, pmap and run.
@functools.partial(backend.pmap, axis_name="batch")
def mapped_predict(x, params, rng):
return model_predict(x, params, rng=rng)
pred = mapped_predict(
reshape_by_device(x, num_devices),
params,
jax_random.split(rng, num_devices))
# Need to reduce the [device, per-device-batch, ...] tensors back to
# a [batch, ...] tensor. The tensors may be nested.
if not isinstance(x, (list, tuple)): # Not nested.
batch_size = x.shape[0]
return np.reshape(pred, [batch_size] + list(pred.shape[2:]))
batch_size = x[0].shape[0]
return [np.reshape(p, [batch_size] + list(p.shape[2:])) for p in pred]
return predict |
def explode(self, hostgroups, contactgroups):
"""Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgorups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
for template in list(self.templates.values()):
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(template, contactgroups)
# Register host in the hostgroups
for host in self:
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(host, contactgroups)
if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
hname = host.host_name
for hostgroup in host.hostgroups:
hostgroups.add_member(hname, hostgroup.strip()) | Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgorups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None | Below is the the instruction that describes the task:
### Input:
Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgorups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
### Response:
def explode(self, hostgroups, contactgroups):
"""Explode hosts with hostgroups, contactgroups::
* Add contact from contactgroups to host contacts
* Add host into their hostgroups as hostgroup members
:param hostgroups: Hostgroups to explode
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:param contactgroups: Contactgorups to explode
:type contactgroups: alignak.objects.contactgroup.Contactgroups
:return: None
"""
for template in list(self.templates.values()):
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(template, contactgroups)
# Register host in the hostgroups
for host in self:
# items::explode_contact_groups_into_contacts
# take all contacts from our contact_groups into our contact property
self.explode_contact_groups_into_contacts(host, contactgroups)
if hasattr(host, 'host_name') and hasattr(host, 'hostgroups'):
hname = host.host_name
for hostgroup in host.hostgroups:
hostgroups.add_member(hname, hostgroup.strip()) |
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, "close"):
self.response.close()
for func in self._on_close:
func() | Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement. | Below is the the instruction that describes the task:
### Input:
Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
### Response:
def close(self):
"""Close the wrapped response if possible. You can also use the object
in a with statement which will automatically close it.
.. versionadded:: 0.9
Can now be used in a with statement.
"""
if hasattr(self.response, "close"):
self.response.close()
for func in self._on_close:
func() |
def get_changes(self, factory_name, global_factory=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes this refactoring makes
`factory_name` indicates the name of the factory function to
be added. If `global_factory` is `True` the factory will be
global otherwise a static method is added to the class.
`resources` can be a list of `rope.base.resource.File`\s that
this refactoring should be applied on; if `None` all python
files in the project are searched.
"""
if resources is None:
resources = self.project.get_python_files()
changes = ChangeSet('Introduce factory method <%s>' % factory_name)
job_set = task_handle.create_jobset('Collecting Changes',
len(resources))
self._change_module(resources, changes, factory_name,
global_factory, job_set)
return changes | Get the changes this refactoring makes
`factory_name` indicates the name of the factory function to
be added. If `global_factory` is `True` the factory will be
global otherwise a static method is added to the class.
`resources` can be a list of `rope.base.resource.File`\s that
this refactoring should be applied on; if `None` all python
files in the project are searched. | Below is the the instruction that describes the task:
### Input:
Get the changes this refactoring makes
`factory_name` indicates the name of the factory function to
be added. If `global_factory` is `True` the factory will be
global otherwise a static method is added to the class.
`resources` can be a list of `rope.base.resource.File`\s that
this refactoring should be applied on; if `None` all python
files in the project are searched.
### Response:
def get_changes(self, factory_name, global_factory=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes this refactoring makes
`factory_name` indicates the name of the factory function to
be added. If `global_factory` is `True` the factory will be
global otherwise a static method is added to the class.
`resources` can be a list of `rope.base.resource.File`\s that
this refactoring should be applied on; if `None` all python
files in the project are searched.
"""
if resources is None:
resources = self.project.get_python_files()
changes = ChangeSet('Introduce factory method <%s>' % factory_name)
job_set = task_handle.create_jobset('Collecting Changes',
len(resources))
self._change_module(resources, changes, factory_name,
global_factory, job_set)
return changes |
def _traverse_dict(self, input_dict, resolution_data, resolver_method):
"""
Traverse a dictionary to resolve intrinsic functions on every value
:param input_dict: Input dictionary to traverse
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified dictionary with values resolved
"""
for key, value in input_dict.items():
input_dict[key] = self._traverse(value, resolution_data, resolver_method)
return input_dict | Traverse a dictionary to resolve intrinsic functions on every value
:param input_dict: Input dictionary to traverse
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified dictionary with values resolved | Below is the the instruction that describes the task:
### Input:
Traverse a dictionary to resolve intrinsic functions on every value
:param input_dict: Input dictionary to traverse
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified dictionary with values resolved
### Response:
def _traverse_dict(self, input_dict, resolution_data, resolver_method):
"""
Traverse a dictionary to resolve intrinsic functions on every value
:param input_dict: Input dictionary to traverse
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified dictionary with values resolved
"""
for key, value in input_dict.items():
input_dict[key] = self._traverse(value, resolution_data, resolver_method)
return input_dict |
def score_hist(df, columns=None, groupby=None, threshold=0.7, stacked=True,
bins=20, percent=True, alpha=0.33, show=True, block=False, save=False):
"""Plot multiple histograms on one plot, typically of "score" values between 0 and 1
Typically the groupby or columns of the dataframe are the classification categories (0, .5, 1)
And the values are scores between 0 and 1.
"""
df = df if columns is None else df[([] if groupby is None else [groupby]) + list(columns)].copy()
if groupby is not None or threshold is not None:
df = groups_from_scores(df, groupby=groupby, threshold=threshold)
percent = 100. if percent else 1.
if isinstance(df, pd.core.groupby.DataFrameGroupBy):
df = df_from_groups(df, columns=columns) * percent
columns = df.columns if columns is None else columns
if bins is None:
bins = 20
if isinstance(bins, int):
bins = np.linspace(np.min(df.min()), np.max(df.max()), bins)
log.debug('bins: {}'.format(bins))
figs = []
df.plot(kind='hist', alpha=alpha, stacked=stacked, bins=bins)
# for col in df.columns:
# series = df[col] * percent
# log.debug('{}'.format(series))
# figs.append(plt.hist(series, bins=bins, alpha=alpha,
# weights=percent * np.ones_like(series) / len(series.dropna()),
# label=stringify(col)))
plt.legend()
plt.xlabel('Score (%)')
plt.ylabel('Percent')
plt.title('{} Scores for {}'.format(np.sum(df.count()), columns))
plt.draw()
if save or not show:
fig = plt.gcf()
today = datetime.datetime.today()
fig.savefig(os.path.join(IMAGES_PATH, 'score_hist_{:04d}-{:02d}-{:02d}_{:02d}{:02d}.jpg'.format(*today.timetuple())))
if show:
plt.show(block=block)
return figs | Plot multiple histograms on one plot, typically of "score" values between 0 and 1
Typically the groupby or columns of the dataframe are the classification categories (0, .5, 1)
And the values are scores between 0 and 1. | Below is the the instruction that describes the task:
### Input:
Plot multiple histograms on one plot, typically of "score" values between 0 and 1
Typically the groupby or columns of the dataframe are the classification categories (0, .5, 1)
And the values are scores between 0 and 1.
### Response:
def score_hist(df, columns=None, groupby=None, threshold=0.7, stacked=True,
bins=20, percent=True, alpha=0.33, show=True, block=False, save=False):
"""Plot multiple histograms on one plot, typically of "score" values between 0 and 1
Typically the groupby or columns of the dataframe are the classification categories (0, .5, 1)
And the values are scores between 0 and 1.
"""
df = df if columns is None else df[([] if groupby is None else [groupby]) + list(columns)].copy()
if groupby is not None or threshold is not None:
df = groups_from_scores(df, groupby=groupby, threshold=threshold)
percent = 100. if percent else 1.
if isinstance(df, pd.core.groupby.DataFrameGroupBy):
df = df_from_groups(df, columns=columns) * percent
columns = df.columns if columns is None else columns
if bins is None:
bins = 20
if isinstance(bins, int):
bins = np.linspace(np.min(df.min()), np.max(df.max()), bins)
log.debug('bins: {}'.format(bins))
figs = []
df.plot(kind='hist', alpha=alpha, stacked=stacked, bins=bins)
# for col in df.columns:
# series = df[col] * percent
# log.debug('{}'.format(series))
# figs.append(plt.hist(series, bins=bins, alpha=alpha,
# weights=percent * np.ones_like(series) / len(series.dropna()),
# label=stringify(col)))
plt.legend()
plt.xlabel('Score (%)')
plt.ylabel('Percent')
plt.title('{} Scores for {}'.format(np.sum(df.count()), columns))
plt.draw()
if save or not show:
fig = plt.gcf()
today = datetime.datetime.today()
fig.savefig(os.path.join(IMAGES_PATH, 'score_hist_{:04d}-{:02d}-{:02d}_{:02d}{:02d}.jpg'.format(*today.timetuple())))
if show:
plt.show(block=block)
return figs |
def send(self, message):
"""Sends a message to a Riemann server and returns it's response
:param message: The message to send to the Riemann server
:returns: The response message from Riemann
:raises RiemannError: if the server returns an error
"""
message = message.SerializeToString()
self.socket.sendall(struct.pack('!I', len(message)) + message)
length = struct.unpack('!I', self.socket.recv(4))[0]
response = riemann_client.riemann_pb2.Msg()
response.ParseFromString(socket_recvall(self.socket, length))
if not response.ok:
raise RiemannError(response.error)
return response | Sends a message to a Riemann server and returns it's response
:param message: The message to send to the Riemann server
:returns: The response message from Riemann
:raises RiemannError: if the server returns an error | Below is the the instruction that describes the task:
### Input:
Sends a message to a Riemann server and returns it's response
:param message: The message to send to the Riemann server
:returns: The response message from Riemann
:raises RiemannError: if the server returns an error
### Response:
def send(self, message):
    """Send a message to a Riemann server and return its response.

    :param message: The protobuf message to send to the Riemann server
    :returns: The response message from Riemann
    :raises RiemannError: if the server returns an error
    """
    payload = message.SerializeToString()
    # Length-prefixed framing: 4-byte big-endian length, then the protobuf body.
    self.socket.sendall(struct.pack('!I', len(payload)) + payload)
    # BUGFIX: a bare recv(4) may return fewer than 4 bytes on a stream socket;
    # use socket_recvall (already used for the body) so the length prefix is
    # read completely before unpacking.
    length = struct.unpack('!I', socket_recvall(self.socket, 4))[0]
    response = riemann_client.riemann_pb2.Msg()
    response.ParseFromString(socket_recvall(self.socket, length))
    if not response.ok:
        raise RiemannError(response.error)
    return response
def searchRegex(vd, sheet, moveCursor=False, reverse=False, **kwargs):
    'Set row index if moveCursor, otherwise return list of row indexes.'
    # NOTE: this is a generator.  With moveCursor=True it positions the cursor
    # on the first match and `return`s (ending the iteration); otherwise it
    # yields every matching row index and finally reports the match count.
    def findMatchingColumn(sheet, row, columns, func):
        'Find column for which func matches the displayed value in this row'
        for c in columns:
            if func(c.getDisplayValue(row)):
                return c
    # Merge caller options into the sheet-wide search context so later
    # "search again"-style commands can reuse them.
    vd.searchContext.update(kwargs)
    regex = kwargs.get("regex")
    if regex:
        # Compile and cache the pattern; `error` aborts on an invalid regex.
        vd.searchContext["regex"] = re.compile(regex, regex_flags()) or error('invalid regex: %s' % regex)
    regex = vd.searchContext.get("regex") or fail("no regex")
    # Resolve symbolic column specs into concrete Column objects.
    columns = vd.searchContext.get("columns")
    if columns == "cursorCol":
        columns = [sheet.cursorCol]
    elif columns == "visibleCols":
        columns = tuple(sheet.visibleCols)
    elif isinstance(columns, Column):
        columns = [columns]
    if not columns:
        error('bad columns')
    # `reverse` flips the stored direction (e.g. for "search previous").
    searchBackward = vd.searchContext.get("backward")
    if reverse:
        searchBackward = not searchBackward
    matchingRowIndexes = 0
    # rotate_range walks every row starting from the cursor, wrapping around.
    for r in rotate_range(len(sheet.rows), sheet.cursorRowIndex, reverse=searchBackward):
        c = findMatchingColumn(sheet, sheet.rows[r], columns, regex.search)
        if c:
            if moveCursor:
                sheet.cursorRowIndex = r
                sheet.cursorVisibleColIndex = sheet.visibleCols.index(c)
                return
            else:
                matchingRowIndexes += 1
                yield r
    status('%s matches for /%s/' % (matchingRowIndexes, regex.pattern)) | Set row index if moveCursor, otherwise return list of row indexes.
### Input:
Set row index if moveCursor, otherwise return list of row indexes.
### Response:
def searchRegex(vd, sheet, moveCursor=False, reverse=False, **kwargs):
'Set row index if moveCursor, otherwise return list of row indexes.'
def findMatchingColumn(sheet, row, columns, func):
'Find column for which func matches the displayed value in this row'
for c in columns:
if func(c.getDisplayValue(row)):
return c
vd.searchContext.update(kwargs)
regex = kwargs.get("regex")
if regex:
vd.searchContext["regex"] = re.compile(regex, regex_flags()) or error('invalid regex: %s' % regex)
regex = vd.searchContext.get("regex") or fail("no regex")
columns = vd.searchContext.get("columns")
if columns == "cursorCol":
columns = [sheet.cursorCol]
elif columns == "visibleCols":
columns = tuple(sheet.visibleCols)
elif isinstance(columns, Column):
columns = [columns]
if not columns:
error('bad columns')
searchBackward = vd.searchContext.get("backward")
if reverse:
searchBackward = not searchBackward
matchingRowIndexes = 0
for r in rotate_range(len(sheet.rows), sheet.cursorRowIndex, reverse=searchBackward):
c = findMatchingColumn(sheet, sheet.rows[r], columns, regex.search)
if c:
if moveCursor:
sheet.cursorRowIndex = r
sheet.cursorVisibleColIndex = sheet.visibleCols.index(c)
return
else:
matchingRowIndexes += 1
yield r
status('%s matches for /%s/' % (matchingRowIndexes, regex.pattern)) |
def get_docs_sources_from_ES(self):
"""Get document sources using MGET elasticsearch API"""
docs = [doc for doc, _, _, get_from_ES in self.doc_to_update if get_from_ES]
if docs:
documents = self.docman.elastic.mget(body={"docs": docs}, realtime=True)
return iter(documents["docs"])
else:
return iter([]) | Get document sources using MGET elasticsearch API | Below is the the instruction that describes the task:
### Input:
Get document sources using MGET elasticsearch API
### Response:
def get_docs_sources_from_ES(self):
    """Get document sources using MGET elasticsearch API"""
    # Collect only the entries whose fourth field asks for an ES lookup.
    requested = [entry for entry, _, _, fetch_flag in self.doc_to_update if fetch_flag]
    if not requested:
        return iter([])
    response = self.docman.elastic.mget(body={"docs": requested}, realtime=True)
    return iter(response["docs"])
def launch_tor(config, reactor,
               tor_binary=None,
               progress_updates=None,
               connection_creator=None,
               timeout=None,
               kill_on_stderr=True,
               stdout=None, stderr=None):
    """
    Deprecated; use launch() instead.
    See also controller.py
    """
    # NOTE(review): the `yield` + defer.returnValue pattern implies this is
    # decorated with @defer.inlineCallbacks at its definition site; the
    # decorator is not visible in this chunk -- confirm before refactoring.
    from .controller import launch
    # XXX FIXME are we dealing with options in the config "properly"
    # as far as translating semantics from the old launch_tor to
    # launch()? DataDirectory, User, ControlPort, ...?
    # Delegate to the modern launch(), forwarding the legacy arguments and
    # passing the old-style config object via the private _tor_config hook.
    tor = yield launch(
        reactor,
        stdout=stdout,
        stderr=stderr,
        progress_updates=progress_updates,
        tor_binary=tor_binary,
        connection_creator=connection_creator,
        timeout=timeout,
        kill_on_stderr=kill_on_stderr,
        _tor_config=config,
    )
    # Preserve the old contract: callers received the process, not the Tor object.
    defer.returnValue(tor.process)
See also controller.py | Below is the the instruction that describes the task:
### Input:
Deprecated; use launch() instead.
See also controller.py
### Response:
def launch_tor(config, reactor,
tor_binary=None,
progress_updates=None,
connection_creator=None,
timeout=None,
kill_on_stderr=True,
stdout=None, stderr=None):
"""
Deprecated; use launch() instead.
See also controller.py
"""
from .controller import launch
# XXX FIXME are we dealing with options in the config "properly"
# as far as translating semantics from the old launch_tor to
# launch()? DataDirectory, User, ControlPort, ...?
tor = yield launch(
reactor,
stdout=stdout,
stderr=stderr,
progress_updates=progress_updates,
tor_binary=tor_binary,
connection_creator=connection_creator,
timeout=timeout,
kill_on_stderr=kill_on_stderr,
_tor_config=config,
)
defer.returnValue(tor.process) |
def _sourced_dict(self, source=None, **kwargs):
"""Like ``dict(**kwargs)``, but where the ``source`` key is special.
"""
if source:
kwargs['source'] = source
elif self.source:
kwargs['source'] = self.source
return kwargs | Like ``dict(**kwargs)``, but where the ``source`` key is special. | Below is the the instruction that describes the task:
### Input:
Like ``dict(**kwargs)``, but where the ``source`` key is special.
### Response:
def _sourced_dict(self, source=None, **kwargs):
"""Like ``dict(**kwargs)``, but where the ``source`` key is special.
"""
if source:
kwargs['source'] = source
elif self.source:
kwargs['source'] = self.source
return kwargs |
def get_unused_paths(self):
"""
Returns which include_paths or exclude_paths that were not used via include_path method.
:return: [str] list of filtering paths that were not used.
"""
return [path for path in self.filter.paths if path not in self.seen_paths] | Returns which include_paths or exclude_paths that were not used via include_path method.
:return: [str] list of filtering paths that were not used. | Below is the the instruction that describes the task:
### Input:
Returns which include_paths or exclude_paths that were not used via include_path method.
:return: [str] list of filtering paths that were not used.
### Response:
def get_unused_paths(self):
    """
    Returns which include_paths or exclude_paths that were not used via include_path method.
    :return: [str] list of filtering paths that were not used.
    """
    unused = []
    for candidate in self.filter.paths:
        if candidate not in self.seen_paths:
            unused.append(candidate)
    return unused
def _replace_services_in_args(self, args):
""" Replace service references in arguments list """
_check_type('args', args, list)
new_args = []
for arg in args:
if isinstance(arg, list):
new_args.append(self._replace_services_in_args(arg))
elif isinstance(arg, dict):
new_args.append(self._replace_services_in_kwargs(arg))
elif isinstance(arg, string_types):
new_args.append(self._replace_service(arg))
else:
new_args.append(arg)
return new_args | Replace service references in arguments list | Below is the the instruction that describes the task:
### Input:
Replace service references in arguments list
### Response:
def _replace_services_in_args(self, args):
    """Replace service references in an arguments list.

    Lists and dicts are recursed into; strings are passed through the
    service-reference substitution; everything else is kept as-is.
    """
    _check_type('args', args, list)

    def _convert(value):
        # One-element dispatch mirroring the original elif chain.
        if isinstance(value, list):
            return self._replace_services_in_args(value)
        if isinstance(value, dict):
            return self._replace_services_in_kwargs(value)
        if isinstance(value, string_types):
            return self._replace_service(value)
        return value

    return [_convert(item) for item in args]
def _get_repos(url):
"""Gets repos in url
:param url: Url
:return: List of repositories in given url
"""
current_page = 1
there_is_something_left = True
repos_list = []
while there_is_something_left:
api_driver = GithubRawApi(
url,
url_params={"page": current_page},
get_api_content_now=True
) # driver to parse API content
for repo in api_driver.api_content: # list of raw repository
repo_name = repo["name"]
repo_user = repo["owner"]["login"]
repos_list.append(
GithubUserRepository(repo_user, repo_name))
there_is_something_left = bool(api_driver.api_content)
current_page += 1
return repos_list | Gets repos in url
:param url: Url
:return: List of repositories in given url | Below is the the instruction that describes the task:
### Input:
Gets repos in url
:param url: Url
:return: List of repositories in given url
### Response:
def _get_repos(url):
    """Gets repos in url
    :param url: Url
    :return: List of repositories in given url
    """
    collected = []
    page = 1
    more_pages = True
    while more_pages:
        # One API call per page; get_api_content_now fetches eagerly.
        driver = GithubRawApi(
            url,
            url_params={"page": page},
            get_api_content_now=True
        )
        for raw_repo in driver.api_content:
            collected.append(
                GithubUserRepository(raw_repo["owner"]["login"], raw_repo["name"])
            )
        # An empty page means we have walked past the last repository.
        more_pages = bool(driver.api_content)
        page += 1
    return collected
def _lazy_listen(self):
"""
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
"""
if all([
self._loop,
not self.running,
self._subscriptions or (self._pending and not self._pending.empty()),
]):
self._task = self._loop.create_task(self._listen()) | Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active | Below is the the instruction that describes the task:
### Input:
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
### Response:
def _lazy_listen(self):
"""
Ensures that the listener task only runs when actually needed.
This function is a no-op if any of the preconditions is not met.
Preconditions are:
* The application is running (self._loop is set)
* The task is not already running
* There are subscriptions: either pending, or active
"""
if all([
self._loop,
not self.running,
self._subscriptions or (self._pending and not self._pending.empty()),
]):
self._task = self._loop.create_task(self._listen()) |
def is_svn_page(html):
    # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
    """
    Returns true if the page appears to be the index page of an svn repository
    """
    # Heuristic: SVN's HTML index has a "<title>... Revision N:" header and a
    # "Powered by Subversion" footer (optionally wrapped in a link).  Note the
    # `and` yields the second match object (or a falsy value), not a bool.
    return (re.search(r'<title>[^<]*Revision \d+:', html) and
            re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) | Returns true if the page appears to be the index page of an svn repository
### Input:
Returns true if the page appears to be the index page of an svn repository
### Response:
def is_svn_page(html):
# type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)) |
def on_hid_pnp(self, hid_event = None):
    """This function will be called on per class event changes, so we need
    to test if our device has being connected or is just gone"""
    # hid_event is one of: None (plain poll), "connected", "disconnected".
    # keep old reference for UI updates
    old_device = self.device
    if hid_event:
        print("Hey, a hid device just %s!" % hid_event)
    if hid_event == "connected":
        # test if our device is available
        if self.device:
            # see, at this point we could detect multiple devices!
            # but... we only want just one
            pass
        else:
            self.test_for_connection()
    elif hid_event == "disconnected":
        # the hid object is automatically closed on disconnection we just
        # test if still is plugged (important as the object might be
        # closing)
        if self.device and not self.device.is_plugged():
            self.device = None
            print("you removed my hid device!")
    else:
        # poll for devices
        self.test_for_connection()
    # Placeholder: refresh the UI if the tracked device changed.
    if old_device != self.device:
        # update ui
        pass | This function will be called on per class event changes, so we need
to test if our device has being connected or is just gone | Below is the the instruction that describes the task:
### Input:
This function will be called on per class event changes, so we need
to test if our device has being connected or is just gone
### Response:
def on_hid_pnp(self, hid_event = None):
"""This function will be called on per class event changes, so we need
to test if our device has being connected or is just gone"""
# keep old reference for UI updates
old_device = self.device
if hid_event:
print("Hey, a hid device just %s!" % hid_event)
if hid_event == "connected":
# test if our device is available
if self.device:
# see, at this point we could detect multiple devices!
# but... we only want just one
pass
else:
self.test_for_connection()
elif hid_event == "disconnected":
# the hid object is automatically closed on disconnection we just
# test if still is plugged (important as the object might be
# closing)
if self.device and not self.device.is_plugged():
self.device = None
print("you removed my hid device!")
else:
# poll for devices
self.test_for_connection()
if old_device != self.device:
# update ui
pass |
def download(url, dir, filename=None, expect_size=None):
    """
    Download URL to a directory.
    Will figure out the filename automatically from URL, if not given.
    """
    mkdir_p(dir)
    if filename is None:
        # Default to the last path component of the URL.
        filename = url.split('/')[-1]
    fpath = os.path.join(dir, filename)
    if os.path.isfile(fpath):
        # Skip the download only when the on-disk size matches the expectation.
        if expect_size is not None and os.stat(fpath).st_size == expect_size:
            logger.info("File {} exists! Skip download.".format(filename))
            return fpath
        else:
            # NOTE(review): Logger.warn is deprecated in stdlib logging in
            # favour of Logger.warning -- confirm `logger` is a stdlib logger.
            logger.warn("File {} exists. Will overwrite with a new download!".format(filename))
    def hook(t):
        # Adapt urlretrieve's (block_count, block_size, total_size) callback
        # into tqdm progress updates; last_b tracks the previous block count.
        last_b = [0]
        def inner(b, bsize, tsize=None):
            if tsize is not None:
                t.total = tsize
            t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner
    try:
        with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
            fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
        statinfo = os.stat(fpath)
        size = statinfo.st_size
    except IOError:
        logger.error("Failed to download {}".format(url))
        raise
    assert size > 0, "Downloaded an empty file from {}!".format(url)
    if expect_size is not None and size != expect_size:
        # A size mismatch is logged but not raised -- the caller still gets
        # back the (possibly corrupt) file path.
        logger.error("File downloaded from {} does not match the expected size!".format(url))
        logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
    # TODO human-readable size
    logger.info('Succesfully downloaded ' + filename + ". " + str(size) + ' bytes.')
    return fpath | Download URL to a directory.
Will figure out the filename automatically from URL, if not given. | Below is the the instruction that describes the task:
### Input:
Download URL to a directory.
Will figure out the filename automatically from URL, if not given.
### Response:
def download(url, dir, filename=None, expect_size=None):
"""
Download URL to a directory.
Will figure out the filename automatically from URL, if not given.
"""
mkdir_p(dir)
if filename is None:
filename = url.split('/')[-1]
fpath = os.path.join(dir, filename)
if os.path.isfile(fpath):
if expect_size is not None and os.stat(fpath).st_size == expect_size:
logger.info("File {} exists! Skip download.".format(filename))
return fpath
else:
logger.warn("File {} exists. Will overwrite with a new download!".format(filename))
def hook(t):
last_b = [0]
def inner(b, bsize, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
try:
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
statinfo = os.stat(fpath)
size = statinfo.st_size
except IOError:
logger.error("Failed to download {}".format(url))
raise
assert size > 0, "Downloaded an empty file from {}!".format(url)
if expect_size is not None and size != expect_size:
logger.error("File downloaded from {} does not match the expected size!".format(url))
logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
# TODO human-readable size
logger.info('Succesfully downloaded ' + filename + ". " + str(size) + ' bytes.')
return fpath |
def _restoreResults(newdir,origdir):
""" Move (not copy) all files from newdir back to the original directory
"""
for fname in glob.glob(os.path.join(newdir,'*')):
shutil.move(fname,os.path.join(origdir,os.path.basename(fname))) | Move (not copy) all files from newdir back to the original directory | Below is the the instruction that describes the task:
### Input:
Move (not copy) all files from newdir back to the original directory
### Response:
def _restoreResults(newdir, origdir):
    """ Move (not copy) all files from newdir back to the original directory
    """
    for src in glob.glob(os.path.join(newdir, '*')):
        # Rebuild the destination path under the original directory.
        target = os.path.join(origdir, os.path.basename(src))
        shutil.move(src, target)
def make_replacement_visitor(find_expression, replace_expression):
"""Return a visitor function that replaces every instance of one expression with another one."""
def visitor_fn(expression):
"""Return the replacement if this expression matches the expression we're looking for."""
if expression == find_expression:
return replace_expression
else:
return expression
return visitor_fn | Return a visitor function that replaces every instance of one expression with another one. | Below is the the instruction that describes the task:
### Input:
Return a visitor function that replaces every instance of one expression with another one.
### Response:
def make_replacement_visitor(find_expression, replace_expression):
    """Build a visitor that swaps every ``find_expression`` for ``replace_expression``."""
    def visitor_fn(expression):
        """Map the target expression to its replacement; pass all others through."""
        return replace_expression if expression == find_expression else expression
    return visitor_fn
def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
"""
Build tasks for the computation of Born effective charges and add them to the work.
Args:
scf_task: ScfTask object.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
Return:
(ddk_tasks, bec_tasks)
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
# DDK calculations (self-consistent to get electric field).
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation and phonons
# Each BEC task is connected to all the previous DDK task and to the scf_task.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)
bec_tasks = []
for bec_inp in bec_inputs:
bec_task = self.register_bec_task(bec_inp, deps=bec_deps)
bec_tasks.append(bec_task)
return ddk_tasks, bec_tasks | Build tasks for the computation of Born effective charges and add them to the work.
Args:
scf_task: ScfTask object.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
Return:
(ddk_tasks, bec_tasks) | Below is the the instruction that describes the task:
### Input:
Build tasks for the computation of Born effective charges and add them to the work.
Args:
scf_task: ScfTask object.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
Return:
(ddk_tasks, bec_tasks)
### Response:
def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
    """
    Build tasks for the computation of Born effective charges and add them to the work.
    Args:
        scf_task: ScfTask object.
        ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
            None to use AbiPy default.
        ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
            None to use AbiPy default.
    Return:
        (ddk_tasks, bec_tasks)
    """
    if not isinstance(scf_task, ScfTask):
        raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
    # DDK runs: one self-consistent electric-field derivative task per
    # perturbation, each consuming the ground-state wavefunctions.
    ddk_tasks = [self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
                 for ddk_inp in scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)]
    # Every BEC task depends on all DDK tasks plus the SCF wavefunctions.
    bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
    bec_deps[scf_task] = "WFK"
    bec_tasks = [self.register_bec_task(bec_inp, deps=bec_deps)
                 for bec_inp in scf_task.input.make_bec_inputs(tolerance=ph_tolerance)]
    return ddk_tasks, bec_tasks
def hdg60(msg):
"""Megnetic heading of aircraft
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: heading in degrees to megnetic north (from 0 to 360)
"""
d = hex2bin(data(msg))
if d[0] == '0':
return None
sign = int(d[1]) # 1 -> west
value = bin2int(d[2:12])
if sign:
value = value - 1024
hdg = value * 90 / 512.0 # degree
# convert from [-180, 180] to [0, 360]
if hdg < 0:
hdg = 360 + hdg
return round(hdg, 3) | Megnetic heading of aircraft
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: heading in degrees to megnetic north (from 0 to 360) | Below is the the instruction that describes the task:
### Input:
Megnetic heading of aircraft
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: heading in degrees to megnetic north (from 0 to 360)
### Response:
def hdg60(msg):
    """Magnetic heading of aircraft
    Args:
        msg (String): 28 bytes hexadecimal message (BDS60) string
    Returns:
        float: heading in degrees to magnetic north (from 0 to 360)
    """
    d = hex2bin(data(msg))
    # First bit is the status flag: 0 means the heading field is unavailable.
    if d[0] == '0':
        return None
    sign = int(d[1]) # 1 -> west
    value = bin2int(d[2:12])
    # Two's-complement decode of the 11-bit signed field (sign bit + 10 bits).
    if sign:
        value = value - 1024
    # LSB is 90/512 degrees.
    hdg = value * 90 / 512.0 # degree
    # convert from [-180, 180] to [0, 360]
    if hdg < 0:
        hdg = 360 + hdg
    return round(hdg, 3) |
def get_dot_target_name(version=None, module=None):
"""Returns the current version/module in -dot- notation which is used by `target:` parameters."""
version = version or get_current_version_name()
module = module or get_current_module_name()
return '-dot-'.join((version, module)) | Returns the current version/module in -dot- notation which is used by `target:` parameters. | Below is the the instruction that describes the task:
### Input:
Returns the current version/module in -dot- notation which is used by `target:` parameters.
### Response:
def get_dot_target_name(version=None, module=None):
    """Returns the current version/module in -dot- notation which is used by `target:` parameters."""
    resolved_version = version or get_current_version_name()
    resolved_module = module or get_current_module_name()
    return resolved_version + '-dot-' + resolved_module
def _get_revision(self):
"""Validate and return the revision to use for current command
"""
assert self._revisions, "no migration revision exist"
revision = self._rev or self._revisions[-1]
# revision count must be less or equal since revisions are ordered
assert revision in self._revisions, "invalid revision specified"
return revision | Validate and return the revision to use for current command | Below is the the instruction that describes the task:
### Input:
Validate and return the revision to use for current command
### Response:
def _get_revision(self):
"""Validate and return the revision to use for current command
"""
assert self._revisions, "no migration revision exist"
revision = self._rev or self._revisions[-1]
# revision count must be less or equal since revisions are ordered
assert revision in self._revisions, "invalid revision specified"
return revision |
def autoencoder_discrete_cifar():
"""Discrete autoencoder model for compressing cifar."""
hparams = autoencoder_ordered_discrete()
hparams.bottleneck_noise = 0.0
hparams.bottleneck_bits = 90
hparams.num_hidden_layers = 2
hparams.hidden_size = 256
hparams.num_residual_layers = 4
hparams.batch_size = 32
hparams.learning_rate_constant = 1.0
return hparams | Discrete autoencoder model for compressing cifar. | Below is the the instruction that describes the task:
### Input:
Discrete autoencoder model for compressing cifar.
### Response:
def autoencoder_discrete_cifar():
    """Discrete autoencoder model for compressing cifar."""
    hparams = autoencoder_ordered_discrete()
    # Apply the cifar-specific overrides on top of the base hparams.
    overrides = {
        "bottleneck_noise": 0.0,
        "bottleneck_bits": 90,
        "num_hidden_layers": 2,
        "hidden_size": 256,
        "num_residual_layers": 4,
        "batch_size": 32,
        "learning_rate_constant": 1.0,
    }
    for name, value in overrides.items():
        setattr(hparams, name, value)
    return hparams
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.