code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs):
"""Diff this index against the working copy or a Tree or Commit object
For a documentation of the parameters and return values, see
Diffable.diff
:note:
Will only work with indices that represent the default git index as
they have not been initialized with a stream.
"""
# index against index is always empty
if other is self.Index:
return diff.DiffIndex()
# index against anything but None is a reverse diff with the respective
# item. Handle existing -R flags properly. Transform strings to the object
# so that we can call diff on it
if isinstance(other, string_types):
other = self.repo.rev_parse(other)
# END object conversion
if isinstance(other, Object):
# invert the existing R flag
cur_val = kwargs.get('R', False)
kwargs['R'] = not cur_val
return other.diff(self.Index, paths, create_patch, **kwargs)
# END diff against other item handling
# if other is not None here, something is wrong
if other is not None:
raise ValueError("other must be None, Diffable.Index, a Tree or Commit, was %r" % other)
# diff against working copy - can be handled by superclass natively
return super(IndexFile, self).diff(other, paths, create_patch, **kwargs) | Diff this index against the working copy or a Tree or Commit object
For a documentation of the parameters and return values, see
Diffable.diff
:note:
Will only work with indices that represent the default git index as
they have not been initialized with a stream. | Below is the the instruction that describes the task:
### Input:
Diff this index against the working copy or a Tree or Commit object
For a documentation of the parameters and return values, see
Diffable.diff
:note:
Will only work with indices that represent the default git index as
they have not been initialized with a stream.
### Response:
def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs):
    """Diff this index against the working copy or a Tree or Commit object.

    For a documentation of the parameters and return values, see
    Diffable.diff.

    :param other: Diffable.Index (default), a Tree or Commit object, a rev
        string naming one, or None to diff against the working copy.
    :note:
        Will only work with indices that represent the default git index as
        they have not been initialized with a stream.
    """
    # index against index is always empty
    if other is self.Index:
        return diff.DiffIndex()
    # index against anything but None is a reverse diff with the respective
    # item. Handle existing -R flags properly. Transform strings to the object
    # so that we can call diff on it
    if isinstance(other, string_types):
        other = self.repo.rev_parse(other)
    # END object conversion
    if isinstance(other, Object):
        # invert the existing R flag so that the diff direction stays
        # consistent from the caller's point of view
        cur_val = kwargs.get('R', False)
        kwargs['R'] = not cur_val
        return other.diff(self.Index, paths, create_patch, **kwargs)
    # END diff against other item handling
    # if other is not None here, something is wrong
    if other is not None:
        raise ValueError("other must be None, Diffable.Index, a Tree or Commit, was %r" % other)
    # diff against working copy - can be handled by superclass natively
    return super(IndexFile, self).diff(other, paths, create_patch, **kwargs)
def save(self, **kwargs):
"""
Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field.
"""
entry = super(FormForForm, self).save(commit=False)
entry.form = self.form
entry.entry_time = now()
entry.save()
entry_fields = entry.fields.values_list("field_id", flat=True)
new_entry_fields = []
for field in self.form_fields:
field_key = "field_%s" % field.id
value = self.cleaned_data[field_key]
if value and self.fields[field_key].widget.needs_multipart_form:
value = fs.save(join("forms", str(uuid4()), value.name), value)
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
if field.id in entry_fields:
field_entry = entry.fields.get(field_id=field.id)
field_entry.value = value
field_entry.save()
else:
new = {"entry": entry, "field_id": field.id, "value": value}
new_entry_fields.append(FieldEntry(**new))
if new_entry_fields:
FieldEntry.objects.bulk_create(new_entry_fields)
return entry | Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field. | Below is the instruction that describes the task:
### Input:
Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field.
### Response:
def save(self, **kwargs):
    """
    Create a ``FormEntry`` instance and related ``FieldEntry``
    instances for each form field.

    Existing field entries are updated in place; entries for fields
    without one yet are collected and bulk-created at the end.
    """
    entry = super(FormForForm, self).save(commit=False)
    entry.form = self.form
    entry.entry_time = now()
    entry.save()
    # IDs of fields that already have a stored entry for this form entry.
    entry_fields = entry.fields.values_list("field_id", flat=True)
    new_entry_fields = []
    for field in self.form_fields:
        field_key = "field_%s" % field.id
        value = self.cleaned_data[field_key]
        # File uploads are persisted to storage under a unique path;
        # the stored path becomes the saved value.
        if value and self.fields[field_key].widget.needs_multipart_form:
            value = fs.save(join("forms", str(uuid4()), value.name), value)
        # Multi-valued fields (e.g. multiple-choice) are stored as a
        # comma-separated string.
        if isinstance(value, list):
            value = ", ".join([v.strip() for v in value])
        if field.id in entry_fields:
            # Update the existing entry for this field.
            field_entry = entry.fields.get(field_id=field.id)
            field_entry.value = value
            field_entry.save()
        else:
            new = {"entry": entry, "field_id": field.id, "value": value}
            new_entry_fields.append(FieldEntry(**new))
    if new_entry_fields:
        FieldEntry.objects.bulk_create(new_entry_fields)
    return entry
def watch(cams, path=None, delay=10):
"""Get screenshots from all cams at defined intervall."""
while True:
for c in cams:
c.snap(path)
time.sleep(delay) | Get screenshots from all cams at defined intervall. | Below is the the instruction that describes the task:
### Input:
Get screenshots from all cams at defined intervall.
### Response:
def watch(cams, path=None, delay=10):
    """Endlessly capture screenshots from all cams at a fixed interval.

    :param cams: iterable of camera objects exposing ``snap(path)``
    :param path: destination handed to each camera's ``snap`` call
    :param delay: seconds to sleep after each full pass over ``cams``
    """
    while True:
        for camera in cams:
            camera.snap(path)
        time.sleep(delay)
def hsps(self):
"""
Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs).
"""
for readAlignments in self:
for alignment in readAlignments:
for hsp in alignment.hsps:
yield hsp | Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs). | Below is the instruction that describes the task:
### Input:
Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs).
### Response:
def hsps(self):
    """
    Provide access to all HSPs for all alignments of all reads.

    @return: A generator that yields HSPs (or LSPs).
    """
    # Flatten the three-level hierarchy: reads -> alignments -> HSPs.
    for read_alignments in self:
        for single_alignment in read_alignments:
            for single_hsp in single_alignment.hsps:
                yield single_hsp
def render_error(category, error_message, error_codes, exception=None):
""" Render an error page.
Arguments:
category -- The category of the request
error_message -- The message to provide to the error template
error_codes -- The applicable HTTP error code(s). Will usually be an
integer or a list of integers; the HTTP error response will always
be the first error code in the list, and the others are alternates
for looking up the error template to use.
exception -- Any exception that led to this error page
"""
if isinstance(error_codes, int):
error_codes = [error_codes]
error_code = error_codes[0]
template_list = [str(code) for code in error_codes]
template_list.append(str(int(error_code / 100) * 100))
template_list.append('error')
template = map_template(category, template_list)
if template:
return render_publ_template(
template,
_url_root=request.url_root,
category=Category(category),
error={'code': error_code, 'message': error_message},
exception=exception)[0], error_code
# no template found, so fall back to default Flask handler
return flask.abort(error_code) | Render an error page.
Arguments:
category -- The category of the request
error_message -- The message to provide to the error template
error_codes -- The applicable HTTP error code(s). Will usually be an
integer or a list of integers; the HTTP error response will always
be the first error code in the list, and the others are alternates
for looking up the error template to use.
exception -- Any exception that led to this error page | Below is the the instruction that describes the task:
### Input:
Render an error page.
Arguments:
category -- The category of the request
error_message -- The message to provide to the error template
error_codes -- The applicable HTTP error code(s). Will usually be an
integer or a list of integers; the HTTP error response will always
be the first error code in the list, and the others are alternates
for looking up the error template to use.
exception -- Any exception that led to this error page
### Response:
def render_error(category, error_message, error_codes, exception=None):
    """ Render an error page.

    Arguments:

    category -- The category of the request
    error_message -- The message to provide to the error template
    error_codes -- The applicable HTTP error code(s). Will usually be an
        integer or a list of integers; the HTTP error response will always
        be the first error code in the list, and the others are alternates
        for looking up the error template to use.
    exception -- Any exception that led to this error page
    """
    # Normalize a bare integer to a one-element list so both forms work.
    if isinstance(error_codes, int):
        error_codes = [error_codes]
    # The first code is the one actually sent in the HTTP response.
    error_code = error_codes[0]
    # Template lookup order: each specific code, then the code's class
    # (e.g. 404 -> '400'), then a generic 'error' template.
    template_list = [str(code) for code in error_codes]
    template_list.append(str(int(error_code / 100) * 100))
    template_list.append('error')
    template = map_template(category, template_list)
    if template:
        return render_publ_template(
            template,
            _url_root=request.url_root,
            category=Category(category),
            error={'code': error_code, 'message': error_message},
            exception=exception)[0], error_code
    # no template found, so fall back to default Flask handler
    return flask.abort(error_code)
def get_data_path(module_id: str) -> Path:
"""
Get the path for persistent storage of a module.
This method creates the queried path if not existing.
Args:
module_id (str): Module ID
Returns:
The data path of indicated module.
"""
profile = coordinator.profile
data_path = get_base_path() / 'profiles' / profile / module_id
if not data_path.exists():
data_path.mkdir(parents=True)
return data_path | Get the path for persistent storage of a module.
This method creates the queried path if not existing.
Args:
module_id (str): Module ID
Returns:
The data path of indicated module. | Below is the the instruction that describes the task:
### Input:
Get the path for persistent storage of a module.
This method creates the queried path if not existing.
Args:
module_id (str): Module ID
Returns:
The data path of indicated module.
### Response:
def get_data_path(module_id: str) -> Path:
    """
    Get the path for persistent storage of a module.

    This method creates the queried path if not existing.

    Args:
        module_id (str): Module ID

    Returns:
        The data path of indicated module.
    """
    profile = coordinator.profile
    data_path = get_base_path() / 'profiles' / profile / module_id
    # exist_ok avoids the check-then-create race of the previous
    # exists()/mkdir() pair, so concurrent callers cannot collide.
    data_path.mkdir(parents=True, exist_ok=True)
    return data_path
def add_cli_write_bel_namespace(main: click.Group) -> click.Group: # noqa: D202
"""Add a ``write_bel_namespace`` command to main :mod:`click` function."""
@main.command()
@click.option('-d', '--directory', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd(),
help='output directory')
@click.pass_obj
def write(manager: BELNamespaceManagerMixin, directory: str):
"""Write a BEL namespace names/identifiers to terminology store."""
manager.write_directory(directory)
return main | Add a ``write_bel_namespace`` command to main :mod:`click` function. | Below is the the instruction that describes the task:
### Input:
Add a ``write_bel_namespace`` command to main :mod:`click` function.
### Response:
def add_cli_write_bel_namespace(main: click.Group) -> click.Group:  # noqa: D202
    """Add a ``write_bel_namespace`` command to main :mod:`click` function."""

    @main.command()
    # Pass os.getcwd as a callable so the default is resolved at command
    # invocation time, not frozen to the working directory at import time.
    @click.option('-d', '--directory', type=click.Path(file_okay=False, dir_okay=True), default=os.getcwd,
                  help='output directory')
    @click.pass_obj
    def write(manager: BELNamespaceManagerMixin, directory: str):
        """Write a BEL namespace names/identifiers to terminology store."""
        manager.write_directory(directory)

    return main
def _add_page(self, text):
""" Helper function for PDFText, to have the document
add a page, and retry adding a large block of
text that would otherwise have been to long for the
page.
"""
save_cursor = self.parent.document.page.cursor.copy()
save_cursor.x_reset()
save_cursor.y_reset()
self.parent.document.add_page()
self.parent.document.set_cursor(save_cursor)
self.parent.document.add_text(text) | Helper function for PDFText, to have the document
add a page, and retry adding a large block of
text that would otherwise have been to long for the
page. | Below is the the instruction that describes the task:
### Input:
Helper function for PDFText, to have the document
add a page, and retry adding a large block of
text that would otherwise have been to long for the
page.
### Response:
def _add_page(self, text):
    """Helper used by PDFText: when a block of text is too long for the
    current page, start a fresh page and retry adding the text there.
    """
    # Copy the cursor, then reset it to the page origin for the new page.
    fresh_cursor = self.parent.document.page.cursor.copy()
    fresh_cursor.x_reset()
    fresh_cursor.y_reset()
    self.parent.document.add_page()
    self.parent.document.set_cursor(fresh_cursor)
    self.parent.document.add_text(text)
def set_flashing(self, duration, hsv1, hsv2):
"""Turn the bulb on, flashing with two colors."""
self.set_transition_time(100)
for step in range(0, int(duration/2)):
self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
time.sleep(1)
self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
time.sleep(1) | Turn the bulb on, flashing with two colors. | Below is the the instruction that describes the task:
### Input:
Turn the bulb on, flashing with two colors.
### Response:
def set_flashing(self, duration, hsv1, hsv2):
    """Turn the bulb on, flashing with two colors."""
    self.set_transition_time(100)
    # Each cycle shows both colors for one second apiece, so the cycle
    # count is half the requested duration (in seconds).
    cycles = int(duration / 2)
    for _ in range(cycles):
        self.set_color_hsv(hsv1[0], hsv1[1], hsv1[2])
        time.sleep(1)
        self.set_color_hsv(hsv2[0], hsv2[1], hsv2[2])
        time.sleep(1)
def from_text(text):
"""Convert text into a DNS rdata class value.
@param text: the text
@type text: string
@rtype: int
@raises dns.rdataclass.UnknownRdataClass: the class is unknown
@raises ValueError: the rdata class value is not >= 0 and <= 65535
"""
value = _by_text.get(text.upper())
if value is None:
match = _unknown_class_pattern.match(text)
if match == None:
raise UnknownRdataclass
value = int(match.group(1))
if value < 0 or value > 65535:
raise ValueError("class must be between >= 0 and <= 65535")
return value | Convert text into a DNS rdata class value.
@param text: the text
@type text: string
@rtype: int
@raises dns.rdataclass.UnknownRdataClass: the class is unknown
@raises ValueError: the rdata class value is not >= 0 and <= 65535 | Below is the the instruction that describes the task:
### Input:
Convert text into a DNS rdata class value.
@param text: the text
@type text: string
@rtype: int
@raises dns.rdataclass.UnknownRdataClass: the class is unknown
@raises ValueError: the rdata class value is not >= 0 and <= 65535
### Response:
def from_text(text):
    """Convert text into a DNS rdata class value.

    @param text: the text
    @type text: string
    @rtype: int
    @raises dns.rdataclass.UnknownRdataclass: the class is unknown
    @raises ValueError: the rdata class value is not >= 0 and <= 65535
    """
    value = _by_text.get(text.upper())
    if value is None:
        # Not a mnemonic; try the generic CLASSnnn form.
        match = _unknown_class_pattern.match(text)
        if match is None:  # was `match == None`; compare to None with `is`
            raise UnknownRdataclass
        value = int(match.group(1))
        if value < 0 or value > 65535:
            raise ValueError("class must be between >= 0 and <= 65535")
    return value
def generate_modules_cache(self, modules, underlined=None,
task_handle=taskhandle.NullTaskHandle()):
"""Generate global name cache for modules listed in `modules`"""
job_set = task_handle.create_jobset(
'Generatig autoimport cache for modules', len(modules))
for modname in modules:
job_set.started_job('Working on <%s>' % modname)
if modname.endswith('.*'):
mod = self.project.find_module(modname[:-2])
if mod:
for sub in submodules(mod):
self.update_resource(sub, underlined)
else:
self.update_module(modname, underlined)
job_set.finished_job() | Generate global name cache for modules listed in `modules` | Below is the the instruction that describes the task:
### Input:
Generate global name cache for modules listed in `modules`
### Response:
def generate_modules_cache(self, modules, underlined=None,
                           task_handle=taskhandle.NullTaskHandle()):
    """Generate global name cache for modules listed in `modules`.

    :param modules: module names; a trailing ``.*`` means the module and
        all of its submodules
    :param underlined: whether to include underlined (private) names
    :param task_handle: progress reporting handle
    """
    # Fixed typo in the progress message ('Generatig' -> 'Generating').
    job_set = task_handle.create_jobset(
        'Generating autoimport cache for modules', len(modules))
    for modname in modules:
        job_set.started_job('Working on <%s>' % modname)
        if modname.endswith('.*'):
            # Expand the wildcard to the module plus every submodule.
            mod = self.project.find_module(modname[:-2])
            if mod:
                for sub in submodules(mod):
                    self.update_resource(sub, underlined)
        else:
            self.update_module(modname, underlined)
        job_set.finished_job()
def WideResnetBlock(channels, strides=(1, 1), channel_mismatch=False):
"""WideResnet convolutational block."""
main = layers.Serial(layers.BatchNorm(), layers.Relu(),
layers.Conv(channels, (3, 3), strides, padding='SAME'),
layers.BatchNorm(), layers.Relu(),
layers.Conv(channels, (3, 3), padding='SAME'))
shortcut = layers.Identity() if not channel_mismatch else layers.Conv(
channels, (3, 3), strides, padding='SAME')
return layers.Serial(
layers.Branch(), layers.Parallel(main, shortcut), layers.SumBranches()) | WideResnet convolutational block. | Below is the the instruction that describes the task:
### Input:
WideResnet convolutational block.
### Response:
def WideResnetBlock(channels, strides=(1, 1), channel_mismatch=False):
    """WideResNet convolutional block."""
    # Main path: BN -> ReLU -> Conv -> BN -> ReLU -> Conv.
    main = layers.Serial(layers.BatchNorm(), layers.Relu(),
                         layers.Conv(channels, (3, 3), strides, padding='SAME'),
                         layers.BatchNorm(), layers.Relu(),
                         layers.Conv(channels, (3, 3), padding='SAME'))
    # Shortcut path: plain identity, unless the channel count changes, in
    # which case a strided convolution matches the output shape.
    if channel_mismatch:
        shortcut = layers.Conv(channels, (3, 3), strides, padding='SAME')
    else:
        shortcut = layers.Identity()
    return layers.Serial(
        layers.Branch(), layers.Parallel(main, shortcut), layers.SumBranches())
def upTo(self, key):
""" Returns the urn up to given level using URN Constants
:param key: Identifier of the wished resource using URN constants
:type key: int
:returns: String representation of the partial URN requested
:rtype: str
:Example:
>>> a = URN(urn="urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.1")
>>> a.upTo(URN.TEXTGROUP) == "urn:cts:latinLit:phi1294"
"""
middle = [
component
for component in [self.__parsed["textgroup"], self.__parsed["work"], self.__parsed["version"]]
if component is not None
]
if key == URN.COMPLETE:
return self.__str__()
elif key == URN.NAMESPACE:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"]])
elif key == URN.TEXTGROUP and self.__parsed["textgroup"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
self.__parsed["textgroup"]
])
elif key == URN.WORK and self.__parsed["work"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join([self.__parsed["textgroup"], self.__parsed["work"]])
])
elif key == URN.VERSION and self.__parsed["version"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join(middle)
])
elif key == URN.NO_PASSAGE and self.__parsed["work"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join(middle)
])
elif key == URN.PASSAGE and self.__parsed["reference"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join(middle),
str(self.reference)
])
elif key == URN.PASSAGE_START and self.__parsed["reference"]:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join(middle),
str(self.reference.start)
])
elif key == URN.PASSAGE_END and self.__parsed["reference"] and self.reference.end is not None:
return ":".join([
"urn",
self.__parsed["urn_namespace"],
self.__parsed["cts_namespace"],
".".join(middle),
str(self.reference.end)
])
else:
raise KeyError("Provided key is not recognized.") | Returns the urn up to given level using URN Constants
:param key: Identifier of the wished resource using URN constants
:type key: int
:returns: String representation of the partial URN requested
:rtype: str
:Example:
>>> a = URN(urn="urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.1")
>>> a.upTo(URN.TEXTGROUP) == "urn:cts:latinLit:phi1294" | Below is the the instruction that describes the task:
### Input:
Returns the urn up to given level using URN Constants
:param key: Identifier of the wished resource using URN constants
:type key: int
:returns: String representation of the partial URN requested
:rtype: str
:Example:
>>> a = URN(urn="urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.1")
>>> a.upTo(URN.TEXTGROUP) == "urn:cts:latinLit:phi1294"
### Response:
def upTo(self, key):
    """ Returns the urn up to given level using URN Constants

    :param key: Identifier of the wished resource using URN constants
    :type key: int
    :returns: String representation of the partial URN requested
    :rtype: str
    :raises KeyError: if `key` is not a recognized URN constant, or the
        component it requires is absent from this URN

    :Example:
        >>> a = URN(urn="urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.1")
        >>> a.upTo(URN.TEXTGROUP) == "urn:cts:latinLit:phi1294"
    """
    # Dot-joined work identifier components that are present, in
    # textgroup.work.version order.
    middle = [
        component
        for component in [self.__parsed["textgroup"], self.__parsed["work"], self.__parsed["version"]]
        if component is not None
    ]
    if key == URN.COMPLETE:
        return self.__str__()
    elif key == URN.NAMESPACE:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"]])
    elif key == URN.TEXTGROUP and self.__parsed["textgroup"]:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            self.__parsed["textgroup"]
        ])
    elif key == URN.WORK and self.__parsed["work"]:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join([self.__parsed["textgroup"], self.__parsed["work"]])
        ])
    elif key == URN.VERSION and self.__parsed["version"]:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle)
        ])
    elif key == URN.NO_PASSAGE and self.__parsed["work"]:
        # Full work identifier, with any passage reference stripped.
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle)
        ])
    elif key == URN.PASSAGE and self.__parsed["reference"]:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference)
        ])
    elif key == URN.PASSAGE_START and self.__parsed["reference"]:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference.start)
        ])
    elif key == URN.PASSAGE_END and self.__parsed["reference"] and self.reference.end is not None:
        return ":".join([
            "urn",
            self.__parsed["urn_namespace"],
            self.__parsed["cts_namespace"],
            ".".join(middle),
            str(self.reference.end)
        ])
    else:
        raise KeyError("Provided key is not recognized.")
def create_from_row(cls, table_row):
"""Create a `JobDetails` from an `astropy.table.row.Row` """
kwargs = {}
for key in table_row.colnames:
kwargs[key] = table_row[key]
infile_refs = kwargs.pop('infile_refs')
outfile_refs = kwargs.pop('outfile_refs')
rmfile_refs = kwargs.pop('rmfile_refs')
intfile_refs = kwargs.pop('intfile_refs')
kwargs['infile_ids'] = np.arange(infile_refs[0], infile_refs[1])
kwargs['outfile_ids'] = np.arange(outfile_refs[0], outfile_refs[1])
kwargs['rmfile_ids'] = np.arange(rmfile_refs[0], rmfile_refs[1])
kwargs['intfile_ids'] = np.arange(intfile_refs[0], intfile_refs[1])
return cls(**kwargs) | Create a `JobDetails` from an `astropy.table.row.Row` | Below is the the instruction that describes the task:
### Input:
Create a `JobDetails` from an `astropy.table.row.Row`
### Response:
def create_from_row(cls, table_row):
    """Create a `JobDetails` from an `astropy.table.row.Row` """
    kwargs = {name: table_row[name] for name in table_row.colnames}
    # Each *_refs column holds a (begin, end) pair; expand it into the
    # explicit *_ids array expected by the constructor.
    for refs_key, ids_key in (('infile_refs', 'infile_ids'),
                              ('outfile_refs', 'outfile_ids'),
                              ('rmfile_refs', 'rmfile_ids'),
                              ('intfile_refs', 'intfile_ids')):
        refs = kwargs.pop(refs_key)
        kwargs[ids_key] = np.arange(refs[0], refs[1])
    return cls(**kwargs)
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
"""
runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run
"""
clusters = []
nmf = NMF(k)
tsne = TSNE(2)
km = KMeans(k)
for i in range(n_runs):
w = nmf.fit_transform(data)
h = nmf.components_
tsne_wh = tsne.fit_transform(w.dot(h).T)
clust = km.fit_predict(tsne_wh)
clusters.append(clust)
clusterings = np.vstack(clusters)
consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
nmf_new = NMF(k, init='custom')
# TODO: find an initialization for the consensus W and H
init_w, init_h = nmf_init(data, consensus, k, init)
W = nmf_new.fit_transform(data, W=init_w, H=init_h)
H = nmf_new.components_
return W, H | runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run | Below is the the instruction that describes the task:
### Input:
runs tsne-consensus-NMF
1. run a bunch of NMFs, get W and H
2. run tsne + km on all WH matrices
3. run consensus clustering on all km results
4. use consensus clustering as initialization for a new run of NMF
5. return the W and H from the resulting NMF run
### Response:
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
    """
    runs tsne-consensus-NMF

    1. run a bunch of NMFs, get W and H
    2. run tsne + km on all WH matrices
    3. run consensus clustering on all km results
    4. use consensus clustering as initialization for a new run of NMF
    5. return the W and H from the resulting NMF run

    :param data: input matrix to factorize
    :param k: number of components / clusters
    :param n_runs: number of independent NMF + t-SNE + k-means rounds
    :param init: initialization scheme name forwarded to ``nmf_init``
    :returns: tuple ``(W, H)`` from the final consensus-initialized NMF
    """
    clusters = []
    nmf = NMF(k)
    tsne = TSNE(2)
    km = KMeans(k)
    for i in range(n_runs):
        w = nmf.fit_transform(data)
        h = nmf.components_
        # Embed the reconstructed matrix (transposed so samples are rows)
        # into 2D, then cluster the embedding.
        tsne_wh = tsne.fit_transform(w.dot(h).T)
        clust = km.fit_predict(tsne_wh)
        clusters.append(clust)
    # One clustering per row; consensus over all runs.
    clusterings = np.vstack(clusters)
    consensus = CE.cluster_ensembles(clusterings, verbose=False, N_clusters_max=k)
    nmf_new = NMF(k, init='custom')
    # TODO: find an initialization for the consensus W and H
    init_w, init_h = nmf_init(data, consensus, k, init)
    W = nmf_new.fit_transform(data, W=init_w, H=init_h)
    H = nmf_new.components_
    return W, H
def load_file(self, currency_file):
"""To be subclassed if alternate methods of loading data.
"""
if currency_file.startswith(('http://', 'https://')):
content = urlopen(currency_file).read()
else:
with open(currency_file, 'rb') as f:
content = f.read()
if currency_file.endswith('.zip'):
self.load_lines(get_lines_from_zip(content))
else:
self.load_lines(content.decode('utf-8').splitlines()) | To be subclassed if alternate methods of loading data. | Below is the the instruction that describes the task:
### Input:
To be subclassed if alternate methods of loading data.
### Response:
def load_file(self, currency_file):
    """Load currency data from a local path or an http(s) URL.

    To be subclassed if alternate methods of loading data.

    Zip payloads are unpacked via ``get_lines_from_zip``; anything else
    is decoded as UTF-8 and split into lines.
    """
    if currency_file.startswith(('http://', 'https://')):
        # Close the HTTP response deterministically (the previous code
        # leaked the connection returned by urlopen).
        with urlopen(currency_file) as response:
            content = response.read()
    else:
        with open(currency_file, 'rb') as f:
            content = f.read()
    if currency_file.endswith('.zip'):
        self.load_lines(get_lines_from_zip(content))
    else:
        self.load_lines(content.decode('utf-8').splitlines())
def _handle_display_data(self, msg):
"""
Reimplemented to handle communications between the figure explorer
and the kernel.
"""
img = None
data = msg['content']['data']
if 'image/svg+xml' in data:
fmt = 'image/svg+xml'
img = data['image/svg+xml']
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
fmt = 'image/png'
img = decodestring(data['image/png'].encode('ascii'))
elif 'image/jpeg' in data and self._jpg_supported:
fmt = 'image/jpeg'
img = decodestring(data['image/jpeg'].encode('ascii'))
if img is not None:
self.sig_new_inline_figure.emit(img, fmt)
if (self.figurebrowser is not None and
self.figurebrowser.mute_inline_plotting):
if not self.sended_render_message:
msg['content']['data']['text/plain'] = ''
self._append_html(
_('<br><hr>'
'\nFigures now render in the Plots pane by default. '
'To make them also appear inline in the Console, '
'uncheck "Mute Inline Plotting" under the Plots '
'pane options menu. \n'
'<hr><br>'), before_prompt=True)
self.sended_render_message = True
else:
msg['content']['data']['text/plain'] = ''
del msg['content']['data'][fmt]
return super(FigureBrowserWidget, self)._handle_display_data(msg) | Reimplemented to handle communications between the figure explorer
and the kernel. | Below is the the instruction that describes the task:
### Input:
Reimplemented to handle communications between the figure explorer
and the kernel.
### Response:
def _handle_display_data(self, msg):
    """
    Reimplemented to handle communications between the figure explorer
    and the kernel.

    Emits ``sig_new_inline_figure`` for any svg/png/jpeg payload and,
    when inline plotting is muted, strips the image data from the
    message so the superclass does not render the figure in the console.
    """
    img = None
    data = msg['content']['data']
    # Pick the first supported image format found in the payload.
    if 'image/svg+xml' in data:
        fmt = 'image/svg+xml'
        img = data['image/svg+xml']
    elif 'image/png' in data:
        # PNG data is base64 encoded as it passes over the network
        # in a JSON structure so we decode it.
        # NOTE(review): base64.decodestring was removed in Python 3.9 —
        # presumably aliased at import; confirm against the module header.
        fmt = 'image/png'
        img = decodestring(data['image/png'].encode('ascii'))
    elif 'image/jpeg' in data and self._jpg_supported:
        fmt = 'image/jpeg'
        img = decodestring(data['image/jpeg'].encode('ascii'))
    if img is not None:
        self.sig_new_inline_figure.emit(img, fmt)
        if (self.figurebrowser is not None and
                self.figurebrowser.mute_inline_plotting):
            if not self.sended_render_message:
                # First muted figure: blank the text fallback and show a
                # one-time notice explaining where figures now render.
                msg['content']['data']['text/plain'] = ''
                self._append_html(
                    _('<br><hr>'
                      '\nFigures now render in the Plots pane by default. '
                      'To make them also appear inline in the Console, '
                      'uncheck "Mute Inline Plotting" under the Plots '
                      'pane options menu. \n'
                      '<hr><br>'), before_prompt=True)
                self.sended_render_message = True
            else:
                msg['content']['data']['text/plain'] = ''
            # Drop the image payload so it is not rendered inline.
            del msg['content']['data'][fmt]
    return super(FigureBrowserWidget, self)._handle_display_data(msg)
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
) | Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list. | Below is the the instruction that describes the task:
### Input:
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
### Response:
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
if not auth_settings:
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys.append((auth_setting['key'], auth_setting['value']))
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
) |
def convert_npdist(self, node):
"""
Convert the given node into a Nodal Plane Distribution.
:param node: a nodalPlaneDist node
:returns: a :class:`openquake.hazardlib.geo.NodalPlane` instance
"""
with context(self.fname, node):
npdist = []
for np in node.nodalPlaneDist:
prob, strike, dip, rake = (
np['probability'], np['strike'], np['dip'], np['rake'])
npdist.append((prob, geo.NodalPlane(strike, dip, rake)))
if not self.spinning_floating:
npdist = [(1, npdist[0][1])] # consider the first nodal plane
return pmf.PMF(npdist) | Convert the given node into a Nodal Plane Distribution.
:param node: a nodalPlaneDist node
        :returns: a :class:`openquake.hazardlib.geo.NodalPlane` instance | Below is the instruction that describes the task:
### Input:
Convert the given node into a Nodal Plane Distribution.
:param node: a nodalPlaneDist node
:returns: a :class:`openquake.hazardlib.geo.NodalPlane` instance
### Response:
def convert_npdist(self, node):
"""
Convert the given node into a Nodal Plane Distribution.
:param node: a nodalPlaneDist node
:returns: a :class:`openquake.hazardlib.geo.NodalPlane` instance
"""
with context(self.fname, node):
npdist = []
for np in node.nodalPlaneDist:
prob, strike, dip, rake = (
np['probability'], np['strike'], np['dip'], np['rake'])
npdist.append((prob, geo.NodalPlane(strike, dip, rake)))
if not self.spinning_floating:
npdist = [(1, npdist[0][1])] # consider the first nodal plane
return pmf.PMF(npdist) |
def insert_basic_block(self, before, name=''):
"""Insert block before
"""
blk = Block(parent=self, name=name)
self.blocks.insert(before, blk)
        return blk | Insert block before | Below is the instruction that describes the task:
### Input:
Insert block before
### Response:
def insert_basic_block(self, before, name=''):
"""Insert block before
"""
blk = Block(parent=self, name=name)
self.blocks.insert(before, blk)
return blk |
def raid_alert(self, status, used, available, type):
"""RAID alert messages.
[available/used] means that ideally the array may have _available_
devices however, _used_ devices are in use.
Obviously when used >= available then things are good.
"""
if type == 'raid0':
return 'OK'
if status == 'inactive':
return 'CRITICAL'
if used is None or available is None:
return 'DEFAULT'
elif used < available:
return 'WARNING'
return 'OK' | RAID alert messages.
[available/used] means that ideally the array may have _available_
devices however, _used_ devices are in use.
        Obviously when used >= available then things are good. | Below is the instruction that describes the task:
### Input:
RAID alert messages.
[available/used] means that ideally the array may have _available_
devices however, _used_ devices are in use.
Obviously when used >= available then things are good.
### Response:
def raid_alert(self, status, used, available, type):
"""RAID alert messages.
[available/used] means that ideally the array may have _available_
devices however, _used_ devices are in use.
Obviously when used >= available then things are good.
"""
if type == 'raid0':
return 'OK'
if status == 'inactive':
return 'CRITICAL'
if used is None or available is None:
return 'DEFAULT'
elif used < available:
return 'WARNING'
return 'OK' |
def remote_media_url(self, with_ssl=False):
"""
Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url.
"""
protocol = 'http' if with_ssl is False else 'https'
url = (self.aws_bucket_cname and "%s://%s" or "%s://s3.amazonaws.com/%s") % (protocol, self.aws_bucket)
if self.aws_prefix:
url = "%s/%s" % (url, self.aws_prefix)
return url | Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
            with_ssl: (bool) If True, return an HTTPS url. | Below is the instruction that describes the task:
### Input:
Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url.
### Response:
def remote_media_url(self, with_ssl=False):
"""
Returns the base remote media URL. In this case, we can safely make
some assumptions on the URL string based on bucket names, and having
public ACL on.
args:
with_ssl: (bool) If True, return an HTTPS url.
"""
protocol = 'http' if with_ssl is False else 'https'
url = (self.aws_bucket_cname and "%s://%s" or "%s://s3.amazonaws.com/%s") % (protocol, self.aws_bucket)
if self.aws_prefix:
url = "%s/%s" % (url, self.aws_prefix)
return url |
def nhill_i(self,x,threshold=0.1,power=2):
""" Normalized inhibiting hill function.
Is equivalent to 1-nhill_a(self,x,power,threshold).
"""
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return threshold_pow / (x_pow + threshold_pow) * (1 - x_pow) | Normalized inhibiting hill function.
        Is equivalent to 1-nhill_a(self,x,power,threshold). | Below is the instruction that describes the task:
### Input:
Normalized inhibiting hill function.
Is equivalent to 1-nhill_a(self,x,power,threshold).
### Response:
def nhill_i(self,x,threshold=0.1,power=2):
""" Normalized inhibiting hill function.
Is equivalent to 1-nhill_a(self,x,power,threshold).
"""
x_pow = np.power(x,power)
threshold_pow = np.power(threshold,power)
return threshold_pow / (x_pow + threshold_pow) * (1 - x_pow) |
def green(cls):
"Make the text foreground color green."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREEN
        cls._set_text_attributes(wAttributes) | Make the text foreground color green. | Below is the instruction that describes the task:
### Input:
Make the text foreground color green.
### Response:
def green(cls):
"Make the text foreground color green."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREEN
cls._set_text_attributes(wAttributes) |
def save(self, *args, **kwargs):
"""Override the default ``save`` method."""
if not self.status:
self.status = self.DRAFT
# Published pages should always have a publication date
if self.publication_date is None and self.status == self.PUBLISHED:
self.publication_date = now_utc()
# Drafts should not, unless they have been set to the future
if self.status == self.DRAFT:
if settings.PAGE_SHOW_START_DATE:
if (self.publication_date and
self.publication_date <= now_utc()):
self.publication_date = None
else:
self.publication_date = None
self.last_modification_date = now_utc()
# let's assume there is no more broken links after a save
cache.delete(self.PAGE_BROKEN_LINK_KEY % self.id)
super(Page, self).save(*args, **kwargs)
# fix sites many-to-many link when the're hidden from the form
if settings.PAGE_HIDE_SITES and self.sites.count() == 0:
            self.sites.add(Site.objects.get(pk=settings.SITE_ID)) | Override the default ``save`` method. | Below is the instruction that describes the task:
### Input:
Override the default ``save`` method.
### Response:
def save(self, *args, **kwargs):
"""Override the default ``save`` method."""
if not self.status:
self.status = self.DRAFT
# Published pages should always have a publication date
if self.publication_date is None and self.status == self.PUBLISHED:
self.publication_date = now_utc()
# Drafts should not, unless they have been set to the future
if self.status == self.DRAFT:
if settings.PAGE_SHOW_START_DATE:
if (self.publication_date and
self.publication_date <= now_utc()):
self.publication_date = None
else:
self.publication_date = None
self.last_modification_date = now_utc()
# let's assume there is no more broken links after a save
cache.delete(self.PAGE_BROKEN_LINK_KEY % self.id)
super(Page, self).save(*args, **kwargs)
# fix sites many-to-many link when the're hidden from the form
if settings.PAGE_HIDE_SITES and self.sites.count() == 0:
self.sites.add(Site.objects.get(pk=settings.SITE_ID)) |
def scan_cgroups(subsys_name, filters=list()):
"""
It returns a control group hierarchy which belong to the subsys_name.
When collecting cgroups, filters are applied to the cgroups. See pydoc
of apply_filters method of CGroup for more information about the filters.
"""
status = SubsystemStatus()
if subsys_name not in status.get_all():
raise NoSuchSubsystemError("No such subsystem found: " + subsys_name)
if subsys_name not in status.get_available():
raise EnvironmentError("Disabled in the kernel: " + subsys_name)
if subsys_name not in status.get_enabled():
raise EnvironmentError("Not enabled in the system: " + subsys_name)
subsystem = _get_subsystem(subsys_name)
mount_point = status.get_path(subsys_name)
return _scan_cgroups_recursive(subsystem, mount_point, mount_point, filters) | It returns a control group hierarchy which belong to the subsys_name.
When collecting cgroups, filters are applied to the cgroups. See pydoc
    of apply_filters method of CGroup for more information about the filters. | Below is the instruction that describes the task:
### Input:
It returns a control group hierarchy which belong to the subsys_name.
When collecting cgroups, filters are applied to the cgroups. See pydoc
of apply_filters method of CGroup for more information about the filters.
### Response:
def scan_cgroups(subsys_name, filters=list()):
"""
It returns a control group hierarchy which belong to the subsys_name.
When collecting cgroups, filters are applied to the cgroups. See pydoc
of apply_filters method of CGroup for more information about the filters.
"""
status = SubsystemStatus()
if subsys_name not in status.get_all():
raise NoSuchSubsystemError("No such subsystem found: " + subsys_name)
if subsys_name not in status.get_available():
raise EnvironmentError("Disabled in the kernel: " + subsys_name)
if subsys_name not in status.get_enabled():
raise EnvironmentError("Not enabled in the system: " + subsys_name)
subsystem = _get_subsystem(subsys_name)
mount_point = status.get_path(subsys_name)
return _scan_cgroups_recursive(subsystem, mount_point, mount_point, filters) |
def max_n_day_precipitation_amount(pr, window=1, freq='YS'):
r"""Highest precipitation amount cumulated over a n-day moving window.
Calculate the n-day rolling sum of the original daily total precipitation series
and determine the maximum value over each period.
Parameters
----------
da : xarray.DataArray
Daily precipitation values [Kg m-2 s-1] or [mm]
window : int
Window size in days.
freq : str, optional
Resampling frequency : default 'YS' (yearly)
Returns
-------
xarray.DataArray
The highest cumulated n-day precipitation value at the given time frequency.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the highest 5-day total precipitation
at an annual frequency:
>>> da = xr.open_dataset('pr.day.nc').pr
>>> window = 5
>>> output = max_n_day_precipitation_amount(da, window, freq="YS")
"""
# rolling sum of the values
arr = pr.rolling(time=window, center=False).sum()
out = arr.resample(time=freq).max(dim='time', keep_attrs=True)
out.attrs['units'] = pr.units
# Adjust values and units to make sure they are daily
return utils.pint_multiply(out, 1 * units.day, 'mm') | r"""Highest precipitation amount cumulated over a n-day moving window.
Calculate the n-day rolling sum of the original daily total precipitation series
and determine the maximum value over each period.
Parameters
----------
da : xarray.DataArray
Daily precipitation values [Kg m-2 s-1] or [mm]
window : int
Window size in days.
freq : str, optional
Resampling frequency : default 'YS' (yearly)
Returns
-------
xarray.DataArray
The highest cumulated n-day precipitation value at the given time frequency.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the highest 5-day total precipitation
at an annual frequency:
>>> da = xr.open_dataset('pr.day.nc').pr
>>> window = 5
    >>> output = max_n_day_precipitation_amount(da, window, freq="YS") | Below is the instruction that describes the task:
### Input:
r"""Highest precipitation amount cumulated over a n-day moving window.
Calculate the n-day rolling sum of the original daily total precipitation series
and determine the maximum value over each period.
Parameters
----------
da : xarray.DataArray
Daily precipitation values [Kg m-2 s-1] or [mm]
window : int
Window size in days.
freq : str, optional
Resampling frequency : default 'YS' (yearly)
Returns
-------
xarray.DataArray
The highest cumulated n-day precipitation value at the given time frequency.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the highest 5-day total precipitation
at an annual frequency:
>>> da = xr.open_dataset('pr.day.nc').pr
>>> window = 5
>>> output = max_n_day_precipitation_amount(da, window, freq="YS")
### Response:
def max_n_day_precipitation_amount(pr, window=1, freq='YS'):
r"""Highest precipitation amount cumulated over a n-day moving window.
Calculate the n-day rolling sum of the original daily total precipitation series
and determine the maximum value over each period.
Parameters
----------
da : xarray.DataArray
Daily precipitation values [Kg m-2 s-1] or [mm]
window : int
Window size in days.
freq : str, optional
Resampling frequency : default 'YS' (yearly)
Returns
-------
xarray.DataArray
The highest cumulated n-day precipitation value at the given time frequency.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the highest 5-day total precipitation
at an annual frequency:
>>> da = xr.open_dataset('pr.day.nc').pr
>>> window = 5
>>> output = max_n_day_precipitation_amount(da, window, freq="YS")
"""
# rolling sum of the values
arr = pr.rolling(time=window, center=False).sum()
out = arr.resample(time=freq).max(dim='time', keep_attrs=True)
out.attrs['units'] = pr.units
# Adjust values and units to make sure they are daily
return utils.pint_multiply(out, 1 * units.day, 'mm') |
def parse_function_type_comment(type_comment: str) -> Optional[FunctionType]:
"""Given a correct type comment, obtain a FunctionType object"""
if _ast_py3 is None:
return None
func_type = _ast_py3.parse(type_comment, "<type_comment>", "func_type")
    return FunctionType(argtypes=func_type.argtypes, returns=func_type.returns) | Given a correct type comment, obtain a FunctionType object | Below is the instruction that describes the task:
### Input:
Given a correct type comment, obtain a FunctionType object
### Response:
def parse_function_type_comment(type_comment: str) -> Optional[FunctionType]:
"""Given a correct type comment, obtain a FunctionType object"""
if _ast_py3 is None:
return None
func_type = _ast_py3.parse(type_comment, "<type_comment>", "func_type")
return FunctionType(argtypes=func_type.argtypes, returns=func_type.returns) |
def endings(self):
"""The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING) | The list of word endings.
Ambiguous cases are separated with pipe character by default.
        Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries. | Below is the instruction that describes the task:
### Input:
The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
### Response:
def endings(self):
"""The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING) |
def get_variant(self, return_deaf):
"""
Возвращает вариант буквы.
:return_deaf:
True - вернуть глухой вариант. Если False - звонкий.
"""
return_deaf = bool(return_deaf)
for variants in self.sonorus_deaf_pairs:
if self.__letter in variants:
return variants[return_deaf]
return self.__letter | Возвращает вариант буквы.
:return_deaf:
        True - вернуть глухой вариант. Если False - звонкий. | Below is the instruction that describes the task:
### Input:
Возвращает вариант буквы.
:return_deaf:
True - вернуть глухой вариант. Если False - звонкий.
### Response:
def get_variant(self, return_deaf):
"""
Возвращает вариант буквы.
:return_deaf:
True - вернуть глухой вариант. Если False - звонкий.
"""
return_deaf = bool(return_deaf)
for variants in self.sonorus_deaf_pairs:
if self.__letter in variants:
return variants[return_deaf]
return self.__letter |
def get_code_hash(code: str) -> str:
"""
:param code: bytecode
:return: Returns hash of the given bytecode
"""
code = code[2:] if code[:2] == "0x" else code
try:
keccak = sha3.keccak_256()
keccak.update(bytes.fromhex(code))
return "0x" + keccak.hexdigest()
except ValueError:
log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(code))
return "" | :param code: bytecode
    :return: Returns hash of the given bytecode | Below is the instruction that describes the task:
### Input:
:param code: bytecode
:return: Returns hash of the given bytecode
### Response:
def get_code_hash(code: str) -> str:
"""
:param code: bytecode
:return: Returns hash of the given bytecode
"""
code = code[2:] if code[:2] == "0x" else code
try:
keccak = sha3.keccak_256()
keccak.update(bytes.fromhex(code))
return "0x" + keccak.hexdigest()
except ValueError:
log.debug("Unable to change the bytecode to bytes. Bytecode: {}".format(code))
return "" |
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation.
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
momentum = tf.zeros_like(x)
adv_x = x
# Fix labels to the first model predictions for loss computation
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
"""Iterate until number of iterations completed"""
return tf.less(i, self.nb_iter)
def body(i, ax, m):
"""Do a momentum step"""
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, ax)
# Normalize current gradient and add it to the accumulated gradient
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasn't been tested for ord=1."
"It's not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
# Update and clip adversarial example in current iteration
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x | Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
    :param kwargs: Keyword arguments. See `parse_params` for documentation. | Below is the instruction that describes the task:
### Input:
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation.
### Response:
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: Keyword arguments. See `parse_params` for documentation.
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
asserts = []
# If a data range was specified, check that the input was in that range
if self.clip_min is not None:
asserts.append(utils_tf.assert_greater_equal(x,
tf.cast(self.clip_min,
x.dtype)))
if self.clip_max is not None:
asserts.append(utils_tf.assert_less_equal(x,
tf.cast(self.clip_max,
x.dtype)))
# Initialize loop variables
momentum = tf.zeros_like(x)
adv_x = x
# Fix labels to the first model predictions for loss computation
y, _nb_classes = self.get_or_guess_labels(x, kwargs)
y = y / reduce_sum(y, 1, keepdims=True)
targeted = (self.y_target is not None)
def cond(i, _, __):
"""Iterate until number of iterations completed"""
return tf.less(i, self.nb_iter)
def body(i, ax, m):
"""Do a momentum step"""
logits = self.model.get_logits(ax)
loss = softmax_cross_entropy_with_logits(labels=y, logits=logits)
if targeted:
loss = -loss
# Define gradient of loss wrt input
grad, = tf.gradients(loss, ax)
# Normalize current gradient and add it to the accumulated gradient
red_ind = list(range(1, len(grad.get_shape())))
avoid_zero_div = tf.cast(1e-12, grad.dtype)
grad = grad / tf.maximum(
avoid_zero_div,
reduce_mean(tf.abs(grad), red_ind, keepdims=True))
m = self.decay_factor * m + grad
optimal_perturbation = optimize_linear(m, self.eps_iter, self.ord)
if self.ord == 1:
raise NotImplementedError("This attack hasn't been tested for ord=1."
"It's not clear that FGM makes a good inner "
"loop step for iterative optimization since "
"it updates just one coordinate at a time.")
# Update and clip adversarial example in current iteration
ax = ax + optimal_perturbation
ax = x + utils_tf.clip_eta(ax - x, self.ord, self.eps)
if self.clip_min is not None and self.clip_max is not None:
ax = utils_tf.clip_by_value(ax, self.clip_min, self.clip_max)
ax = tf.stop_gradient(ax)
return i + 1, ax, m
_, adv_x, _ = tf.while_loop(
cond, body, (tf.zeros([]), adv_x, momentum), back_prop=True,
maximum_iterations=self.nb_iter)
if self.sanity_checks:
with tf.control_dependencies(asserts):
adv_x = tf.identity(adv_x)
return adv_x |
def strace_data_access_event(self,
operation,
address,
data,
data_mask=None,
access_width=4,
address_range=0):
"""Sets an event to trigger trace logic when data access is made.
Data access corresponds to either a read or write.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the load/store data.
data (int): the data to be compared the event data to.
data_mask (int): optional bitmask specifying bits to ignore in
comparison.
acess_width (int): optional access width for the data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
JLinkException: on error.
"""
cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET
event_info = structs.JLinkStraceEventInfo()
event_info.Type = enums.JLinkStraceEvent.DATA_ACCESS
event_info.Op = operation
event_info.AccessSize = int(access_width)
event_info.Addr = int(address)
event_info.Data = int(data)
event_info.DataMask = int(data_mask or 0)
event_info.AddrRangeSize = int(address_range)
handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))
if handle < 0:
raise errors.JLinkException(handle)
return handle | Sets an event to trigger trace logic when data access is made.
Data access corresponds to either a read or write.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the load/store data.
data (int): the data to be compared the event data to.
data_mask (int): optional bitmask specifying bits to ignore in
comparison.
acess_width (int): optional access width for the data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
          JLinkException: on error. | Below is the instruction that describes the task:
### Input:
Sets an event to trigger trace logic when data access is made.
Data access corresponds to either a read or write.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the load/store data.
data (int): the data to be compared the event data to.
data_mask (int): optional bitmask specifying bits to ignore in
comparison.
acess_width (int): optional access width for the data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
JLinkException: on error.
### Response:
def strace_data_access_event(self,
operation,
address,
data,
data_mask=None,
access_width=4,
address_range=0):
"""Sets an event to trigger trace logic when data access is made.
Data access corresponds to either a read or write.
Args:
self (JLink): the ``JLink`` instance.
operation (int): one of the operations in ``JLinkStraceOperation``.
address (int): the address of the load/store data.
data (int): the data to be compared the event data to.
data_mask (int): optional bitmask specifying bits to ignore in
comparison.
acess_width (int): optional access width for the data.
address_range (int): optional range of address to trigger event on.
Returns:
An integer specifying the trace event handle. This handle should be
retained in order to clear the event at a later time.
Raises:
JLinkException: on error.
"""
cmd = enums.JLinkStraceCommand.TRACE_EVENT_SET
event_info = structs.JLinkStraceEventInfo()
event_info.Type = enums.JLinkStraceEvent.DATA_ACCESS
event_info.Op = operation
event_info.AccessSize = int(access_width)
event_info.Addr = int(address)
event_info.Data = int(data)
event_info.DataMask = int(data_mask or 0)
event_info.AddrRangeSize = int(address_range)
handle = self._dll.JLINK_STRACE_Control(cmd, ctypes.byref(event_info))
if handle < 0:
raise errors.JLinkException(handle)
return handle |
def list_processed_parameter_groups(self):
"""
Returns the existing parameter groups.
:rtype: ~collections.Iterable[str]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
path = '/archive/{}/parameter-groups'.format(self._instance)
response = self._client.get_proto(path=path)
message = archive_pb2.ParameterGroupInfo()
message.ParseFromString(response.content)
groups = getattr(message, 'group')
return iter(groups) | Returns the existing parameter groups.
        :rtype: ~collections.Iterable[str] | Below is the instruction that describes the task:
### Input:
Returns the existing parameter groups.
:rtype: ~collections.Iterable[str]
### Response:
def list_processed_parameter_groups(self):
"""
Returns the existing parameter groups.
:rtype: ~collections.Iterable[str]
"""
# Server does not do pagination on listings of this resource.
# Return an iterator anyway for similarity with other API methods
path = '/archive/{}/parameter-groups'.format(self._instance)
response = self._client.get_proto(path=path)
message = archive_pb2.ParameterGroupInfo()
message.ParseFromString(response.content)
groups = getattr(message, 'group')
return iter(groups) |
def get_name(node):
"""Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
"""
if isinstance(node, gast.Name):
return node.id
elif isinstance(node, (gast.Subscript, gast.Attribute)):
return get_name(node.value)
else:
raise TypeError | Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
    The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`. | Below is the instruction that describes the task:
### Input:
Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
### Response:
def get_name(node):
"""Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
"""
if isinstance(node, gast.Name):
return node.id
elif isinstance(node, (gast.Subscript, gast.Attribute)):
return get_name(node.value)
else:
raise TypeError |
def download(
state, host, hostname, filename,
local_filename=None, force=False,
ssh_keyscan=False, ssh_user=None,
):
'''
Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
'''
local_filename = local_filename or filename
# Get local file info
local_file_info = host.fact.file(local_filename)
# Local file exists but isn't a file?
if local_file_info is False:
raise OperationError(
'Local destination {0} already exists and is not a file'.format(
local_filename,
),
)
# If the local file exists and we're not forcing a re-download, no-op
if local_file_info and not force:
return
# Figure out where we're connecting (host or user@host)
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
if ssh_keyscan:
yield keyscan(state, host, hostname)
# Download the file with scp
yield 'scp {0}:{1} {2}'.format(connection_target, filename, local_filename) | Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
    + ssh_user: connect with this user | Below is the instruction that describes the task:
### Input:
Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
### Response:
def download(
    state, host, hostname, filename,
    local_filename=None, force=False,
    ssh_keyscan=False, ssh_user=None,
):
    '''
    Download files from other servers using ``scp``.

    + hostname: hostname to download from
    + filename: file to download
    + local_filename: where to download the file to (defaults to ``filename``)
    + force: always download the file, even if present locally
    + ssh_keyscan: execute ``ssh.keyscan`` before downloading the file
    + ssh_user: connect with this user
    '''
    destination = local_filename or filename

    # A local path that exists but is not a regular file is an error.
    destination_info = host.fact.file(destination)
    if destination_info is False:
        raise OperationError(
            'Local destination {0} already exists and is not a file'.format(
                destination,
            ),
        )

    # Already downloaded and not forced: nothing to do.
    if destination_info and not force:
        return

    # Build the scp source spec: "host" or "user@host".
    if ssh_user:
        source_host = '@'.join((ssh_user, hostname))
    else:
        source_host = hostname

    if ssh_keyscan:
        yield keyscan(state, host, hostname)

    # Fetch the file with scp.
    yield 'scp {0}:{1} {2}'.format(source_host, filename, destination)
def build_attachment2():
"""Build attachment mock."""
attachment = Attachment()
attachment.content = "BwdW"
attachment.type = "image/png"
attachment.filename = "banner.png"
attachment.disposition = "inline"
attachment.content_id = "Banner"
return attachment | Build attachment mock. | Below is the instruction that describes the task:
### Input:
Build attachment mock.
### Response:
def build_attachment2():
    """Build attachment mock: an inline PNG banner attachment."""
    attachment = Attachment()
    # Assign the fixture fields in one pass.
    for field_name, value in (
        ("content", "BwdW"),
        ("type", "image/png"),
        ("filename", "banner.png"),
        ("disposition", "inline"),
        ("content_id", "Banner"),
    ):
        setattr(attachment, field_name, value)
    return attachment
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args | Get parameter names for the estimator | Below is the the instruction that describes the task:
### Input:
Get parameter names for the estimator
### Response:
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args |
def find(self, filter=None, page=1, per_page=10, fields=None, context=None):
"""
Find records that match the filter.
Pro Tip: The fields could have nested fields names if the field is
a relationship type. For example if you were looking up an order
and also want to get the shipping address country then fields would be:
`['shipment_address', 'shipment_address.country']`
but country in this case is the ID of the country which is not very
useful if you don't already have a map. You can fetch the country code
by adding `'shipment_address.country.code'` to the fields.
:param filter: A domain expression (Refer docs for domain syntax)
:param page: The page to fetch to get paginated results
:param per_page: The number of records to fetch per page
:param fields: A list of field names to fetch.
:param context: Any overrides to the context.
"""
if filter is None:
filter = []
rv = self.client.session.get(
self.path,
params={
'filter': dumps(filter or []),
'page': page,
'per_page': per_page,
'field': fields,
'context': dumps(context or self.client.context),
}
)
response_received.send(rv)
return rv | Find records that match the filter.
Pro Tip: The fields could have nested fields names if the field is
a relationship type. For example if you were looking up an order
and also want to get the shipping address country then fields would be:
`['shipment_address', 'shipment_address.country']`
but country in this case is the ID of the country which is not very
useful if you don't already have a map. You can fetch the country code
by adding `'shipment_address.country.code'` to the fields.
:param filter: A domain expression (Refer docs for domain syntax)
:param page: The page to fetch to get paginated results
:param per_page: The number of records to fetch per page
:param fields: A list of field names to fetch.
:param context: Any overrides to the context. | Below is the the instruction that describes the task:
### Input:
Find records that match the filter.
Pro Tip: The fields could have nested fields names if the field is
a relationship type. For example if you were looking up an order
and also want to get the shipping address country then fields would be:
`['shipment_address', 'shipment_address.country']`
but country in this case is the ID of the country which is not very
useful if you don't already have a map. You can fetch the country code
by adding `'shipment_address.country.code'` to the fields.
:param filter: A domain expression (Refer docs for domain syntax)
:param page: The page to fetch to get paginated results
:param per_page: The number of records to fetch per page
:param fields: A list of field names to fetch.
:param context: Any overrides to the context.
### Response:
def find(self, filter=None, page=1, per_page=10, fields=None, context=None):
    """
    Find records that match the filter.

    Pro Tip: The fields could have nested fields names if the field is
    a relationship type. For example if you were looking up an order
    and also want to get the shipping address country then fields would be:
    `['shipment_address', 'shipment_address.country']`
    but country in this case is the ID of the country which is not very
    useful if you don't already have a map. You can fetch the country code
    by adding `'shipment_address.country.code'` to the fields.

    :param filter: A domain expression (Refer docs for domain syntax)
    :param page: The page to fetch to get paginated results
    :param per_page: The number of records to fetch per page
    :param fields: A list of field names to fetch.
    :param context: Any overrides to the context.
    """
    domain = [] if filter is None else filter
    # Serialize the filter and context; fall back to the client defaults.
    query_params = {
        'filter': dumps(domain or []),
        'page': page,
        'per_page': per_page,
        'field': fields,
        'context': dumps(context or self.client.context),
    }
    response = self.client.session.get(self.path, params=query_params)
    # Notify listeners that a response arrived before handing it back.
    response_received.send(response)
    return response
def whois_domains(self, domains):
"""Calls WHOIS domain end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_result}
"""
api_name = 'opendns-whois-domain'
fmt_url_path = u'whois/{0}'
return self._multi_get(api_name, fmt_url_path, domains) | Calls WHOIS domain end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_result} | Below is the the instruction that describes the task:
### Input:
Calls WHOIS domain end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_result}
### Response:
def whois_domains(self, domains):
    """Calls WHOIS domain end point

    Args:
        domains: An enumerable of domains
    Returns:
        A dict of {domain: domain_result}
    """
    # Fan the lookup out over the batched-GET helper.
    return self._multi_get('opendns-whois-domain', u'whois/{0}', domains)
def measure(self, geometry):
"""Measure the length or the area of a geometry.
:param geometry: The geometry.
:type geometry: QgsGeometry
:return: The geometric size in the expected exposure unit.
:rtype: float
"""
message = 'Size with NaN value : geometry valid={valid}, WKT={wkt}'
feature_size = 0
if geometry.isMultipart():
# Be careful, the size calculator is not working well on a
# multipart.
# So we compute the size part per part. See ticket #3812
for single in geometry.asGeometryCollection():
if self.geometry_type == QgsWkbTypes.LineGeometry:
geometry_size = self.calculator.measureLength(single)
else:
geometry_size = self.calculator.measureArea(single)
if not isnan(geometry_size):
feature_size += geometry_size
else:
LOGGER.debug(message.format(
valid=single.isGeosValid(),
wkt=single.asWkt()))
else:
if self.geometry_type == QgsWkbTypes.LineGeometry:
geometry_size = self.calculator.measureLength(geometry)
else:
geometry_size = self.calculator.measureArea(geometry)
if not isnan(geometry_size):
feature_size = geometry_size
else:
LOGGER.debug(message.format(
valid=geometry.isGeosValid(),
wkt=geometry.asWkt()))
feature_size = round(feature_size)
if self.output_unit:
if self.output_unit != self.default_unit:
feature_size = convert_unit(
feature_size, self.default_unit, self.output_unit)
return feature_size | Measure the length or the area of a geometry.
:param geometry: The geometry.
:type geometry: QgsGeometry
:return: The geometric size in the expected exposure unit.
:rtype: float | Below is the the instruction that describes the task:
### Input:
Measure the length or the area of a geometry.
:param geometry: The geometry.
:type geometry: QgsGeometry
:return: The geometric size in the expected exposure unit.
:rtype: float
### Response:
def measure(self, geometry):
    """Measure the length or the area of a geometry.

    :param geometry: The geometry.
    :type geometry: QgsGeometry

    :return: The geometric size in the expected exposure unit.
    :rtype: float
    """
    nan_message = 'Size with NaN value : geometry valid={valid}, WKT={wkt}'

    def part_size(part):
        # Measure one single-part geometry with the distance calculator.
        if self.geometry_type == QgsWkbTypes.LineGeometry:
            return self.calculator.measureLength(part)
        return self.calculator.measureArea(part)

    total = 0
    if geometry.isMultipart():
        # The size calculator is not reliable on a multipart geometry, so
        # accumulate the size part by part. See ticket #3812
        for part in geometry.asGeometryCollection():
            size = part_size(part)
            if isnan(size):
                LOGGER.debug(nan_message.format(
                    valid=part.isGeosValid(),
                    wkt=part.asWkt()))
            else:
                total += size
    else:
        size = part_size(geometry)
        if isnan(size):
            LOGGER.debug(nan_message.format(
                valid=geometry.isGeosValid(),
                wkt=geometry.asWkt()))
        else:
            total = size
    total = round(total)
    # Convert from the measurement unit to the requested output unit.
    if self.output_unit and self.output_unit != self.default_unit:
        total = convert_unit(total, self.default_unit, self.output_unit)
    return total
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config) | Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8 | Below is the the instruction that describes the task:
### Input:
Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
### Response:
def make_config(self, instance_relative=False):
    """Used to create the config attribute by the Flask constructor.

    The `instance_relative` parameter is passed in from the constructor
    of Flask (there named `instance_relative_config`) and indicates if
    the config should be relative to the instance path or the root path
    of the application.

    .. versionadded:: 0.8
    """
    # Anchor the config either at the instance folder or the app root.
    base_path = self.instance_path if instance_relative else self.root_path
    return Config(base_path, self.default_config)
def pick_orientation(img1, img2, spacing, desired_aspect=1.618):
"""Pick a tiling orientation for two images.
Returns either 'lr' for left-and-right, or 'tb' for top-and-bottom.
Picks the one that makes the combined image have a better aspect
ratio, where 'better' is defined as 'closer to 1:1.618'.
"""
w1, h1 = img1.size
w2, h2 = img2.size
size_a = (w1 + spacing + w2, max(h1, h2, 1))
size_b = (max(w1, w2, 1), h1 + spacing + h2)
aspect_a = size_a[0] / size_a[1]
aspect_b = size_b[0] / size_b[1]
goodness_a = min(desired_aspect, aspect_a) / max(desired_aspect, aspect_a)
goodness_b = min(desired_aspect, aspect_b) / max(desired_aspect, aspect_b)
return 'lr' if goodness_a >= goodness_b else 'tb' | Pick a tiling orientation for two images.
Returns either 'lr' for left-and-right, or 'tb' for top-and-bottom.
Picks the one that makes the combined image have a better aspect
ratio, where 'better' is defined as 'closer to 1:1.618'. | Below is the the instruction that describes the task:
### Input:
Pick a tiling orientation for two images.
Returns either 'lr' for left-and-right, or 'tb' for top-and-bottom.
Picks the one that makes the combined image have a better aspect
ratio, where 'better' is defined as 'closer to 1:1.618'.
### Response:
def pick_orientation(img1, img2, spacing, desired_aspect=1.618):
    """Pick a tiling orientation for two images.

    Returns either 'lr' for left-and-right, or 'tb' for top-and-bottom.
    Picks the one that makes the combined image have a better aspect
    ratio, where 'better' is defined as 'closer to 1:1.618'.
    """
    def goodness(width, height):
        # Closeness of width/height to the desired aspect, in (0, 1].
        aspect = width / height
        return min(desired_aspect, aspect) / max(desired_aspect, aspect)

    w1, h1 = img1.size
    w2, h2 = img2.size
    # Candidate combined canvas sizes for each layout (spacing included).
    side_by_side = goodness(w1 + spacing + w2, max(h1, h2, 1))
    stacked = goodness(max(w1, w2, 1), h1 + spacing + h2)
    return 'lr' if side_by_side >= stacked else 'tb'
def set_mode_by_id(self, zone_id, mode):
"""
Set the mode by using the zone id
Supported zones are available in the enum Mode
"""
if not self._do_auth():
raise RuntimeError("Unable to login")
data = {
"ZoneId": zone_id,
"mode": mode.value
}
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
'Authorization':
'Bearer ' + self.login_data['token']['accessToken']
}
url = self.api_base_url + "Home/SetZoneMode"
response = requests.post(url, data=json.dumps(
data), headers=headers, timeout=10)
if response.status_code != 200:
return False
mode_data = response.json()
return mode_data.get("isSuccess", False) | Set the mode by using the zone id
Supported zones are available in the enum Mode | Below is the the instruction that describes the task:
### Input:
Set the mode by using the zone id
Supported zones are available in the enum Mode
### Response:
def set_mode_by_id(self, zone_id, mode):
    """
    Set the mode by using the zone id

    Supported zones are available in the enum Mode
    """
    if not self._do_auth():
        raise RuntimeError("Unable to login")

    payload = json.dumps({
        "ZoneId": zone_id,
        "mode": mode.value,
    })
    request_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        'Authorization':
            'Bearer ' + self.login_data['token']['accessToken'],
    }
    response = requests.post(
        self.api_base_url + "Home/SetZoneMode",
        data=payload, headers=request_headers, timeout=10)
    # A non-200 answer means the request itself failed.
    if response.status_code != 200:
        return False
    # The API reports success in the JSON body.
    return response.json().get("isSuccess", False)
def plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff, filename=None):
''' Fit spline to the profile histogramed data, differentiate, determine MPV and plot.
Parameters
----------
x_p, y_p : array like
data points (x,y)
y_p_e : array like
error bars in y
'''
logging.info('Plot results')
plt.close()
p1 = plt.errorbar(x_p * analysis_configuration['vcal_calibration'], y_p, yerr=y_p_e, fmt='o') # plot data with error bars
p2, = plt.plot(x_p * analysis_configuration['vcal_calibration'], smoothed_data, '-r') # plot smoothed data
factor = np.amax(y_p) / np.amin(smoothed_data_diff) * 1.1
p3, = plt.plot(x_p * analysis_configuration['vcal_calibration'], factor * smoothed_data_diff, '-', lw=2) # plot differentiated data
mpv_index = np.argmax(-analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1))
p4, = plt.plot([x_p[mpv_index] * analysis_configuration['vcal_calibration'], x_p[mpv_index] * analysis_configuration['vcal_calibration']], [0, factor * smoothed_data_diff[mpv_index]], 'k-', lw=2)
text = 'MPV ' + str(int(x_p[mpv_index] * analysis_configuration['vcal_calibration'])) + ' e'
plt.text(1.01 * x_p[mpv_index] * analysis_configuration['vcal_calibration'], -10. * smoothed_data_diff[mpv_index], text, ha='left')
plt.legend([p1, p2, p3, p4], ['data', 'smoothed spline', 'spline differentiation', text], prop={'size': 12}, loc=0)
plt.title('\'Single hit cluster\'-occupancy for different pixel thresholds')
plt.xlabel('Pixel threshold [e]')
plt.ylabel('Single hit cluster occupancy [a.u.]')
plt.ylim(0, np.amax(y_p) * 1.15)
if filename is None:
plt.show()
else:
filename.savefig(plt.gcf())
return smoothed_data_diff | Fit spline to the profile histogramed data, differentiate, determine MPV and plot.
Parameters
----------
x_p, y_p : array like
data points (x,y)
y_p_e : array like
error bars in y | Below is the the instruction that describes the task:
### Input:
Fit spline to the profile histogramed data, differentiate, determine MPV and plot.
Parameters
----------
x_p, y_p : array like
data points (x,y)
y_p_e : array like
error bars in y
### Response:
def plot_result(x_p, y_p, y_p_e, smoothed_data, smoothed_data_diff, filename=None):
    ''' Fit spline to the profile histogrammed data, differentiate, determine MPV and plot.

    Parameters
    ----------
    x_p, y_p : array like
        data points (x,y)
    y_p_e : array like
        error bars in y
    smoothed_data : array like
        the smoothing spline evaluated at x_p
    smoothed_data_diff : array like
        the first derivative of the smoothing spline at x_p
    filename : PdfPages-like object or None
        if None the figure is shown interactively, otherwise the figure is
        appended via ``filename.savefig``

    Returns
    -------
    array like
        the ``smoothed_data_diff`` argument, unchanged
    '''
    logging.info('Plot results')
    plt.close()
    # x values are converted from PlsrDAC to electrons with the module-level
    # analysis_configuration['vcal_calibration'] factor throughout.
    p1 = plt.errorbar(x_p * analysis_configuration['vcal_calibration'], y_p, yerr=y_p_e, fmt='o') # plot data with error bars
    p2, = plt.plot(x_p * analysis_configuration['vcal_calibration'], smoothed_data, '-r') # plot smoothed data
    # Scale factor so the (negative) derivative curve fits on the data axis.
    factor = np.amax(y_p) / np.amin(smoothed_data_diff) * 1.1
    p3, = plt.plot(x_p * analysis_configuration['vcal_calibration'], factor * smoothed_data_diff, '-', lw=2) # plot differentiated data
    # MPV = steepest descent of the smoothed curve; the derivative is
    # recomputed here (order-3 spline) rather than taken from the argument —
    # presumably to pin the MPV independent of the caller's settings; confirm.
    mpv_index = np.argmax(-analysis_utils.smooth_differentiation(x_p, y_p, weigths=1 / y_p_e, order=3, smoothness=analysis_configuration['smoothness'], derivation=1))
    # Vertical marker line at the MPV position.
    p4, = plt.plot([x_p[mpv_index] * analysis_configuration['vcal_calibration'], x_p[mpv_index] * analysis_configuration['vcal_calibration']], [0, factor * smoothed_data_diff[mpv_index]], 'k-', lw=2)
    text = 'MPV ' + str(int(x_p[mpv_index] * analysis_configuration['vcal_calibration'])) + ' e'
    plt.text(1.01 * x_p[mpv_index] * analysis_configuration['vcal_calibration'], -10. * smoothed_data_diff[mpv_index], text, ha='left')
    plt.legend([p1, p2, p3, p4], ['data', 'smoothed spline', 'spline differentiation', text], prop={'size': 12}, loc=0)
    plt.title('\'Single hit cluster\'-occupancy for different pixel thresholds')
    plt.xlabel('Pixel threshold [e]')
    plt.ylabel('Single hit cluster occupancy [a.u.]')
    plt.ylim(0, np.amax(y_p) * 1.15)
    if filename is None:
        plt.show()
    else:
        filename.savefig(plt.gcf())
    return smoothed_data_diff
def list_address(self):
"""Get information about all the addresses present on the open wallet"""
result = []
for addrStr in self.wallet.Addresses:
addr = self.wallet.GetAddress(addrStr)
result.append({
"address": addrStr,
"haskey": not addr.IsWatchOnly,
"label": None,
"watchonly": addr.IsWatchOnly,
})
return result | Get information about all the addresses present on the open wallet | Below is the the instruction that describes the task:
### Input:
Get information about all the addresses present on the open wallet
### Response:
def list_address(self):
    """Get information about all the addresses present on the open wallet"""
    entries = []
    for address_string in self.wallet.Addresses:
        address = self.wallet.GetAddress(address_string)
        watch_only = address.IsWatchOnly
        # "haskey" is simply the inverse of watch-only status.
        entries.append({
            "address": address_string,
            "haskey": not watch_only,
            "label": None,
            "watchonly": watch_only,
        })
    return entries
def create_nio(self, nio_settings):
"""
Creates a new NIO.
:param nio_settings: information to create the NIO
:returns: a NIO object
"""
nio = None
if nio_settings["type"] == "nio_udp":
lport = nio_settings["lport"]
rhost = nio_settings["rhost"]
rport = nio_settings["rport"]
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise aiohttp.web.HTTPInternalServerError(text="getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
filters = nio_settings.get("filters", {})
nio = NIOUDP(lport, rhost, rport, filters)
elif nio_settings["type"] == "nio_tap":
tap_device = nio_settings["tap_device"]
# if not is_interface_up(tap_device):
# raise aiohttp.web.HTTPConflict(text="TAP interface {} does not exist or is down".format(tap_device))
# FIXME: check for permissions on tap device
# if not self.has_privileged_access(executable):
# raise aiohttp.web.HTTPForbidden(text="{} has no privileged access to {}.".format(executable, tap_device))
nio = NIOTAP(tap_device)
elif nio_settings["type"] in ("nio_generic_ethernet", "nio_ethernet"):
ethernet_device = nio_settings["ethernet_device"]
if not is_interface_up(ethernet_device):
raise aiohttp.web.HTTPConflict(text="Ethernet interface {} does not exist or is down".format(ethernet_device))
nio = NIOEthernet(ethernet_device)
assert nio is not None
return nio | Creates a new NIO.
:param nio_settings: information to create the NIO
:returns: a NIO object | Below is the the instruction that describes the task:
### Input:
Creates a new NIO.
:param nio_settings: information to create the NIO
:returns: a NIO object
### Response:
def create_nio(self, nio_settings):
    """
    Creates a new NIO (network input/output adapter).

    :param nio_settings: dict describing the NIO; must contain a "type" key
        ("nio_udp", "nio_tap", "nio_generic_ethernet" or "nio_ethernet")
        plus the type-specific keys read below
    :returns: a NIO object (NIOUDP, NIOTAP or NIOEthernet)
    :raises aiohttp.web.HTTPInternalServerError: if the UDP remote endpoint
        cannot be resolved or connected
    :raises aiohttp.web.HTTPConflict: if the Ethernet interface does not
        exist or is down
    """
    nio = None
    if nio_settings["type"] == "nio_udp":
        lport = nio_settings["lport"]
        rhost = nio_settings["rhost"]
        rport = nio_settings["rport"]
        try:
            # Resolve the remote endpoint and probe each candidate address
            # with a UDP connect() (no packets sent) to surface bad
            # addresses early instead of failing later at runtime.
            info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
            if not info:
                raise aiohttp.web.HTTPInternalServerError(text="getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
            for res in info:
                af, socktype, proto, _, sa = res
                with socket.socket(af, socktype, proto) as sock:
                    sock.connect(sa)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
        filters = nio_settings.get("filters", {})
        nio = NIOUDP(lport, rhost, rport, filters)
    elif nio_settings["type"] == "nio_tap":
        tap_device = nio_settings["tap_device"]
        # The TAP device is deliberately not validated here (see disabled
        # checks below).
        # if not is_interface_up(tap_device):
        # raise aiohttp.web.HTTPConflict(text="TAP interface {} does not exist or is down".format(tap_device))
        # FIXME: check for permissions on tap device
        # if not self.has_privileged_access(executable):
        # raise aiohttp.web.HTTPForbidden(text="{} has no privileged access to {}.".format(executable, tap_device))
        nio = NIOTAP(tap_device)
    elif nio_settings["type"] in ("nio_generic_ethernet", "nio_ethernet"):
        ethernet_device = nio_settings["ethernet_device"]
        if not is_interface_up(ethernet_device):
            raise aiohttp.web.HTTPConflict(text="Ethernet interface {} does not exist or is down".format(ethernet_device))
        nio = NIOEthernet(ethernet_device)
    # NOTE(review): assert is stripped under python -O; an unknown "type"
    # would then silently return None — consider raising explicitly.
    assert nio is not None
    return nio
def upload_identity_keys(self):
"""Uploads this device's identity keys to HS.
This device must be the one used when logging in.
"""
device_keys = {
'user_id': self.user_id,
'device_id': self.device_id,
'algorithms': self._algorithms,
'keys': {'{}:{}'.format(alg, self.device_id): key
for alg, key in self.identity_keys.items()}
}
self.sign_json(device_keys)
ret = self.api.upload_keys(device_keys=device_keys)
self.one_time_keys_manager.server_counts = ret['one_time_key_counts']
logger.info('Uploaded identity keys.') | Uploads this device's identity keys to HS.
This device must be the one used when logging in. | Below is the the instruction that describes the task:
### Input:
Uploads this device's identity keys to HS.
This device must be the one used when logging in.
### Response:
def upload_identity_keys(self):
    """Uploads this device's identity keys to HS.

    This device must be the one used when logging in.
    """
    # Key ids are "<algorithm>:<device_id>".
    signed_keys = {
        '{}:{}'.format(algorithm, self.device_id): key
        for algorithm, key in self.identity_keys.items()
    }
    device_keys = {
        'user_id': self.user_id,
        'device_id': self.device_id,
        'algorithms': self._algorithms,
        'keys': signed_keys,
    }
    self.sign_json(device_keys)
    response = self.api.upload_keys(device_keys=device_keys)
    # The HS replies with the current one-time key counts; cache them.
    self.one_time_keys_manager.server_counts = response['one_time_key_counts']
    logger.info('Uploaded identity keys.')
async def edit(self, **fields):
"""|coro|
Edits the group.
Parameters
-----------
name: Optional[:class:`str`]
The new name to change the group to.
Could be ``None`` to remove the name.
icon: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the new icon.
Could be ``None`` to remove the icon.
Raises
-------
HTTPException
Editing the group failed.
"""
try:
icon_bytes = fields['icon']
except KeyError:
pass
else:
if icon_bytes is not None:
fields['icon'] = utils._bytes_to_base64_data(icon_bytes)
data = await self._state.http.edit_group(self.id, **fields)
self._update_group(data) | |coro|
Edits the group.
Parameters
-----------
name: Optional[:class:`str`]
The new name to change the group to.
Could be ``None`` to remove the name.
icon: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the new icon.
Could be ``None`` to remove the icon.
Raises
-------
HTTPException
Editing the group failed. | Below is the the instruction that describes the task:
### Input:
|coro|
Edits the group.
Parameters
-----------
name: Optional[:class:`str`]
The new name to change the group to.
Could be ``None`` to remove the name.
icon: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the new icon.
Could be ``None`` to remove the icon.
Raises
-------
HTTPException
Editing the group failed.
### Response:
async def edit(self, **fields):
    """|coro|

    Edits the group.

    Parameters
    -----------
    name: Optional[:class:`str`]
        The new name to change the group to.
        Could be ``None`` to remove the name.
    icon: Optional[:class:`bytes`]
        A :term:`py:bytes-like object` representing the new icon.
        Could be ``None`` to remove the icon.

    Raises
    -------
    HTTPException
        Editing the group failed.
    """
    if 'icon' in fields:
        raw_icon = fields['icon']
        # Only encode a real payload; an explicit None is passed through so
        # the API removes the icon.
        if raw_icon is not None:
            fields['icon'] = utils._bytes_to_base64_data(raw_icon)
    data = await self._state.http.edit_group(self.id, **fields)
    self._update_group(data)
def _invoke(cls, cmd):
"""Invoke the given command, and return a tuple of process and raw binary output.
stderr flows to wherever its currently mapped for the parent process - generally to
the terminal where the user can see the error.
:param list cmd: The command in the form of a list of strings
:returns: The completed process object and its standard output.
:raises: Scm.LocalException if there was a problem exec'ing the command at all.
"""
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as e:
# Binary DNE or is not executable
raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), e))
out, _ = process.communicate()
return process, out | Invoke the given command, and return a tuple of process and raw binary output.
stderr flows to wherever its currently mapped for the parent process - generally to
the terminal where the user can see the error.
:param list cmd: The command in the form of a list of strings
:returns: The completed process object and its standard output.
:raises: Scm.LocalException if there was a problem exec'ing the command at all. | Below is the the instruction that describes the task:
### Input:
Invoke the given command, and return a tuple of process and raw binary output.
stderr flows to wherever its currently mapped for the parent process - generally to
the terminal where the user can see the error.
:param list cmd: The command in the form of a list of strings
:returns: The completed process object and its standard output.
:raises: Scm.LocalException if there was a problem exec'ing the command at all.
### Response:
def _invoke(cls, cmd):
"""Invoke the given command, and return a tuple of process and raw binary output.
stderr flows to wherever its currently mapped for the parent process - generally to
the terminal where the user can see the error.
:param list cmd: The command in the form of a list of strings
:returns: The completed process object and its standard output.
:raises: Scm.LocalException if there was a problem exec'ing the command at all.
"""
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as e:
# Binary DNE or is not executable
raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), e))
out, _ = process.communicate()
return process, out |
def AddWiFiConnection(self, dev_path, connection_name, ssid_name, key_mgmt):
'''Add an available connection to an existing WiFi device and access point.
You have to specify WiFi Device path, Connection object name,
SSID and key management.
The SSID must match one of the previously created access points.
Please note that this does not set any global properties.
Returns the new object path.
'''
dev_obj = dbusmock.get_object(dev_path)
connection_path = '/org/freedesktop/NetworkManager/Settings/' + connection_name
connections = dev_obj.Get(DEVICE_IFACE, 'AvailableConnections')
settings_obj = dbusmock.get_object(SETTINGS_OBJ)
main_connections = settings_obj.ListConnections()
ssid = ssid_name.encode('UTF-8')
# Find the access point by ssid
access_point = None
access_points = dev_obj.access_points
for ap_path in access_points:
ap = dbusmock.get_object(ap_path)
if ap.Get(ACCESS_POINT_IFACE, 'Ssid') == ssid:
access_point = ap
break
if not access_point:
raise dbus.exceptions.DBusException(
'Access point with SSID [%s] could not be found' % (ssid_name),
name=MANAGER_IFACE + '.DoesNotExist')
hw_address = access_point.Get(ACCESS_POINT_IFACE, 'HwAddress')
mode = access_point.Get(ACCESS_POINT_IFACE, 'Mode')
security = access_point.Get(ACCESS_POINT_IFACE, 'WpaFlags')
if connection_path in connections or connection_path in main_connections:
raise dbus.exceptions.DBusException(
'Connection %s on device %s already exists' % (connection_name, dev_path),
name=MANAGER_IFACE + '.AlreadyExists')
# Parse mac address string into byte array
mac_bytes = binascii.unhexlify(hw_address.replace(':', ''))
settings = {
'802-11-wireless': {
'seen-bssids': [hw_address],
'ssid': dbus.ByteArray(ssid),
'mac-address': dbus.ByteArray(mac_bytes),
'mode': InfrastructureMode.NAME_MAP[mode]
},
'connection': {
'timestamp': dbus.UInt64(1374828522),
'type': '802-11-wireless',
'id': ssid_name,
'uuid': str(uuid.uuid4())
},
}
if security != NM80211ApSecurityFlags.NM_802_11_AP_SEC_NONE:
settings['802-11-wireless']['security'] = '802-11-wireless-security'
settings['802-11-wireless-security'] = NM80211ApSecurityFlags.NAME_MAP[security]
self.AddObject(connection_path,
CSETTINGS_IFACE,
{
'Unsaved': False
},
[
('Delete', '', '', 'self.ConnectionDelete(self)'),
('GetSettings', '', 'a{sa{sv}}', 'ret = self.ConnectionGetSettings(self)'),
('GetSecrets', 's', 'a{sa{sv}}', 'ret = self.ConnectionGetSecrets(self, args[0])'),
('Update', 'a{sa{sv}}', '', 'self.ConnectionUpdate(self, args[0])'),
])
self.object_manager_emit_added(connection_path)
connection_obj = dbusmock.get_object(connection_path)
connection_obj.settings = settings
connection_obj.connection_path = connection_path
connection_obj.ConnectionDelete = ConnectionDelete
connection_obj.ConnectionGetSettings = ConnectionGetSettings
connection_obj.ConnectionGetSecrets = ConnectionGetSecrets
connection_obj.ConnectionUpdate = ConnectionUpdate
connections.append(dbus.ObjectPath(connection_path))
dev_obj.Set(DEVICE_IFACE, 'AvailableConnections', connections)
main_connections.append(connection_path)
settings_obj.Set(SETTINGS_IFACE, 'Connections', main_connections)
settings_obj.EmitSignal(SETTINGS_IFACE, 'NewConnection', 'o', [ap_path])
return connection_path | Add an available connection to an existing WiFi device and access point.
You have to specify WiFi Device path, Connection object name,
SSID and key management.
The SSID must match one of the previously created access points.
Please note that this does not set any global properties.
Returns the new object path. | Below is the instruction that describes the task:
### Input:
Add an available connection to an existing WiFi device and access point.
You have to specify WiFi Device path, Connection object name,
SSID and key management.
The SSID must match one of the previously created access points.
Please note that this does not set any global properties.
Returns the new object path.
### Response:
def AddWiFiConnection(self, dev_path, connection_name, ssid_name, key_mgmt):
'''Add an available connection to an existing WiFi device and access point.
You have to specify WiFi Device path, Connection object name,
SSID and key management.
The SSID must match one of the previously created access points.
Please note that this does not set any global properties.
Returns the new object path.
'''
dev_obj = dbusmock.get_object(dev_path)
connection_path = '/org/freedesktop/NetworkManager/Settings/' + connection_name
connections = dev_obj.Get(DEVICE_IFACE, 'AvailableConnections')
settings_obj = dbusmock.get_object(SETTINGS_OBJ)
main_connections = settings_obj.ListConnections()
ssid = ssid_name.encode('UTF-8')
# Find the access point by ssid
access_point = None
access_points = dev_obj.access_points
for ap_path in access_points:
ap = dbusmock.get_object(ap_path)
if ap.Get(ACCESS_POINT_IFACE, 'Ssid') == ssid:
access_point = ap
break
if not access_point:
raise dbus.exceptions.DBusException(
'Access point with SSID [%s] could not be found' % (ssid_name),
name=MANAGER_IFACE + '.DoesNotExist')
hw_address = access_point.Get(ACCESS_POINT_IFACE, 'HwAddress')
mode = access_point.Get(ACCESS_POINT_IFACE, 'Mode')
security = access_point.Get(ACCESS_POINT_IFACE, 'WpaFlags')
if connection_path in connections or connection_path in main_connections:
raise dbus.exceptions.DBusException(
'Connection %s on device %s already exists' % (connection_name, dev_path),
name=MANAGER_IFACE + '.AlreadyExists')
# Parse mac address string into byte array
mac_bytes = binascii.unhexlify(hw_address.replace(':', ''))
settings = {
'802-11-wireless': {
'seen-bssids': [hw_address],
'ssid': dbus.ByteArray(ssid),
'mac-address': dbus.ByteArray(mac_bytes),
'mode': InfrastructureMode.NAME_MAP[mode]
},
'connection': {
'timestamp': dbus.UInt64(1374828522),
'type': '802-11-wireless',
'id': ssid_name,
'uuid': str(uuid.uuid4())
},
}
if security != NM80211ApSecurityFlags.NM_802_11_AP_SEC_NONE:
settings['802-11-wireless']['security'] = '802-11-wireless-security'
settings['802-11-wireless-security'] = NM80211ApSecurityFlags.NAME_MAP[security]
self.AddObject(connection_path,
CSETTINGS_IFACE,
{
'Unsaved': False
},
[
('Delete', '', '', 'self.ConnectionDelete(self)'),
('GetSettings', '', 'a{sa{sv}}', 'ret = self.ConnectionGetSettings(self)'),
('GetSecrets', 's', 'a{sa{sv}}', 'ret = self.ConnectionGetSecrets(self, args[0])'),
('Update', 'a{sa{sv}}', '', 'self.ConnectionUpdate(self, args[0])'),
])
self.object_manager_emit_added(connection_path)
connection_obj = dbusmock.get_object(connection_path)
connection_obj.settings = settings
connection_obj.connection_path = connection_path
connection_obj.ConnectionDelete = ConnectionDelete
connection_obj.ConnectionGetSettings = ConnectionGetSettings
connection_obj.ConnectionGetSecrets = ConnectionGetSecrets
connection_obj.ConnectionUpdate = ConnectionUpdate
connections.append(dbus.ObjectPath(connection_path))
dev_obj.Set(DEVICE_IFACE, 'AvailableConnections', connections)
main_connections.append(connection_path)
settings_obj.Set(SETTINGS_IFACE, 'Connections', main_connections)
settings_obj.EmitSignal(SETTINGS_IFACE, 'NewConnection', 'o', [ap_path])
return connection_path |
def ndwi(self):
"""
Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values
"""
data = self._read(self[self._ndwi_bands,...]).astype(np.float32)
return (data[1,:,:] - data[0,:,:]) / (data[0,:,:] + data[1,:,:]) | Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values | Below is the the instruction that describes the task:
### Input:
Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values
### Response:
def ndwi(self):
"""
Calculates Normalized Difference Water Index using Coastal and NIR2 bands for WV02, WV03.
For Landsat8 and sentinel2 calculated by using Green and NIR bands.
Returns: numpy array of ndwi values
"""
data = self._read(self[self._ndwi_bands,...]).astype(np.float32)
return (data[1,:,:] - data[0,:,:]) / (data[0,:,:] + data[1,:,:]) |
def _partial_date_slice(self, resolution, parsed):
"""Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> import pandas as pd
>>> import xarray as xr
>>> da = xr.DataArray([1, 2],
coords=[[DatetimeNoLeap(2001, 1, 1),
DatetimeNoLeap(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) object 2001-01-01 00:00:00
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray ()>
array(1)
Coordinates:
time datetime64[ns] 2001-01-01
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) datetime64[ns] 2001-01-01T01:00:00
"""
start, end = _parsed_string_to_bounds(self.date_type, resolution,
parsed)
times = self._data
if self.is_monotonic:
if (len(times) and ((start < times[0] and end < times[0]) or
(start > times[-1] and end > times[-1]))):
# we are out of range
raise KeyError
# a monotonic (sorted) series can be sliced
left = times.searchsorted(start, side='left')
right = times.searchsorted(end, side='right')
return slice(left, right)
lhs_mask = times >= start
rhs_mask = times <= end
return np.flatnonzero(lhs_mask & rhs_mask) | Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> import pandas as pd
>>> import xarray as xr
>>> da = xr.DataArray([1, 2],
coords=[[DatetimeNoLeap(2001, 1, 1),
DatetimeNoLeap(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) object 2001-01-01 00:00:00
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray ()>
array(1)
Coordinates:
time datetime64[ns] 2001-01-01
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) datetime64[ns] 2001-01-01T01:00:00 | Below is the instruction that describes the task:
### Input:
Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> import pandas as pd
>>> import xarray as xr
>>> da = xr.DataArray([1, 2],
coords=[[DatetimeNoLeap(2001, 1, 1),
DatetimeNoLeap(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) object 2001-01-01 00:00:00
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray ()>
array(1)
Coordinates:
time datetime64[ns] 2001-01-01
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) datetime64[ns] 2001-01-01T01:00:00
### Response:
def _partial_date_slice(self, resolution, parsed):
"""Adapted from
pandas.tseries.index.DatetimeIndex._partial_date_slice
Note that when using a CFTimeIndex, if a partial-date selection
returns a single element, it will never be converted to a scalar
coordinate; this is in slight contrast to the behavior when using
a DatetimeIndex, which sometimes will return a DataArray with a scalar
coordinate depending on the resolution of the datetimes used in
defining the index. For example:
>>> from cftime import DatetimeNoLeap
>>> import pandas as pd
>>> import xarray as xr
>>> da = xr.DataArray([1, 2],
coords=[[DatetimeNoLeap(2001, 1, 1),
DatetimeNoLeap(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) object 2001-01-01 00:00:00
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray ()>
array(1)
Coordinates:
time datetime64[ns] 2001-01-01
>>> da = xr.DataArray([1, 2],
coords=[[pd.Timestamp(2001, 1, 1, 1),
pd.Timestamp(2001, 2, 1)]],
dims=['time'])
>>> da.sel(time='2001-01-01')
<xarray.DataArray (time: 1)>
array([1])
Coordinates:
* time (time) datetime64[ns] 2001-01-01T01:00:00
"""
start, end = _parsed_string_to_bounds(self.date_type, resolution,
parsed)
times = self._data
if self.is_monotonic:
if (len(times) and ((start < times[0] and end < times[0]) or
(start > times[-1] and end > times[-1]))):
# we are out of range
raise KeyError
# a monotonic (sorted) series can be sliced
left = times.searchsorted(start, side='left')
right = times.searchsorted(end, side='right')
return slice(left, right)
lhs_mask = times >= start
rhs_mask = times <= end
return np.flatnonzero(lhs_mask & rhs_mask) |
def split_query(qs, keep_blank_values=False):
'''Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
'''
items = []
for pair in qs.split('&'):
name, delim, value = pair.partition('=')
if not delim and keep_blank_values:
value = None
if keep_blank_values or value:
items.append((name, value))
return items | Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values. | Below is the instruction that describes the task:
### Input:
Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
### Response:
def split_query(qs, keep_blank_values=False):
'''Split the query string.
Note for empty values: If an equal sign (``=``) is present, the value
will be an empty string (``''``). Otherwise, the value will be ``None``::
>>> list(split_query('a=&b', keep_blank_values=True))
[('a', ''), ('b', None)]
No processing is done on the actual values.
'''
items = []
for pair in qs.split('&'):
name, delim, value = pair.partition('=')
if not delim and keep_blank_values:
value = None
if keep_blank_values or value:
items.append((name, value))
return items |
def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass | After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names | Below is the instruction that describes the task:
### Input:
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
### Response:
def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass |
def manual_pal(values):
"""
Create a palette from a list of values
Parameters
----------
values : sequence
Values that will be returned by the palette function.
Returns
-------
out : function
A function palette that takes a single
:class:`int` parameter ``n`` and returns ``n`` values.
Examples
--------
>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])
>>> palette(3)
['a', 'b', 'c']
"""
max_n = len(values)
def _manual_pal(n):
if n > max_n:
msg = ("Palette can return a maximum of {} values. "
"{} were requested from it.")
warnings.warn(msg.format(max_n, n))
return values[:n]
return _manual_pal | Create a palette from a list of values
Parameters
----------
values : sequence
Values that will be returned by the palette function.
Returns
-------
out : function
A function palette that takes a single
:class:`int` parameter ``n`` and returns ``n`` values.
Examples
--------
>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])
>>> palette(3)
['a', 'b', 'c'] | Below is the instruction that describes the task:
### Input:
Create a palette from a list of values
Parameters
----------
values : sequence
Values that will be returned by the palette function.
Returns
-------
out : function
A function palette that takes a single
:class:`int` parameter ``n`` and returns ``n`` values.
Examples
--------
>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])
>>> palette(3)
['a', 'b', 'c']
### Response:
def manual_pal(values):
"""
Create a palette from a list of values
Parameters
----------
values : sequence
Values that will be returned by the palette function.
Returns
-------
out : function
A function palette that takes a single
:class:`int` parameter ``n`` and returns ``n`` values.
Examples
--------
>>> palette = manual_pal(['a', 'b', 'c', 'd', 'e'])
>>> palette(3)
['a', 'b', 'c']
"""
max_n = len(values)
def _manual_pal(n):
if n > max_n:
msg = ("Palette can return a maximum of {} values. "
"{} were requested from it.")
warnings.warn(msg.format(max_n, n))
return values[:n]
return _manual_pal |
def _pp_str(self):
"""
Return the pretty-printed IRSB.
:rtype: str
"""
sa = []
sa.append("IRSB {")
if self.statements is not None:
sa.append(" %s" % self.tyenv)
sa.append("")
if self.statements is not None:
for i, s in enumerate(self.statements):
if isinstance(s, stmt.Put):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.WrTmp) and isinstance(s.data, expr.Get):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.data.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.Exit):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offsIP, self.arch.bits // 8))
else:
stmt_str = s.__str__()
sa.append(" %02d | %s" % (i, stmt_str))
else:
sa.append(" Statements are omitted.")
sa.append(
" NEXT: PUT(%s) = %s; %s" % (self.arch.translate_register_name(self.offsIP), self.next, self.jumpkind))
sa.append("}")
return '\n'.join(sa) | Return the pretty-printed IRSB.
:rtype: str | Below is the instruction that describes the task:
### Input:
Return the pretty-printed IRSB.
:rtype: str
### Response:
def _pp_str(self):
"""
Return the pretty-printed IRSB.
:rtype: str
"""
sa = []
sa.append("IRSB {")
if self.statements is not None:
sa.append(" %s" % self.tyenv)
sa.append("")
if self.statements is not None:
for i, s in enumerate(self.statements):
if isinstance(s, stmt.Put):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.WrTmp) and isinstance(s.data, expr.Get):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.data.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.Exit):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offsIP, self.arch.bits // 8))
else:
stmt_str = s.__str__()
sa.append(" %02d | %s" % (i, stmt_str))
else:
sa.append(" Statements are omitted.")
sa.append(
" NEXT: PUT(%s) = %s; %s" % (self.arch.translate_register_name(self.offsIP), self.next, self.jumpkind))
sa.append("}")
return '\n'.join(sa) |
def _get_args_for_reloading():
"""Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading. This also contains
a workaround for linux where the file is executable (possibly with
a program other than python)
"""
rv = [sys.executable]
py_script = os.path.abspath(sys.argv[0])
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
if __main__.__package__ is None:
# Executed a file, like "python app.py".
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"):
py_script += ".exe"
if (
os.path.splitext(rv[0])[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
elif os.path.isfile(py_script) and os.access(py_script, os.X_OK):
# The file is marked as executable. Nix adds a wrapper that
# shouldn't be called with the Python executable.
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
py_module = __main__.__package__
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += "." + name
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv | Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading. This also contains
a workaround for linux where the file is executable (possibly with
a program other than python) | Below is the instruction that describes the task:
### Input:
Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading. This also contains
a workaround for linux where the file is executable (possibly with
a program other than python)
### Response:
def _get_args_for_reloading():
"""Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading. This also contains
a workaround for linux where the file is executable (possibly with
a program other than python)
"""
rv = [sys.executable]
py_script = os.path.abspath(sys.argv[0])
args = sys.argv[1:]
# Need to look at main module to determine how it was executed.
__main__ = sys.modules["__main__"]
if __main__.__package__ is None:
# Executed a file, like "python app.py".
if os.name == "nt":
# Windows entry points have ".exe" extension and should be
# called directly.
if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"):
py_script += ".exe"
if (
os.path.splitext(rv[0])[1] == ".exe"
and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
elif os.path.isfile(py_script) and os.access(py_script, os.X_OK):
# The file is marked as executable. Nix adds a wrapper that
# shouldn't be called with the Python executable.
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
if sys.argv[0] == "-m":
# Flask works around previous behavior by putting
# "-m flask" in sys.argv.
# TODO remove this once Flask no longer misbehaves
args = sys.argv
else:
py_module = __main__.__package__
name = os.path.splitext(os.path.basename(py_script))[0]
if name != "__main__":
py_module += "." + name
rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv |
def extract_tokens(representation, separators=SEPARATOR_CHARACTERS):
"""Extracts durations tokens from a duration representation.
Parses the string representation incrementaly and raises
on first error met.
:param representation: duration representation
:type representation: string
"""
buff = ""
elements = []
last_index = 0
last_token = None
for index, c in enumerate(representation):
# if separator character is found, push
# the content of the buffer in the elements list
if c in separators:
if buff:
# If the last found token is invalid,
# raise and InvalidTokenError
if not valid_token(buff):
raise InvalidTokenError(
"Duration representation {0} contains "
"an invalid token: {1}".format(representation, buff)
)
# If buffer content is a separator word, for example
# "and", just ignore it
if not buff.strip() in SEPARATOR_TOKENS:
elements.append(buff)
# Anyway, reset buffer and last token marker
# to their zero value
buff = ""
last_token = None
else:
token = compute_char_token(c)
if (token is not None and last_token is not None and token != last_token):
elements.append(buff)
buff = c
else:
buff += c
last_token = token
# push the content left in representation
# in the elements list
elements.append(buff)
return list(zip(elements[::2], elements[1::2])) | Extracts durations tokens from a duration representation.
Parses the string representation incrementaly and raises
on first error met.
:param representation: duration representation
:type representation: string | Below is the instruction that describes the task:
### Input:
Extracts durations tokens from a duration representation.
Parses the string representation incrementaly and raises
on first error met.
:param representation: duration representation
:type representation: string
### Response:
def extract_tokens(representation, separators=SEPARATOR_CHARACTERS):
"""Extracts durations tokens from a duration representation.
Parses the string representation incrementaly and raises
on first error met.
:param representation: duration representation
:type representation: string
"""
buff = ""
elements = []
last_index = 0
last_token = None
for index, c in enumerate(representation):
# if separator character is found, push
# the content of the buffer in the elements list
if c in separators:
if buff:
# If the last found token is invalid,
# raise and InvalidTokenError
if not valid_token(buff):
raise InvalidTokenError(
"Duration representation {0} contains "
"an invalid token: {1}".format(representation, buff)
)
# If buffer content is a separator word, for example
# "and", just ignore it
if not buff.strip() in SEPARATOR_TOKENS:
elements.append(buff)
# Anyway, reset buffer and last token marker
# to their zero value
buff = ""
last_token = None
else:
token = compute_char_token(c)
if (token is not None and last_token is not None and token != last_token):
elements.append(buff)
buff = c
else:
buff += c
last_token = token
# push the content left in representation
# in the elements list
elements.append(buff)
return list(zip(elements[::2], elements[1::2])) |
def taskfileinfo_element_data(tfi, role):
"""Return the data for the element (e.g. the Asset or Shot)
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the element
:rtype: depending on role
:raises: None
"""
task = tfi.task
element = task.element
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return element.name | Return the data for the element (e.g. the Asset or Shot)
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the element
:rtype: depending on role
:raises: None | Below is the instruction that describes the task:
### Input:
Return the data for the element (e.g. the Asset or Shot)
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the element
:rtype: depending on role
:raises: None
### Response:
def taskfileinfo_element_data(tfi, role):
"""Return the data for the element (e.g. the Asset or Shot)
:param tfi: the :class:`jukeboxcore.filesys.TaskFileInfo` holds the data
:type tfi: :class:`jukeboxcore.filesys.TaskFileInfo`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the element
:rtype: depending on role
:raises: None
"""
task = tfi.task
element = task.element
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
return element.name |
def create_state(self, value: dict = None, *, namespace: str = None):
"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""
if namespace is None:
namespace = self.namespace
state = State(self.server_address, namespace=namespace)
if value is not None:
state.update(value)
return state | Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object. | Below is the instruction that describes the task:
### Input:
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
### Response:
def create_state(self, value: dict = None, *, namespace: str = None):
"""
Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
:param value:
If provided, call ``state.update(value)``.
:param namespace:
Use this as the namespace for the :py:class:`State` object,
instead of this :py:class:`Context`\ 's namespace.
:return:
A :py:class:`State` object.
"""
if namespace is None:
namespace = self.namespace
state = State(self.server_address, namespace=namespace)
if value is not None:
state.update(value)
return state |
def get_exports(client, bucket, prefix, latest=True):
    """Find exports for a given account
    """
    # List the year "directories" directly under the prefix
    # (S3 ListObjectsV2 with Delimiter='/' returns them as CommonPrefixes).
    keys = client.list_objects_v2(
        Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', [])
    found = []
    years = []
    for y in keys:
        # Prefixes look like "<prefix>/<year>/"; grab the year component.
        part = y['Prefix'].rsplit('/', 2)[-2]
        if not part.isdigit():
            continue
        year = int(part)
        years.append(year)
    if not years:
        return []
    years.sort(reverse=True)
    if latest:
        # Only descend into the most recent year.
        years = [years[0]]
    for y in years:
        # List the month "directories" for this year.
        keys = client.list_objects_v2(
            Bucket=bucket, Prefix="%s/%d/" % (prefix.strip('/'), y),
            Delimiter='/').get('CommonPrefixes', [])
        months = []
        for m in keys:
            part = m['Prefix'].rsplit('/', 2)[-2]
            if not part.isdigit():
                continue
            month = int(part)
            # NOTE(review): this (y, month) tuple is never used; the real
            # date_key is built in the day loop below.
            date_key = (y, month)
            months.append(month)
        months.sort(reverse=True)
        if not months:
            continue
        if latest:
            months = [months[0]]
        for m in months:
            # List the day "directories"; month is zero-padded to two digits.
            keys = client.list_objects_v2(
                Bucket=bucket, Prefix="%s/%d/%s/" % (
                    prefix.strip('/'), y, ('%d' % m).rjust(2, '0')),
                Delimiter='/').get('CommonPrefixes', [])
            for d in keys:
                part = d['Prefix'].rsplit('/', 2)[-2]
                if not part.isdigit():
                    continue
                day = int(part)
                date_key = (y, m, day)
                found.append(date_key)
    found.sort(reverse=True)
    if latest:
        # NOTE(review): raises IndexError if year prefixes exist but no
        # day-level exports were found — confirm callers tolerate that.
        found = [found[0]]
    return found | Find exports for a given account | Below is the the instruction that describes the task:
### Input:
Find exports for a given account
### Response:
def get_exports(client, bucket, prefix, latest=True):
"""Find exports for a given account
"""
keys = client.list_objects_v2(
Bucket=bucket, Prefix=prefix, Delimiter='/').get('CommonPrefixes', [])
found = []
years = []
for y in keys:
part = y['Prefix'].rsplit('/', 2)[-2]
if not part.isdigit():
continue
year = int(part)
years.append(year)
if not years:
return []
years.sort(reverse=True)
if latest:
years = [years[0]]
for y in years:
keys = client.list_objects_v2(
Bucket=bucket, Prefix="%s/%d/" % (prefix.strip('/'), y),
Delimiter='/').get('CommonPrefixes', [])
months = []
for m in keys:
part = m['Prefix'].rsplit('/', 2)[-2]
if not part.isdigit():
continue
month = int(part)
date_key = (y, month)
months.append(month)
months.sort(reverse=True)
if not months:
continue
if latest:
months = [months[0]]
for m in months:
keys = client.list_objects_v2(
Bucket=bucket, Prefix="%s/%d/%s/" % (
prefix.strip('/'), y, ('%d' % m).rjust(2, '0')),
Delimiter='/').get('CommonPrefixes', [])
for d in keys:
part = d['Prefix'].rsplit('/', 2)[-2]
if not part.isdigit():
continue
day = int(part)
date_key = (y, m, day)
found.append(date_key)
found.sort(reverse=True)
if latest:
found = [found[0]]
return found |
def assume_role(role_name, credentials, role_arn, role_session_name, silent = False):
    """
    Assume role and save credentials
    :param role_name:
    :param credentials:
    :param role_arn:
    :param role_session_name:
    :param silent:
    :return:
    """
    # NOTE(review): .pop() mutates the caller's credentials dict — confirm intended.
    external_id = credentials.pop('ExternalId') if 'ExternalId' in credentials else None
    # Connect to STS
    sts_client = connect_service('sts', credentials, silent = silent)
    # Set required arguments for assume role call
    sts_args = {
        'RoleArn': role_arn,
        'RoleSessionName': role_session_name
    }
    # MFA used ?
    if 'mfa_serial' in credentials and 'mfa_code' in credentials:
        sts_args['TokenCode'] = credentials['mfa_code']
        sts_args['SerialNumber'] = credentials['mfa_serial']
    # External ID used ?
    if external_id:
        sts_args['ExternalId'] = external_id
    # Assume the role
    sts_response = sts_client.assume_role(**sts_args)
    # From here on, "credentials" refers to the temporary role credentials
    # returned by STS, not the caller's original credentials.
    credentials = sts_response['Credentials']
    cached_credentials_filename = get_cached_credentials_filename(role_name, role_arn)
    #with open(cached_credentials_filename, 'wt+') as f:
    #    write_data_to_file(f, sts_response, True, False)
    # Ensure the cache directory exists before writing the credentials blob.
    cached_credentials_path = os.path.dirname(cached_credentials_filename)
    if not os.path.isdir(cached_credentials_path):
        os.makedirs(cached_credentials_path)
    save_blob_as_json(cached_credentials_filename, sts_response, True, False) # blob, force_write, debug):
    return credentials | Assume role and save credentials
:param role_name:
:param credentials:
:param role_arn:
:param role_session_name:
:param silent:
:return: | Below is the the instruction that describes the task:
### Input:
Assume role and save credentials
:param role_name:
:param credentials:
:param role_arn:
:param role_session_name:
:param silent:
:return:
### Response:
def assume_role(role_name, credentials, role_arn, role_session_name, silent = False):
"""
Assume role and save credentials
:param role_name:
:param credentials:
:param role_arn:
:param role_session_name:
:param silent:
:return:
"""
external_id = credentials.pop('ExternalId') if 'ExternalId' in credentials else None
# Connect to STS
sts_client = connect_service('sts', credentials, silent = silent)
# Set required arguments for assume role call
sts_args = {
'RoleArn': role_arn,
'RoleSessionName': role_session_name
}
# MFA used ?
if 'mfa_serial' in credentials and 'mfa_code' in credentials:
sts_args['TokenCode'] = credentials['mfa_code']
sts_args['SerialNumber'] = credentials['mfa_serial']
# External ID used ?
if external_id:
sts_args['ExternalId'] = external_id
# Assume the role
sts_response = sts_client.assume_role(**sts_args)
credentials = sts_response['Credentials']
cached_credentials_filename = get_cached_credentials_filename(role_name, role_arn)
#with open(cached_credentials_filename, 'wt+') as f:
# write_data_to_file(f, sts_response, True, False)
cached_credentials_path = os.path.dirname(cached_credentials_filename)
if not os.path.isdir(cached_credentials_path):
os.makedirs(cached_credentials_path)
save_blob_as_json(cached_credentials_filename, sts_response, True, False) # blob, force_write, debug):
return credentials |
def gofmt(ui, repo, *pats, **opts):
    """apply gofmt to modified files
    Applies gofmt to the modified files in the repository that match
    the given patterns.
    """
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    # Only run gofmt on the modified files that need it.
    files = ChangedExistingFiles(ui, repo, pats, opts)
    files = gofmt_required(files)
    if not files:
        ui.status("no modified go files\n")
        return
    # gofmt is invoked from the current directory, so make paths relative to it.
    cwd = os.getcwd()
    files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
    try:
        cmd = ["gofmt", "-l"]
        if not opts["list"]:
            # Without --list, rewrite files in place (-w).
            cmd += ["-w"]
        if subprocess.call(cmd + files) != 0:
            raise hg_util.Abort("gofmt did not exit cleanly")
    except hg_error.Abort, e:
        raise
    except:
        # Wrap any other failure in an Abort with the exception detail.
        raise hg_util.Abort("gofmt: " + ExceptionDetail())
    return | apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns. | Below is the the instruction that describes the task:
### Input:
apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
### Response:
def gofmt(ui, repo, *pats, **opts):
"""apply gofmt to modified files
Applies gofmt to the modified files in the repository that match
the given patterns.
"""
if codereview_disabled:
raise hg_util.Abort(codereview_disabled)
files = ChangedExistingFiles(ui, repo, pats, opts)
files = gofmt_required(files)
if not files:
ui.status("no modified go files\n")
return
cwd = os.getcwd()
files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
try:
cmd = ["gofmt", "-l"]
if not opts["list"]:
cmd += ["-w"]
if subprocess.call(cmd + files) != 0:
raise hg_util.Abort("gofmt did not exit cleanly")
except hg_error.Abort, e:
raise
except:
raise hg_util.Abort("gofmt: " + ExceptionDetail())
return |
def discard(self, key):
        """
        Remove an element. Do not raise an exception if absent.
        The MutableSet mixin uses this to implement the .remove() method, which
        *does* raise an error when asked to remove a non-existent item.
        """
        if key in self:
            # Remove the item and its index entry.
            i = self.map[key]
            del self.items[i]
            del self.map[key]
            # Shift every recorded index after the removed slot down by one,
            # keeping self.map consistent with self.items (O(n) pass).
            for k, v in self.map.items():
                if v >= i:
                    self.map[k] = v - 1 | Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item. | Below is the the instruction that describes the task:
### Input:
Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
### Response:
def discard(self, key):
"""
Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
"""
if key in self:
i = self.map[key]
del self.items[i]
del self.map[key]
for k, v in self.map.items():
if v >= i:
self.map[k] = v - 1 |
def mutate_rows(
        self,
        table_name,
        entries,
        app_profile_id=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Mutates multiple rows in a batch. Each individual row is mutated
        atomically as in MutateRow, but the entire batch is not executed
        atomically.
        Example:
            >>> from google.cloud import bigtable_v2
            >>>
            >>> client = bigtable_v2.BigtableClient()
            >>>
            >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
            >>>
            >>> # TODO: Initialize `entries`:
            >>> entries = []
            >>>
            >>> for element in client.mutate_rows(table_name, entries):
            ...     # process element
            ...     pass
        Args:
            table_name (str): The unique name of the table to which the mutations should be applied.
            entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
                Each entry is applied as an atomic mutation, but the entries may be
                applied in arbitrary order (even between entries for the same row).
                At least one entry must be specified, and in total the entries can
                contain at most 100000 mutations.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.bigtable_v2.types.Entry`
            app_profile_id (str): This value specifies routing for replication. If not specified, the
                "default" application profile will be used.
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "mutate_rows" not in self._inner_api_calls:
            self._inner_api_calls[
                "mutate_rows"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.mutate_rows,
                default_retry=self._method_configs["MutateRows"].retry,
                default_timeout=self._method_configs["MutateRows"].timeout,
                client_info=self._client_info,
            )
        request = bigtable_pb2.MutateRowsRequest(
            table_name=table_name, entries=entries, app_profile_id=app_profile_id
        )
        if metadata is None:
            metadata = []
        # Copy so the caller's metadata sequence is never mutated in place.
        metadata = list(metadata)
        try:
            routing_header = [("table_name", table_name)]
        except AttributeError:
            pass
        else:
            # Attach routing metadata so the request is routed by table name.
            routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
            metadata.append(routing_metadata)
        return self._inner_api_calls["mutate_rows"](
            request, retry=retry, timeout=timeout, metadata=metadata
        ) | Mutates multiple rows in a batch. Each individual row is mutated
atomically as in MutateRow, but the entire batch is not executed
atomically.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> for element in client.mutate_rows(table_name, entries):
... # process element
... pass
Args:
table_name (str): The unique name of the table to which the mutations should be applied.
entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
Each entry is applied as an atomic mutation, but the entries may be
applied in arbitrary order (even between entries for the same row).
At least one entry must be specified, and in total the entries can
contain at most 100000 mutations.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Entry`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Mutates multiple rows in a batch. Each individual row is mutated
atomically as in MutateRow, but the entire batch is not executed
atomically.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> for element in client.mutate_rows(table_name, entries):
... # process element
... pass
Args:
table_name (str): The unique name of the table to which the mutations should be applied.
entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
Each entry is applied as an atomic mutation, but the entries may be
applied in arbitrary order (even between entries for the same row).
At least one entry must be specified, and in total the entries can
contain at most 100000 mutations.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Entry`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def mutate_rows(
self,
table_name,
entries,
app_profile_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Mutates multiple rows in a batch. Each individual row is mutated
atomically as in MutateRow, but the entire batch is not executed
atomically.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> for element in client.mutate_rows(table_name, entries):
... # process element
... pass
Args:
table_name (str): The unique name of the table to which the mutations should be applied.
entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk.
Each entry is applied as an atomic mutation, but the entries may be
applied in arbitrary order (even between entries for the same row).
At least one entry must be specified, and in total the entries can
contain at most 100000 mutations.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.Entry`
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "mutate_rows" not in self._inner_api_calls:
self._inner_api_calls[
"mutate_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.mutate_rows,
default_retry=self._method_configs["MutateRows"].retry,
default_timeout=self._method_configs["MutateRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.MutateRowsRequest(
table_name=table_name, entries=entries, app_profile_id=app_profile_id
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["mutate_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def sweep_channels_slow(self, sampling_window_ms, n_sampling_windows,
                            delay_between_windows_ms, interleave_samples,
                            use_rms, channel_mask):
        '''
        Measure voltage across load of each of the following control board
        feedback circuits:
        - Reference _(i.e., attenuated high-voltage amplifier output)_.
        - Load _(i.e., voltage across DMF device)_.
        For each channel in the channel mask. The measured voltage _(i.e.,
        ``V2``)_ can be used to compute the impedance of the measured load, the
        input voltage _(i.e., ``V1``)_, etc.
        **N.B.,** Use one firmware call per channel, as opposed to scanning all
        channels with a single firmware call as in :meth:`sweep_channels`
        method.
        Returns
        -------
        pandas.DataFrame
            Table containing one actuation RMS measurement and one device load
            impedance measurement per row and the columns ``frequency``,
            ``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and
            ``impedance``.
            Rows are indexed by time since first measurement in frame.
        '''
        # channel_mask is a sequence of 0/1 flags, one per channel.
        channel_count = len(channel_mask)
        scan_count = sum(channel_mask)
        frames = []
        print ''
        scan_count_i = 0
        # Iterate through channel mask, measuring impedance for each selected
        # channel in the mask.
        for channel_i, state_i in enumerate(channel_mask):
            if state_i:
                scan_count_i += 1
                print '\rMeasure impedance: {} ({}/{})'.format(channel_i,
                                                               scan_count_i,
                                                               scan_count),
                # Actuate only this one channel for the measurement.
                channel_states_i = [0] * channel_count
                channel_states_i[channel_i] = 1
                start_time_i = datetime.utcnow()
                feedback_results_i = \
                    self.measure_impedance(sampling_window_ms,
                                           n_sampling_windows,
                                           delay_between_windows_ms,
                                           interleave_samples, use_rms,
                                           channel_states_i)
                # Convert custom feedback results object into a
                # `pandas.DataFrame`.
                df_result_i =\
                    feedback_results_to_impedance_frame(feedback_results_i)
                df_result_i.insert(2, 'channel_i', channel_i)
                df_result_i.insert(0, 'utc_start', start_time_i)
                frames.append(df_result_i)
        print ''
        if not frames:
            # No channel selected: return an empty frame with the same columns.
            df_result = pd.DataFrame(None, columns=['utc_start', 'seconds',
                                                    'channel_i', 'frequency',
                                                    'V_actuation',
                                                    'capacitance',
                                                    'impedance'])
        else:
            df_result = pd.concat(frames)
        return df_result | Measure voltage across load of each of the following control board
feedback circuits:
- Reference _(i.e., attenuated high-voltage amplifier output)_.
- Load _(i.e., voltage across DMF device)_.
For each channel in the channel mask. The measured voltage _(i.e.,
``V2``)_ can be used to compute the impedance of the measured load, the
input voltage _(i.e., ``V1``)_, etc.
**N.B.,** Use one firmware call per channel, as opposed to scanning all
channels with a single firmware call as in :meth:`sweep_channels`
method.
Returns
-------
pandas.DataFrame
Table containing one actuation RMS measurement and one device load
impedance measurement per row and the columns ``frequency``,
``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and
``impedance``.
Rows are indexed by time since first measurement in frame. | Below is the the instruction that describes the task:
### Input:
Measure voltage across load of each of the following control board
feedback circuits:
- Reference _(i.e., attenuated high-voltage amplifier output)_.
- Load _(i.e., voltage across DMF device)_.
For each channel in the channel mask. The measured voltage _(i.e.,
``V2``)_ can be used to compute the impedance of the measured load, the
input voltage _(i.e., ``V1``)_, etc.
**N.B.,** Use one firmware call per channel, as opposed to scanning all
channels with a single firmware call as in :meth:`sweep_channels`
method.
Returns
-------
pandas.DataFrame
Table containing one actuation RMS measurement and one device load
impedance measurement per row and the columns ``frequency``,
``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and
``impedance``.
Rows are indexed by time since first measurement in frame.
### Response:
def sweep_channels_slow(self, sampling_window_ms, n_sampling_windows,
delay_between_windows_ms, interleave_samples,
use_rms, channel_mask):
'''
Measure voltage across load of each of the following control board
feedback circuits:
- Reference _(i.e., attenuated high-voltage amplifier output)_.
- Load _(i.e., voltage across DMF device)_.
For each channel in the channel mask. The measured voltage _(i.e.,
``V2``)_ can be used to compute the impedance of the measured load, the
input voltage _(i.e., ``V1``)_, etc.
**N.B.,** Use one firmware call per channel, as opposed to scanning all
channels with a single firmware call as in :meth:`sweep_channels`
method.
Returns
-------
pandas.DataFrame
Table containing one actuation RMS measurement and one device load
impedance measurement per row and the columns ``frequency``,
``voltage``, ``channel_i``, ``V_actuation``, ``capacitance``, and
``impedance``.
Rows are indexed by time since first measurement in frame.
'''
channel_count = len(channel_mask)
scan_count = sum(channel_mask)
frames = []
print ''
scan_count_i = 0
# Iterate through channel mask, measuring impedance for each selected
# channel in the mask.
for channel_i, state_i in enumerate(channel_mask):
if state_i:
scan_count_i += 1
print '\rMeasure impedance: {} ({}/{})'.format(channel_i,
scan_count_i,
scan_count),
channel_states_i = [0] * channel_count
channel_states_i[channel_i] = 1
start_time_i = datetime.utcnow()
feedback_results_i = \
self.measure_impedance(sampling_window_ms,
n_sampling_windows,
delay_between_windows_ms,
interleave_samples, use_rms,
channel_states_i)
# Convert custom feedback results object into a
# `pandas.DataFrame`.
df_result_i =\
feedback_results_to_impedance_frame(feedback_results_i)
df_result_i.insert(2, 'channel_i', channel_i)
df_result_i.insert(0, 'utc_start', start_time_i)
frames.append(df_result_i)
print ''
if not frames:
df_result = pd.DataFrame(None, columns=['utc_start', 'seconds',
'channel_i', 'frequency',
'V_actuation',
'capacitance',
'impedance'])
else:
df_result = pd.concat(frames)
return df_result |
def nps_survey_responses(self, survey_id, **kwargs):
        "https://developer.zendesk.com/rest_api/docs/nps-api/responses#list-responses"
        # Interpolate the survey id into the endpoint path and issue the call;
        # extra keyword arguments are forwarded to self.call unchanged.
        api_path = "/api/v2/nps/surveys/{survey_id}/responses.json"
        api_path = api_path.format(survey_id=survey_id)
        return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/nps-api/responses#list-responses
### Input:
https://developer.zendesk.com/rest_api/docs/nps-api/responses#list-responses
### Response:
def nps_survey_responses(self, survey_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/nps-api/responses#list-responses"
api_path = "/api/v2/nps/surveys/{survey_id}/responses.json"
api_path = api_path.format(survey_id=survey_id)
return self.call(api_path, **kwargs) |
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
        """
        Get results (or just s3 locations) of a command from Qubole and save into a file
        :param ti: Task Instance of the dag, used to determine the Quboles command id
        :param fp: Optional file pointer, will create one and return if None passed
        :param inline: True to download actual results, False to get s3 locations only
        :param delim: Replaces the CTL-A chars with the given delim, defaults to ','
        :param fetch: when inline is True, get results directly from s3 (if large)
        :return: file location containing actual results or s3 locations of results
        """
        if fp is None:
            # No file pointer supplied: create a results file named by the
            # current UTC timestamp under the task's log folder.
            iso = datetime.datetime.utcnow().isoformat()
            logpath = os.path.expanduser(
                configuration.conf.get('core', 'BASE_LOG_FOLDER')
            )
            resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
            configuration.mkdir_p(resultpath)
            fp = open(resultpath + '/' + iso, 'wb')
        if self.cmd is None:
            # Recover the Qubole command id the task recorded via XCom,
            # then look the command object up again.
            cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
            self.cmd = self.cls.find(cmd_id)
        self.cmd.get_results(fp, inline, delim, fetch)
        fp.flush()
        fp.close()
        return fp.name | Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results | Below is the the instruction that describes the task:
### Input:
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
### Response:
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(
configuration.conf.get('core', 'BASE_LOG_FOLDER')
)
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name |
def _parse_path(self, path):
        """Parse ``path`` into segments.
        Paths must start with a WORD (i.e., a top level Django setting
        name). Path segments are separated by dots. Compound path
        segments (i.e., a name with a dot in it) can be grouped inside
        parentheses.
        Examples::
            >>> settings = Settings()
            >>> settings._parse_path('WORD')
            ['WORD']
            >>> settings._parse_path('WORD.x')
            ['WORD', 'x']
            >>> settings._parse_path('WORD.(x)')
            ['WORD', 'x']
            >>> settings._parse_path('WORD.(x.y)')
            ['WORD', 'x.y']
            >>> settings._parse_path('WORD.(x.y).z')
            ['WORD', 'x.y', 'z']
            >>> settings._parse_path('WORD.0.z')
            ['WORD', 0, 'z']
            >>> settings._parse_path('WORD.(0).z')
            ['WORD', '0', 'z']
            >>> settings._parse_path('WORD.(0)X.z')
            ['WORD', '0X', 'z']
        An example of where compound names are actually useful is in
        logger settings::
            LOGGING.loggers.(package.module).handlers = ["console"]
            LOGGING.loggers.(package.module).level = "DEBUG"
        Paths may also contain interpolation groups. Dotted names in
        these groups will not be split (so there's no need to group them
        inside parentheses)::
            >>> settings = Settings()
            >>> settings._parse_path('WORD.{{x}}')
            ['WORD', '{{x}}']
            >>> settings._parse_path('WORD.{{x.y}}')
            ['WORD', '{{x.y}}']
            >>> settings._parse_path('WORD.{{x.y.z}}XYZ')
            ['WORD', '{{x.y.z}}XYZ']
        Interpolation groups *can* be wrapped in parentheses, but doing
        so is redundant::
            >>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
            ['WORD', '{{x.y.z}}XYZ']
        Any segment that A) looks like an int and B) does *not* contain
        a (...) or {{...}} group will be converted to an int. Segments
        that start with a leading "0" followed by other digits will not
        be converted.
        """
        if not path:
            raise ValueError('path cannot be empty')
        segments = []
        # Pair each character with the one after it (final char pairs with
        # None) so "{{" / "}}" digraphs can be detected with one lookahead.
        path_iter = zip(iter(path), chain(path[1:], (None,)))
        if six.PY2:
            # zip() returns a list on Python 2
            path_iter = iter(path_iter)
        convert_name = self._convert_name
        current_segment = []
        current_segment_contains_group = False
        def append_segment():
            # Flush the accumulated characters as one segment; plain (non-group)
            # segments go through convert_name (e.g. digit strings -> int).
            segment = ''.join(current_segment)
            if not current_segment_contains_group:
                segment = convert_name(segment)
            segments.append(segment)
            del current_segment[:]
        for c, d in path_iter:
            if c == '.':
                # Dot outside any group terminates the current segment.
                append_segment()
                current_segment_contains_group = False
            elif c == '(':
                # Consume a (...) group verbatim, honoring nested parentheses.
                nested = 0
                for c, d in path_iter:
                    current_segment.append(c)
                    if c == '(':
                        nested += 1
                    elif c == ')':
                        if nested:
                            nested -= 1
                        else:
                            current_segment.pop() # Remove the closing paren
                            current_segment_contains_group = True
                            break
                else:
                    # for/else: the iterator ran out before the group closed.
                    raise ValueError('Unclosed (...) in %s' % path)
            elif c == '{' and d == '{':
                # Consume a {{...}} interpolation group, keeping its braces.
                current_segment_contains_group = True
                current_segment.append(c)
                for c, d in path_iter:
                    current_segment.append(c)
                    if c == '}' and d == '}':
                        current_segment_contains_group = True
                        break
                else:
                    raise ValueError('Unclosed {{...}} in %s' % path)
            else:
                current_segment.append(c)
        if current_segment:
            append_segment()
        return segments | Parse ``path`` into segments.
Paths must start with a WORD (i.e., a top level Django setting
name). Path segments are separated by dots. Compound path
segments (i.e., a name with a dot in it) can be grouped inside
parentheses.
Examples::
>>> settings = Settings()
>>> settings._parse_path('WORD')
['WORD']
>>> settings._parse_path('WORD.x')
['WORD', 'x']
>>> settings._parse_path('WORD.(x)')
['WORD', 'x']
>>> settings._parse_path('WORD.(x.y)')
['WORD', 'x.y']
>>> settings._parse_path('WORD.(x.y).z')
['WORD', 'x.y', 'z']
>>> settings._parse_path('WORD.0.z')
['WORD', 0, 'z']
>>> settings._parse_path('WORD.(0).z')
['WORD', '0', 'z']
>>> settings._parse_path('WORD.(0)X.z')
['WORD', '0X', 'z']
An example of where compound names are actually useful is in
logger settings::
LOGGING.loggers.(package.module).handlers = ["console"]
LOGGING.loggers.(package.module).level = "DEBUG"
Paths may also contain interpolation groups. Dotted names in
these groups will not be split (so there's no need to group them
inside parentheses)::
>>> settings = Settings()
>>> settings._parse_path('WORD.{{x}}')
['WORD', '{{x}}']
>>> settings._parse_path('WORD.{{x.y}}')
['WORD', '{{x.y}}']
>>> settings._parse_path('WORD.{{x.y.z}}XYZ')
['WORD', '{{x.y.z}}XYZ']
Interpolation groups *can* be wrapped in parentheses, but doing
so is redundant::
>>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
['WORD', '{{x.y.z}}XYZ']
Any segment that A) looks like an int and B) does *not* contain
a (...) or {{...}} group will be converted to an int. Segments
that start with a leading "0" followed by other digits will not
be converted. | Below is the the instruction that describes the task:
### Input:
Parse ``path`` into segments.
Paths must start with a WORD (i.e., a top level Django setting
name). Path segments are separated by dots. Compound path
segments (i.e., a name with a dot in it) can be grouped inside
parentheses.
Examples::
>>> settings = Settings()
>>> settings._parse_path('WORD')
['WORD']
>>> settings._parse_path('WORD.x')
['WORD', 'x']
>>> settings._parse_path('WORD.(x)')
['WORD', 'x']
>>> settings._parse_path('WORD.(x.y)')
['WORD', 'x.y']
>>> settings._parse_path('WORD.(x.y).z')
['WORD', 'x.y', 'z']
>>> settings._parse_path('WORD.0.z')
['WORD', 0, 'z']
>>> settings._parse_path('WORD.(0).z')
['WORD', '0', 'z']
>>> settings._parse_path('WORD.(0)X.z')
['WORD', '0X', 'z']
An example of where compound names are actually useful is in
logger settings::
LOGGING.loggers.(package.module).handlers = ["console"]
LOGGING.loggers.(package.module).level = "DEBUG"
Paths may also contain interpolation groups. Dotted names in
these groups will not be split (so there's no need to group them
inside parentheses)::
>>> settings = Settings()
>>> settings._parse_path('WORD.{{x}}')
['WORD', '{{x}}']
>>> settings._parse_path('WORD.{{x.y}}')
['WORD', '{{x.y}}']
>>> settings._parse_path('WORD.{{x.y.z}}XYZ')
['WORD', '{{x.y.z}}XYZ']
Interpolation groups *can* be wrapped in parentheses, but doing
so is redundant::
>>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
['WORD', '{{x.y.z}}XYZ']
Any segment that A) looks like an int and B) does *not* contain
a (...) or {{...}} group will be converted to an int. Segments
that start with a leading "0" followed by other digits will not
be converted.
### Response:
def _parse_path(self, path):
"""Parse ``path`` into segments.
Paths must start with a WORD (i.e., a top level Django setting
name). Path segments are separated by dots. Compound path
segments (i.e., a name with a dot in it) can be grouped inside
parentheses.
Examples::
>>> settings = Settings()
>>> settings._parse_path('WORD')
['WORD']
>>> settings._parse_path('WORD.x')
['WORD', 'x']
>>> settings._parse_path('WORD.(x)')
['WORD', 'x']
>>> settings._parse_path('WORD.(x.y)')
['WORD', 'x.y']
>>> settings._parse_path('WORD.(x.y).z')
['WORD', 'x.y', 'z']
>>> settings._parse_path('WORD.0.z')
['WORD', 0, 'z']
>>> settings._parse_path('WORD.(0).z')
['WORD', '0', 'z']
>>> settings._parse_path('WORD.(0)X.z')
['WORD', '0X', 'z']
An example of where compound names are actually useful is in
logger settings::
LOGGING.loggers.(package.module).handlers = ["console"]
LOGGING.loggers.(package.module).level = "DEBUG"
Paths may also contain interpolation groups. Dotted names in
these groups will not be split (so there's no need to group them
inside parentheses)::
>>> settings = Settings()
>>> settings._parse_path('WORD.{{x}}')
['WORD', '{{x}}']
>>> settings._parse_path('WORD.{{x.y}}')
['WORD', '{{x.y}}']
>>> settings._parse_path('WORD.{{x.y.z}}XYZ')
['WORD', '{{x.y.z}}XYZ']
Interpolation groups *can* be wrapped in parentheses, but doing
so is redundant::
>>> settings._parse_path('WORD.({{x.y.z}}XYZ)')
['WORD', '{{x.y.z}}XYZ']
Any segment that A) looks like an int and B) does *not* contain
a (...) or {{...}} group will be converted to an int. Segments
that start with a leading "0" followed by other digits will not
be converted.
"""
if not path:
raise ValueError('path cannot be empty')
segments = []
path_iter = zip(iter(path), chain(path[1:], (None,)))
if six.PY2:
# zip() returns a list on Python 2
path_iter = iter(path_iter)
convert_name = self._convert_name
current_segment = []
current_segment_contains_group = False
def append_segment():
segment = ''.join(current_segment)
if not current_segment_contains_group:
segment = convert_name(segment)
segments.append(segment)
del current_segment[:]
for c, d in path_iter:
if c == '.':
append_segment()
current_segment_contains_group = False
elif c == '(':
nested = 0
for c, d in path_iter:
current_segment.append(c)
if c == '(':
nested += 1
elif c == ')':
if nested:
nested -= 1
else:
current_segment.pop() # Remove the closing paren
current_segment_contains_group = True
break
else:
raise ValueError('Unclosed (...) in %s' % path)
elif c == '{' and d == '{':
current_segment_contains_group = True
current_segment.append(c)
for c, d in path_iter:
current_segment.append(c)
if c == '}' and d == '}':
current_segment_contains_group = True
break
else:
raise ValueError('Unclosed {{...}} in %s' % path)
else:
current_segment.append(c)
if current_segment:
append_segment()
return segments |
def is_colliding(self, other):
"""Check to see if two circles are colliding."""
if isinstance(other, BoundingCircle):
#Calculate the distance between two circles.
distance = Vector2.distance(self.coords, other.coords)
#Check to see if the sum of thier radi are greater than or equal to the distance.
radi_sum = self.radius + other.radius
if distance <= radi_sum:
#There has been a collision
## print "Distance: ", distance, "\nRadi Sum: ", radi_sum
## print "Self Coords: ", self.coords, "\nOther Coords: ", other.coords
return True
#No collision.
return False | Check to see if two circles are colliding. | Below is the the instruction that describes the task:
### Input:
Check to see if two circles are colliding.
### Response:
def is_colliding(self, other):
"""Check to see if two circles are colliding."""
if isinstance(other, BoundingCircle):
#Calculate the distance between two circles.
distance = Vector2.distance(self.coords, other.coords)
#Check to see if the sum of thier radi are greater than or equal to the distance.
radi_sum = self.radius + other.radius
if distance <= radi_sum:
#There has been a collision
## print "Distance: ", distance, "\nRadi Sum: ", radi_sum
## print "Self Coords: ", self.coords, "\nOther Coords: ", other.coords
return True
#No collision.
return False |
def apply_tag_sets(tag_sets, selection):
"""All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference.
"""
for tag_set in tag_sets:
with_tag_set = apply_single_tag_set(tag_set, selection)
if with_tag_set:
return with_tag_set
return selection.with_server_descriptions([]) | All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference. | Below is the the instruction that describes the task:
### Input:
All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference.
### Response:
def apply_tag_sets(tag_sets, selection):
"""All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference.
"""
for tag_set in tag_sets:
with_tag_set = apply_single_tag_set(tag_set, selection)
if with_tag_set:
return with_tag_set
return selection.with_server_descriptions([]) |
def remove_from_list(self, key: str, value, count: int = 0,
pipeline: bool = False):
"""Remove specified value(s) from the list stored at key.
Args:
key (str): Key where the list is stored.
value: value to remove
count (int): Number of entries to remove, default 0 == all
pipeline(bool): If True, start a transaction block. Default False.
"""
if pipeline:
if redis.__version__ == '2.10.6':
self._pipeline.lrem(name=key, value=value, num=count)
else:
self._pipeline.lrem(key, count, value)
else:
if self._db.exists(key):
if redis.__version__ == '2.10.6':
self._db.lrem(name=key, value=value, num=count)
else:
self._db.lrem(key, count, value) | Remove specified value(s) from the list stored at key.
Args:
key (str): Key where the list is stored.
value: value to remove
count (int): Number of entries to remove, default 0 == all
pipeline(bool): If True, start a transaction block. Default False. | Below is the the instruction that describes the task:
### Input:
Remove specified value(s) from the list stored at key.
Args:
key (str): Key where the list is stored.
value: value to remove
count (int): Number of entries to remove, default 0 == all
pipeline(bool): If True, start a transaction block. Default False.
### Response:
def remove_from_list(self, key: str, value, count: int = 0,
pipeline: bool = False):
"""Remove specified value(s) from the list stored at key.
Args:
key (str): Key where the list is stored.
value: value to remove
count (int): Number of entries to remove, default 0 == all
pipeline(bool): If True, start a transaction block. Default False.
"""
if pipeline:
if redis.__version__ == '2.10.6':
self._pipeline.lrem(name=key, value=value, num=count)
else:
self._pipeline.lrem(key, count, value)
else:
if self._db.exists(key):
if redis.__version__ == '2.10.6':
self._db.lrem(name=key, value=value, num=count)
else:
self._db.lrem(key, count, value) |
def import_oauth2_credentials(filename=STORAGE_FILENAME):
"""Import OAuth 2.0 session credentials from storage file.
Parameters
filename (str)
Name of storage file.
Returns
credentials (dict)
All your app credentials and information
imported from the configuration file.
"""
with open(filename, 'r') as storage_file:
storage = safe_load(storage_file)
# depending on OAuth 2.0 grant_type, these values may not exist
client_secret = storage.get('client_secret')
refresh_token = storage.get('refresh_token')
credentials = {
'access_token': storage['access_token'],
'client_id': storage['client_id'],
'client_secret': client_secret,
'expires_in_seconds': storage['expires_in_seconds'],
'grant_type': storage['grant_type'],
'refresh_token': refresh_token,
'scopes': storage['scopes'],
}
return credentials | Import OAuth 2.0 session credentials from storage file.
Parameters
filename (str)
Name of storage file.
Returns
credentials (dict)
All your app credentials and information
imported from the configuration file. | Below is the the instruction that describes the task:
### Input:
Import OAuth 2.0 session credentials from storage file.
Parameters
filename (str)
Name of storage file.
Returns
credentials (dict)
All your app credentials and information
imported from the configuration file.
### Response:
def import_oauth2_credentials(filename=STORAGE_FILENAME):
"""Import OAuth 2.0 session credentials from storage file.
Parameters
filename (str)
Name of storage file.
Returns
credentials (dict)
All your app credentials and information
imported from the configuration file.
"""
with open(filename, 'r') as storage_file:
storage = safe_load(storage_file)
# depending on OAuth 2.0 grant_type, these values may not exist
client_secret = storage.get('client_secret')
refresh_token = storage.get('refresh_token')
credentials = {
'access_token': storage['access_token'],
'client_id': storage['client_id'],
'client_secret': client_secret,
'expires_in_seconds': storage['expires_in_seconds'],
'grant_type': storage['grant_type'],
'refresh_token': refresh_token,
'scopes': storage['scopes'],
}
return credentials |
def keypress(self, data):
"""
Press key. NOTE: keyrelease should be called
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
try:
window = self._get_front_most_window()
except (IndexError,):
window = self._get_any_window()
key_press_action = KeyPressAction(window, data)
return 1 | Press key. NOTE: keyrelease should be called
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer | Below is the the instruction that describes the task:
### Input:
Press key. NOTE: keyrelease should be called
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
### Response:
def keypress(self, data):
"""
Press key. NOTE: keyrelease should be called
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
"""
try:
window = self._get_front_most_window()
except (IndexError,):
window = self._get_any_window()
key_press_action = KeyPressAction(window, data)
return 1 |
def get_port_channel_detail_input_last_aggregator_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
input = ET.SubElement(get_port_channel_detail, "input")
last_aggregator_id = ET.SubElement(input, "last-aggregator-id")
last_aggregator_id.text = kwargs.pop('last_aggregator_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_channel_detail_input_last_aggregator_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
input = ET.SubElement(get_port_channel_detail, "input")
last_aggregator_id = ET.SubElement(input, "last-aggregator-id")
last_aggregator_id.text = kwargs.pop('last_aggregator_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def DbGetAttributeAliasList(self, argin):
""" Get attribute alias list for a specified filter
:param argin: attribute alias filter string (eg: att*)
:type: tango.DevString
:return: attribute aliases
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetAttributeAliasList()")
if not argin:
argin = "%"
else:
argin = replace_wildcard(argin)
return self.db.get_attribute_alias_list(argin) | Get attribute alias list for a specified filter
:param argin: attribute alias filter string (eg: att*)
:type: tango.DevString
:return: attribute aliases
:rtype: tango.DevVarStringArray | Below is the the instruction that describes the task:
### Input:
Get attribute alias list for a specified filter
:param argin: attribute alias filter string (eg: att*)
:type: tango.DevString
:return: attribute aliases
:rtype: tango.DevVarStringArray
### Response:
def DbGetAttributeAliasList(self, argin):
""" Get attribute alias list for a specified filter
:param argin: attribute alias filter string (eg: att*)
:type: tango.DevString
:return: attribute aliases
:rtype: tango.DevVarStringArray """
self._log.debug("In DbGetAttributeAliasList()")
if not argin:
argin = "%"
else:
argin = replace_wildcard(argin)
return self.db.get_attribute_alias_list(argin) |
def update_webhook(self, webhook_url, webhook_id, events=None):
"""Register webhook (if it doesn't exit)."""
hooks = self._request(MINUT_WEBHOOKS_URL, request_type='GET')['hooks']
try:
self._webhook = next(
hook for hook in hooks if hook['url'] == webhook_url)
_LOGGER.debug("Webhook: %s", self._webhook)
except StopIteration: # Not found
if events is None:
events = [e for v in EVENTS.values() for e in v if e]
self._webhook = self._register_webhook(webhook_url, events)
_LOGGER.debug("Registered hook: %s", self._webhook)
return self._webhook | Register webhook (if it doesn't exit). | Below is the the instruction that describes the task:
### Input:
Register webhook (if it doesn't exit).
### Response:
def update_webhook(self, webhook_url, webhook_id, events=None):
"""Register webhook (if it doesn't exit)."""
hooks = self._request(MINUT_WEBHOOKS_URL, request_type='GET')['hooks']
try:
self._webhook = next(
hook for hook in hooks if hook['url'] == webhook_url)
_LOGGER.debug("Webhook: %s", self._webhook)
except StopIteration: # Not found
if events is None:
events = [e for v in EVENTS.values() for e in v if e]
self._webhook = self._register_webhook(webhook_url, events)
_LOGGER.debug("Registered hook: %s", self._webhook)
return self._webhook |
def execute_deploy_from_linked_clone(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
"""
Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return:
"""
self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
deploy_result = self.deployer.deploy_from_linked_clone(si, logger, deployment_params, vcenter_data_model,
reservation_id, cancellation_context)
return deploy_result | Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return: | Below is the the instruction that describes the task:
### Input:
Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return:
### Response:
def execute_deploy_from_linked_clone(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
"""
Calls the deployer to deploy vm from snapshot
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromLinkedClone
:param vcenter_data_model:
:return:
"""
self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
deploy_result = self.deployer.deploy_from_linked_clone(si, logger, deployment_params, vcenter_data_model,
reservation_id, cancellation_context)
return deploy_result |
def train(self, input_mode, input_config, role, job_name, output_config, # noqa: C901
resource_config, vpc_config, hyperparameters, stop_condition, tags, metric_definitions,
enable_network_isolation=False, image=None, algorithm_arn=None,
encrypt_inter_container_traffic=False):
"""Create an Amazon SageMaker training job.
Args:
input_mode (str): The input mode that the algorithm supports. Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the S3 location to
a directory in the Docker container.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
input_config (list): A list of Channel objects. Each channel is a named input source. Please refer to
the format details described:
https://botocore.readthedocs.io/en/latest/reference/services/sagemaker.html#SageMaker.Client.create_training_job
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
job_name (str): Name of the training job being created.
output_config (dict): The S3 URI where you want to store the training results and optional KMS key ID.
resource_config (dict): Contains values for ResourceConfig:
* instance_count (int): Number of EC2 instances to use for training.
The key in resource_config is 'InstanceCount'.
* instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
The key in resource_config is 'InstanceType'.
vpc_config (dict): Contains values for VpcConfig:
* subnets (list[str]): List of subnet ids.
The key in vpc_config is 'Subnets'.
* security_group_ids (list[str]): List of security group ids.
The key in vpc_config is 'SecurityGroupIds'.
hyperparameters (dict): Hyperparameters for model training. The hyperparameters are made accessible as
a dict[str, str] to the training code on SageMaker. For convenience, this accepts other types for
keys and values, but ``str()`` will be called to convert them before training.
stop_condition (dict): Defines when training shall finish. Contains entries that can be understood by the
service like ``MaxRuntimeInSeconds``.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
metric_definitions (list[dict]): A list of dictionaries that defines the metric(s) used to evaluate the
training jobs. Each dictionary contains two keys: 'Name' for the name of the metric, and 'Regex' for
the regular expression used to extract the metric from the logs.
enable_network_isolation (bool): Whether to request for the training job to run with
network isolation or not.
image (str): Docker image containing training code.
algorithm_arn (str): Algorithm Arn from Marketplace.
encrypt_inter_container_traffic (bool): Specifies whether traffic between training containers is
encrypted for the training job (default: ``False``).
Returns:
str: ARN of the training job, if it is created.
"""
train_request = {
'AlgorithmSpecification': {
'TrainingInputMode': input_mode
},
'OutputDataConfig': output_config,
'TrainingJobName': job_name,
'StoppingCondition': stop_condition,
'ResourceConfig': resource_config,
'RoleArn': role,
}
if image and algorithm_arn:
raise ValueError('image and algorithm_arn are mutually exclusive.'
'Both were provided: image: %s algorithm_arn: %s' % (image, algorithm_arn))
if image is None and algorithm_arn is None:
raise ValueError('either image or algorithm_arn is required. None was provided.')
if image is not None:
train_request['AlgorithmSpecification']['TrainingImage'] = image
if algorithm_arn is not None:
train_request['AlgorithmSpecification']['AlgorithmName'] = algorithm_arn
if input_config is not None:
train_request['InputDataConfig'] = input_config
if metric_definitions is not None:
train_request['AlgorithmSpecification']['MetricDefinitions'] = metric_definitions
if hyperparameters and len(hyperparameters) > 0:
train_request['HyperParameters'] = hyperparameters
if tags is not None:
train_request['Tags'] = tags
if vpc_config is not None:
train_request['VpcConfig'] = vpc_config
if enable_network_isolation:
train_request['EnableNetworkIsolation'] = enable_network_isolation
if encrypt_inter_container_traffic:
train_request['EnableInterContainerTrafficEncryption'] = \
encrypt_inter_container_traffic
LOGGER.info('Creating training-job with name: {}'.format(job_name))
LOGGER.debug('train request: {}'.format(json.dumps(train_request, indent=4)))
self.sagemaker_client.create_training_job(**train_request) | Create an Amazon SageMaker training job.
Args:
input_mode (str): The input mode that the algorithm supports. Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the S3 location to
a directory in the Docker container.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
input_config (list): A list of Channel objects. Each channel is a named input source. Please refer to
the format details described:
https://botocore.readthedocs.io/en/latest/reference/services/sagemaker.html#SageMaker.Client.create_training_job
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
job_name (str): Name of the training job being created.
output_config (dict): The S3 URI where you want to store the training results and optional KMS key ID.
resource_config (dict): Contains values for ResourceConfig:
* instance_count (int): Number of EC2 instances to use for training.
The key in resource_config is 'InstanceCount'.
* instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
The key in resource_config is 'InstanceType'.
vpc_config (dict): Contains values for VpcConfig:
* subnets (list[str]): List of subnet ids.
The key in vpc_config is 'Subnets'.
* security_group_ids (list[str]): List of security group ids.
The key in vpc_config is 'SecurityGroupIds'.
hyperparameters (dict): Hyperparameters for model training. The hyperparameters are made accessible as
a dict[str, str] to the training code on SageMaker. For convenience, this accepts other types for
keys and values, but ``str()`` will be called to convert them before training.
stop_condition (dict): Defines when training shall finish. Contains entries that can be understood by the
service like ``MaxRuntimeInSeconds``.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
metric_definitions (list[dict]): A list of dictionaries that defines the metric(s) used to evaluate the
training jobs. Each dictionary contains two keys: 'Name' for the name of the metric, and 'Regex' for
the regular expression used to extract the metric from the logs.
enable_network_isolation (bool): Whether to request for the training job to run with
network isolation or not.
image (str): Docker image containing training code.
algorithm_arn (str): Algorithm Arn from Marketplace.
encrypt_inter_container_traffic (bool): Specifies whether traffic between training containers is
encrypted for the training job (default: ``False``).
Returns:
str: ARN of the training job, if it is created. | Below is the the instruction that describes the task:
### Input:
Create an Amazon SageMaker training job.
Args:
input_mode (str): The input mode that the algorithm supports. Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the S3 location to
a directory in the Docker container.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
input_config (list): A list of Channel objects. Each channel is a named input source. Please refer to
the format details described:
https://botocore.readthedocs.io/en/latest/reference/services/sagemaker.html#SageMaker.Client.create_training_job
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
job_name (str): Name of the training job being created.
output_config (dict): The S3 URI where you want to store the training results and optional KMS key ID.
resource_config (dict): Contains values for ResourceConfig:
* instance_count (int): Number of EC2 instances to use for training.
The key in resource_config is 'InstanceCount'.
* instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
The key in resource_config is 'InstanceType'.
vpc_config (dict): Contains values for VpcConfig:
* subnets (list[str]): List of subnet ids.
The key in vpc_config is 'Subnets'.
* security_group_ids (list[str]): List of security group ids.
The key in vpc_config is 'SecurityGroupIds'.
hyperparameters (dict): Hyperparameters for model training. The hyperparameters are made accessible as
a dict[str, str] to the training code on SageMaker. For convenience, this accepts other types for
keys and values, but ``str()`` will be called to convert them before training.
stop_condition (dict): Defines when training shall finish. Contains entries that can be understood by the
service like ``MaxRuntimeInSeconds``.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
metric_definitions (list[dict]): A list of dictionaries that defines the metric(s) used to evaluate the
training jobs. Each dictionary contains two keys: 'Name' for the name of the metric, and 'Regex' for
the regular expression used to extract the metric from the logs.
enable_network_isolation (bool): Whether to request for the training job to run with
network isolation or not.
image (str): Docker image containing training code.
algorithm_arn (str): Algorithm Arn from Marketplace.
encrypt_inter_container_traffic (bool): Specifies whether traffic between training containers is
encrypted for the training job (default: ``False``).
Returns:
str: ARN of the training job, if it is created.
### Response:
def train(self, input_mode, input_config, role, job_name, output_config, # noqa: C901
resource_config, vpc_config, hyperparameters, stop_condition, tags, metric_definitions,
enable_network_isolation=False, image=None, algorithm_arn=None,
encrypt_inter_container_traffic=False):
"""Create an Amazon SageMaker training job.
Args:
input_mode (str): The input mode that the algorithm supports. Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the S3 location to
a directory in the Docker container.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the container via a Unix-named pipe.
input_config (list): A list of Channel objects. Each channel is a named input source. Please refer to
the format details described:
https://botocore.readthedocs.io/en/latest/reference/services/sagemaker.html#SageMaker.Client.create_training_job
role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs
that create Amazon SageMaker endpoints use this role to access training data and model artifacts.
You must grant sufficient permissions to this role.
job_name (str): Name of the training job being created.
output_config (dict): The S3 URI where you want to store the training results and optional KMS key ID.
resource_config (dict): Contains values for ResourceConfig:
* instance_count (int): Number of EC2 instances to use for training.
The key in resource_config is 'InstanceCount'.
* instance_type (str): Type of EC2 instance to use for training, for example, 'ml.c4.xlarge'.
The key in resource_config is 'InstanceType'.
vpc_config (dict): Contains values for VpcConfig:
* subnets (list[str]): List of subnet ids.
The key in vpc_config is 'Subnets'.
* security_group_ids (list[str]): List of security group ids.
The key in vpc_config is 'SecurityGroupIds'.
hyperparameters (dict): Hyperparameters for model training. The hyperparameters are made accessible as
a dict[str, str] to the training code on SageMaker. For convenience, this accepts other types for
keys and values, but ``str()`` will be called to convert them before training.
stop_condition (dict): Defines when training shall finish. Contains entries that can be understood by the
service like ``MaxRuntimeInSeconds``.
tags (list[dict]): List of tags for labeling a training job. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
metric_definitions (list[dict]): A list of dictionaries that defines the metric(s) used to evaluate the
training jobs. Each dictionary contains two keys: 'Name' for the name of the metric, and 'Regex' for
the regular expression used to extract the metric from the logs.
enable_network_isolation (bool): Whether to request for the training job to run with
network isolation or not.
image (str): Docker image containing training code.
algorithm_arn (str): Algorithm Arn from Marketplace.
encrypt_inter_container_traffic (bool): Specifies whether traffic between training containers is
encrypted for the training job (default: ``False``).
Returns:
str: ARN of the training job, if it is created.
"""
# Mandatory fields of the CreateTrainingJob request; optional fields are
# attached below only when the caller actually supplied them, so the API
# request omits them entirely otherwise.
train_request = {
'AlgorithmSpecification': {
'TrainingInputMode': input_mode
},
'OutputDataConfig': output_config,
'TrainingJobName': job_name,
'StoppingCondition': stop_condition,
'ResourceConfig': resource_config,
'RoleArn': role,
}
# Exactly one of `image` (a Docker image) or `algorithm_arn` (a Marketplace
# algorithm) must be given to select the training code.
if image and algorithm_arn:
raise ValueError('image and algorithm_arn are mutually exclusive.'
'Both were provided: image: %s algorithm_arn: %s' % (image, algorithm_arn))
if image is None and algorithm_arn is None:
raise ValueError('either image or algorithm_arn is required. None was provided.')
if image is not None:
train_request['AlgorithmSpecification']['TrainingImage'] = image
if algorithm_arn is not None:
train_request['AlgorithmSpecification']['AlgorithmName'] = algorithm_arn
if input_config is not None:
train_request['InputDataConfig'] = input_config
if metric_definitions is not None:
train_request['AlgorithmSpecification']['MetricDefinitions'] = metric_definitions
if hyperparameters and len(hyperparameters) > 0:
train_request['HyperParameters'] = hyperparameters
if tags is not None:
train_request['Tags'] = tags
if vpc_config is not None:
train_request['VpcConfig'] = vpc_config
# Boolean flags are only sent when truthy.
if enable_network_isolation:
train_request['EnableNetworkIsolation'] = enable_network_isolation
if encrypt_inter_container_traffic:
train_request['EnableInterContainerTrafficEncryption'] = \
encrypt_inter_container_traffic
LOGGER.info('Creating training-job with name: {}'.format(job_name))
LOGGER.debug('train request: {}'.format(json.dumps(train_request, indent=4)))
# NOTE(review): the docstring says the job ARN is returned, but the API
# response is discarded here and the method returns None — confirm intended.
self.sagemaker_client.create_training_job(**train_request) |
def sample(N, D):
"""Generate (N x D) numpy array of Sobol sequence samples"""
# NOTE(review): relies on a module-level `directions` table and on
# Python 2's `long` builtin; under Python 3 `long`/`long(0)` raise
# NameError — confirm the target runtime.
# Direction numbers are held as `scale`-bit integers; each sample value is
# scaled back down by 2**-scale at the end.
scale = 31
result = np.zeros([N, D])
if D > len(directions) + 1:
raise ValueError("Error in Sobol sequence: not enough dimensions")
# L = number of output bits needed to index N points (ceil(log2 N)).
L = int(math.ceil(math.log(N) / math.log(2)))
if L > scale:
raise ValueError("Error in Sobol sequence: not enough bits")
for i in range(D):
# V[1..L] are the per-dimension direction numbers, left-aligned in scale bits.
V = np.zeros(L + 1, dtype=long)
if i == 0:
for j in range(1, L + 1):
V[j] = 1 << (scale - j) # all m's = 1
else:
# m = [a, m_1, ..., m_s]: primitive-polynomial coefficients `a`
# followed by the initial direction integers for this dimension.
m = np.array(directions[i - 1], dtype=int)
a = m[0]
s = len(m) - 1
# The following code discards the first row of the ``m`` array
# Because it has floating point errors, e.g. values of 2.24e-314
if L <= s:
for j in range(1, L + 1):
V[j] = m[j] << (scale - j)
else:
for j in range(1, s + 1):
V[j] = m[j] << (scale - j)
# Extend the direction numbers past the supplied ones via the
# polynomial recurrence.
for j in range(s + 1, L + 1):
V[j] = V[j - s] ^ (V[j - s] >> s)
for k in range(1, s):
V[j] ^= ((a >> (s - 1 - k)) & 1) * V[j - k]
X = long(0)
# Gray-code order: each step XORs the direction number indexed by the
# lowest zero bit of (j-1) — presumably the standard Antonov–Saleev
# Sobol update; confirm against the reference implementation.
for j in range(1, N):
X ^= V[index_of_least_significant_zero_bit(j - 1)]
result[j][i] = float(X / math.pow(2, scale))
return result | Generate (N x D) numpy array of Sobol sequence samples | Below is the instruction that describes the task:
### Input:
Generate (N x D) numpy array of Sobol sequence samples
### Response:
def sample(N, D):
"""Generate (N x D) numpy array of Sobol sequence samples"""
scale = 31
result = np.zeros([N, D])
if D > len(directions) + 1:
raise ValueError("Error in Sobol sequence: not enough dimensions")
L = int(math.ceil(math.log(N) / math.log(2)))
if L > scale:
raise ValueError("Error in Sobol sequence: not enough bits")
for i in range(D):
V = np.zeros(L + 1, dtype=long)
if i == 0:
for j in range(1, L + 1):
V[j] = 1 << (scale - j) # all m's = 1
else:
m = np.array(directions[i - 1], dtype=int)
a = m[0]
s = len(m) - 1
# The following code discards the first row of the ``m`` array
# Because it has floating point errors, e.g. values of 2.24e-314
if L <= s:
for j in range(1, L + 1):
V[j] = m[j] << (scale - j)
else:
for j in range(1, s + 1):
V[j] = m[j] << (scale - j)
for j in range(s + 1, L + 1):
V[j] = V[j - s] ^ (V[j - s] >> s)
for k in range(1, s):
V[j] ^= ((a >> (s - 1 - k)) & 1) * V[j - k]
X = long(0)
for j in range(1, N):
X ^= V[index_of_least_significant_zero_bit(j - 1)]
result[j][i] = float(X / math.pow(2, scale))
return result |
def setViewModel(self, model):
"""Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
"""
# Anything that is not a DataFrameModel is silently ignored.
if isinstance(model, DataFrameModel):
# Reset the editing UI (disable edits, untoggle the edit button)
# before swapping models.
self.enableEditing(False)
self.uncheckButton()
# Hold a reference to the view's current selection model across
# setModel(), then drop it explicitly afterwards.
# NOTE(review): presumably this is to release the old QItemSelectionModel,
# which Qt does not delete when a new model is installed — confirm.
selectionModel = self.tableView.selectionModel()
self.tableView.setModel(model)
model.dtypeChanged.connect(self.updateDelegate)
model.dataChanged.connect(self.updateDelegates)
del selectionModel | Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View. | Below is the instruction that describes the task:
### Input:
Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
### Response:
def setViewModel(self, model):
"""Sets the model for the enclosed TableView in this widget.
Args:
model (DataFrameModel): The model to be displayed by
the Table View.
"""
if isinstance(model, DataFrameModel):
self.enableEditing(False)
self.uncheckButton()
selectionModel = self.tableView.selectionModel()
self.tableView.setModel(model)
model.dtypeChanged.connect(self.updateDelegate)
model.dataChanged.connect(self.updateDelegates)
del selectionModel |
def pre_populate_buyer_email(self, pre_populate_buyer_email):
"""
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
"""
# An explicit None is rejected; to leave the field unset, simply never
# call this setter.
if pre_populate_buyer_email is None:
raise ValueError("Invalid value for `pre_populate_buyer_email`, must not be `None`")
# 254 chars — presumably the RFC maximum length of an email address,
# mirrored from the API's validation rules; confirm against the API docs.
if len(pre_populate_buyer_email) > 254:
raise ValueError("Invalid value for `pre_populate_buyer_email`, length must be less than `254`")
self._pre_populate_buyer_email = pre_populate_buyer_email | Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
### Response:
def pre_populate_buyer_email(self, pre_populate_buyer_email):
"""
Sets the pre_populate_buyer_email of this CreateCheckoutRequest.
If provided, the buyer's email is pre-populated on the checkout page as an editable text field. Default: none; only exists if explicitly set.
:param pre_populate_buyer_email: The pre_populate_buyer_email of this CreateCheckoutRequest.
:type: str
"""
if pre_populate_buyer_email is None:
raise ValueError("Invalid value for `pre_populate_buyer_email`, must not be `None`")
if len(pre_populate_buyer_email) > 254:
raise ValueError("Invalid value for `pre_populate_buyer_email`, length must be less than `254`")
self._pre_populate_buyer_email = pre_populate_buyer_email |
def evergreen(self, included_channel_ids=None, excluded_channel_ids=None, **kwargs):
"""
Search containing any evergreen piece of Content.
:included_channel_ids list: Contains ids for channel ids relevant to the query.
:excluded_channel_ids list: Contains ids for channel ids excluded from the query.
"""
# Start from the base search (kwargs are forwarded unchanged), then
# narrow it to evergreen content only.
eqs = self.search(**kwargs)
eqs = eqs.filter(Evergreen())
# The two channel filters are independent and each optional; an empty
# list is treated the same as None (falsy) and applies no filter.
if included_channel_ids:
eqs = eqs.filter(VideohubChannel(included_ids=included_channel_ids))
if excluded_channel_ids:
eqs = eqs.filter(VideohubChannel(excluded_ids=excluded_channel_ids))
return eqs | Search containing any evergreen piece of Content.
:included_channel_ids list: Contains ids for channel ids relevant to the query.
:excluded_channel_ids list: Contains ids for channel ids excluded from the query. | Below is the instruction that describes the task:
### Input:
Search containing any evergreen piece of Content.
:included_channel_ids list: Contains ids for channel ids relevant to the query.
:excluded_channel_ids list: Contains ids for channel ids excluded from the query.
### Response:
def evergreen(self, included_channel_ids=None, excluded_channel_ids=None, **kwargs):
"""
Search containing any evergreen piece of Content.
:included_channel_ids list: Contains ids for channel ids relevant to the query.
:excluded_channel_ids list: Contains ids for channel ids excluded from the query.
"""
eqs = self.search(**kwargs)
eqs = eqs.filter(Evergreen())
if included_channel_ids:
eqs = eqs.filter(VideohubChannel(included_ids=included_channel_ids))
if excluded_channel_ids:
eqs = eqs.filter(VideohubChannel(excluded_ids=excluded_channel_ids))
return eqs |
def call(self, args, **kwargs):
"""
A thin wrapper around ``subprocess.Popen``. Takes the same
options as ``subprocess.Popen``, with the exception of the
``cwd``, and ``env`` parameters, which come from the
``Environment`` instance. Note that if the sole positional
argument is a string, it will be converted into a sequence
using the ``shlex.split()`` function.
"""
# Convert string args into a sequence
if isinstance(args, six.string_types):
args = shlex.split(args)
# Substitute cwd and env
# These always come from the Environment instance and silently override
# any caller-supplied 'cwd'/'env' keyword arguments.
kwargs['cwd'] = self._cwd
kwargs['env'] = self._data
# Set a default for close_fds
# (caller-supplied value wins; only the default differs from Popen's).
kwargs.setdefault('close_fds', True)
return subprocess.Popen(args, **kwargs) | A thin wrapper around ``subprocess.Popen``. Takes the same
options as ``subprocess.Popen``, with the exception of the
``cwd``, and ``env`` parameters, which come from the
``Environment`` instance. Note that if the sole positional
argument is a string, it will be converted into a sequence
using the ``shlex.split()`` function. | Below is the instruction that describes the task:
### Input:
A thin wrapper around ``subprocess.Popen``. Takes the same
options as ``subprocess.Popen``, with the exception of the
``cwd``, and ``env`` parameters, which come from the
``Environment`` instance. Note that if the sole positional
argument is a string, it will be converted into a sequence
using the ``shlex.split()`` function.
### Response:
def call(self, args, **kwargs):
"""
A thin wrapper around ``subprocess.Popen``. Takes the same
options as ``subprocess.Popen``, with the exception of the
``cwd``, and ``env`` parameters, which come from the
``Environment`` instance. Note that if the sole positional
argument is a string, it will be converted into a sequence
using the ``shlex.split()`` function.
"""
# Convert string args into a sequence
if isinstance(args, six.string_types):
args = shlex.split(args)
# Substitute cwd and env
kwargs['cwd'] = self._cwd
kwargs['env'] = self._data
# Set a default for close_fds
kwargs.setdefault('close_fds', True)
return subprocess.Popen(args, **kwargs) |
def get_source(self, name):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(name)
try:
source_bytes = self.get_data(path)
except OSError as exc:
# Re-raise as ImportError, explicitly chaining the original OSError
# as the cause so tracebacks show both.
e = _ImportError('source not available through get_data()',
name=name)
e.__cause__ = exc
raise e
return decode_source(source_bytes) | Concrete implementation of InspectLoader.get_source. | Below is the instruction that describes the task:
### Input:
Concrete implementation of InspectLoader.get_source.
### Response:
def get_source(self, name):
"""Concrete implementation of InspectLoader.get_source."""
path = self.get_filename(name)
try:
source_bytes = self.get_data(path)
except OSError as exc:
e = _ImportError('source not available through get_data()',
name=name)
e.__cause__ = exc
raise e
return decode_source(source_bytes) |
def check_types(parameters, parameter_types, strict_floats):
"""Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
# Only names declared in parameter_types are validated; extra entries in
# `parameters` without a declared type pass through unchecked.
for name, parameter_type in parameter_types.items():
# Every declared parameter must be present...
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
# ...and its value must match the declared type (with the int/float
# leniency controlled by strict_floats, delegated to _is_instance).
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type)) | Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool | Below is the instruction that describes the task:
### Input:
Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
### Response:
def check_types(parameters, parameter_types, strict_floats):
"""Checks that the given parameters have the correct types.
:param parameters: List of (name, value) pairs of the given parameters
:type parameters: dict[str, object]
:param parameter_types: Parameter type by name.
:type parameter_types: dict[str, type]
:param strict_floats: If False, treat integers as floats
:type strict_floats: bool
"""
for name, parameter_type in parameter_types.items():
if name not in parameters:
raise InvalidParamsError("Parameter '{}' is missing.".format(name))
if not _is_instance(parameters[name], parameter_type, strict_floats):
raise InvalidParamsError("Value '{}' for parameter '{}' is not of expected type {}."
.format(parameters[name], name, parameter_type)) |
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
# Clear any previously recorded usage on every limit before refreshing
# from the API, so stale values from an earlier run cannot linger.
for lim in self.limits.values():
lim._reset_usage()
self._update_limits_from_api()
# Mark this service as having up-to-date usage data.
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. | Below is the instruction that describes the task:
### Input:
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
### Response:
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
for lim in self.limits.values():
lim._reset_usage()
self._update_limits_from_api()
self._have_usage = True
logger.debug("Done checking usage.") |
def validateDocumentFinal(self, ctxt):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
# Unwrap the optional Python wrapper to the underlying C object (`_o`)
# expected by the libxml2 C binding; None maps to a NULL context.
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
return ret | Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity | Below is the instruction that describes the task:
### Input:
Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity
### Response:
def validateDocumentFinal(self, ctxt):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.xmlValidateDocumentFinal(ctxt__o, self._o)
return ret |
def verify_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Verify sufficient space is available on destination file system (return boolean)."""
# 'put' checks the remote filesystem; 'get' checks the local one.
if self.direction == "put":
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif self.direction == "get":
space_avail = self.local_space_available()
# NOTE(review): if self.direction is neither 'put' nor 'get',
# `space_avail` is never assigned and the comparison below raises
# NameError — confirm direction is validated upstream.
if space_avail > self.file_size:
return True
return False | Verify sufficient space is available on destination file system (return boolean). | Below is the instruction that describes the task:
### Input:
Verify sufficient space is available on destination file system (return boolean).
### Response:
def verify_space_available(self, search_pattern=r"(\d+) \w+ free"):
"""Verify sufficient space is available on destination file system (return boolean)."""
if self.direction == "put":
space_avail = self.remote_space_available(search_pattern=search_pattern)
elif self.direction == "get":
space_avail = self.local_space_available()
if space_avail > self.file_size:
return True
return False |
def tokenize(self, data):
"""
Tokenizes the given string. A token is a 4-tuple of the form:
(token_type, tag_name, tag_options, token_text)
token_type
One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA
tag_name
The name of the tag if token_type=TOKEN_TAG_*, otherwise None
tag_options
A dictionary of options specified for TOKEN_TAG_START, otherwise None
token_text
The original token text
"""
# Normalize all line endings to '\n' before scanning.
data = data.replace('\r\n', '\n').replace('\r', '\n')
pos = start = end = 0
ld = len(data)
tokens = []
# Scan for successive tag openers; everything between tags is plain data.
while pos < ld:
start = data.find(self.tag_opener, pos)
if start >= pos:
# Check to see if there was data between this start and the last end.
if start > pos:
tl = self._newline_tokenize(data[pos:start])
tokens.extend(tl)
pos = start
# Find the extent of this tag, if it's ever closed.
end, found_close = self._tag_extent(data, start)
if found_close:
tag = data[start:end]
valid, tag_name, closer, opts = self._parse_tag(tag)
# Make sure this is a well-formed, recognized tag, otherwise it's just data.
if valid and tag_name in self.recognized_tags:
if closer:
tokens.append((self.TOKEN_TAG_END, tag_name, None, tag))
else:
tokens.append((self.TOKEN_TAG_START, tag_name, opts, tag))
elif valid and self.drop_unrecognized and tag_name not in self.recognized_tags:
# If we found a valid (but unrecognized) tag and self.drop_unrecognized is True, just drop it.
pass
else:
tokens.extend(self._newline_tokenize(tag))
else:
# We didn't find a closing tag, tack it on as text.
tokens.extend(self._newline_tokenize(data[start:end]))
pos = end
else:
# No more tags left to parse.
break
# Any trailing data after the final tag is emitted as data tokens.
if pos < ld:
tl = self._newline_tokenize(data[pos:])
tokens.extend(tl)
return tokens | Tokenizes the given string. A token is a 4-tuple of the form:
(token_type, tag_name, tag_options, token_text)
token_type
One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA
tag_name
The name of the tag if token_type=TOKEN_TAG_*, otherwise None
tag_options
A dictionary of options specified for TOKEN_TAG_START, otherwise None
token_text
The original token text | Below is the instruction that describes the task:
### Input:
Tokenizes the given string. A token is a 4-tuple of the form:
(token_type, tag_name, tag_options, token_text)
token_type
One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA
tag_name
The name of the tag if token_type=TOKEN_TAG_*, otherwise None
tag_options
A dictionary of options specified for TOKEN_TAG_START, otherwise None
token_text
The original token text
### Response:
def tokenize(self, data):
"""
Tokenizes the given string. A token is a 4-tuple of the form:
(token_type, tag_name, tag_options, token_text)
token_type
One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA
tag_name
The name of the tag if token_type=TOKEN_TAG_*, otherwise None
tag_options
A dictionary of options specified for TOKEN_TAG_START, otherwise None
token_text
The original token text
"""
data = data.replace('\r\n', '\n').replace('\r', '\n')
pos = start = end = 0
ld = len(data)
tokens = []
while pos < ld:
start = data.find(self.tag_opener, pos)
if start >= pos:
# Check to see if there was data between this start and the last end.
if start > pos:
tl = self._newline_tokenize(data[pos:start])
tokens.extend(tl)
pos = start
# Find the extent of this tag, if it's ever closed.
end, found_close = self._tag_extent(data, start)
if found_close:
tag = data[start:end]
valid, tag_name, closer, opts = self._parse_tag(tag)
# Make sure this is a well-formed, recognized tag, otherwise it's just data.
if valid and tag_name in self.recognized_tags:
if closer:
tokens.append((self.TOKEN_TAG_END, tag_name, None, tag))
else:
tokens.append((self.TOKEN_TAG_START, tag_name, opts, tag))
elif valid and self.drop_unrecognized and tag_name not in self.recognized_tags:
# If we found a valid (but unrecognized) tag and self.drop_unrecognized is True, just drop it.
pass
else:
tokens.extend(self._newline_tokenize(tag))
else:
# We didn't find a closing tag, tack it on as text.
tokens.extend(self._newline_tokenize(data[start:end]))
pos = end
else:
# No more tags left to parse.
break
if pos < ld:
tl = self._newline_tokenize(data[pos:])
tokens.extend(tl)
return tokens |
def interleave_data(self, prop):
r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : string
The property name to be retrieved
Returns
-------
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible; however
when data are missing this can be tricky. Data can be missing in two
different ways: A set of pores is not assisgned to a geometry or the
network contains multiple geometries and data does not exist on all.
Float and boolean data is fine, but missing ints are converted to float
when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.tomask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False]
"""
# 'pore' or 'throat' from the property prefix, and the corresponding
# full count N on the network.
element = self._parse_element(prop.split('.')[0], single=True)
N = self.project.network._count(element)
# Fetch sources list depending on object type?
proj = self.project
if self._isa() in ['network', 'geometry']:
sources = list(proj.geometries().values())
elif self._isa() in ['phase', 'physics']:
sources = list(proj.find_physics(phase=self))
elif self._isa() in ['algorithm', 'base']:
sources = [self]
else:
raise Exception('Unrecognized object type, cannot find dependents')
# Attempt to fetch the requested array from each object
arrs = [item.get(prop, None) for item in sources]
locs = [self._get_indices(element, item.name) for item in sources]
sizes = [sp.size(a) for a in arrs]
if sp.all([item is None for item in arrs]): # prop not found anywhere
raise KeyError(prop)
# Check the general type of each array
# Classify each present array as numeric / boolean / other; mixed
# classes cannot be interleaved into one array.
atype = []
for a in arrs:
if a is not None:
t = a.dtype.name
if t.startswith('int') or t.startswith('float'):
atype.append('numeric')
elif t.startswith('bool'):
atype.append('boolean')
else:
atype.append('other')
if not all([item == atype[0] for item in atype]):
raise Exception('The array types are not compatible')
else:
# Placeholder value used wherever a source has no data.
dummy_val = {'numeric': sp.nan, 'boolean': False, 'other': None}
# Create an empty array of the right type and shape
# NOTE(review): temp_arr takes its dtype/shape from the LAST non-None
# source array in the loop below; mixed int/float numeric sources
# could therefore be silently cast — confirm acceptable.
for item in arrs:
if item is not None:
if len(item.shape) == 1:
temp_arr = sp.zeros((N, ), dtype=item.dtype)
else:
temp_arr = sp.zeros((N, item.shape[1]), dtype=item.dtype)
temp_arr.fill(dummy_val[atype[0]])
# Convert int arrays to float IF NaNs are expected
# (i.e. some source had no data, or the sources do not cover all N).
if temp_arr.dtype.name.startswith('int') and \
(sp.any([i is None for i in arrs]) or sp.sum(sizes) != N):
temp_arr = temp_arr.astype(float)
temp_arr.fill(sp.nan)
# Fill new array with values in the corresponding locations
for vals, inds in zip(arrs, locs):
if vals is not None:
temp_arr[inds] = vals
else:
temp_arr[inds] = dummy_val[atype[0]]
return temp_arr | r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : string
The property name to be retrieved
Returns
-------
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible; however
when data are missing this can be tricky. Data can be missing in two
different ways: A set of pores is not assisgned to a geometry or the
network contains multiple geometries and data does not exist on all.
Float and boolean data is fine, but missing ints are converted to float
when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.tomask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False] | Below is the instruction that describes the task:
### Input:
r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : string
The property name to be retrieved
Returns
-------
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible; however
when data are missing this can be tricky. Data can be missing in two
different ways: A set of pores is not assisgned to a geometry or the
network contains multiple geometries and data does not exist on all.
Float and boolean data is fine, but missing ints are converted to float
when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.tomask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False]
### Response:
def interleave_data(self, prop):
r"""
Retrieves requested property from associated objects, to produce a full
Np or Nt length array.
Parameters
----------
prop : string
The property name to be retrieved
Returns
-------
A full length (Np or Nt) array of requested property values.
Notes
-----
This makes an effort to maintain the data 'type' when possible; however
when data are missing this can be tricky. Data can be missing in two
different ways: A set of pores is not assisgned to a geometry or the
network contains multiple geometries and data does not exist on all.
Float and boolean data is fine, but missing ints are converted to float
when nans are inserted.
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[2, 2, 2])
>>> Ps = pn['pore.top']
>>> Ts = pn.find_neighbor_throats(pores=Ps)
>>> g1 = op.geometry.GenericGeometry(network=pn, pores=Ps, throats=Ts)
>>> Ts = ~pn.tomask(throats=Ts)
>>> g2 = op.geometry.GenericGeometry(network=pn, pores=~Ps, throats=Ts)
>>> g1['pore.value'] = 1
>>> print(g1['pore.value'])
[1 1 1 1]
>>> print(g2['pore.value']) # 'pore.value' is defined on g1, not g2
[nan nan nan nan]
>>> print(pn['pore.value'])
[nan 1. nan 1. nan 1. nan 1.]
>>> g2['pore.value'] = 20
>>> print(pn['pore.value'])
[20 1 20 1 20 1 20 1]
>>> pn['pore.label'] = False
>>> print(g1['pore.label']) # 'pore.label' is defined on pn, not g1
[False False False False]
"""
element = self._parse_element(prop.split('.')[0], single=True)
N = self.project.network._count(element)
# Fetch sources list depending on object type?
proj = self.project
if self._isa() in ['network', 'geometry']:
sources = list(proj.geometries().values())
elif self._isa() in ['phase', 'physics']:
sources = list(proj.find_physics(phase=self))
elif self._isa() in ['algorithm', 'base']:
sources = [self]
else:
raise Exception('Unrecognized object type, cannot find dependents')
# Attempt to fetch the requested array from each object
arrs = [item.get(prop, None) for item in sources]
locs = [self._get_indices(element, item.name) for item in sources]
sizes = [sp.size(a) for a in arrs]
if sp.all([item is None for item in arrs]): # prop not found anywhere
raise KeyError(prop)
# Check the general type of each array
atype = []
for a in arrs:
if a is not None:
t = a.dtype.name
if t.startswith('int') or t.startswith('float'):
atype.append('numeric')
elif t.startswith('bool'):
atype.append('boolean')
else:
atype.append('other')
if not all([item == atype[0] for item in atype]):
raise Exception('The array types are not compatible')
else:
dummy_val = {'numeric': sp.nan, 'boolean': False, 'other': None}
# Create an empty array of the right type and shape
for item in arrs:
if item is not None:
if len(item.shape) == 1:
temp_arr = sp.zeros((N, ), dtype=item.dtype)
else:
temp_arr = sp.zeros((N, item.shape[1]), dtype=item.dtype)
temp_arr.fill(dummy_val[atype[0]])
# Convert int arrays to float IF NaNs are expected
if temp_arr.dtype.name.startswith('int') and \
(sp.any([i is None for i in arrs]) or sp.sum(sizes) != N):
temp_arr = temp_arr.astype(float)
temp_arr.fill(sp.nan)
# Fill new array with values in the corresponding locations
for vals, inds in zip(arrs, locs):
if vals is not None:
temp_arr[inds] = vals
else:
temp_arr[inds] = dummy_val[atype[0]]
return temp_arr |
def _get_iface_info(iface):
'''
If `iface` is available, return interface info and no error, otherwise
return no info and log and return an error
'''
iface_info = interfaces()
if iface in iface_info.keys():
return iface_info, False
else:
error_msg = ('Interface "{0}" not in available interfaces: "{1}"'
''.format(iface, '", "'.join(iface_info.keys())))
log.error(error_msg)
return None, error_msg | If `iface` is available, return interface info and no error, otherwise
return no info and log and return an error | Below is the the instruction that describes the task:
### Input:
If `iface` is available, return interface info and no error, otherwise
return no info and log and return an error
### Response:
def _get_iface_info(iface):
'''
If `iface` is available, return interface info and no error, otherwise
return no info and log and return an error
'''
iface_info = interfaces()
if iface in iface_info.keys():
return iface_info, False
else:
error_msg = ('Interface "{0}" not in available interfaces: "{1}"'
''.format(iface, '", "'.join(iface_info.keys())))
log.error(error_msg)
return None, error_msg |
def lost_device(
self, number,
text='This iPhone has been lost. Please call me.',
newpasscode=""
):
""" Send a request to the device to trigger 'lost mode'.
The device will show the message in `text`, and if a number has
been passed, then the person holding the device can call
the number without entering the passcode.
"""
data = json.dumps({
'text': text,
'userText': True,
'ownerNbr': number,
'lostModeEnabled': True,
'trackingEnabled': True,
'device': self.content['id'],
'passcode': newpasscode
})
self.session.post(
self.lost_url,
params=self.params,
data=data
) | Send a request to the device to trigger 'lost mode'.
The device will show the message in `text`, and if a number has
been passed, then the person holding the device can call
the number without entering the passcode. | Below is the the instruction that describes the task:
### Input:
Send a request to the device to trigger 'lost mode'.
The device will show the message in `text`, and if a number has
been passed, then the person holding the device can call
the number without entering the passcode.
### Response:
def lost_device(
self, number,
text='This iPhone has been lost. Please call me.',
newpasscode=""
):
""" Send a request to the device to trigger 'lost mode'.
The device will show the message in `text`, and if a number has
been passed, then the person holding the device can call
the number without entering the passcode.
"""
data = json.dumps({
'text': text,
'userText': True,
'ownerNbr': number,
'lostModeEnabled': True,
'trackingEnabled': True,
'device': self.content['id'],
'passcode': newpasscode
})
self.session.post(
self.lost_url,
params=self.params,
data=data
) |
def get_field_names(self):
"""
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
"""
field_names = super(StationDataIO, self).get_field_names()
if set(field_names) == set(['meta', 'data']):
meta_fields = list(self.data[0]['meta'].keys())
if set(meta_fields) < set(self.getvalue('meta')):
meta_fields = self.getvalue('meta')
field_names = list(meta_fields) + ['data']
return field_names | ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names | Below is the the instruction that describes the task:
### Input:
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
### Response:
def get_field_names(self):
"""
ACIS web service returns "meta" and "data" for each station;
Use meta attributes as field names
"""
field_names = super(StationDataIO, self).get_field_names()
if set(field_names) == set(['meta', 'data']):
meta_fields = list(self.data[0]['meta'].keys())
if set(meta_fields) < set(self.getvalue('meta')):
meta_fields = self.getvalue('meta')
field_names = list(meta_fields) + ['data']
return field_names |
def resolve_parent_registry_name(self, registry_name, suffix):
"""
Subclasses should override to specify the default suffix, as the
invocation is done without a suffix.
"""
if not registry_name.endswith(suffix):
raise ValueError(
"child module registry name defined with invalid suffix "
"('%s' does not end with '%s')" % (registry_name, suffix))
return registry_name[:-len(suffix)] | Subclasses should override to specify the default suffix, as the
invocation is done without a suffix. | Below is the the instruction that describes the task:
### Input:
Subclasses should override to specify the default suffix, as the
invocation is done without a suffix.
### Response:
def resolve_parent_registry_name(self, registry_name, suffix):
"""
Subclasses should override to specify the default suffix, as the
invocation is done without a suffix.
"""
if not registry_name.endswith(suffix):
raise ValueError(
"child module registry name defined with invalid suffix "
"('%s' does not end with '%s')" % (registry_name, suffix))
return registry_name[:-len(suffix)] |
def upscale(image, ratio):
"""
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
"""
if not isinstance(image, np.ndarray):
raise ValueError('Expected ndarray')
if ratio < 1:
raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
width = int(math.floor(image.shape[1] * ratio))
height = int(math.floor(image.shape[0] * ratio))
channels = image.shape[2]
out = np.ndarray((height, width, channels), dtype=np.uint8)
for x, y in np.ndindex((width, height)):
out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]
return out | return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1) | Below is the the instruction that describes the task:
### Input:
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
### Response:
def upscale(image, ratio):
"""
return upscaled image array
Arguments:
image -- a (H,W,C) numpy.ndarray
ratio -- scaling factor (>1)
"""
if not isinstance(image, np.ndarray):
raise ValueError('Expected ndarray')
if ratio < 1:
raise ValueError('Ratio must be greater than 1 (ratio=%f)' % ratio)
width = int(math.floor(image.shape[1] * ratio))
height = int(math.floor(image.shape[0] * ratio))
channels = image.shape[2]
out = np.ndarray((height, width, channels), dtype=np.uint8)
for x, y in np.ndindex((width, height)):
out[y, x] = image[int(math.floor(y / ratio)), int(math.floor(x / ratio))]
return out |
def __crawl(self, crawl_candidate):
''' wrap the crawling functionality '''
def crawler_wrapper(parser, parsers_lst, crawl_candidate):
try:
crawler = Crawler(self.config, self.fetcher)
article = crawler.crawl(crawl_candidate)
except (UnicodeDecodeError, ValueError) as ex:
if parsers_lst:
parser = parsers_lst.pop(0) # remove it also!
return crawler_wrapper(parser, parsers_lst, crawl_candidate)
else:
raise ex
return article
# use the wrapper
parsers = list(self.config.available_parsers)
parsers.remove(self.config.parser_class)
return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate) | wrap the crawling functionality | Below is the the instruction that describes the task:
### Input:
wrap the crawling functionality
### Response:
def __crawl(self, crawl_candidate):
''' wrap the crawling functionality '''
def crawler_wrapper(parser, parsers_lst, crawl_candidate):
try:
crawler = Crawler(self.config, self.fetcher)
article = crawler.crawl(crawl_candidate)
except (UnicodeDecodeError, ValueError) as ex:
if parsers_lst:
parser = parsers_lst.pop(0) # remove it also!
return crawler_wrapper(parser, parsers_lst, crawl_candidate)
else:
raise ex
return article
# use the wrapper
parsers = list(self.config.available_parsers)
parsers.remove(self.config.parser_class)
return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate) |
def highlightBlock(self, block):
"""
Reimplements the :meth:`QSyntaxHighlighter.highlightBlock` method.
:param block: Text block.
:type block: QString
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(self.__class__.__name__,
self.highlightBlock.__name__,
self.__class__.__name__)) | Reimplements the :meth:`QSyntaxHighlighter.highlightBlock` method.
:param block: Text block.
:type block: QString | Below is the the instruction that describes the task:
### Input:
Reimplements the :meth:`QSyntaxHighlighter.highlightBlock` method.
:param block: Text block.
:type block: QString
### Response:
def highlightBlock(self, block):
"""
Reimplements the :meth:`QSyntaxHighlighter.highlightBlock` method.
:param block: Text block.
:type block: QString
"""
raise NotImplementedError("{0} | '{1}' must be implemented by '{2}' subclasses!".format(self.__class__.__name__,
self.highlightBlock.__name__,
self.__class__.__name__)) |
def _set_hostname_domain(self):
"""Extract hostname and domain"""
self._hostname, _, self._domain = str(self._fqdn).partition('.')
log.debug('Hostname: %s, Domain: %s' % (self._hostname, self._domain)) | Extract hostname and domain | Below is the the instruction that describes the task:
### Input:
Extract hostname and domain
### Response:
def _set_hostname_domain(self):
"""Extract hostname and domain"""
self._hostname, _, self._domain = str(self._fqdn).partition('.')
log.debug('Hostname: %s, Domain: %s' % (self._hostname, self._domain)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.