| code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars) |
|---|---|---|
def _find_files(root, includes, excludes, follow_symlinks):
"""List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob` that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html
"""
root = os.path.abspath(root)
file_set = formic.FileSet(
directory=root, include=includes,
exclude=excludes, symlinks=follow_symlinks,
)
for filename in file_set.qualified_files(absolute=False):
yield filename | List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob` that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html | Below is the instruction that describes the task:
### Input:
List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob` that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html
### Response:
def _find_files(root, includes, excludes, follow_symlinks):
"""List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob` that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html
"""
root = os.path.abspath(root)
file_set = formic.FileSet(
directory=root, include=includes,
exclude=excludes, symlinks=follow_symlinks,
)
for filename in file_set.qualified_files(absolute=False):
yield filename |
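A hedged usage sketch for this row's function, assuming `_find_files` and its `formic` dependency are importable; the directory and patterns below are illustrative:

```python
# Collect Python sources under ./src, skipping test modules.
# Patterns use formic's Ant-glob syntax, e.g. "**" matches any depth.
for relpath in _find_files(
        root="src",
        includes=["**/*.py"],
        excludes=["**/test_*.py"],
        follow_symlinks=False):
    print(relpath)  # e.g. "package/module.py", relative to src/
```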
def compute_default_choice(self):
"""Computes and sets the default choice"""
choices = self.choices
if len(choices) == 0:
return None
high_choice = max(choices, key=lambda choice: choice.performance)
self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "default-choice", high_choice.name)
self.refresh()
return high_choice | Computes and sets the default choice | Below is the instruction that describes the task:
### Input:
Computes and sets the default choice
### Response:
def compute_default_choice(self):
"""Computes and sets the default choice"""
choices = self.choices
if len(choices) == 0:
return None
high_choice = max(choices, key=lambda choice: choice.performance)
self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "default-choice", high_choice.name)
self.refresh()
return high_choice |
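Stripped of the redis bookkeeping, the selection itself is a max-by-attribute; a minimal sketch with a hypothetical `Choice` stand-in:

```python
from collections import namedtuple

# Hypothetical stand-in for the experiment's choice objects.
Choice = namedtuple("Choice", ["name", "performance"])

choices = [Choice("a", 0.12), Choice("b", 0.34), Choice("c", 0.27)]
best = max(choices, key=lambda choice: choice.performance)
print(best.name)  # "b" is what would be persisted as the default choice
```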
def _make_sure_table_exists(self, name_seq):
"""
Makes sure the table with the full name comprising of name_seq exists.
"""
t = self
for key in name_seq[:-1]:
t = t[key]
name = name_seq[-1]
if name not in t:
self.append_elements([element_factory.create_table_header_element(name_seq),
element_factory.create_table({})]) | Makes sure the table with the full name comprising of name_seq exists. | Below is the instruction that describes the task:
### Input:
Makes sure the table with the full name comprising of name_seq exists.
### Response:
def _make_sure_table_exists(self, name_seq):
"""
Makes sure the table with the full name comprising of name_seq exists.
"""
t = self
for key in name_seq[:-1]:
t = t[key]
name = name_seq[-1]
if name not in t:
self.append_elements([element_factory.create_table_header_element(name_seq),
element_factory.create_table({})]) |
def add_scroll_bar(self):
"""Packs the scrollbar.
"""
adj = self.terminal.get_vadjustment()
scroll = Gtk.VScrollbar(adj)
scroll.show()
self.pack_start(scroll, False, False, 0) | Packs the scrollbar. | Below is the instruction that describes the task:
### Input:
Packs the scrollbar.
### Response:
def add_scroll_bar(self):
"""Packs the scrollbar.
"""
adj = self.terminal.get_vadjustment()
scroll = Gtk.VScrollbar(adj)
scroll.show()
self.pack_start(scroll, False, False, 0) |
def get_image_layer(self, image_id):
"""GET /v1/images/(image_id)/json"""
return self._http_call(self.IMAGE_JSON, get, image_id=image_id) | GET /v1/images/(image_id)/json | Below is the instruction that describes the task:
### Input:
GET /v1/images/(image_id)/json
### Response:
def get_image_layer(self, image_id):
"""GET /v1/images/(image_id)/json"""
return self._http_call(self.IMAGE_JSON, get, image_id=image_id) |
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built | Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false`` | Below is the instruction that describes the task:
### Input:
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
### Response:
def Boolean():
"""
Creates a validator that attempts to convert the given value to a boolean
or raises an error. The following rules are used:
``None`` is converted to ``False``.
``int`` values are ``True`` except for ``0``.
``str`` values converted in lower- and uppercase:
* ``y, yes, t, true``
* ``n, no, f, false``
"""
@wraps(Boolean)
def built(value):
# Already a boolean?
if isinstance(value, bool):
return value
# None
if value == None:
return False
# Integers
if isinstance(value, int):
return not value == 0
# Strings
if isinstance(value, str):
if value.lower() in { 'y', 'yes', 't', 'true' }:
return True
elif value.lower() in { 'n', 'no', 'f', 'false' }:
return False
# Nope
raise Error("Not a boolean value.")
return built |
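A hedged usage sketch, assuming `Boolean` and its `Error` exception are imported from the validator module above; the expected results follow directly from the rules in the docstring:

```python
validate = Boolean()

print(validate(True))    # True  (already a boolean, passed through)
print(validate(None))    # False
print(validate(0))       # False (the only falsy integer)
print(validate("YES"))   # True  (matched case-insensitively)
print(validate("f"))     # False
# validate("maybe") raises Error("Not a boolean value.")
```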
def datetime_field_data(field, **kwargs):
"""
Return random value for DateTimeField
>>> result = any_form_field(forms.DateTimeField())
>>> type(result)
<type 'str'>
"""
from_date = kwargs.get('from_date', datetime(1990, 1, 1))
to_date = kwargs.get('to_date', datetime.today())
date_format = random.choice(field.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'))
return xunit.any_datetime(from_date=from_date, to_date=to_date).strftime(date_format) | Return random value for DateTimeField
>>> result = any_form_field(forms.DateTimeField())
>>> type(result)
<type 'str'> | Below is the instruction that describes the task:
### Input:
Return random value for DateTimeField
>>> result = any_form_field(forms.DateTimeField())
>>> type(result)
<type 'str'>
### Response:
def datetime_field_data(field, **kwargs):
"""
Return random value for DateTimeField
>>> result = any_form_field(forms.DateTimeField())
>>> type(result)
<type 'str'>
"""
from_date = kwargs.get('from_date', datetime(1990, 1, 1))
to_date = kwargs.get('to_date', datetime.today())
date_format = random.choice(field.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'))
return xunit.any_datetime(from_date=from_date, to_date=to_date).strftime(date_format) |
def _get_snmpv3(self, oid):
"""
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(
self.user,
self.auth_key,
self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto,
),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid,
lookupNames=True,
lookupValues=True,
)
if not error_detected and snmp_data[0][1]:
return text_type(snmp_data[0][1])
return "" | Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve. | Below is the instruction that describes the task:
### Input:
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
### Response:
def _get_snmpv3(self, oid):
"""
Try to send an SNMP GET operation using SNMPv3 for the specified OID.
Parameters
----------
oid : str
The SNMP OID that you want to get.
Returns
-------
string : str
The string as part of the value from the OID you are trying to retrieve.
"""
snmp_target = (self.hostname, self.snmp_port)
cmd_gen = cmdgen.CommandGenerator()
(error_detected, error_status, error_index, snmp_data) = cmd_gen.getCmd(
cmdgen.UsmUserData(
self.user,
self.auth_key,
self.encrypt_key,
authProtocol=self.auth_proto,
privProtocol=self.encryp_proto,
),
cmdgen.UdpTransportTarget(snmp_target, timeout=1.5, retries=2),
oid,
lookupNames=True,
lookupValues=True,
)
if not error_detected and snmp_data[0][1]:
return text_type(snmp_data[0][1])
return "" |
def poll_job(self, job_key, timeoutSecs=10, retryDelaySecs=0.5, key=None, **kwargs):
'''
Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out.
'''
params_dict = {}
# merge kwargs into params_dict
h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'poll_job', False)
start_time = time.time()
pollCount = 0
while True:
result = self.do_json_request('3/Jobs.json/' + job_key, timeout=timeoutSecs, params=params_dict)
# print 'Job: ', dump_json(result)
if key:
frames_result = self.frames(key=key)
print 'frames_result for key:', key, dump_json(result)
jobs = result['jobs'][0]
description = jobs['description']
dest = jobs['dest']
dest_name = dest['name']
msec = jobs['msec']
status = jobs['status']
progress = jobs['progress']
print description, \
"dest_name:", dest_name, \
"\tprogress:", "%-10s" % progress, \
"\tstatus:", "%-12s" % status, \
"\tmsec:", msec
if status=='DONE' or status=='CANCELLED' or status=='FAILED':
h2o_sandbox.check_sandbox_for_errors()
return result
# what about 'CREATED'
# FIX! what are the other legal polling statuses that we should check for?
if not h2o_args.no_timeout and (time.time() - start_time > timeoutSecs):
h2o_sandbox.check_sandbox_for_errors()
emsg = "Job:", job_key, "timed out in:", timeoutSecs
# for debug
a = h2o.nodes[0].get_cloud()
print "cloud.json:", dump_json(a)
raise Exception(emsg)
print emsg
return None
# check every other poll, for now
if (pollCount % 2) == 0:
h2o_sandbox.check_sandbox_for_errors()
time.sleep(retryDelaySecs)
pollCount += 1 | Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out. | Below is the instruction that describes the task:
### Input:
Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out.
### Response:
def poll_job(self, job_key, timeoutSecs=10, retryDelaySecs=0.5, key=None, **kwargs):
'''
Poll a single job from the /Jobs endpoint until it is "status": "DONE" or "CANCELLED" or "FAILED" or we time out.
'''
params_dict = {}
# merge kwargs into params_dict
h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'poll_job', False)
start_time = time.time()
pollCount = 0
while True:
result = self.do_json_request('3/Jobs.json/' + job_key, timeout=timeoutSecs, params=params_dict)
# print 'Job: ', dump_json(result)
if key:
frames_result = self.frames(key=key)
print 'frames_result for key:', key, dump_json(result)
jobs = result['jobs'][0]
description = jobs['description']
dest = jobs['dest']
dest_name = dest['name']
msec = jobs['msec']
status = jobs['status']
progress = jobs['progress']
print description, \
"dest_name:", dest_name, \
"\tprogress:", "%-10s" % progress, \
"\tstatus:", "%-12s" % status, \
"\tmsec:", msec
if status=='DONE' or status=='CANCELLED' or status=='FAILED':
h2o_sandbox.check_sandbox_for_errors()
return result
# what about 'CREATED'
# FIX! what are the other legal polling statuses that we should check for?
if not h2o_args.no_timeout and (time.time() - start_time > timeoutSecs):
h2o_sandbox.check_sandbox_for_errors()
emsg = "Job:", job_key, "timed out in:", timeoutSecs
# for debug
a = h2o.nodes[0].get_cloud()
print "cloud.json:", dump_json(a)
raise Exception(emsg)
print emsg
return None
# check every other poll, for now
if (pollCount % 2) == 0:
h2o_sandbox.check_sandbox_for_errors()
time.sleep(retryDelaySecs)
pollCount += 1 |
def add_content_type(self, ct):
"""
Add a CoRE Link Format ct attribute to the resource.
:param ct: the CoRE Link Format ct attribute
"""
lst = self._attributes.get("ct")
if lst is None:
lst = []
if isinstance(ct, str):
ct = defines.Content_types[ct]
lst.append(ct)
self._attributes["ct"] = lst | Add a CoRE Link Format ct attribute to the resource.
:param ct: the CoRE Link Format ct attribute | Below is the instruction that describes the task:
### Input:
Add a CoRE Link Format ct attribute to the resource.
:param ct: the CoRE Link Format ct attribute
### Response:
def add_content_type(self, ct):
"""
Add a CoRE Link Format ct attribute to the resource.
:param ct: the CoRE Link Format ct attribute
"""
lst = self._attributes.get("ct")
if lst is None:
lst = []
if isinstance(ct, str):
ct = defines.Content_types[ct]
lst.append(ct)
self._attributes["ct"] = lst |
def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
"""
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
"""
imtile = s.oshape.translate(-s.pad)
bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1]))
rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if
np.size(region_size) == 1 else np.array(region_size))
n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int')
particle_groups = []
tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)
if doshift == 'rand':
doshift = np.random.choice([True, False])
if doshift:
shift = rs // 2
n_translate += 1
else:
shift = 0
deltas = np.meshgrid(*[np.arange(i) for i in n_translate])
positions = s.obj_get_positions()
if bounds is None:
# FIXME this (deliberately) masks a problem where optimization
# places particles outside the image. However, it ensures that
# all particles are in at least one group when `bounds is None`,
# which is the use case within opt. The 1e-3 is to ensure that
# they are inside the box and not on the edge.
positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3)
groups = list(map(lambda *args: find_particles_in_tile(positions,
tile.translate( np.array(args) * rs - shift)), *[d.ravel()
for d in deltas]))
for i in range(len(groups)-1, -1, -1):
if groups[i].size == 0:
groups.pop(i)
assert _check_groups(s, groups)
return groups | Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region. | Below is the instruction that describes the task:
### Input:
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
### Response:
def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
"""
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
"""
imtile = s.oshape.translate(-s.pad)
bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1]))
rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if
np.size(region_size) == 1 else np.array(region_size))
n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int')
particle_groups = []
tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)
if doshift == 'rand':
doshift = np.random.choice([True, False])
if doshift:
shift = rs // 2
n_translate += 1
else:
shift = 0
deltas = np.meshgrid(*[np.arange(i) for i in n_translate])
positions = s.obj_get_positions()
if bounds is None:
# FIXME this (deliberately) masks a problem where optimization
# places particles outside the image. However, it ensures that
# all particles are in at least one group when `bounds is None`,
# which is the use case within opt. The 1e-3 is to ensure that
# they are inside the box and not on the edge.
positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3)
groups = list(map(lambda *args: find_particles_in_tile(positions,
tile.translate( np.array(args) * rs - shift)), *[d.ravel()
for d in deltas]))
for i in range(len(groups)-1, -1, -1):
if groups[i].size == 0:
groups.pop(i)
assert _check_groups(s, groups)
return groups |
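Setting aside peri's `Tile` machinery, the core idea is bucketing positions into fixed-size boxes so that every particle lands in exactly one group; a minimal numpy sketch of that idea:

```python
import numpy as np

def group_by_box(positions, region_size):
    """Bucket (N, 3) positions into cubic boxes of side region_size."""
    cells = np.floor(positions / region_size).astype(int)
    groups = {}
    for index, cell in enumerate(map(tuple, cells)):
        groups.setdefault(cell, []).append(index)
    return [np.array(g) for g in groups.values()]

positions = np.random.uniform(0, 100, size=(50, 3))
groups = group_by_box(positions, region_size=40)
assert sum(len(g) for g in groups) == 50  # every particle in exactly one group
```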
def contents(self, path, ref=None):
"""Get the contents of the file pointed to by ``path``.
If the path provided is actually a directory, you will receive a
dictionary back of the form::
{
'filename.md': Contents(), # Where Contents an instance
'github.py': Contents(),
}
:param str path: (required), path to file, e.g.
github3/repo.py
:param str ref: (optional), the string name of a commit/branch/tag.
Default: master
:returns: :class:`Contents <github3.repos.contents.Contents>` or dict
if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
json = self._json(self._get(url, params={'ref': ref}), 200)
if isinstance(json, dict):
return Contents(json, self)
elif isinstance(json, list):
return dict((j.get('name'), Contents(j, self)) for j in json)
return None | Get the contents of the file pointed to by ``path``.
If the path provided is actually a directory, you will receive a
dictionary back of the form::
{
'filename.md': Contents(), # Where Contents an instance
'github.py': Contents(),
}
:param str path: (required), path to file, e.g.
github3/repo.py
:param str ref: (optional), the string name of a commit/branch/tag.
Default: master
:returns: :class:`Contents <github3.repos.contents.Contents>` or dict
if successful, else None | Below is the instruction that describes the task:
### Input:
Get the contents of the file pointed to by ``path``.
If the path provided is actually a directory, you will receive a
dictionary back of the form::
{
'filename.md': Contents(), # Where Contents an instance
'github.py': Contents(),
}
:param str path: (required), path to file, e.g.
github3/repo.py
:param str ref: (optional), the string name of a commit/branch/tag.
Default: master
:returns: :class:`Contents <github3.repos.contents.Contents>` or dict
if successful, else None
### Response:
def contents(self, path, ref=None):
"""Get the contents of the file pointed to by ``path``.
If the path provided is actually a directory, you will receive a
dictionary back of the form::
{
'filename.md': Contents(), # Where Contents an instance
'github.py': Contents(),
}
:param str path: (required), path to file, e.g.
github3/repo.py
:param str ref: (optional), the string name of a commit/branch/tag.
Default: master
:returns: :class:`Contents <github3.repos.contents.Contents>` or dict
if successful, else None
"""
url = self._build_url('contents', path, base_url=self._api)
json = self._json(self._get(url, params={'ref': ref}), 200)
if isinstance(json, dict):
return Contents(json, self)
elif isinstance(json, list):
return dict((j.get('name'), Contents(j, self)) for j in json)
return None |
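A hedged usage sketch against the github3.py API of that era (repository name, paths, and the `decoded` attribute are per that version and should be treated as assumptions):

```python
import github3

repo = github3.repository("sigmavirus24", "github3.py")
readme = repo.contents("README.rst")               # a file -> Contents
if readme is not None:
    print(readme.decoded[:60])                     # decoded file bytes
listing = repo.contents("github3", ref="master")   # a directory -> dict
print(sorted(listing))                             # filenames in the directory
```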
def is_met(self, ti, session, dep_context=None):
"""
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) | Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext | Below is the instruction that describes the task:
### Input:
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
### Response:
def is_met(self, ti, session, dep_context=None):
"""
Returns whether or not this dependency is met for a given task instance. A
dependency is considered met if all of the dependency statuses it reports are
passing.
:param ti: the task instance to see if this dependency is met for
:type ti: airflow.models.TaskInstance
:param session: database session
:type session: sqlalchemy.orm.session.Session
:param dep_context: The context this dependency is being checked under that stores
state that can be used by this dependency.
:type dep_context: BaseDepContext
"""
return all(status.passed for status in
self.get_dep_statuses(ti, session, dep_context)) |
def element_should_be_disabled(self, locator, loglevel='INFO'):
"""Verifies that element identified with locator is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if self._element_find(locator, True, True).is_enabled():
self.log_source(loglevel)
raise AssertionError("Element '%s' should be disabled "
"but did not" % locator)
self._info("Element '%s' is disabled ." % locator) | Verifies that element identified with locator is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements. | Below is the instruction that describes the task:
### Input:
Verifies that element identified with locator is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
### Response:
def element_should_be_disabled(self, locator, loglevel='INFO'):
"""Verifies that element identified with locator is disabled.
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
if self._element_find(locator, True, True).is_enabled():
self.log_source(loglevel)
raise AssertionError("Element '%s' should be disabled "
"but did not" % locator)
self._info("Element '%s' is disabled ." % locator) |
def _client_allowed(self):
"""Check if client is allowed to connect to this server."""
client_ip = self._client_address[0]
if not client_ip in self._settings.allowed_clients and \
not 'ALL' in self._settings.allowed_clients:
content = 'Access from host {} forbidden.'.format(client_ip).encode('utf-8')
self._send_content(content, 'text/html')
return False
return True | Check if client is allowed to connect to this server. | Below is the instruction that describes the task:
### Input:
Check if client is allowed to connect to this server.
### Response:
def _client_allowed(self):
"""Check if client is allowed to connect to this server."""
client_ip = self._client_address[0]
if not client_ip in self._settings.allowed_clients and \
not 'ALL' in self._settings.allowed_clients:
content = 'Access from host {} forbidden.'.format(client_ip).encode('utf-8')
self._send_content(content, 'text/html')
return False
return True |
def watch(self, path, func=None, delay=0, ignore=None):
"""Add a task to watcher.
:param path: a filepath or directory path or glob pattern
:param func: the function to be executed when file changed
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath.
"""
self._tasks[path] = {
'func': func,
'delay': delay,
'ignore': ignore,
} | Add a task to watcher.
:param path: a filepath or directory path or glob pattern
:param func: the function to be executed when file changed
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath. | Below is the instruction that describes the task:
### Input:
Add a task to watcher.
:param path: a filepath or directory path or glob pattern
:param func: the function to be executed when file changed
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath.
### Response:
def watch(self, path, func=None, delay=0, ignore=None):
"""Add a task to watcher.
:param path: a filepath or directory path or glob pattern
:param func: the function to be executed when file changed
:param delay: Delay sending the reload message. Use 'forever' to
not send it. This is useful to compile sass files to
css, but reload on changed css files then only.
:param ignore: A function return True to ignore a certain pattern of
filepath.
"""
self._tasks[path] = {
'func': func,
'delay': delay,
'ignore': ignore,
} |
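A hedged usage sketch; `server` stands in for whatever object exposes this `watch` method, and the paths and callback are illustrative:

```python
def compile_sass():
    print("recompiling stylesheets...")

# Recompile on .sass changes but never send a reload for them ...
server.watch("assets/*.sass", func=compile_sass, delay="forever")
# ... and reload immediately when the generated .css changes.
server.watch("static/*.css")
# Watch a directory, skipping editor temp files.
server.watch("docs/", ignore=lambda filepath: filepath.endswith("~"))
```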
def plot(self, name, funcname):
"""Plot item"""
sw = self.shellwidget
if sw._reading:
sw.dbg_exec_magic('varexp', '--%s %s' % (funcname, name))
else:
sw.execute("%%varexp --%s %s" % (funcname, name)) | Plot item | Below is the the instruction that describes the task:
### Input:
Plot item
### Response:
def plot(self, name, funcname):
"""Plot item"""
sw = self.shellwidget
if sw._reading:
sw.dbg_exec_magic('varexp', '--%s %s' % (funcname, name))
else:
sw.execute("%%varexp --%s %s" % (funcname, name)) |
def check_save(sender, **kwargs):
"""
Checks item type uniqueness, field applicability and multiplicity.
"""
tag = kwargs['instance']
obj = Tag.get_object(tag)
previous_tags = Tag.get_tags(obj)
err_uniq = check_item_type_uniqueness(tag, previous_tags)
err_appl = check_field_applicability(tag)
err_mult = check_field_multiplicity(tag, previous_tags)
err_msg = generate_error_message(tag, err_uniq, err_appl, err_mult)
if err_uniq or err_appl or err_mult:
raise TypeError(err_msg) | Checks item type uniqueness, field applicability and multiplicity. | Below is the instruction that describes the task:
### Input:
Checks item type uniqueness, field applicability and multiplicity.
### Response:
def check_save(sender, **kwargs):
"""
Checks item type uniqueness, field applicability and multiplicity.
"""
tag = kwargs['instance']
obj = Tag.get_object(tag)
previous_tags = Tag.get_tags(obj)
err_uniq = check_item_type_uniqueness(tag, previous_tags)
err_appl = check_field_applicability(tag)
err_mult = check_field_multiplicity(tag, previous_tags)
err_msg = generate_error_message(tag, err_uniq, err_appl, err_mult)
if err_uniq or err_appl or err_mult:
raise TypeError(err_msg) |
def get(self, spike_ids, channels=None):
"""Load the waveforms of the specified spikes."""
if isinstance(spike_ids, slice):
spike_ids = _range_from_slice(spike_ids,
start=0,
stop=self.n_spikes,
)
if not hasattr(spike_ids, '__len__'):
spike_ids = [spike_ids]
if channels is None:
channels = slice(None, None, None)
nc = self.n_channels
else:
channels = np.asarray(channels, dtype=np.int32)
assert np.all(channels < self.n_channels)
nc = len(channels)
# Ensure a list of time samples are being requested.
spike_ids = _as_array(spike_ids)
n_spikes = len(spike_ids)
# Initialize the array.
# NOTE: last dimension is time to simplify things.
shape = (n_spikes, nc, self._n_samples_extract)
waveforms = np.zeros(shape, dtype=np.float32)
# No traces: return null arrays.
if self.n_samples_trace == 0:
return np.transpose(waveforms, (0, 2, 1))
# Load all spikes.
for i, spike_id in enumerate(spike_ids):
assert 0 <= spike_id < self.n_spikes
time = self._spike_samples[spike_id]
# Extract the waveforms on the unmasked channels.
try:
w = self._load_at(time, channels)
except ValueError as e: # pragma: no cover
logger.warn("Error while loading waveform: %s", str(e))
continue
assert w.shape == (self._n_samples_extract, nc)
waveforms[i, :, :] = w.T
# Filter the waveforms.
waveforms_f = waveforms.reshape((-1, self._n_samples_extract))
# Only filter the non-zero waveforms.
unmasked = waveforms_f.max(axis=1) != 0
waveforms_f[unmasked] = self._filter(waveforms_f[unmasked], axis=1)
waveforms_f = waveforms_f.reshape((n_spikes, nc,
self._n_samples_extract))
# Remove the margin.
margin_before, margin_after = self._filter_margin
if margin_after > 0:
assert margin_before >= 0
waveforms_f = waveforms_f[:, :, margin_before:-margin_after]
assert waveforms_f.shape == (n_spikes,
nc,
self.n_samples_waveforms,
)
# NOTE: we transpose before returning the array.
return np.transpose(waveforms_f, (0, 2, 1)) | Load the waveforms of the specified spikes. | Below is the instruction that describes the task:
### Input:
Load the waveforms of the specified spikes.
### Response:
def get(self, spike_ids, channels=None):
"""Load the waveforms of the specified spikes."""
if isinstance(spike_ids, slice):
spike_ids = _range_from_slice(spike_ids,
start=0,
stop=self.n_spikes,
)
if not hasattr(spike_ids, '__len__'):
spike_ids = [spike_ids]
if channels is None:
channels = slice(None, None, None)
nc = self.n_channels
else:
channels = np.asarray(channels, dtype=np.int32)
assert np.all(channels < self.n_channels)
nc = len(channels)
# Ensure a list of time samples are being requested.
spike_ids = _as_array(spike_ids)
n_spikes = len(spike_ids)
# Initialize the array.
# NOTE: last dimension is time to simplify things.
shape = (n_spikes, nc, self._n_samples_extract)
waveforms = np.zeros(shape, dtype=np.float32)
# No traces: return null arrays.
if self.n_samples_trace == 0:
return np.transpose(waveforms, (0, 2, 1))
# Load all spikes.
for i, spike_id in enumerate(spike_ids):
assert 0 <= spike_id < self.n_spikes
time = self._spike_samples[spike_id]
# Extract the waveforms on the unmasked channels.
try:
w = self._load_at(time, channels)
except ValueError as e: # pragma: no cover
logger.warn("Error while loading waveform: %s", str(e))
continue
assert w.shape == (self._n_samples_extract, nc)
waveforms[i, :, :] = w.T
# Filter the waveforms.
waveforms_f = waveforms.reshape((-1, self._n_samples_extract))
# Only filter the non-zero waveforms.
unmasked = waveforms_f.max(axis=1) != 0
waveforms_f[unmasked] = self._filter(waveforms_f[unmasked], axis=1)
waveforms_f = waveforms_f.reshape((n_spikes, nc,
self._n_samples_extract))
# Remove the margin.
margin_before, margin_after = self._filter_margin
if margin_after > 0:
assert margin_before >= 0
waveforms_f = waveforms_f[:, :, margin_before:-margin_after]
assert waveforms_f.shape == (n_spikes,
nc,
self.n_samples_waveforms,
)
# NOTE: we transpose before returning the array.
return np.transpose(waveforms_f, (0, 2, 1)) |
def top_1(x, reduced_dim, dtype=tf.int32, name=None):
"""Argmax and Max.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
dtype: a tf.dtype (for the output)
name: an optional string
Returns:
indices: a Tensor with given dtype
values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
"""
reduced_dim = convert_to_dimension(reduced_dim)
with tf.name_scope(name, default_name="top_1"):
max_val = reduce_max(x, reduced_dim=reduced_dim)
is_max = to_float(equal(x, max_val))
pos = mtf_range(x.mesh, reduced_dim, tf.float32)
ret = reduce_max(is_max * pos, reduced_dim=reduced_dim)
ret = cast(ret, dtype)
return ret, max_val | Argmax and Max.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
dtype: a tf.dtype (for the output)
name: an optional string
Returns:
indices: a Tensor with given dtype
values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim) | Below is the instruction that describes the task:
### Input:
Argmax and Max.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
dtype: a tf.dtype (for the output)
name: an optional string
Returns:
indices: a Tensor with given dtype
values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
### Response:
def top_1(x, reduced_dim, dtype=tf.int32, name=None):
"""Argmax and Max.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
dtype: a tf.dtype (for the output)
name: an optional string
Returns:
indices: a Tensor with given dtype
values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
"""
reduced_dim = convert_to_dimension(reduced_dim)
with tf.name_scope(name, default_name="top_1"):
max_val = reduce_max(x, reduced_dim=reduced_dim)
is_max = to_float(equal(x, max_val))
pos = mtf_range(x.mesh, reduced_dim, tf.float32)
ret = reduce_max(is_max * pos, reduced_dim=reduced_dim)
ret = cast(ret, dtype)
return ret, max_val |
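The same argmax-via-reductions trick in plain numpy, for intuition (presumably chosen in Mesh TensorFlow because it only needs elementwise ops plus `reduce_max`, which distribute cleanly over a split dimension; note that ties resolve to the highest position):

```python
import numpy as np

x = np.array([[1.0, 5.0, 3.0],
              [4.0, 2.0, 2.0]])
max_val = x.max(axis=-1, keepdims=True)          # reduce_max
is_max = (x == max_val).astype(np.float32)       # to_float(equal(...))
pos = np.arange(x.shape[-1], dtype=np.float32)   # mtf_range over the dim
indices = (is_max * pos).max(axis=-1).astype(np.int32)
print(indices)           # [1 0]
print(max_val.ravel())   # [5. 4.]
```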
def main():
"""Main"""
lic = (
'License :: OSI Approved :: GNU Affero '
'General Public License v3 or later (AGPLv3+)')
version = load_source("version", os.path.join("spamc", "version.py"))
opts = dict(
name="spamc",
version=version.__version__,
description="Python spamassassin spamc client library",
long_description=get_readme(),
keywords="spam spamc spamassassin",
author="Andrew Colin Kissa",
author_email="andrew@topdog.za.net",
url="https://github.com/akissa/spamc",
license="AGPLv3+",
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
tests_require=TESTS_REQUIRE,
install_requires=INSTALL_REQUIRES,
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
lic,
'Natural Language :: English',
'Operating System :: OS Independent'],)
setup(**opts) | Main | Below is the instruction that describes the task:
### Input:
Main
### Response:
def main():
"""Main"""
lic = (
'License :: OSI Approved :: GNU Affero '
'General Public License v3 or later (AGPLv3+)')
version = load_source("version", os.path.join("spamc", "version.py"))
opts = dict(
name="spamc",
version=version.__version__,
description="Python spamassassin spamc client library",
long_description=get_readme(),
keywords="spam spamc spamassassin",
author="Andrew Colin Kissa",
author_email="andrew@topdog.za.net",
url="https://github.com/akissa/spamc",
license="AGPLv3+",
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
tests_require=TESTS_REQUIRE,
install_requires=INSTALL_REQUIRES,
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Developers',
lic,
'Natural Language :: English',
'Operating System :: OS Independent'],)
setup(**opts) |
def open_without_clobber(name, *args):
"""
Try to open the given file with the given mode; if that filename exists,
try "name.1", "name.2", etc. until we find an unused filename.
"""
fd = None
count = 1
orig_name = name
while fd is None:
try:
fd = os.open(name, os.O_CREAT | os.O_EXCL, 0o666)
except OSError as err:
if err.errno == errno.EEXIST:
name = "%s.%i" % (orig_name, count)
count += 1
else:
raise IOError(err.errno, err.strerror, err.filename)
fobj = open(name, *args)
if fd != fobj.fileno():
os.close(fd)
return fobj | Try to open the given file with the given mode; if that filename exists,
try "name.1", "name.2", etc. until we find an unused filename. | Below is the the instruction that describes the task:
### Input:
Try to open the given file with the given mode; if that filename exists,
try "name.1", "name.2", etc. until we find an unused filename.
### Response:
def open_without_clobber(name, *args):
"""
Try to open the given file with the given mode; if that filename exists,
try "name.1", "name.2", etc. until we find an unused filename.
"""
fd = None
count = 1
orig_name = name
while fd is None:
try:
fd = os.open(name, os.O_CREAT | os.O_EXCL, 0o666)
except OSError as err:
if err.errno == errno.EEXIST:
name = "%s.%i" % (orig_name, count)
count += 1
else:
raise IOError(err.errno, err.strerror, err.filename)
fobj = open(name, *args)
if fd != fobj.fileno():
os.close(fd)
return fobj |
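A hedged usage sketch in an empty directory: each call claims the next free suffix instead of truncating earlier output.

```python
f1 = open_without_clobber("results.txt", "w")  # creates results.txt
f2 = open_without_clobber("results.txt", "w")  # creates results.txt.1
f3 = open_without_clobber("results.txt", "w")  # creates results.txt.2
for f in (f1, f2, f3):
    f.write("done\n")
    f.close()
```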
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None | Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2(). | Below is the instruction that describes the task:
### Input:
Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
### Response:
def set_seq1(self, a):
"""Set the first sequence to be compared.
The second sequence to be compared is not changed.
>>> s = SequenceMatcher(None, "abcd", "bcde")
>>> s.ratio()
0.75
>>> s.set_seq1("bcde")
>>> s.ratio()
1.0
>>>
SequenceMatcher computes and caches detailed information about the
second sequence, so if you want to compare one sequence S against
many sequences, use .set_seq2(S) once and call .set_seq1(x)
repeatedly for each of the other sequences.
See also set_seqs() and set_seq2().
"""
if a is self.a:
return
self.a = a
self.matching_blocks = self.opcodes = None |
def planetRadiusType(radius):
""" Returns the planet radiustype given the mass and using planetAssumptions['radiusType']
"""
if np.isnan(radius):
return None
for radiusLimit, radiusType in planetAssumptions['radiusType']:
if radius < radiusLimit:
return radiusType | Returns the planet radiustype given the mass and using planetAssumptions['radiusType'] | Below is the the instruction that describes the task:
### Input:
Returns the planet radius type given the radius, using planetAssumptions['radiusType']
### Response:
def planetRadiusType(radius):
""" Returns the planet radiustype given the mass and using planetAssumptions['radiusType']
"""
if np.isnan(radius):
return None
for radiusLimit, radiusType in planetAssumptions['radiusType']:
if radius < radiusLimit:
return radiusType |
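A self-contained sketch of the threshold-table lookup; the limits below are illustrative placeholders, not the library's actual `planetAssumptions` values:

```python
import numpy as np

planetAssumptions = {
    "radiusType": [  # (upper limit in Jupiter radii, label), ascending
        (0.04, "Earth-like"),
        (0.4, "Neptune-like"),
        (float("inf"), "Jupiter-like"),
    ]
}

def planet_radius_type(radius):
    if radius is None or np.isnan(radius):
        return None
    for radius_limit, radius_type in planetAssumptions["radiusType"]:
        if radius < radius_limit:
            return radius_type

print(planet_radius_type(0.09))    # Neptune-like
print(planet_radius_type(1.1))     # Jupiter-like
print(planet_radius_type(np.nan))  # None
```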
def _sort_lines(self):
"""Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server.
"""
self._valid_lines = sorted(
self._valid_lines,
key=lambda line: line.accept_date,
) | Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server. | Below is the instruction that describes the task:
### Input:
Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server.
### Response:
def _sort_lines(self):
"""Haproxy writes its logs after having gathered all information
related to each specific connection. A simple request can be
really quick but others can be really slow, thus even if one connection
is logged later, it could have been accepted before others that are
already processed and logged.
This method sorts all valid log lines by their acceptance date,
providing the real order in which connections were made to the server.
"""
self._valid_lines = sorted(
self._valid_lines,
key=lambda line: line.accept_date,
) |
def _generateMetricsSubstitutions(options, tokenReplacements):
"""Generate the token substitution for metrics related fields.
This includes:
\$METRICS
\$LOGGED_METRICS
\$PERM_OPTIMIZE_SETTING
"""
# -----------------------------------------------------------------------
#
options['loggedMetrics'] = [".*"]
# -----------------------------------------------------------------------
# Generate the required metrics
metricList, optimizeMetricLabel = _generateMetricSpecs(options)
metricListString = ",\n".join(metricList)
metricListString = _indentLines(metricListString, 2, indentFirstLine=False)
permOptimizeSettingStr = 'minimize = "%s"' % optimizeMetricLabel
# -----------------------------------------------------------------------
# Specify which metrics should be logged
loggedMetricsListAsStr = "[%s]" % (", ".join(["'%s'"% ptrn
for ptrn in options['loggedMetrics']]))
tokenReplacements['\$LOGGED_METRICS'] \
= loggedMetricsListAsStr
tokenReplacements['\$METRICS'] = metricListString
tokenReplacements['\$PERM_OPTIMIZE_SETTING'] \
= permOptimizeSettingStr | Generate the token substitution for metrics related fields.
This includes:
\$METRICS
\$LOGGED_METRICS
\$PERM_OPTIMIZE_SETTING | Below is the instruction that describes the task:
### Input:
Generate the token substitution for metrics related fields.
This includes:
\$METRICS
\$LOGGED_METRICS
\$PERM_OPTIMIZE_SETTING
### Response:
def _generateMetricsSubstitutions(options, tokenReplacements):
"""Generate the token substitution for metrics related fields.
This includes:
\$METRICS
\$LOGGED_METRICS
\$PERM_OPTIMIZE_SETTING
"""
# -----------------------------------------------------------------------
#
options['loggedMetrics'] = [".*"]
# -----------------------------------------------------------------------
# Generate the required metrics
metricList, optimizeMetricLabel = _generateMetricSpecs(options)
metricListString = ",\n".join(metricList)
metricListString = _indentLines(metricListString, 2, indentFirstLine=False)
permOptimizeSettingStr = 'minimize = "%s"' % optimizeMetricLabel
# -----------------------------------------------------------------------
# Specify which metrics should be logged
loggedMetricsListAsStr = "[%s]" % (", ".join(["'%s'"% ptrn
for ptrn in options['loggedMetrics']]))
tokenReplacements['\$LOGGED_METRICS'] \
= loggedMetricsListAsStr
tokenReplacements['\$METRICS'] = metricListString
tokenReplacements['\$PERM_OPTIMIZE_SETTING'] \
= permOptimizeSettingStr |
def main(api_key, markup):
'''Doing the regex parsing and running the create_html function.'''
match = GIPHY.search(markup)
attrs = None
if match:
attrs = dict(
[(key, value.strip())
for (key, value) in match.groupdict().items() if value])
else:
raise ValueError('Error processing input. '
'Expected syntax: {}'.format(SYNTAX))
return create_html(api_key, attrs) | Doing the regex parsing and running the create_html function. | Below is the instruction that describes the task:
### Input:
Doing the regex parsing and running the create_html function.
### Response:
def main(api_key, markup):
'''Doing the regex parsing and running the create_html function.'''
match = GIPHY.search(markup)
attrs = None
if match:
attrs = dict(
[(key, value.strip())
for (key, value) in match.groupdict().items() if value])
else:
raise ValueError('Error processing input. '
'Expected syntax: {}'.format(SYNTAX))
return create_html(api_key, attrs) |
def new_file(self, vd, length, isoname, parent, seqnum, rock_ridge, rr_name,
xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, int, bytes, DirectoryRecord, int, str, bytes, bool, int) -> None
'''
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, isoname, parent, seqnum, False, length, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', False, False, False,
file_mode) | Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
### Response:
def new_file(self, vd, length, isoname, parent, seqnum, rock_ridge, rr_name,
xa, file_mode):
# type: (headervd.PrimaryOrSupplementaryVD, int, bytes, DirectoryRecord, int, str, bytes, bool, int) -> None
'''
Create a new file Directory Record.
Parameters:
vd - The Volume Descriptor this record is part of.
length - The length of the data.
isoname - The name for this directory record.
parent - The parent of this directory record.
seqnum - The sequence number for this directory record.
rock_ridge - Whether to make this a Rock Ridge directory record.
rr_name - The Rock Ridge name for this directory record.
xa - True if this is an Extended Attribute record.
file_mode - The POSIX file mode for this entry.
Returns:
Nothing.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('Directory Record already initialized')
self._new(vd, isoname, parent, seqnum, False, length, xa)
if rock_ridge:
self._rr_new(rock_ridge, rr_name, b'', False, False, False,
file_mode) |
def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None):
"""Find vdwradii.dat and add special entries for lipids.
See :data:`gromacs.setup.vdw_lipid_resnames` for lipid
resnames. Add more if necessary.
"""
vdwradii_dat = os.path.join(outdir, "vdwradii.dat")
if libdir is not None:
filename = os.path.join(libdir, 'vdwradii.dat') # canonical name
if not os.path.exists(filename):
msg = 'No VDW database file found in {filename!r}.'.format(**vars())
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
else:
try:
filename = os.path.join(os.environ['GMXLIB'], 'vdwradii.dat')
except KeyError:
try:
filename = os.path.join(os.environ['GMXDATA'], 'top', 'vdwradii.dat')
except KeyError:
msg = "Cannot find vdwradii.dat. Set GMXLIB (point to 'top') or GMXDATA ('share/gromacs')."
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
if not os.path.exists(filename):
msg = "Cannot find {filename!r}; something is wrong with the Gromacs installation.".format(**vars())
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
# make sure to catch 3 and 4 letter resnames
patterns = vdw_lipid_resnames + list({x[:3] for x in vdw_lipid_resnames})
# TODO: should do a tempfile...
with open(vdwradii_dat, 'w') as outfile:
# write lipid stuff before general
outfile.write('; Special larger vdw radii for solvating lipid membranes\n')
for resname in patterns:
for atom,radius in vdw_lipid_atom_radii.items():
outfile.write('{resname!s:<4} {atom!s:<5} {radius:5.3f}\n'.format(**vars()))
with open(filename, 'r') as infile:
for line in infile:
outfile.write(line)
logger.debug('Created lipid vdW radii file {vdwradii_dat!r}.'.format(**vars()))
return realpath(vdwradii_dat) | Find vdwradii.dat and add special entries for lipids.
See :data:`gromacs.setup.vdw_lipid_resnames` for lipid
resnames. Add more if necessary. | Below is the instruction that describes the task:
### Input:
Find vdwradii.dat and add special entries for lipids.
See :data:`gromacs.setup.vdw_lipid_resnames` for lipid
resnames. Add more if necessary.
### Response:
def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None):
"""Find vdwradii.dat and add special entries for lipids.
See :data:`gromacs.setup.vdw_lipid_resnames` for lipid
resnames. Add more if necessary.
"""
vdwradii_dat = os.path.join(outdir, "vdwradii.dat")
if libdir is not None:
filename = os.path.join(libdir, 'vdwradii.dat') # canonical name
if not os.path.exists(filename):
msg = 'No VDW database file found in {filename!r}.'.format(**vars())
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
else:
try:
filename = os.path.join(os.environ['GMXLIB'], 'vdwradii.dat')
except KeyError:
try:
filename = os.path.join(os.environ['GMXDATA'], 'top', 'vdwradii.dat')
except KeyError:
msg = "Cannot find vdwradii.dat. Set GMXLIB (point to 'top') or GMXDATA ('share/gromacs')."
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
if not os.path.exists(filename):
msg = "Cannot find {filename!r}; something is wrong with the Gromacs installation.".format(**vars())
logger.exception(msg)
raise OSError(msg, errno.ENOENT)
# make sure to catch 3 and 4 letter resnames
patterns = vdw_lipid_resnames + list({x[:3] for x in vdw_lipid_resnames})
# TODO: should do a tempfile...
with open(vdwradii_dat, 'w') as outfile:
# write lipid stuff before general
outfile.write('; Special larger vdw radii for solvating lipid membranes\n')
for resname in patterns:
for atom,radius in vdw_lipid_atom_radii.items():
outfile.write('{resname!s:<4} {atom!s:<5} {radius:5.3f}\n'.format(**vars()))
with open(filename, 'r') as infile:
for line in infile:
outfile.write(line)
logger.debug('Created lipid vdW radii file {vdwradii_dat!r}.'.format(**vars()))
return realpath(vdwradii_dat) |
def adjustTitleFont(self):
"""
Adjusts the font used for the title based on the current width and \
display name.
"""
left, top, right, bottom = self.contentsMargins()
r = self.roundingRadius()
# include text padding
left += 5 + r / 2
top += 5 + r / 2
right += 5 + r / 2
bottom += 5 + r / 2
r = self.rect()
rect_l = r.left() + left
rect_r = r.right() - right
rect_t = r.top() + top
rect_b = r.bottom() - bottom
# ensure we have a valid rect
rect = QRect(rect_l, rect_t, rect_r - rect_l, rect_b - rect_t)
if rect.width() < 10:
return
font = XFont(QApplication.font())
font.adaptSize(self.displayName(), rect, wordWrap=self.wordWrap())
self._titleFont = font | Adjusts the font used for the title based on the current width and \
display name. | Below is the instruction that describes the task:
### Input:
Adjusts the font used for the title based on the current width and \
display name.
### Response:
def adjustTitleFont(self):
"""
Adjusts the font used for the title based on the current width and \
display name.
"""
left, top, right, bottom = self.contentsMargins()
r = self.roundingRadius()
# include text padding
left += 5 + r / 2
top += 5 + r / 2
right += 5 + r / 2
bottom += 5 + r / 2
r = self.rect()
rect_l = r.left() + left
rect_r = r.right() - right
rect_t = r.top() + top
rect_b = r.bottom() - bottom
# ensure we have a valid rect
rect = QRect(rect_l, rect_t, rect_r - rect_l, rect_b - rect_t)
if rect.width() < 10:
return
font = XFont(QApplication.font())
font.adaptSize(self.displayName(), rect, wordWrap=self.wordWrap())
self._titleFont = font |
def freeze(self):
"""
Freeze (disable) all settings
"""
for fields in zip(self.xsll, self.xsul, self.xslr, self.xsur,
self.ys, self.nx, self.ny):
for field in fields:
field.disable()
self.nquad.disable()
self.xbin.disable()
self.ybin.disable()
self.sbutt.disable()
self.frozen = True | Freeze (disable) all settings | Below is the the instruction that describes the task:
### Input:
Freeze (disable) all settings
### Response:
def freeze(self):
"""
Freeze (disable) all settings
"""
for fields in zip(self.xsll, self.xsul, self.xslr, self.xsur,
self.ys, self.nx, self.ny):
for field in fields:
field.disable()
self.nquad.disable()
self.xbin.disable()
self.ybin.disable()
self.sbutt.disable()
self.frozen = True |
async def expand(self, request: Request, layer: BaseLayer):
"""
Expand a layer into a list of layers including the pauses.
"""
if isinstance(layer, lyr.RawText):
t = self.reading_time(layer.text)
yield layer
yield lyr.Sleep(t)
elif isinstance(layer, lyr.MultiText):
texts = await render(layer.text, request, True)
for text in texts:
t = self.reading_time(text)
yield lyr.RawText(text)
yield lyr.Sleep(t)
elif isinstance(layer, lyr.Text):
text = await render(layer.text, request)
t = self.reading_time(text)
yield lyr.RawText(text)
yield lyr.Sleep(t)
else:
yield layer | Expand a layer into a list of layers including the pauses. | Below is the the instruction that describes the task:
### Input:
Expand a layer into a list of layers including the pauses.
### Response:
async def expand(self, request: Request, layer: BaseLayer):
"""
Expand a layer into a list of layers including the pauses.
"""
if isinstance(layer, lyr.RawText):
t = self.reading_time(layer.text)
yield layer
yield lyr.Sleep(t)
elif isinstance(layer, lyr.MultiText):
texts = await render(layer.text, request, True)
for text in texts:
t = self.reading_time(text)
yield lyr.RawText(text)
yield lyr.Sleep(t)
elif isinstance(layer, lyr.Text):
text = await render(layer.text, request)
t = self.reading_time(text)
yield lyr.RawText(text)
yield lyr.Sleep(t)
else:
yield layer |
def add_cloud_bt_task(self, source_url, save_path=None):
'''Fetch the torrent from the server and create an offline download task.
source_url - absolute path of the BT torrent on the server, or a magnet link address.
save_path - path to save to; if None, a folder selection dialog will pop up.
'''
def check_vcode(info, error=None):
if error or not info:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
return
if info.get('error_code', -1) != 0:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
if 'task_id' in info or info['error_code'] == 0:
self.reload()
elif info['error_code'] == -19:
vcode_dialog = VCodeDialog(self, self.app, info)
response = vcode_dialog.run()
vcode_input = vcode_dialog.get_vcode()
vcode_dialog.destroy()
if response != Gtk.ResponseType.OK:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path,
selected_idx, file_sha1, info['vcode'],
vcode_input, callback=check_vcode)
else:
self.app.toast(_('Error: {0}').format(info['error_msg']))
self.check_first()
if not save_path:
folder_browser = FolderBrowserDialog(self, self.app, _('Save to..'))
response = folder_browser.run()
save_path = folder_browser.get_path()
folder_browser.destroy()
if response != Gtk.ResponseType.OK:
return
if not save_path:
return
bt_browser = BTBrowserDialog(self, self.app, _('Choose..'),
source_url, save_path)
response = bt_browser.run()
selected_idx, file_sha1 = bt_browser.get_selected()
bt_browser.destroy()
if response != Gtk.ResponseType.OK or not selected_idx:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path, selected_idx,
file_sha1, callback=check_vcode)
self.app.blink_page(self.app.cloud_page) | Fetch the torrent from the server and create an offline download task.
source_url - absolute path of the BT torrent on the server, or a magnet link address.
save_path - path to save to; if None, a folder selection dialog will pop up. | Below is the instruction that describes the task:
### Input:
Fetch the torrent from the server and create an offline download task.
source_url - absolute path of the BT torrent on the server, or a magnet link address.
save_path - path to save to; if None, a folder selection dialog will pop up.
### Response:
def add_cloud_bt_task(self, source_url, save_path=None):
'''Fetch the torrent from the server and create an offline download task.
source_url - absolute path of the BT torrent on the server, or a magnet link address.
save_path - path to save to; if None, a folder selection dialog will pop up.
'''
def check_vcode(info, error=None):
if error or not info:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
return
if info.get('error_code', -1) != 0:
logger.error('CloudPage.check_vcode: %s, %s' % (info, error))
if 'task_id' in info or info['error_code'] == 0:
self.reload()
elif info['error_code'] == -19:
vcode_dialog = VCodeDialog(self, self.app, info)
response = vcode_dialog.run()
vcode_input = vcode_dialog.get_vcode()
vcode_dialog.destroy()
if response != Gtk.ResponseType.OK:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path,
selected_idx, file_sha1, info['vcode'],
vcode_input, callback=check_vcode)
else:
self.app.toast(_('Error: {0}').format(info['error_msg']))
self.check_first()
if not save_path:
folder_browser = FolderBrowserDialog(self, self.app, _('Save to..'))
response = folder_browser.run()
save_path = folder_browser.get_path()
folder_browser.destroy()
if response != Gtk.ResponseType.OK:
return
if not save_path:
return
bt_browser = BTBrowserDialog(self, self.app, _('Choose..'),
source_url, save_path)
response = bt_browser.run()
selected_idx, file_sha1 = bt_browser.get_selected()
bt_browser.destroy()
if response != Gtk.ResponseType.OK or not selected_idx:
return
gutil.async_call(pcs.cloud_add_bt_task, self.app.cookie,
self.app.tokens, source_url, save_path, selected_idx,
file_sha1, callback=check_vcode)
self.app.blink_page(self.app.cloud_page) |
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} --no-environ -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError as e:
return None
for line in output.decode().split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None | return the path to an installed R package | Below is the the instruction that describes the task:
### Input:
return the path to an installed R package
### Response:
def R_package_path(package):
"""
return the path to an installed R package
"""
local_sitelib = R_sitelib()
rscript = Rscript_cmd()
cmd = """{rscript} --no-environ -e '.libPaths(c("{local_sitelib}")); find.package("{package}")'"""
try:
output = subprocess.check_output(cmd.format(**locals()), shell=True)
except subprocess.CalledProcessError as e:
return None
for line in output.decode().split("\n"):
if "[1]" not in line:
continue
dirname = line.split("[1]")[1].replace("\"", "").strip()
if os.path.exists(dirname):
return dirname
return None |
def start(self):
"""Start performing the action."""
self.status = 'pending'
self.thing.action_notify(self)
self.perform_action()
self.finish() | Start performing the action. | Below is the the instruction that describes the task:
### Input:
Start performing the action.
### Response:
def start(self):
"""Start performing the action."""
self.status = 'pending'
self.thing.action_notify(self)
self.perform_action()
self.finish() |
def read_init(key):
"""Parse the package __init__ file to find a variable so that it's not
in multiple places.
"""
filename = os.path.join("traces", "__init__.py")
result = None
with open(filename) as stream:
for line in stream:
if key in line:
result = line.split('=')[-1].strip().replace("'", "")
# throw an error if the key isn't found in the __init__ file
if result is None:
raise ValueError('must define %s in %s' % (key, filename))
return result | Parse the package __init__ file to find a variable so that it's not
in multiple places. | Below is the instruction that describes the task:
### Input:
Parse the package __init__ file to find a variable so that it's not
in multiple places.
### Response:
def read_init(key):
"""Parse the package __init__ file to find a variable so that it's not
in multiple places.
"""
filename = os.path.join("traces", "__init__.py")
result = None
with open(filename) as stream:
for line in stream:
if key in line:
result = line.split('=')[-1].strip().replace("'", "")
# throw an error if the key isn't found in the __init__ file
if result is None:
raise ValueError('must define %s in %s' % (key, filename))
return result |
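A quick usage sketch for the function above; creating the hard-coded traces/__init__.py and the __version__ key are illustrative choices, not part of the original:

import os

# Hypothetical demo: create a minimal traces/__init__.py, then read a key back.
os.makedirs("traces", exist_ok=True)
with open(os.path.join("traces", "__init__.py"), "w") as stream:
    stream.write("__version__ = '1.2.3'\n")
print(read_init("__version__"))  # -> 1.2.3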
def get_open_port() -> int:
"""
Gets a PORT that will (probably) be available on the machine.
It is possible that in-between the time in which the open PORT is found and when it is used, another process may
bind to it instead.
:return: the (probably) available PORT
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(("", 0))
free_socket.listen(1)
port = free_socket.getsockname()[1]
free_socket.close()
return port | Gets a PORT that will (probably) be available on the machine.
It is possible that in-between the time in which the open PORT is found and when it is used, another process may
bind to it instead.
:return: the (probably) available PORT | Below is the instruction that describes the task:
### Input:
Gets a PORT that will (probably) be available on the machine.
It is possible that in-between the time in which the open PORT is found and when it is used, another process may
bind to it instead.
:return: the (probably) available PORT
### Response:
def get_open_port() -> int:
"""
Gets a PORT that will (probably) be available on the machine.
It is possible that in-between the time in which the open PORT of found and when it is used, another process may
bind to it instead.
:return: the (probably) available PORT
"""
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind(("", 0))
free_socket.listen(1)
port = free_socket.getsockname()[1]
free_socket.close()
return port |
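A short usage sketch; binding right away narrows (but cannot eliminate) the race window the docstring warns about:

import socket

port = get_open_port()
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("", port))  # bind immediately to reduce the race window
server.listen(1)
print("listening on port", port)
server.close()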
def url_fix(s, charset='utf-8'):
r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
# First step is to switch to unicode processing and to convert
# backslashes (which are invalid in URLs anyways) to slashes. This is
# consistent with what Chrome does.
s = to_unicode(s, charset, 'replace').replace('\\', '/')
# For the specific case that we look like a malformed windows URL
# we want to fix this up manually:
if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
s = 'file:///' + s[7:]
url = url_parse(s)
path = url_quote(url.path, charset, safe='/%+$!*\'(),')
qs = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),')
anchor = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),')
return to_native(url_unparse((url.scheme, url.encode_netloc(),
path, qs, anchor))) | r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string. | Below is the instruction that describes the task:
### Input:
r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
### Response:
def url_fix(s, charset='utf-8'):
r"""Sometimes you get a URL from a user that just isn't a real URL because
it contains unsafe characters like ' ' and so on. This function can fix
some of the problems in a similar way browsers handle data entered by the
user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
:param s: the string with the URL to fix.
:param charset: The target charset for the URL if the url was given as
unicode string.
"""
# First step is to switch to unicode processing and to convert
# backslashes (which are invalid in URLs anyways) to slashes. This is
# consistent with what Chrome does.
s = to_unicode(s, charset, 'replace').replace('\\', '/')
# For the specific case that we look like a malformed windows URL
# we want to fix this up manually:
if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
s = 'file:///' + s[7:]
url = url_parse(s)
path = url_quote(url.path, charset, safe='/%+$!*\'(),')
qs = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),')
anchor = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),')
return to_native(url_unparse((url.scheme, url.encode_netloc(),
path, qs, anchor))) |
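For readers without the surrounding url_parse/url_quote helpers, here is a stdlib-only sketch of the same idea; url_fix_approx is a rough approximation of my own, not werkzeug's implementation:

from urllib.parse import urlsplit, urlunsplit, quote, quote_plus

def url_fix_approx(s):
    s = s.replace('\\', '/')
    scheme, netloc, path, query, fragment = urlsplit(s)
    return urlunsplit((scheme, netloc,
                       quote(path, safe="/%+$!*'(),"),
                       quote_plus(query, safe=":&%=+$!*'(),"),
                       quote_plus(fragment, safe=":&%=+$!*'(),")))

print(url_fix_approx('http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)'))
# -> http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)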
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_'+k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ | Build the execution environment. | Below is the the instruction that describes the task:
### Input:
Build the execution environment.
### Response:
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).items():
environ[str('HTTP_'+k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ |
def percentile(a, q):
"""
Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements.
"""
if not a:
return None
if isinstance(q, (float, int)):
qq = [q]
elif isinstance(q, (tuple, list)):
qq = q
else:
raise ValueError("Quantile type {} not understood".format(type(q)))
if isinstance(a, (float, int)):
a = [a]
for i in range(len(qq)):
if qq[i] < 0. or qq[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
qq[i] /= 100.
a = sorted(flatten(a))
r = []
for q in qq:
k = (len(a) - 1) * q
f = math.floor(k)
c = math.ceil(k)
if f == c:
r.append(float(a[int(k)]))
continue
d0 = a[int(f)] * (c - k)
d1 = a[int(c)] * (k - f)
r.append(float(d0 + d1))
if len(r) == 1:
return r[0]
return r | Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements. | Below is the instruction that describes the task:
### Input:
Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements.
### Response:
def percentile(a, q):
"""
Compute the qth percentile of the data along the specified axis.
Simpler version than the numpy version that always flattens input arrays.
Examples
--------
>>> a = [[10, 7, 4], [3, 2, 1]]
>>> percentile(a, 20)
2.0
>>> percentile(a, 50)
3.5
>>> percentile(a, [20, 80])
[2.0, 7.0]
>>> a = list(range(40))
>>> percentile(a, 25)
9.75
:param a: Input array or object that can be converted to an array.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: the qth percentile(s) of the array elements.
"""
if not a:
return None
if isinstance(q, (float, int)):
qq = [q]
elif isinstance(q, (tuple, list)):
qq = q
else:
raise ValueError("Quantile type {} not understood".format(type(q)))
if isinstance(a, (float, int)):
a = [a]
for i in range(len(qq)):
if qq[i] < 0. or qq[i] > 100.:
raise ValueError("Percentiles must be in the range [0,100]")
qq[i] /= 100.
a = sorted(flatten(a))
r = []
for q in qq:
k = (len(a) - 1) * q
f = math.floor(k)
c = math.ceil(k)
if f == c:
r.append(float(a[int(k)]))
continue
d0 = a[int(f)] * (c - k)
d1 = a[int(c)] * (k - f)
r.append(float(d0 + d1))
if len(r) == 1:
return r[0]
return r |
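The function relies on a flatten helper from its module; this minimal stand-in (an assumption, not the original helper) makes the doctest examples above runnable:

import math

def flatten(a):  # hypothetical stand-in for the module's flatten helper
    for x in a:
        if isinstance(x, (list, tuple)):
            for y in flatten(x):
                yield y
        else:
            yield x

print(percentile([[10, 7, 4], [3, 2, 1]], 50))  # 3.5
print(percentile(list(range(40)), [25, 75]))    # [9.75, 29.25]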
def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date.
"""
mappings = df[list(mapping_columns)]
with pd.option_context('mode.chained_assignment', None):
mappings['sid'] = mappings.index
mappings.reset_index(drop=True, inplace=True)
# take the most recent sid->exchange mapping based on end date
asset_exchange = df[
['exchange', 'end_date']
].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)
_check_symbol_mappings(mappings, exchanges, asset_exchange)
return (
df.groupby(level=0).apply(_check_asset_group),
mappings,
) | Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date. | Below is the instruction that describes the task:
### Input:
Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date.
### Response:
def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
exchanges : pd.DataFrame
The exchanges table.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date.
"""
mappings = df[list(mapping_columns)]
with pd.option_context('mode.chained_assignment', None):
mappings['sid'] = mappings.index
mappings.reset_index(drop=True, inplace=True)
# take the most recent sid->exchange mapping based on end date
asset_exchange = df[
['exchange', 'end_date']
].sort_values('end_date').groupby(level=0)['exchange'].nth(-1)
_check_symbol_mappings(mappings, exchanges, asset_exchange)
return (
df.groupby(level=0).apply(_check_asset_group),
mappings,
) |
def from_bytes(value):
"""Converts bytes to a string value, if necessary.
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode.
"""
result = (value.decode('utf-8')
if isinstance(value, six.binary_type) else value)
if isinstance(result, six.text_type):
return result
else:
raise ValueError(
'{0!r} could not be converted to unicode'.format(value)) | Converts bytes to a string value, if necessary.
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode. | Below is the instruction that describes the task:
### Input:
Converts bytes to a string value, if necessary.
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode.
### Response:
def from_bytes(value):
"""Converts bytes to a string value, if necessary.
Args:
value (Union[str, bytes]): The value to be converted.
Returns:
str: The original value converted to unicode (if bytes) or as passed in
if it started out as unicode.
Raises:
ValueError: If the value could not be converted to unicode.
"""
result = (value.decode('utf-8')
if isinstance(value, six.binary_type) else value)
if isinstance(result, six.text_type):
return result
else:
raise ValueError(
'{0!r} could not be converted to unicode'.format(value)) |
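A usage sketch (assumes six is installed, as the original module does):

print(from_bytes(b'caf\xc3\xa9'))    # 'café'
print(from_bytes(u'already text'))   # returned unchanged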
def show_linkinfo_output_show_link_info_linkinfo_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_version = ET.SubElement(show_link_info, "linkinfo-version")
linkinfo_version.text = kwargs.pop('linkinfo_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_linkinfo_output_show_link_info_linkinfo_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_linkinfo = ET.Element("show_linkinfo")
config = show_linkinfo
output = ET.SubElement(show_linkinfo, "output")
show_link_info = ET.SubElement(output, "show-link-info")
linkinfo_rbridgeid_key = ET.SubElement(show_link_info, "linkinfo-rbridgeid")
linkinfo_rbridgeid_key.text = kwargs.pop('linkinfo_rbridgeid')
linkinfo_version = ET.SubElement(show_link_info, "linkinfo-version")
linkinfo_version.text = kwargs.pop('linkinfo_version')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
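For intuition, this standalone ElementTree sketch (the sample values '1' and '2' are hypothetical) prints the XML payload the helper assembles:

import xml.etree.ElementTree as ET

show_linkinfo = ET.Element("show_linkinfo")
output = ET.SubElement(show_linkinfo, "output")
info = ET.SubElement(output, "show-link-info")
ET.SubElement(info, "linkinfo-rbridgeid").text = "1"
ET.SubElement(info, "linkinfo-version").text = "2"
print(ET.tostring(show_linkinfo).decode())
# <show_linkinfo><output><show-link-info><linkinfo-rbridgeid>1</linkinfo-rbridgeid>...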
def maxind_numba(block):
""" filter for indels """
## remove terminal edges
inds = 0
for row in xrange(block.shape[0]):
where = np.where(block[row] != 45)[0]
if len(where) == 0:
obs = 100
else:
left = np.min(where)
right = np.max(where)
obs = np.sum(block[row, left:right] == 45)
if obs > inds:
inds = obs
return inds | filter for indels | Below is the the instruction that describes the task:
### Input:
filter for indels
### Response:
def maxind_numba(block):
""" filter for indels """
## remove terminal edges
inds = 0
for row in xrange(block.shape[0]):
where = np.where(block[row] != 45)[0]
if len(where) == 0:
obs = 100
else:
left = np.min(where)
right = np.max(where)
obs = np.sum(block[row, left:right] == 45)
if obs > inds:
inds = obs
return inds |
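A worked example; 45 is the ASCII code for '-', and terminal gaps are trimmed before counting (under Python 3, read xrange as range):

import numpy as np

block = np.array([
    [65, 45, 45, 66, 45],  # "A--B-": two internal gaps, trailing '-' ignored
    [45, 67, 68, 45, 69],  # "-CD-E": one internal gap, leading '-' ignored
], dtype=np.uint8)
print(maxind_numba(block))  # 2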
def label_from_instance(self, obj):
"""
Creates labels which represent the tree level of each node when
generating option labels.
"""
return '%s %s' % (self.level_indicator * getattr(obj, obj._mptt_meta.level_attr), obj) | Creates labels which represent the tree level of each node when
generating option labels. | Below is the instruction that describes the task:
### Input:
Creates labels which represent the tree level of each node when
generating option labels.
### Response:
def label_from_instance(self, obj):
"""
Creates labels which represent the tree level of each node when
generating option labels.
"""
return '%s %s' % (self.level_indicator * getattr(obj, obj._mptt_meta.level_attr), obj) |
def _toVec(shape, val):
'''
takes a single value and creates a vector / matrix with that value filled
in it
'''
mat = np.empty(shape)
mat.fill(val)
return mat | takes a single value and creates a vector / matrix with that value filled
in it | Below is the instruction that describes the task:
### Input:
takes a single value and creates a vector / matrix with that value filled
in it
### Response:
def _toVec(shape, val):
'''
takes a single value and creates a vector / matrix with that value filled
in it
'''
mat = np.empty(shape)
mat.fill(val)
return mat |
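A usage sketch; np.full is the one-line modern equivalent:

import numpy as np

m = _toVec((2, 3), 7.0)
print(m)                     # [[7. 7. 7.] [7. 7. 7.]]
print(np.full((2, 3), 7.0))  # same result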
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'exclude') and self.exclude is not None:
_dict['exclude'] = self.exclude
if hasattr(self, 'include') and self.include is not None:
_dict['include'] = self.include
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'exclude') and self.exclude is not None:
_dict['exclude'] = self.exclude
if hasattr(self, 'include') and self.include is not None:
_dict['include'] = self.include
return _dict |
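A usage sketch with a hypothetical host class (the original class is not shown); note the hasattr/None guards drop unset fields:

class Filter(object):  # hypothetical stand-in for the model class
    def __init__(self, exclude=None, include=None):
        self.exclude = exclude
        self.include = include

Filter._to_dict = _to_dict
print(Filter(include=['title'])._to_dict())  # {'include': ['title']}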
def snake_to_camel(snake_str):
"""
:param snake_str: string
:return: string converted from a snake_case to a CamelCase
"""
components = snake_str.split('_')
return ''.join(x.title() for x in components) | :param snake_str: string
:return: string converted from a snake_case to a CamelCase | Below is the instruction that describes the task:
### Input:
:param snake_str: string
:return: string converted from a snake_case to a CamelCase
### Response:
def snake_to_camel(snake_str):
"""
:param snake_str: string
:return: string converted from a snake_case to a CamelCase
"""
components = snake_str.split('_')
return ''.join(x.title() for x in components) |
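A usage sketch; because str.title lowercases interior capitals, acronyms are flattened:

print(snake_to_camel('snake_to_camel'))  # SnakeToCamel
print(snake_to_camel('parse_HTML'))      # ParseHtml, not ParseHTML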
def exec_func_src3(func, globals_, sentinal=None, verbose=False,
start=None, stop=None):
"""
execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw
"""
import utool as ut
sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True)
if sentinal is not None:
sourcecode = ut.replace_between_tags(sourcecode, '', sentinal)
if start is not None or stop is not None:
sourcecode = '\n'.join(sourcecode.splitlines()[slice(start, stop)])
if verbose:
print(ut.color_text(sourcecode, 'python'))
six.exec_(sourcecode, globals_) | execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw | Below is the instruction that describes the task:
### Input:
execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw
### Response:
def exec_func_src3(func, globals_, sentinal=None, verbose=False,
start=None, stop=None):
"""
execs a func and returns requested local vars.
Does not modify globals unless update=True (or in IPython)
SeeAlso:
ut.execstr_funckw
"""
import utool as ut
sourcecode = ut.get_func_sourcecode(func, stripdef=True, stripret=True)
if sentinal is not None:
sourcecode = ut.replace_between_tags(sourcecode, '', sentinal)
if start is not None or stop is not None:
sourcecode = '\n'.join(sourcecode.splitlines()[slice(start, stop)])
if verbose:
print(ut.color_text(sourcecode, 'python'))
six.exec_(sourcecode, globals_) |
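A stdlib-only sketch of the core trick (no utool or six): pull a function's body with inspect and exec it so its locals land in a namespace you control; _demo is a hypothetical target:

import inspect
import textwrap

def _demo():  # hypothetical target function
    a = 1
    b = a + 1

src = inspect.getsource(_demo)
body = textwrap.dedent("\n".join(src.splitlines()[1:]))  # strip the def line
namespace = {}
exec(body, namespace)
print(namespace["b"])  # 2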
def var_contains(var, value):
'''
Verify if variable contains a value in make.conf
Return True if value is set for var
CLI Example:
.. code-block:: bash
salt '*' makeconf.var_contains 'LINGUAS' 'en'
'''
setval = get_var(var)
# Remove any escaping that was needed to past through salt
value = value.replace('\\', '')
if setval is None:
return False
return value in setval.split() | Verify if variable contains a value in make.conf
Return True if value is set for var
CLI Example:
.. code-block:: bash
salt '*' makeconf.var_contains 'LINGUAS' 'en' | Below is the instruction that describes the task:
### Input:
Verify if variable contains a value in make.conf
Return True if value is set for var
CLI Example:
.. code-block:: bash
salt '*' makeconf.var_contains 'LINGUAS' 'en'
### Response:
def var_contains(var, value):
'''
Verify if variable contains a value in make.conf
Return True if value is set for var
CLI Example:
.. code-block:: bash
salt '*' makeconf.var_contains 'LINGUAS' 'en'
'''
setval = get_var(var)
# Remove any escaping that was needed to past through salt
value = value.replace('\\', '')
if setval is None:
return False
return value in setval.split() |
def update_event_hub(self, hub_name, hub=None):
'''
Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub.
'''
_validate_not_none('hub_name', hub_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(hub_name) + '?api-version=2014-01'
request.body = _get_request_body(_convert_event_hub_to_xml(hub))
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers.append(('If-Match', '*'))
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) | Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub. | Below is the instruction that describes the task:
### Input:
Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub.
### Response:
def update_event_hub(self, hub_name, hub=None):
'''
Updates an Event Hub.
hub_name:
Name of event hub.
hub:
Optional. Event hub properties. Instance of EventHub class.
hub.message_retention_in_days:
Number of days to retain the events for this Event Hub.
'''
_validate_not_none('hub_name', hub_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(hub_name) + '?api-version=2014-01'
request.body = _get_request_body(_convert_event_hub_to_xml(hub))
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers.append(('If-Match', '*'))
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_event_hub(response) |
def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node')) | Request to move up the hierarchy to highest-weight parent | Below is the the instruction that describes the task:
### Input:
Request to move up the hierarchy to highest-weight parent
### Response:
def OnUpView(self, event):
"""Request to move up the hierarchy to highest-weight parent"""
node = self.activated_node
parents = []
selected_parent = None
if node:
if hasattr( self.adapter, 'best_parent' ):
selected_parent = self.adapter.best_parent( node )
else:
parents = self.adapter.parents( node )
if parents:
if not selected_parent:
parents.sort(key = lambda a: self.adapter.value(node, a))
selected_parent = parents[-1]
class event:
node = selected_parent
self.OnNodeActivated(event)
else:
self.SetStatusText(_('No parents for the currently selected node: %(node_name)s')
% dict(node_name=self.adapter.label(node)))
else:
self.SetStatusText(_('No currently selected node')) |
def operator_same_class(method):
"""
Intended to wrap operator methods, this decorator ensures the `other`
parameter is of the same type as the `self` parameter.
:param method: The method being decorated.
:return: The wrapper to replace the method with.
"""
def wrapper(self, other):
if not isinstance(other, self.__class__):
raise TypeError(
'unsupported operand types: \'{0}\' and \'{1}\''.format(
self.__class__.__name__, other.__class__.__name__))
return method(self, other)
return wrapper | Intended to wrap operator methods, this decorator ensures the `other`
parameter is of the same type as the `self` parameter.
:param method: The method being decorated.
:return: The wrapper to replace the method with. | Below is the instruction that describes the task:
### Input:
Intended to wrap operator methods, this decorator ensures the `other`
parameter is of the same type as the `self` parameter.
:param method: The method being decorated.
:return: The wrapper to replace the method with.
### Response:
def operator_same_class(method):
"""
Intended to wrap operator methods, this decorator ensures the `other`
parameter is of the same type as the `self` parameter.
:param method: The method being decorated.
:return: The wrapper to replace the method with.
"""
def wrapper(self, other):
if not isinstance(other, self.__class__):
raise TypeError(
'unsupported operand types: \'{0}\' and \'{1}\''.format(
self.__class__.__name__, other.__class__.__name__))
return method(self, other)
return wrapper |
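A usage sketch with a hypothetical Money class:

class Money(object):
    def __init__(self, cents):
        self.cents = cents

    @operator_same_class
    def __add__(self, other):
        return Money(self.cents + other.cents)

print((Money(150) + Money(50)).cents)  # 200
try:
    Money(150) + 3
except TypeError as exc:
    print(exc)  # unsupported operand types: 'Money' and 'int'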
def validate(self):
""" validate: Makes sure node is valid
Args: None
Returns: boolean indicating if node is valid
"""
from .files import File
assert self.source_id is not None, "Assumption Failed: Node must have a source_id"
assert isinstance(self.title, str), "Assumption Failed: Node title is not a string"
assert isinstance(self.description, str) or self.description is None, "Assumption Failed: Node description is not a string"
assert isinstance(self.children, list), "Assumption Failed: Node children is not a list"
for f in self.files:
assert isinstance(f, File), "Assumption Failed: files must be file class"
f.validate()
source_ids = [c.source_id for c in self.children]
duplicates = set([x for x in source_ids if source_ids.count(x) > 1])
assert len(duplicates) == 0, "Assumption Failed: Node must have unique source id among siblings ({} appears multiple times)".format(duplicates)
return True | validate: Makes sure node is valid
Args: None
Returns: boolean indicating if node is valid | Below is the instruction that describes the task:
### Input:
validate: Makes sure node is valid
Args: None
Returns: boolean indicating if node is valid
### Response:
def validate(self):
""" validate: Makes sure node is valid
Args: None
Returns: boolean indicating if node is valid
"""
from .files import File
assert self.source_id is not None, "Assumption Failed: Node must have a source_id"
assert isinstance(self.title, str), "Assumption Failed: Node title is not a string"
assert isinstance(self.description, str) or self.description is None, "Assumption Failed: Node description is not a string"
assert isinstance(self.children, list), "Assumption Failed: Node children is not a list"
for f in self.files:
assert isinstance(f, File), "Assumption Failed: files must be file class"
f.validate()
source_ids = [c.source_id for c in self.children]
duplicates = set([x for x in source_ids if source_ids.count(x) > 1])
assert len(duplicates) == 0, "Assumption Failed: Node must have unique source id among siblings ({} appears multiple times)".format(duplicates)
return True |
def MakeFITS(model, fitsfile=None):
'''
Generate a FITS file for a given :py:mod:`everest` run.
:param model: An :py:mod:`everest` model instance
'''
# Get the fits file name
if fitsfile is None:
outfile = os.path.join(model.dir, model._mission.FITSFile(
model.ID, model.season, model.cadence))
else:
outfile = os.path.join(model.dir, fitsfile)
if os.path.exists(outfile) and not model.clobber:
return
elif os.path.exists(outfile):
os.remove(outfile)
log.info('Generating FITS file...')
# Create the HDUs
primary = PrimaryHDU(model)
lightcurve = LightcurveHDU(model)
pixels = PixelsHDU(model)
aperture = ApertureHDU(model)
images = ImagesHDU(model)
hires = HiResHDU(model)
# Combine to get the HDUList
hdulist = pyfits.HDUList(
[primary, lightcurve, pixels, aperture, images, hires])
# Output to the FITS file
hdulist.writeto(outfile)
return | Generate a FITS file for a given :py:mod:`everest` run.
:param model: An :py:mod:`everest` model instance | Below is the instruction that describes the task:
### Input:
Generate a FITS file for a given :py:mod:`everest` run.
:param model: An :py:mod:`everest` model instance
### Response:
def MakeFITS(model, fitsfile=None):
'''
Generate a FITS file for a given :py:mod:`everest` run.
:param model: An :py:mod:`everest` model instance
'''
# Get the fits file name
if fitsfile is None:
outfile = os.path.join(model.dir, model._mission.FITSFile(
model.ID, model.season, model.cadence))
else:
outfile = os.path.join(model.dir, fitsfile)
if os.path.exists(outfile) and not model.clobber:
return
elif os.path.exists(outfile):
os.remove(outfile)
log.info('Generating FITS file...')
# Create the HDUs
primary = PrimaryHDU(model)
lightcurve = LightcurveHDU(model)
pixels = PixelsHDU(model)
aperture = ApertureHDU(model)
images = ImagesHDU(model)
hires = HiResHDU(model)
# Combine to get the HDUList
hdulist = pyfits.HDUList(
[primary, lightcurve, pixels, aperture, images, hires])
# Output to the FITS file
hdulist.writeto(outfile)
return |
def merge_vcf_files(infiles, ref_seqs, outfile, threads=1):
'''infiles: list of input VCF files to be merged.
outfile: name of output VCF file.
threads: number of input files to read in parallel'''
vars_dict = vcf_file_read.vcf_files_to_dict_of_vars(infiles, ref_seqs, threads=threads)
_dict_of_vars_to_vcf_file(vars_dict, outfile) | infiles: list of input VCF files to be merged.
outfile: name of output VCF file.
threads: number of input files to read in parallel | Below is the instruction that describes the task:
### Input:
infiles: list of input VCF files to be merged.
outfile: name of output VCF file.
threads: number of input files to read in parallel
### Response:
def merge_vcf_files(infiles, ref_seqs, outfile, threads=1):
'''infiles: list of input VCF files to be merged.
outfile: name of output VCF file.
threads: number of input files to read in parallel'''
vars_dict = vcf_file_read.vcf_files_to_dict_of_vars(infiles, ref_seqs, threads=threads)
_dict_of_vars_to_vcf_file(vars_dict, outfile) |
def from_file(self, fname):
"""read in a file and compute digest"""
f = open(fname, "rb")
data = f.read()
self.update(data)
f.close() | read in a file and compute digest | Below is the the instruction that describes the task:
### Input:
read in a file and compute digest
### Response:
def from_file(self, fname):
"""read in a file and compute digest"""
f = open(fname, "rb")
data = f.read()
self.update(data)
f.close() |
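A usage sketch with a hypothetical hashlib-backed host class (any object exposing update() works); wrapping the open() in a with-block would also be the more idiomatic way to guarantee the close:

import hashlib

class Digest(object):  # hypothetical host class for the method above
    def __init__(self):
        self._h = hashlib.sha256()
    def update(self, data):
        self._h.update(data)
    def hexdigest(self):
        return self._h.hexdigest()

Digest.from_file = from_file
d = Digest()
d.from_file("example.bin")  # assumes this file exists
print(d.hexdigest())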
def series_resistors(target,
pore_area='pore.area',
throat_area='throat.area',
pore_thermal_conductivity='pore.thermal_conductivity',
throat_thermal_conductivity='throat.thermal_conductivity',
conduit_lengths='throat.conduit_lengths',
conduit_shape_factors='throat.poisson_shape_factors'):
r"""
Calculate the thermal conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_thermal_conductivity : string
Dictionary key of the pore thermal conductivity values
throat_thermal_conductivity : string
Dictionary key of the throat thermal conductivity values
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing thermal conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
"""
return generic_conductance(target=target, transport_type='diffusion',
pore_area=pore_area,
throat_area=throat_area,
pore_diffusivity=pore_thermal_conductivity,
throat_diffusivity=throat_thermal_conductivity,
conduit_lengths=conduit_lengths,
conduit_shape_factors=conduit_shape_factors) | r"""
Calculate the thermal conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_thermal_conductivity : string
Dictionary key of the pore thermal conductivity values
throat_thermal_conductivity : string
Dictionary key of the throat thermal conductivity values
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing thermal conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument. | Below is the instruction that describes the task:
### Input:
r"""
Calculate the thermal conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_thermal_conductivity : string
Dictionary key of the pore thermal conductivity values
throat_thermal_conductivity : string
Dictionary key of the throat thermal conductivity values
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing thermal conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
### Response:
def series_resistors(target,
pore_area='pore.area',
throat_area='throat.area',
pore_thermal_conductivity='pore.thermal_conductivity',
throat_thermal_conductivity='throat.thermal_conductivity',
conduit_lengths='throat.conduit_lengths',
conduit_shape_factors='throat.poisson_shape_factors'):
r"""
Calculate the thermal conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_thermal_conductivity : string
Dictionary key of the pore thermal conductivity values
throat_thermal_conductivity : string
Dictionary key of the throat thermal conductivity values
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing thermal conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
"""
return generic_conductance(target=target, transport_type='diffusion',
pore_area=pore_area,
throat_area=throat_area,
pore_diffusivity=pore_thermal_conductivity,
throat_diffusivity=throat_thermal_conductivity,
conduit_lengths=conduit_lengths,
conduit_shape_factors=conduit_shape_factors) |
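generic_conductance itself is not shown here; for intuition, this hedged sketch applies the underlying series-resistor rule for a single conduit (all numbers are illustrative, and this is not OpenPNM's implementation):

def series_conductance(k, A, L):
    """k, A, L: (pore1 half, throat, pore2 half) conductivities, areas, lengths."""
    return 1.0 / sum(Li / (ki * Ai) for ki, Ai, Li in zip(k, A, L))

g = series_conductance(k=(0.6, 0.6, 0.6),          # W/(m K)
                       A=(1e-12, 2.5e-13, 1e-12),  # m^2
                       L=(5e-7, 1e-6, 5e-7))       # m
print(g)  # effective thermal conductance of the conduit, in W/K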
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d | r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_ | Below is the instruction that describes the task:
### Input:
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
### Response:
def calc_uniform_lim_glorot(inmaps, outmaps, kernel=(1, 1)):
r"""Calculates the lower bound and the upper bound of the uniform distribution proposed by Glorot et al.
.. math::
b &= \sqrt{\frac{6}{NK + M}}\\
a &= -b
Args:
inmaps (int): Map size of an input Variable, :math:`N`.
outmaps (int): Map size of an output Variable, :math:`M`.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
In above definition, :math:`K` is the product of shape dimensions.
In Affine, the default value should be used.
Example:
.. code-block:: python
import nnabla as nn
import nnabla.parametric_functions as PF
import nnabla.initializer as I
x = nn.Variable([60,1,28,28])
lb,ub= I.calc_uniform_lim_glorot(x.shape[1],64)
w = I.UniformInitializer((lb,ub))
b = I.ConstantInitializer(0)
h = PF.convolution(x, 64, [3, 3], w_init=w, b_init=b, pad=[1, 1], name='conv')
References:
* `Glorot and Bengio. Understanding the difficulty of training deep
feedforward neural networks
<http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf>`_
"""
d = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
return -d, d |
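For readers without nnabla installed, a minimal NumPy sketch of the same bound computation, here used to draw a weight tensor directly; the function name glorot_uniform and the (outmaps, inmaps, *kernel) weight layout are illustrative assumptions, not part of the nnabla API:
import numpy as np

def glorot_uniform(inmaps, outmaps, kernel=(1, 1), rng=np.random):
    # b = sqrt(6 / (N*K + M)), exactly as in calc_uniform_lim_glorot above
    b = np.sqrt(6. / (np.prod(kernel) * inmaps + outmaps))
    return rng.uniform(-b, b, size=(outmaps, inmaps) + tuple(kernel))

w = glorot_uniform(1, 64, kernel=(3, 3))
assert w.shape == (64, 1, 3, 3)
assert abs(w).max() <= np.sqrt(6. / (3 * 3 * 1 + 64))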
def _pfp__build(self, stream=None, save_offset=False):
"""Build the String field
:stream: TODO
:returns: TODO
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
data = self._pfp__value + utils.binary("\x00")
if stream is None:
return data
else:
stream.write(data)
return len(data) | Build the String field
:stream: TODO
:returns: TODO | Below is the instruction that describes the task:
### Input:
Build the String field
:stream: TODO
:returns: TODO
### Response:
def _pfp__build(self, stream=None, save_offset=False):
"""Build the String field
:stream: TODO
:returns: TODO
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
data = self._pfp__value + utils.binary("\x00")
if stream is None:
return data
else:
stream.write(data)
return len(data) |
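A self-contained sketch of the same build pattern against io.BytesIO; the FakeString class below only mimics the two attributes the method touches and is not part of the pfp API (pfp uses utils.binary for the NUL terminator, replaced here with a plain bytes literal):
import io

class FakeString:
    def __init__(self, value):
        self._pfp__value = value   # raw bytes of the string field
        self._pfp__offset = None

    def build(self, stream=None, save_offset=False):
        if stream is not None and save_offset:
            self._pfp__offset = stream.tell()
        data = self._pfp__value + b"\x00"  # NUL-terminated, as in the original
        if stream is None:
            return data
        stream.write(data)
        return len(data)

s = FakeString(b"hello")
buf = io.BytesIO()
assert s.build(buf, save_offset=True) == 6
assert s._pfp__offset == 0 and buf.getvalue() == b"hello\x00"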
def _compute_edge_transforms(node_states,
depth,
num_transforms,
name="transform"):
"""Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T)
name: A name for the function
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
"""
node_shapes = common_layers.shape_list(node_states)
x = common_layers.dense(
node_states,
depth * num_transforms,
use_bias=False,
name=name)
batch = node_shapes[0] # B.
length = node_shapes[1] # N.
# Making the fourth dimension explicit by separating the vectors of size
# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [T, K]
# (in k) and [T, V] (in v).
#
x = tf.reshape(x, [batch, length, num_transforms, depth])
# Fold the transform dimension into the node dimension.
x = tf.reshape(x, [batch, length * num_transforms, depth])
return x | Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T)
name: A name for the function
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V]) | Below is the instruction that describes the task:
### Input:
Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T)
name: A name for the function
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
### Response:
def _compute_edge_transforms(node_states,
depth,
num_transforms,
name="transform"):
"""Helper function that computes transformation for keys and values.
Let B be the number of batches.
Let N be the number of nodes in the graph.
Let D be the size of the node hidden states.
Let K be the size of the attention keys/queries (total_key_depth).
Let V be the size of the attention values (total_value_depth).
Let T be the total number of transforms (num_transforms).
Computes the transforms for keys or values for attention.
* For each node N_j and edge type t, a key K_jt of size K is computed. When an
edge of type t goes from node N_j to any other node, K_jt is the key that is
in the attention process.
* For each node N_j and edge type t, a value V_jt of size V is computed. When
an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt)
produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt.
Args:
node_states: A tensor of shape [B, N, D]
depth: An integer (K or V)
num_transforms: An integer (T)
name: A name for the function
Returns:
x: The attention keys or values for each node and edge type
(shape [B, N*T, K or V])
"""
node_shapes = common_layers.shape_list(node_states)
x = common_layers.dense(
node_states,
depth * num_transforms,
use_bias=False,
name=name)
batch = node_shapes[0] # B.
length = node_shapes[1] # N.
# Making the fourth dimension explicit by separating the vectors of size
# K*T (in k) and V*T (in v) into two-dimensional matrices with shape [T, K]
# (in k) and [T, V] (in v).
#
x = tf.reshape(x, [batch, length, num_transforms, depth])
# Fold the transform dimension into the node dimension.
x = tf.reshape(x, [batch, length * num_transforms, depth])
return x |
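A NumPy sketch of just the two reshape steps, with illustrative sizes (B=2, N=3, T=4, D=5): the dense layer emits T*D features per node, which are split into T vectors of size D and laid out node-major, so row n*T + t holds the key/value of node n under edge type t:
import numpy as np

B, N, T, D = 2, 3, 4, 5
x = np.arange(B * N * T * D, dtype=np.float32).reshape(B, N, T * D)
x = x.reshape(B, N, T, D)    # make the transform axis explicit
x = x.reshape(B, N * T, D)   # keys/values indexed by (node, edge type)
assert x.shape == (B, N * T, D)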
def django(line):
'''
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
'''
#TODO we need to handle case2 logs
data = {}
log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
if len(log) == 1:
data['timestamp'] = datetime.datetime.strptime(re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)',\
log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat()
data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
message = re.findall(r'\{.+\}', log[0])
try:
if len(message) > 0:
message = json.loads(message[0])
else:
message = re.split(']', log[0])
message = ''.join(message[2:])
except ValueError:
message = re.split(']', log[0])
message = ''.join(message[2:])
data['message'] = message
return dict(
timestamp=data['timestamp'],
level=data['loglevel'],
data=data,
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
data={'raw': line}
) | >>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true } | Below is the instruction that describes the task:
### Input:
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
### Response:
def django(line):
'''
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
'''
#TODO we need to handle case2 logs
data = {}
log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
if len(log) == 1:
data['timestamp'] = datetime.datetime.strptime(re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)',\
log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat()
data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
data['logname'] = re.findall(r'\[\D+.\w+:\d+\]', log[0])[0]
message = re.findall(r'\{.+\}', log[0])
try:
if len(message) > 0:
message = json.loads(message[0])
else:
message = re.split(']', log[0])
message = ''.join(message[2:])
except ValueError:
message = re.split(']', log[0])
message = ''.join(message[2:])
data['message'] = message
return dict(
timestamp=data['timestamp'],
level=data['loglevel'],
data=data,
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
data={'raw': line}
) |
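One hedged sketch toward the Case2 TODO above: traceback continuation lines never start with the "[dd/Mon/yyyy hh:mm:ss]" prefix, so they can be folded into the preceding record before django() is applied. fold_continuations is a hypothetical helper, not part of the original module:
import re

_PREFIX = re.compile(r'^\[\d+/\w+/\d+ \d+:\d+:\d+\]')

def fold_continuations(lines):
    buf = None
    for line in lines:
        if _PREFIX.match(line):
            if buf is not None:
                yield buf
            buf = line
        elif buf is not None:
            buf += '\n' + line  # traceback line: glue onto the current record
    if buf is not None:
        yield buf

# usage: for record in fold_continuations(logfile): django(record)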
def custodian_archive(packages=None):
"""Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive.
"""
modules = {'c7n', 'pkg_resources'}
if packages:
modules = filter(None, modules.union(packages))
return PythonPackageArchive(*sorted(modules)) | Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive. | Below is the instruction that describes the task:
### Input:
Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive.
### Response:
def custodian_archive(packages=None):
"""Create a lambda code archive for running custodian.
Lambda archive currently always includes `c7n` and
`pkg_resources`. Add additional packages in the mode block.
Example policy that includes additional packages
.. code-block:: yaml
policy:
name: lambda-archive-example
resource: s3
mode:
packages:
- botocore
packages: List of additional packages to include in the lambda archive.
"""
modules = {'c7n', 'pkg_resources'}
if packages:
modules = filter(None, modules.union(packages))
return PythonPackageArchive(*sorted(modules)) |
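The package-merging step in isolation (no c7n import required): the set union collapses duplicates and filter(None, ...) drops falsy entries before sorting; the sample values here are illustrative:
modules = {'c7n', 'pkg_resources'}
packages = ['botocore', None, 'c7n']
merged = sorted(filter(None, modules.union(packages)))
assert merged == ['botocore', 'c7n', 'pkg_resources']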
def read_wav(self, filename):
"""Read sample data for this sample from a WAV file.
:param filename: the file from which to read
"""
wave_input = None
try:
wave_input = wave.open(filename, 'r')
wave_frames = bytearray(
wave_input.readframes(wave_input.getnframes()))
self.sample_data = [x >> 4 for x in wave_frames]
finally:
if wave_input is not None:
wave_input.close() | Read sample data for this sample from a WAV file.
:param filename: the file from which to read | Below is the instruction that describes the task:
### Input:
Read sample data for this sample from a WAV file.
:param filename: the file from which to read
### Response:
def read_wav(self, filename):
"""Read sample data for this sample from a WAV file.
:param filename: the file from which to read
"""
wave_input = None
try:
wave_input = wave.open(filename, 'r')
wave_frames = bytearray(
wave_input.readframes(wave_input.getnframes()))
self.sample_data = [x >> 4 for x in wave_frames]
finally:
if wave_input is not None:
wave_input.close() |
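A Python 3 round-trip sketch of the same read pattern on a synthetic file (the file name demo.wav is illustrative): 8-bit frames are shifted right by 4, keeping only the high nibble of each sample:
import wave

with wave.open('demo.wav', 'w') as w:
    w.setnchannels(1)
    w.setsampwidth(1)      # 8-bit samples, one byte per frame
    w.setframerate(8000)
    w.writeframes(bytes([0x00, 0x7F, 0xFF]))

with wave.open('demo.wav', 'r') as w:
    frames = bytearray(w.readframes(w.getnframes()))
assert [x >> 4 for x in frames] == [0x0, 0x7, 0xF]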
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type) | The running process's socket/port information (or None). | Below is the instruction that describes the task:
### Input:
The running process's socket/port information (or None).
### Response:
def socket(self):
"""The running processes socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type) |
def createWidgets(self):
"""
Instantiate the Gooey Widgets that are used within the RadioGroup
"""
from gooey.gui.components import widgets
return [getattr(widgets, item['type'])(self, item)
for item in getin(self.widgetInfo, ['data', 'widgets'], [])] | Instantiate the Gooey Widgets that are used within the RadioGroup | Below is the instruction that describes the task:
### Input:
Instantiate the Gooey Widgets that are used within the RadioGroup
### Response:
def createWidgets(self):
"""
Instantiate the Gooey Widgets that are used within the RadioGroup
"""
from gooey.gui.components import widgets
return [getattr(widgets, item['type'])(self, item)
for item in getin(self.widgetInfo, ['data', 'widgets'], [])] |
def smooth(self, noise, strategy=INVERSE_STRATEGY):
""" In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY,
smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment`
"""
if strategy is INVERSE_STRATEGY:
self.points = with_inverse(self.points, noise)
elif strategy is EXTRAPOLATE_STRATEGY:
self.points = with_extrapolation(self.points, noise, 30)
elif strategy is NO_STRATEGY:
self.points = with_no_strategy(self.points, noise)
return self | In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY,
smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment` | Below is the instruction that describes the task:
### Input:
In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY,
smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment`
### Response:
def smooth(self, noise, strategy=INVERSE_STRATEGY):
""" In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY,
smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment`
"""
if strategy is INVERSE_STRATEGY:
self.points = with_inverse(self.points, noise)
elif strategy is EXTRAPOLATE_STRATEGY:
self.points = with_extrapolation(self.points, noise, 30)
elif strategy is NO_STRATEGY:
self.points = with_no_strategy(self.points, noise)
return self |
def is_contained_in(pe_pe, root):
'''
Determine if a PE_PE is contained within an EP_PKG or a C_C.
'''
if not pe_pe:
return False
if type(pe_pe).__name__ != 'PE_PE':
pe_pe = one(pe_pe).PE_PE[8001]()
ep_pkg = one(pe_pe).EP_PKG[8000]()
c_c = one(pe_pe).C_C[8003]()
if root in [ep_pkg, c_c]:
return True
elif is_contained_in(ep_pkg, root):
return True
elif is_contained_in(c_c, root):
return True
else:
return False | Determine if a PE_PE is contained within an EP_PKG or a C_C. | Below is the instruction that describes the task:
### Input:
Determine if a PE_PE is contained within an EP_PKG or a C_C.
### Response:
def is_contained_in(pe_pe, root):
'''
Determine if a PE_PE is contained within an EP_PKG or a C_C.
'''
if not pe_pe:
return False
if type(pe_pe).__name__ != 'PE_PE':
pe_pe = one(pe_pe).PE_PE[8001]()
ep_pkg = one(pe_pe).EP_PKG[8000]()
c_c = one(pe_pe).C_C[8003]()
if root in [ep_pkg, c_c]:
return True
elif is_contained_in(ep_pkg, root):
return True
elif is_contained_in(c_c, root):
return True
else:
return False |
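The same recursive walk on a plain parent mapping, with the two metamodel navigations (EP_PKG and C_C) collapsed into one parents dict for illustration; contained_in and the sample data are assumptions, not part of the xtuml API:
def contained_in(parents, elem, root):
    parent = parents.get(elem)
    if parent is None:
        return False
    return parent == root or contained_in(parents, parent, root)

parents = {'class': 'pkg_inner', 'pkg_inner': 'pkg_outer'}
assert contained_in(parents, 'class', 'pkg_outer')
assert not contained_in(parents, 'class', 'other_pkg')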
def run(self, command, opts):
"""Dispatch the given command & args."""
handlers = {
'create': self.create,
'delete': self.delete,
'list': self.list
}
handler = handlers.get(command, None)
if handler is None:
error("Unrecognized command: %s" % command, 2)
handler(opts) | Dispatch the given command & args. | Below is the instruction that describes the task:
### Input:
Dispatch the given command & args.
### Response:
def run(self, command, opts):
"""Dispatch the given command & args."""
handlers = {
'create': self.create,
'delete': self.delete,
'list': self.list
}
handler = handlers.get(command, None)
if handler is None:
error("Unrecognized command: %s" % command, 2)
handler(opts) |
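The dispatch-table pattern in isolation; print stands in for the real handlers, and SystemExit replaces the module's error() helper, so this is a sketch rather than the original behaviour:
def run(command, opts):
    handlers = {'create': print, 'delete': print, 'list': print}
    handler = handlers.get(command)
    if handler is None:
        raise SystemExit("Unrecognized command: %s" % command)
    handler(opts)

run('list', {'verbose': True})  # prints the opts dict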
def OpenEnumerateInstancePaths(self, ClassName, namespace=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=None, **extra):
# pylint: disable=invalid-name
"""
Open an enumeration session to enumerate the instance paths of
instances of a class (including instances of its subclasses) in
a namespace.
*New in pywbem 0.9.*
This method performs the OpenEnumerateInstancePaths operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns status on the
enumeration session and optionally instance paths.
Otherwise, this method raises an exception.
Use the :meth:`~pywbem.WBEMConnection.PullInstancePaths` method to
retrieve the next set of instance paths or the
:meth:`~pywbem.WBEMConnection.CloseEnumeration` method to close the
enumeration session before it is exhausted.
Parameters:
ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a :class:`~pywbem.CIMClassName` object, its
`namespace` attribute will be used as a default namespace as
described for the `namespace` parameter, and its `host` attribute
will be ignored.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the namespace of the `ClassName` parameter will be used,
if specified as a :class:`~pywbem.CIMClassName` object. If that is
also `None`, the default namespace of the connection will be used.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`):
Maximum number of instances the WBEM server may return
for this request.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* If zero, the WBEM server is to return no instances. This may
be used by a client to leave the handling of any returned
instances to a loop of Pull operations.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
to return zero instances.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **paths** (:class:`py:list` of :class:`~pywbem.CIMInstanceName`):
Representations of the retrieved instance paths, with their
attributes set as follows:
* `classname`: Name of the creation class of the instance.
* `keybindings`: Keybindings of the instance.
* `namespace`: Name of the CIM namespace containing the instance.
* `host`: Host and optionally port of the WBEM server containing
the CIM namespace.
* **eos** (:class:`py:bool`):
Indicates whether the enumeration session is exhausted after
this operation:
- If `True`, the enumeration session is exhausted, and the
server has closed the enumeration session.
- If `False`, the enumeration session is not exhausted and the
`context` item is the context object for the next operation on
the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session,
including its current enumeration state, and the namespace. This
object must be supplied with the next pull or close operation for
this enumeration session.
The tuple items are:
* server_context (:term:`string`):
Enumeration context string returned by the server if
the session is not exhausted, or `None` otherwise. This string
is opaque for the client.
* namespace (:term:`string`):
Name of the CIM namespace that was used for this operation.
NOTE: This inner tuple hides the need for a CIM namespace
on subsequent operations in the enumeration session. CIM
operations always require target namespace, but it never
makes sense to specify a different one in subsequent
operations on the same enumeration session.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
max_object_count = 100
rslt_tuple = conn.OpenEnumerateInstancePaths(
'CIM_Blah', MaxObjectCount=max_object_count)
paths = rslt_tuple.paths
while not rslt_tuple.eos:
rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
max_object_count)
paths.extend(rslt_tupl.paths)
for path in paths:
print('path {0}'.format(path))
"""
exc = None
result_tuple = None
method_name = 'OpenEnumerateInstancePaths'
if self._operation_recorders:
self.operation_recorder_reset(pull_op=True)
self.operation_recorder_stage_pywbem_args(
method=method_name,
ClassName=ClassName,
namespace=namespace,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount,
**extra)
try:
stats = self.statistics.start_timer(method_name)
if namespace is None and isinstance(ClassName, CIMClassName):
namespace = ClassName.namespace
namespace = self._iparam_namespace_from_namespace(namespace)
classname = self._iparam_classname(ClassName, 'ClassName')
result = self._imethodcall(
method_name,
namespace,
ClassName=classname,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount,
has_out_params=True,
**extra)
result_tuple = pull_path_result_tuple(
*self._get_rslt_params(result, namespace))
return result_tuple
except (CIMXMLParseError, XMLParseError) as exce:
exce.request_data = self.last_raw_request
exce.response_data = self.last_raw_reply
exc = exce
raise
except Exception as exce:
exc = exce
raise
finally:
self._last_operation_time = stats.stop_timer(
self.last_request_len, self.last_reply_len,
self.last_server_response_time, exc)
if self._operation_recorders:
self.operation_recorder_stage_result(result_tuple, exc) | Open an enumeration session to enumerate the instance paths of
instances of a class (including instances of its subclasses) in
a namespace.
*New in pywbem 0.9.*
This method performs the OpenEnumerateInstancePaths operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns status on the
enumeration session and optionally instance paths.
Otherwise, this method raises an exception.
Use the :meth:`~pywbem.WBEMConnection.PullInstancePaths` method to
retrieve the next set of instance paths or the
:meth:`~pywbem.WBEMConnection.CloseEnumeration` method to close the
enumeration session before it is exhausted.
Parameters:
ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a :class:`~pywbem.CIMClassName` object, its
`namespace` attribute will be used as a default namespace as
described for the `namespace` parameter, and its `host` attribute
will be ignored.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the namespace of the `ClassName` parameter will be used,
if specified as a :class:`~pywbem.CIMClassName` object. If that is
also `None`, the default namespace of the connection will be used.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`):
Maximum number of instances the WBEM server may return
for this request.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* If zero, the WBEM server is to return no instances. This may
be used by a client to leave the handling of any returned
instances to a loop of Pull operations.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
to return zero instances.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **paths** (:class:`py:list` of :class:`~pywbem.CIMInstanceName`):
Representations of the retrieved instance paths, with their
attributes set as follows:
* `classname`: Name of the creation class of the instance.
* `keybindings`: Keybindings of the instance.
* `namespace`: Name of the CIM namespace containing the instance.
* `host`: Host and optionally port of the WBEM server containing
the CIM namespace.
* **eos** (:class:`py:bool`):
Indicates whether the enumeration session is exhausted after
this operation:
- If `True`, the enumeration session is exhausted, and the
server has closed the enumeration session.
- If `False`, the enumeration session is not exhausted and the
`context` item is the context object for the next operation on
the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session,
including its current enumeration state, and the namespace. This
object must be supplied with the next pull or close operation for
this enumeration session.
The tuple items are:
* server_context (:term:`string`):
Enumeration context string returned by the server if
the session is not exhausted, or `None` otherwise. This string
is opaque for the client.
* namespace (:term:`string`):
Name of the CIM namespace that was used for this operation.
NOTE: This inner tuple hides the need for a CIM namespace
on subsequent operations in the enumeration session. CIM
operations always require target namespace, but it never
makes sense to specify a different one in subsequent
operations on the same enumeration session.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
max_object_count = 100
rslt_tuple = conn.OpenEnumerateInstancePaths(
'CIM_Blah', MaxObjectCount=max_object_count)
paths = rslt_tuple.paths
while not rslt_tuple.eos:
rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
max_object_count)
paths.extend(rslt_tupl.paths)
for path in paths:
print('path {0}'.format(path)) | Below is the instruction that describes the task:
### Input:
Open an enumeration session to enumerate the instance paths of
instances of a class (including instances of its subclasses) in
a namespace.
*New in pywbem 0.9.*
This method performs the OpenEnumerateInstancePaths operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns status on the
enumeration session and optionally instance paths.
Otherwise, this method raises an exception.
Use the :meth:`~pywbem.WBEMConnection.PullInstancePaths` method to
retrieve the next set of instance paths or the
:meth:`~pywbem.WBEMConnection.CloseEnumeration` method to close the
enumeration session before it is exhausted.
Parameters:
ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a :class:`~pywbem.CIMClassName` object, its
`namespace` attribute will be used as a default namespace as
described for the `namespace` parameter, and its `host` attribute
will be ignored.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the namespace of the `ClassName` parameter will be used,
if specified as a :class:`~pywbem.CIMClassName` object. If that is
also `None`, the default namespace of the connection will be used.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`):
Maximum number of instances the WBEM server may return
for this request.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* If zero, the WBEM server is to return no instances. This may
be used by a client to leave the handling of any returned
instances to a loop of Pull operations.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
to return zero instances.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **paths** (:class:`py:list` of :class:`~pywbem.CIMInstanceName`):
Representations of the retrieved instance paths, with their
attributes set as follows:
* `classname`: Name of the creation class of the instance.
* `keybindings`: Keybindings of the instance.
* `namespace`: Name of the CIM namespace containing the instance.
* `host`: Host and optionally port of the WBEM server containing
the CIM namespace.
* **eos** (:class:`py:bool`):
Indicates whether the enumeration session is exhausted after
this operation:
- If `True`, the enumeration session is exhausted, and the
server has closed the enumeration session.
- If `False`, the enumeration session is not exhausted and the
`context` item is the context object for the next operation on
the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session,
including its current enumeration state, and the namespace. This
object must be supplied with the next pull or close operation for
this enumeration session.
The tuple items are:
* server_context (:term:`string`):
Enumeration context string returned by the server if
the session is not exhausted, or `None` otherwise. This string
is opaque for the client.
* namespace (:term:`string`):
Name of the CIM namespace that was used for this operation.
NOTE: This inner tuple hides the need for a CIM namespace
on subsequent operations in the enumeration session. CIM
operations always require target namespace, but it never
makes sense to specify a different one in subsequent
operations on the same enumeration session.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
max_object_count = 100
rslt_tuple = conn.OpenEnumerateInstancePaths(
'CIM_Blah', MaxObjectCount=max_object_count)
paths = rslt_tuple.paths
while not rslt_tuple.eos:
rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
max_object_count)
paths.extend(rslt_tupl.paths)
for path in paths:
print('path {0}'.format(path))
### Response:
def OpenEnumerateInstancePaths(self, ClassName, namespace=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=None, **extra):
# pylint: disable=invalid-name
"""
Open an enumeration session to enumerate the instance paths of
instances of a class (including instances of its subclasses) in
a namespace.
*New in pywbem 0.9.*
This method performs the OpenEnumerateInstancePaths operation
(see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
methods performing such operations.
If the operation succeeds, this method returns status on the
enumeration session and optionally instance paths.
Otherwise, this method raises an exception.
Use the :meth:`~pywbem.WBEMConnection.PullInstancePaths` method to
retrieve the next set of instance paths or the
:meth:`~pywbem.WBEMConnection.CloseEnumeration` method to close the
enumeration session before it is exhausted.
Parameters:
ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a :class:`~pywbem.CIMClassName` object, its
`namespace` attribute will be used as a default namespace as
described for the `namespace` parameter, and its `host` attribute
will be ignored.
namespace (:term:`string`):
Name of the CIM namespace to be used (case independent).
Leading and trailing slash characters will be stripped. The lexical
case will be preserved.
If `None`, the namespace of the `ClassName` parameter will be used,
if specified as a :class:`~pywbem.CIMClassName` object. If that is
also `None`, the default namespace of the connection will be used.
FilterQueryLanguage (:term:`string`):
The name of the filter query language used for the `FilterQuery`
parameter. The DMTF-defined Filter Query Language (see
:term:`DSP0212`) is specified as "DMTF:FQL".
Not all WBEM servers support filtering for this operation because
it returns instance paths and the act of the server filtering
requires that it generate instances just for that purpose and then
discard them.
FilterQuery (:term:`string`):
The filter query in the query language defined by the
`FilterQueryLanguage` parameter.
OperationTimeout (:class:`~pywbem.Uint32`):
Minimum time in seconds the WBEM Server shall maintain an open
enumeration session after a previous Open or Pull request is
sent to the client. Once this timeout time has expired, the
WBEM server may close the enumeration session.
* If not `None`, this parameter is sent to the WBEM server as the
proposed timeout for the enumeration session. A value of 0
indicates that the server is expected to never time out. The
server may reject the proposed value, causing a
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default timeout to be used.
ContinueOnError (:class:`py:bool`):
Indicates to the WBEM server to continue sending responses
after an error response has been sent.
* If `True`, the server is to continue sending responses after
sending an error response. Not all servers support continuation
on error; a server that does not support it must send an error
response if `True` was specified, causing
:class:`~pywbem.CIMError` to be raised with status code
:attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
* If `False`, the server is requested to close the enumeration after
sending an error response.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
`False`.
MaxObjectCount (:class:`~pywbem.Uint32`):
Maximum number of instances the WBEM server may return
for this request.
* If positive, the WBEM server is to return no more than the
specified number of instances.
* If zero, the WBEM server is to return no instances. This may
be used by a client to leave the handling of any returned
instances to a loop of Pull operations.
* If `None`, this parameter is not passed to the WBEM server, and
causes the server-implemented default behaviour to be used.
:term:`DSP0200` defines that the server-implemented default is
to return zero instances.
**extra :
Additional keyword arguments are passed as additional operation
parameters to the WBEM server.
Note that :term:`DSP0200` does not define any additional parameters
for this operation.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **paths** (:class:`py:list` of :class:`~pywbem.CIMInstanceName`):
Representations of the retrieved instance paths, with their
attributes set as follows:
* `classname`: Name of the creation class of the instance.
* `keybindings`: Keybindings of the instance.
* `namespace`: Name of the CIM namespace containing the instance.
* `host`: Host and optionally port of the WBEM server containing
the CIM namespace.
* **eos** (:class:`py:bool`):
Indicates whether the enumeration session is exhausted after
this operation:
- If `True`, the enumeration session is exhausted, and the
server has closed the enumeration session.
- If `False`, the enumeration session is not exhausted and the
`context` item is the context object for the next operation on
the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session,
including its current enumeration state, and the namespace. This
object must be supplied with the next pull or close operation for
this enumeration session.
The tuple items are:
* server_context (:term:`string`):
Enumeration context string returned by the server if
the session is not exhausted, or `None` otherwise. This string
is opaque for the client.
* namespace (:term:`string`):
Name of the CIM namespace that was used for this operation.
NOTE: This inner tuple hides the need for a CIM namespace
on subsequent operations in the enumeration session. CIM
operations always require target namespace, but it never
makes sense to specify a different one in subsequent
operations on the same enumeration session.
Raises:
Exceptions described in :class:`~pywbem.WBEMConnection`.
Example::
max_object_count = 100
rslt_tuple = conn.OpenEnumerateInstancePaths(
'CIM_Blah', MaxObjectCount=max_object_count)
paths = rslt_tuple.paths
while not rslt_tuple.eos:
rslt_tuple = conn.PullInstancePaths(rslt_tupl.context,
max_object_count)
paths.extend(rslt_tupl.paths)
for path in paths:
print('path {0}'.format(path))
"""
exc = None
result_tuple = None
method_name = 'OpenEnumerateInstancePaths'
if self._operation_recorders:
self.operation_recorder_reset(pull_op=True)
self.operation_recorder_stage_pywbem_args(
method=method_name,
ClassName=ClassName,
namespace=namespace,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount,
**extra)
try:
stats = self.statistics.start_timer(method_name)
if namespace is None and isinstance(ClassName, CIMClassName):
namespace = ClassName.namespace
namespace = self._iparam_namespace_from_namespace(namespace)
classname = self._iparam_classname(ClassName, 'ClassName')
result = self._imethodcall(
method_name,
namespace,
ClassName=classname,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount,
has_out_params=True,
**extra)
result_tuple = pull_path_result_tuple(
*self._get_rslt_params(result, namespace))
return result_tuple
except (CIMXMLParseError, XMLParseError) as exce:
exce.request_data = self.last_raw_request
exce.response_data = self.last_raw_reply
exc = exce
raise
except Exception as exce:
exc = exce
raise
finally:
self._last_operation_time = stats.stop_timer(
self.last_request_len, self.last_reply_len,
self.last_server_response_time, exc)
if self._operation_recorders:
self.operation_recorder_stage_result(result_tuple, exc) |
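If the caller stops iterating before eos is True, the docstring above notes the session should be closed via CloseEnumeration; a hedged sketch of the pull loop with early termination (conn is an open WBEMConnection, and the cap of 1000 paths is illustrative):
rslt = conn.OpenEnumerateInstancePaths('CIM_Blah', MaxObjectCount=100)
paths = list(rslt.paths)
try:
    while not rslt.eos and len(paths) < 1000:
        rslt = conn.PullInstancePaths(rslt.context, 100)
        paths.extend(rslt.paths)
finally:
    if not rslt.eos:
        conn.CloseEnumeration(rslt.context)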
def get_serializer(self, node):
"""Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization.
"""
return self.options['serializers'].get(type(node), None) | Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization. | Below is the instruction that describes the task:
### Input:
Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization.
### Response:
def get_serializer(self, node):
"""Returns serializer for specific element.
:Args:
- node (:class:`ooxml.doc.Element`): Element object
:Returns:
Returns reference to a function which will be used for serialization.
"""
return self.options['serializers'].get(type(node), None)
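The lookup-by-concrete-type pattern in miniature; the serializer registry below is illustrative. Note that dict.get(type(node)) matches exact types only, so subclasses (e.g. bool under int) are not dispatched to their parent's serializer:
serializers = {int: str, float: '{:.2f}'.format}
node = 3.14159
fn = serializers.get(type(node))
assert fn is not None and fn(node) == '3.14'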
def assert_pickle_idempotent(obj):
'''Assert that obj does not change (w.r.t. ==) under repeated picklings
'''
from six.moves.cPickle import dumps, loads
obj1 = loads(dumps(obj))
obj2 = loads(dumps(obj1))
obj3 = loads(dumps(obj2))
assert_equivalent(obj, obj1)
assert_equivalent(obj, obj2)
assert_equivalent(obj, obj3)
assert type(obj) is type(obj3) | Assert that obj does not change (w.r.t. ==) under repeated picklings | Below is the instruction that describes the task:
### Input:
Assert that obj does not change (w.r.t. ==) under repeated picklings
### Response:
def assert_pickle_idempotent(obj):
'''Assert that obj does not change (w.r.t. ==) under repeated picklings
'''
from six.moves.cPickle import dumps, loads
obj1 = loads(dumps(obj))
obj2 = loads(dumps(obj1))
obj3 = loads(dumps(obj2))
assert_equivalent(obj, obj1)
assert_equivalent(obj, obj2)
assert_equivalent(obj, obj3)
assert type(obj) is type(obj3) |
def on_scopes_request(self, py_db, request):
'''
Scopes are the top-level items which appear for a frame (so, we receive the frame id
and provide the scopes it has).
:param ScopesRequest request:
'''
frame_id = request.arguments.frameId
variables_reference = frame_id
scopes = [Scope('Locals', int(variables_reference), False).to_dict()]
body = ScopesResponseBody(scopes)
scopes_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
return NetCommand(CMD_RETURN, 0, scopes_response, is_json=True) | Scopes are the top-level items which appear for a frame (so, we receive the frame id
and provide the scopes it has).
:param ScopesRequest request: | Below is the instruction that describes the task:
### Input:
Scopes are the top-level items which appear for a frame (so, we receive the frame id
and provide the scopes it has).
:param ScopesRequest request:
### Response:
def on_scopes_request(self, py_db, request):
'''
Scopes are the top-level items which appear for a frame (so, we receive the frame id
and provide the scopes it has).
:param ScopesRequest request:
'''
frame_id = request.arguments.frameId
variables_reference = frame_id
scopes = [Scope('Locals', int(variables_reference), False).to_dict()]
body = ScopesResponseBody(scopes)
scopes_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
return NetCommand(CMD_RETURN, 0, scopes_response, is_json=True) |
def validate_uses_tls_for_glance(audit_options):
"""Verify that TLS is used to communicate with Glance."""
section = _config_section(audit_options, 'glance')
assert section is not None, "Missing section 'glance'"
assert not section.get('insecure') and \
"https://" in section.get("api_servers"), \
"TLS is not used for Glance" | Verify that TLS is used to communicate with Glance. | Below is the the instruction that describes the task:
### Input:
Verify that TLS is used to communicate with Glance.
### Response:
def validate_uses_tls_for_glance(audit_options):
"""Verify that TLS is used to communicate with Glance."""
section = _config_section(audit_options, 'glance')
assert section is not None, "Missing section 'glance'"
assert not section.get('insecure') and \
"https://" in section.get("api_servers"), \
"TLS is not used for Glance" |
def generate_valid_keys():
""" create a list of valid keys """
valid_keys = []
for minimum, maximum in RANGES:
for i in range(ord(minimum), ord(maximum) + 1):
valid_keys.append(chr(i))
return valid_keys | create a list of valid keys | Below is the instruction that describes the task:
### Input:
create a list of valid keys
### Response:
def generate_valid_keys():
""" create a list of valid keys """
valid_keys = []
for minimum, maximum in RANGES:
for i in range(ord(minimum), ord(maximum) + 1):
valid_keys.append(chr(i))
return valid_keys |
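For illustration, a hypothetical RANGES constant (the real one lives in the source module) and the resulting key list:
RANGES = [('a', 'z'), ('0', '9')]   # hypothetical value for this sketch
keys = generate_valid_keys()
assert keys[0] == 'a' and keys[-1] == '9'
assert len(keys) == 26 + 10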
def hideOverlay(self, ulOverlayHandle):
"""Hides the VR overlay. For dashboard overlays, only the Dashboard Manager is allowed to call this."""
fn = self.function_table.hideOverlay
result = fn(ulOverlayHandle)
return result | Hides the VR overlay. For dashboard overlays, only the Dashboard Manager is allowed to call this. | Below is the instruction that describes the task:
### Input:
Hides the VR overlay. For dashboard overlays, only the Dashboard Manager is allowed to call this.
### Response:
def hideOverlay(self, ulOverlayHandle):
"""Hides the VR overlay. For dashboard overlays, only the Dashboard Manager is allowed to call this."""
fn = self.function_table.hideOverlay
result = fn(ulOverlayHandle)
return result |
def get_ctm(self):
"""Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_ctm(self._pointer, matrix._pointer)
self._check_status()
return matrix | Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object. | Below is the instruction that describes the task:
### Input:
Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
### Response:
def get_ctm(self):
"""Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_ctm(self._pointer, matrix._pointer)
self._check_status()
return matrix |
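A usage sketch, assuming the cairocffi bindings this wrapper appears to belong to are installed:
import cairocffi as cairo
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 10, 10)
context = cairo.Context(surface)
scaled_font = context.get_scaled_font()
ctm = scaled_font.get_ctm()
print(ctm.x0, ctm.y0)   # always 0.0 0.0, per the note above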
def display(self, *amplExpressions):
"""
Writes on the current OutputHandler the outcome of the AMPL statement.
.. code-block:: ampl
display e1, e2, .., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated.
"""
exprs = list(map(str, amplExpressions))
lock_and_call(
lambda: self._impl.displayLst(exprs, len(exprs)),
self._lock
) | Writes on the current OutputHandler the outcome of the AMPL statement.
.. code-block:: ampl
display e1, e2, .., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated. | Below is the instruction that describes the task:
### Input:
Writes on the current OutputHandler the outcome of the AMPL statement.
.. code-block:: ampl
display e1, e2, .., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated.
### Response:
def display(self, *amplExpressions):
"""
Writes on the current OutputHandler the outcome of the AMPL statement.
.. code-block:: ampl
display e1, e2, .., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated.
"""
exprs = list(map(str, amplExpressions))
lock_and_call(
lambda: self._impl.displayLst(exprs, len(exprs)),
self._lock
) |
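A usage sketch, assuming the amplpy bindings and a working AMPL installation are available (the AMPL() constructor and eval() call are taken on trust here):
from amplpy import AMPL
ampl = AMPL()
ampl.eval('param n := 3; set S := 1..n;')
ampl.display('n', 'S')   # echoes both values through the OutputHandler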
def _print_header(self):
"""Print the header for screen logging"""
header = " Iter Dir "
if self.constraints is not None:
header += ' SC CC'
header += " Function"
if self.convergence_condition is not None:
header += self.convergence_condition.get_header()
header += " Time"
self._screen("-"*(len(header)), newline=True)
self._screen(header, newline=True)
self._screen("-"*(len(header)), newline=True) | Print the header for screen logging | Below is the instruction that describes the task:
### Input:
Print the header for screen logging
### Response:
def _print_header(self):
"""Print the header for screen logging"""
header = " Iter Dir "
if self.constraints is not None:
header += ' SC CC'
header += " Function"
if self.convergence_condition is not None:
header += self.convergence_condition.get_header()
header += " Time"
self._screen("-"*(len(header)), newline=True)
self._screen(header, newline=True)
self._screen("-"*(len(header)), newline=True) |
def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if ClasspathEntry.is_artifact_classpath_entry(cp_entry)] | Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`) | Below is the instruction that describes the task:
### Input:
Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`)
### Response:
def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if ClasspathEntry.is_artifact_classpath_entry(cp_entry)] |
def _build_archive(self, dir_path):
"""
Creates a zip archive from files in path.
"""
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body | Creates a zip archive from files in path. | Below is the instruction that describes the task:
### Input:
Creates a zip archive from files in path.
### Response:
def _build_archive(self, dir_path):
"""
Creates a zip archive from files in path.
"""
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body |
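A hedged sketch: CSV_FILES is assumed to be a module-level list of expected file names, and since `self` is unused the method can be exercised standalone.
import os, tempfile, zipfile
CSV_FILES = ['agencies.csv', 'routes.csv']   # hypothetical value
tmp = tempfile.mkdtemp()
for name in CSV_FILES:
    with open(os.path.join(tmp, name), 'w') as f:
        f.write('id,name\n')
body = _build_archive(None, tmp)   # self is unused, so None suffices
assert body[:2] == b'PK'           # ZIP local-file-header magic bytes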
def checksum_status(self, area_uuid, filename):
"""
Retrieve checksum status and values for a file
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file within the Upload Area
:return: a dict with checksum information
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
url_safe_filename = urlparse.quote(filename)
path = "/area/{uuid}/{filename}/checksum".format(uuid=area_uuid, filename=url_safe_filename)
response = self._make_request('get', path)
return response.json() | Retrieve checksum status and values for a file
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file within the Upload Area
:return: a dict with checksum information
:rtype: dict
:raises UploadApiException: if information could not be obtained | Below is the instruction that describes the task:
### Input:
Retrieve checksum status and values for a file
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file within the Upload Area
:return: a dict with checksum information
:rtype: dict
:raises UploadApiException: if information could not be obtained
### Response:
def checksum_status(self, area_uuid, filename):
"""
Retrieve checksum status and values for a file
:param str area_uuid: A RFC4122-compliant ID for the upload area
:param str filename: The name of the file within the Upload Area
:return: a dict with checksum information
:rtype: dict
:raises UploadApiException: if information could not be obtained
"""
url_safe_filename = urlparse.quote(filename)
path = "/area/{uuid}/{filename}/checksum".format(uuid=area_uuid, filename=url_safe_filename)
response = self._make_request('get', path)
return response.json() |
def fit(self, SF, x_range, y_range, matrix_z):
'''
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
'''
xx, yy, zz = [], [], []
m0, n0 = [], []
for m, n in itertools.product(np.arange(0, len(x_range), step=SF), np.arange(0, len(y_range), step=SF)):
if x_range[m] > y_range[n]: # Ha nearly equal Hb
m0.append(m)
n0.append(n)
aa, bb, cc = [], [], []
for m, n in zip(m0, n0):
s = 0
try:
grid_data = []
a_ = x_range[m+s]
b_ = y_range[n-s]
for i, j in itertools.product(np.arange(3*SF+1), np.arange(3*SF+1)):
try:
grid_data.append(
[x_range[m+s+i], y_range[n-s-j], matrix_z.item(n-s-j, m+s+i)])
except:
try:
for i, j in itertools.product(np.arange(3), np.arange(3)):
grid_data.append(
[x_range[m+i], y_range[n-j], matrix_z.item(n-j, m+i)])
except:
pass
# print(grid_data)
'''
#=================================================
/when SF = n
/data grid as (2*n+1)x(2*n+1)
/grid_list: convert grid to list
/every grid produces one FORC distribution p
/the poly fitting uses d2_func
#=================================================
'''
x, y, z = grid_list(grid_data)
try:
p = d2_func(x, y, z)
# print(p)
xx.append((a_-b_)/2)
yy.append((a_+b_)/2)
zz.append(p)
except Exception as e:
# print(e)
pass
except:
pass
'''
#=================================================
/the data will be saved as a pandas dataframe
/all the data with nan values will be deleted by dropna()
#=================================================
'''
# print(zz)
df = pd.DataFrame({'x': xx, 'y': yy, 'z': zz})
#df = df.replace(0,np.nan)
df = df.dropna()
'''
#=================================================
/due to the space near Bc = zero
/the Bi values when Bc < 0.03 will be mirrored to -Bc
#=================================================
'''
df_negative = df[(df.x < 0.03)].copy()
df_negative.x = df_negative.x*-1
df = df.append(df_negative)
df = df.drop_duplicates(['x', 'y'])
df = df.sort_values('x')
# plt.scatter(df.x,df.y,c=df.z)
# plt.show()
'''
#=================================================
/reset the Bc and Bi range by X,Y
/use linear interpolate to obtain FORC distribution
#=================================================
'''
xrange = [0, int((np.max(df.x)+0.05)*10)/10]
yrange = [int((np.min(df.y)-0.05)*10)/10,
int((np.max(df.y)+0.05)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
self.yi, self.xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#self.xi,self.yi = np.mgrid[0:0.2:400j,-0.15:0.15:400j]
z = df.z/np.max(df.z)
z = np.asarray(z.tolist())
self.zi = griddata((df.x, df.y), z, (self.xi, self.yi), method='cubic') | #=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#================================================= | Below is the instruction that describes the task:
### Input:
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
### Response:
def fit(self, SF, x_range, y_range, matrix_z):
'''
#=================================================
/the main fitting process
/xx,yy,zz = Hb,Ha,p
/p is the FORC distribution
/m0,n0 is the index of values on Ha = Hb
/then loop m0 and n0
/based on smooth factor(SF)
/select data grid from the matrix_z for curve fitting
#=================================================
'''
xx, yy, zz = [], [], []
m0, n0 = [], []
for m, n in itertools.product(np.arange(0, len(x_range), step=SF), np.arange(0, len(y_range), step=SF)):
if x_range[m] > y_range[n]: # Ha nearly equal Hb
m0.append(m)
n0.append(n)
aa, bb, cc = [], [], []
for m, n in zip(m0, n0):
s = 0
try:
grid_data = []
a_ = x_range[m+s]
b_ = y_range[n-s]
for i, j in itertools.product(np.arange(3*SF+1), np.arange(3*SF+1)):
try:
grid_data.append(
[x_range[m+s+i], y_range[n-s-j], matrix_z.item(n-s-j, m+s+i)])
except:
try:
for i, j in itertools.product(np.arange(3), np.arange(3)):
grid_data.append(
[x_range[m+i], y_range[n-j], matrix_z.item(n-j, m+i)])
except:
pass
# print(grid_data)
'''
#=================================================
/when SF = n
/data grid as (2*n+1)x(2*n+1)
/grid_list: convert grid to list
/every grid produces one FORC distribution p
/the poly fitting uses d2_func
#=================================================
'''
x, y, z = grid_list(grid_data)
try:
p = d2_func(x, y, z)
# print(p)
xx.append((a_-b_)/2)
yy.append((a_+b_)/2)
zz.append(p)
except Exception as e:
# print(e)
pass
except:
pass
'''
#=================================================
/the data will be saved as a pandas dataframe
/all the data with nan values will be deleted by dropna()
#=================================================
'''
# print(zz)
df = pd.DataFrame({'x': xx, 'y': yy, 'z': zz})
#df = df.replace(0,np.nan)
df = df.dropna()
'''
#=================================================
/due to the space near Bc = zero
/the Bi values when Bc < 0.03 will be mirrored to -Bc
#=================================================
'''
df_negative = df[(df.x < 0.03)].copy()
df_negative.x = df_negative.x*-1
df = df.append(df_negative)
df = df.drop_duplicates(['x', 'y'])
df = df.sort_values('x')
# plt.scatter(df.x,df.y,c=df.z)
# plt.show()
'''
#=================================================
/reset the Bc and Bi range by X,Y
/use linear interpolate to obtain FORC distribution
#=================================================
'''
xrange = [0, int((np.max(df.x)+0.05)*10)/10]
yrange = [int((np.min(df.y)-0.05)*10)/10,
int((np.max(df.y)+0.05)*10)/10]
X = np.linspace(xrange[0], xrange[1], 200)
Y = np.linspace(yrange[0], yrange[1], 200)
self.yi, self.xi = np.mgrid[yrange[0]:yrange[1]:200j, xrange[0]:xrange[1]:200j]
#self.xi,self.yi = np.mgrid[0:0.2:400j,-0.15:0.15:400j]
z = df.z/np.max(df.z)
z = np.asarray(z.tolist())
self.zi = griddata((df.x, df.y), z, (self.xi, self.yi), method='cubic') |
def trans_from_matrix(matrix):
""" Convert a vtk matrix to a numpy.ndarray """
t = np.zeros((4, 4))
for i in range(4):
for j in range(4):
t[i, j] = matrix.GetElement(i, j)
return t | Convert a vtk matrix to a numpy.ndarray | Below is the instruction that describes the task:
### Input:
Convert a vtk matrix to a numpy.ndarray
### Response:
def trans_from_matrix(matrix):
""" Convert a vtk matrix to a numpy.ndarray """
t = np.zeros((4, 4))
for i in range(4):
for j in range(4):
t[i, j] = matrix.GetElement(i, j)
return t |
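A usage sketch, assuming VTK's Python bindings are installed:
import vtk
m = vtk.vtkMatrix4x4()
m.SetElement(0, 3, 5.0)            # translate 5 units along x
t = trans_from_matrix(m)
assert t.shape == (4, 4) and t[0, 3] == 5.0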
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs) | Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar | Below is the instruction that describes the task:
### Input:
Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
### Response:
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See Also
--------
Plot.colorbar
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs) |
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write | pop the most recent capturing buffer from this Context
and return the current writer after the pop. | Below is the instruction that describes the task:
### Input:
pop the most recent capturing buffer from this Context
and return the current writer after the pop.
### Response:
def _pop_buffer_and_writer(self):
"""pop the most recent capturing buffer from this Context
and return the current writer after the pop.
"""
buf = self._buffer_stack.pop()
return buf, self._buffer_stack[-1].write |
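A minimal sketch of the buffer-stack pattern this supports; Context here is a stand-in with just the attribute the method needs, and the function above is called directly.
import io

class Context(object):   # minimal stand-in, not the real template Context
    def __init__(self):
        self._buffer_stack = [io.StringIO(), io.StringIO()]

ctx = Context()
buf, write = _pop_buffer_and_writer(ctx)
write('lands in the remaining buffer')
print(ctx._buffer_stack[-1].getvalue())   # lands in the remaining buffer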
def _get_events(self):
"""
Fetches events from the calendar into a list.
Returns: The list of events.
"""
self.last_update = datetime.datetime.now()
time_min = datetime.datetime.utcnow()
time_max = time_min + datetime.timedelta(hours=self.events_within_hours)
events = []
try:
eventsResult = (
self.service.events()
.list(
calendarId="primary",
timeMax=time_max.isoformat() + "Z", # 'Z' indicates UTC time
timeMin=time_min.isoformat() + "Z", # 'Z' indicates UTC time
singleEvents=True,
orderBy="startTime",
)
.execute(num_retries=5)
)
except Exception:
return self.events or events
else:
for event in eventsResult.get("items", []):
# filter out events that we did not accept (default)
# unless we organized them with no attendees
i_organized = event.get("organizer", {}).get("self", False)
has_attendees = event.get("attendees", [])
for attendee in event.get("attendees", []):
if attendee.get("self") is True:
if attendee["responseStatus"] in self.response:
break
else:
# we did not organize the event or we did not accept it
if not i_organized or has_attendees:
continue
# strip and lower case output if needed
for key in ["description", "location", "summary"]:
event[key] = event.get(key, "").strip()
if self.force_lowercase is True:
event[key] = event[key].lower()
# ignore all day events if configured
if event["start"].get("date") is not None:
if self.ignore_all_day_events:
continue
# filter out blacklisted event names
if event["summary"] is not None:
if event["summary"].lower() in map(
lambda e: e.lower(), self.blacklist_events
):
continue
events.append(event)
return events[: self.num_events] | Fetches events from the calendar into a list.
Returns: The list of events. | Below is the instruction that describes the task:
### Input:
Fetches events from the calendar into a list.
Returns: The list of events.
### Response:
def _get_events(self):
"""
Fetches events from the calendar into a list.
Returns: The list of events.
"""
self.last_update = datetime.datetime.now()
time_min = datetime.datetime.utcnow()
time_max = time_min + datetime.timedelta(hours=self.events_within_hours)
events = []
try:
eventsResult = (
self.service.events()
.list(
calendarId="primary",
timeMax=time_max.isoformat() + "Z", # 'Z' indicates UTC time
timeMin=time_min.isoformat() + "Z", # 'Z' indicates UTC time
singleEvents=True,
orderBy="startTime",
)
.execute(num_retries=5)
)
except Exception:
return self.events or events
else:
for event in eventsResult.get("items", []):
# filter out events that we did not accept (default)
# unless we organized them with no attendees
i_organized = event.get("organizer", {}).get("self", False)
has_attendees = event.get("attendees", [])
for attendee in event.get("attendees", []):
if attendee.get("self") is True:
if attendee["responseStatus"] in self.response:
break
else:
# we did not organize the event or we did not accept it
if not i_organized or has_attendees:
continue
# strip and lower case output if needed
for key in ["description", "location", "summary"]:
event[key] = event.get(key, "").strip()
if self.force_lowercase is True:
event[key] = event[key].lower()
# ignore all day events if configured
if event["start"].get("date") is not None:
if self.ignore_all_day_events:
continue
# filter out blacklisted event names
if event["summary"] is not None:
if event["summary"].lower() in map(
lambda e: e.lower(), self.blacklist_events
):
continue
events.append(event)
return events[: self.num_events] |
def lookup_domain(self, domain, nameserver=None, log_prefix=''):
"""Most basic DNS primitive that looks up a domain, waits for a
second response, then returns all of the results
:param domain: the domain to lookup
:param nameserver: the nameserver to use
:param log_prefix:
:return:
Note: if you want to lookup multiple domains you *should not* use
this function, you should use lookup_domains because this does
blocking IO to wait for the second response
"""
if domain not in self.results:
self.results[domain] = []
# get the resolver to use
if nameserver is None:
logging.debug("Nameserver not specified, using %s" % self.nameservers[0])
nameserver = self.nameservers[0]
results = {'domain': domain, 'nameserver': nameserver}
# construct the socket to use
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(self.timeout)
# set port number and increment index:
interrupt = False
with self.port_lock:
counter = 1
while True:
if counter > 100:
logging.warning("Stop trying to get an available port")
interrupt = True
break
try:
sock.bind(('', self.port))
break
except socket.error:
logging.debug("Port {} already in use, try next one".format(self.port))
self.port += 1
counter += 1
self.port += 1
if interrupt:
sock.close()
results['error'] = 'Failed to run DNS test'
self.results[domain].append(results)
return results
logging.debug("%sQuerying DNS enteries for "
"%s (nameserver: %s)." % (log_prefix, domain, nameserver))
# construct and send the request
request = dns.message.make_query(domain,
dns.rdatatype.from_text(self.rtype))
results['request'] = b64encode(request.to_wire())
sock.sendto(request.to_wire(), (nameserver, 53))
# read the first response from the socket
try:
response = sock.recvfrom(4096)[0]
results['response1'] = b64encode(response)
resp = dns.message.from_wire(response)
results['response1-ips'] = parse_out_ips(resp)
# first domain name in response should be the same with query
# domain name
for entry in resp.answer:
if domain.lower() != entry.name.to_text().lower()[:-1]:
logging.debug("%sWrong domain name %s for %s!"
% (log_prefix, entry.name.to_text().lower()[:-1], domain))
results['response1-domain'] = entry.name.to_text().lower()[:-1]
break
except socket.timeout:
# if we didn't get anything, then set the results to nothing
logging.debug("%sQuerying DNS enteries for "
"%s (nameserver: %s) timed out!" % (log_prefix, domain, nameserver))
sock.close()
results['response1'] = None
self.results[domain].append(results)
return results
# if we have made it this far, then wait for the next response
try:
response2 = sock.recvfrom(4096)[0]
results['response2'] = b64encode(response2)
resp2 = dns.message.from_wire(response2)
results['response2-ips'] = parse_out_ips(resp2)
# first domain name in response should be the same with query
# domain name
for entry in resp2.answer:
if domain.lower() != entry.name.to_text().lower()[:-1]:
logging.debug("%sWrong domain name %s for %s!"
% (log_prefix, entry.name.to_text().lower()[:-1], domain))
results['response2-domain'] = entry.name.to_text().lower()[:-1]
break
except socket.timeout:
# no second response
results['response2'] = None
sock.close()
self.results[domain].append(results)
return results | Most basic DNS primitive that looks up a domain, waits for a
second response, then returns all of the results
:param domain: the domain to lookup
:param nameserver: the nameserver to use
:param log_prefix:
:return:
Note: if you want to lookup multiple domains you *should not* use
this function, you should use lookup_domains because this does
blocking IO to wait for the second response | Below is the instruction that describes the task:
### Input:
Most basic DNS primitive that looks up a domain, waits for a
second response, then returns all of the results
:param domain: the domain to lookup
:param nameserver: the nameserver to use
:param log_prefix:
:return:
Note: if you want to lookup multiple domains you *should not* use
this function, you should use lookup_domains because this does
blocking IO to wait for the second response
### Response:
def lookup_domain(self, domain, nameserver=None, log_prefix=''):
"""Most basic DNS primitive that looks up a domain, waits for a
second response, then returns all of the results
:param domain: the domain to lookup
:param nameserver: the nameserver to use
:param log_prefix:
:return:
Note: if you want to lookup multiple domains you *should not* use
this function, you should use lookup_domains because this does
blocking IO to wait for the second response
"""
if domain not in self.results:
self.results[domain] = []
# get the resolver to use
if nameserver is None:
logging.debug("Nameserver not specified, using %s" % self.nameservers[0])
nameserver = self.nameservers[0]
results = {'domain': domain, 'nameserver': nameserver}
# construct the socket to use
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(self.timeout)
# set port number and increment index:
interrupt = False
with self.port_lock:
counter = 1
while True:
if counter > 100:
logging.warning("Stop trying to get an available port")
interrupt = True
break
try:
sock.bind(('', self.port))
break
except socket.error:
logging.debug("Port {} already in use, try next one".format(self.port))
self.port += 1
counter += 1
self.port += 1
if interrupt:
sock.close()
results['error'] = 'Failed to run DNS test'
self.results[domain].append(results)
return results
logging.debug("%sQuerying DNS enteries for "
"%s (nameserver: %s)." % (log_prefix, domain, nameserver))
# construct and send the request
request = dns.message.make_query(domain,
dns.rdatatype.from_text(self.rtype))
results['request'] = b64encode(request.to_wire())
sock.sendto(request.to_wire(), (nameserver, 53))
# read the first response from the socket
try:
response = sock.recvfrom(4096)[0]
results['response1'] = b64encode(response)
resp = dns.message.from_wire(response)
results['response1-ips'] = parse_out_ips(resp)
# first domain name in response should be the same with query
# domain name
for entry in resp.answer:
if domain.lower() != entry.name.to_text().lower()[:-1]:
logging.debug("%sWrong domain name %s for %s!"
% (log_prefix, entry.name.to_text().lower()[:-1], domain))
results['response1-domain'] = entry.name.to_text().lower()[:-1]
break
except socket.timeout:
# if we didn't get anything, then set the results to nothing
logging.debug("%sQuerying DNS enteries for "
"%s (nameserver: %s) timed out!" % (log_prefix, domain, nameserver))
sock.close()
results['response1'] = None
self.results[domain].append(results)
return results
# if we have made it this far, then wait for the next response
try:
response2 = sock.recvfrom(4096)[0]
results['response2'] = b64encode(response2)
resp2 = dns.message.from_wire(response2)
results['response2-ips'] = parse_out_ips(resp2)
# first domain name in response should be the same with query
# domain name
for entry in resp2.answer:
if domain.lower() != entry.name.to_text().lower()[:-1]:
logging.debug("%sWrong domain name %s for %s!"
% (log_prefix, entry.name.to_text().lower()[:-1], domain))
results['response2-domain'] = entry.name.to_text().lower()[:-1]
break
except socket.timeout:
# no second response
results['response2'] = None
sock.close()
self.results[domain].append(results)
return results |
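The core primitive, shown standalone (assumes dnspython is installed); the method above layers port management, timeouts and result bookkeeping on top of this:
import socket
import dns.message
import dns.rdatatype

query = dns.message.make_query('example.com', dns.rdatatype.A)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(3)
sock.sendto(query.to_wire(), ('8.8.8.8', 53))         # any resolver works here
reply = dns.message.from_wire(sock.recvfrom(4096)[0])
print([rr.to_text() for rr in reply.answer])
sock.close()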
def _get_all_tables(self, dataset_id, cache=False, project_id=None):
"""Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
"""
do_fetch = True
if cache and self.cache.get(dataset_id):
time, result = self.cache.get(dataset_id)
if datetime.now() - time < CACHE_TIMEOUT:
do_fetch = False
if do_fetch:
result = self._get_all_tables_for_dataset(dataset_id, project_id)
self.cache[dataset_id] = (datetime.now(), result)
return self._parse_table_list_response(result) | Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names | Below is the instruction that describes the task:
### Input:
Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
### Response:
def _get_all_tables(self, dataset_id, cache=False, project_id=None):
"""Retrieve the list of tables for dataset, that respect the formats:
* appid_YYYY_MM
* YYYY_MM_appid
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
cache : bool, optional
To use cached value or not (default False). Timeout value equals
CACHE_TIMEOUT.
project_id: str
Unique ``str`` identifying the BigQuery project contains the dataset
Returns
-------
dict
A ``dict`` of app ids mapped to their table names
"""
do_fetch = True
if cache and self.cache.get(dataset_id):
time, result = self.cache.get(dataset_id)
if datetime.now() - time < CACHE_TIMEOUT:
do_fetch = False
if do_fetch:
result = self._get_all_tables_for_dataset(dataset_id, project_id)
self.cache[dataset_id] = (datetime.now(), result)
return self._parse_table_list_response(result) |
def del_node(self, node):
"""
Delete a given node from the hypergraph.
@type node: node
@param node: Node identifier.
"""
if self.has_node(node):
for e in self.node_links[node]:
self.edge_links[e].remove(node)
self.node_links.pop(node)
self.graph.del_node((node,'n')) | Delete a given node from the hypergraph.
@type node: node
@param node: Node identifier. | Below is the instruction that describes the task:
### Input:
Delete a given node from the hypergraph.
@type node: node
@param node: Node identifier.
### Response:
def del_node(self, node):
"""
Delete a given node from the hypergraph.
@type node: node
@param node: Node identifier.
"""
if self.has_node(node):
for e in self.node_links[node]:
self.edge_links[e].remove(node)
self.node_links.pop(node)
self.graph.del_node((node,'n')) |
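A hedged usage sketch against the python-graph hypergraph API this method appears to belong to; the method and class names are assumptions based on the code above.
from pygraph.classes.hypergraph import hypergraph

h = hypergraph()
h.add_nodes(['a', 'b'])
h.add_hyperedge('e1')
h.link('a', 'e1')
h.link('b', 'e1')
h.del_node('a')          # 'a' is also unlinked from e1
print(h.nodes())         # ['b']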
def parse_name(name):
""" Split a query name into field name, operator and whether it is
inverted. """
inverted, op = False, OP_EQ
if name is not None:
for op_ in (OP_NIN, OP_IN, OP_NOT, OP_LIKE):
if name.endswith(op_):
op = op_
name = name[:len(name) - len(op)]
break
if name.startswith('!'):
inverted = True
name = name[1:]
return name, inverted, op | Split a query name into field name, operator and whether it is
inverted. | Below is the instruction that describes the task:
### Input:
Split a query name into field name, operator and whether it is
inverted.
### Response:
def parse_name(name):
""" Split a query name into field name, operator and whether it is
inverted. """
inverted, op = False, OP_EQ
if name is not None:
for op_ in (OP_NIN, OP_IN, OP_NOT, OP_LIKE):
if name.endswith(op_):
op = op_
name = name[:len(name) - len(op)]
break
if name.startswith('!'):
inverted = True
name = name[1:]
return name, inverted, op |
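A worked example with illustrative operator suffixes; the real OP_* constants live in the module, and the check order matters because '__in' is a suffix of '__nin'.
OP_EQ, OP_NOT, OP_IN, OP_NIN, OP_LIKE = '', '__not', '__in', '__nin', '__like'
print(parse_name('status__in'))    # ('status', False, '__in')
print(parse_name('!name__like'))   # ('name', True, '__like')
print(parse_name(None))            # (None, False, '')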
def is_tp(self, atol=None, rtol=None):
"""Test if a channel is completely-positive (CP)"""
choi = _to_choi(self.rep, self._data, *self.dim)
return self._is_tp_helper(choi, atol, rtol) | Test if a channel is trace-preserving (TP) | Below is the instruction that describes the task:
### Input:
Test if a channel is trace-preserving (TP)
### Response:
def is_tp(self, atol=None, rtol=None):
"""Test if a channel is completely-positive (CP)"""
choi = _to_choi(self.rep, self._data, *self.dim)
return self._is_tp_helper(choi, atol, rtol) |
def prob_imf(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
alpha = kwargs.get('alpha', -2.35)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_imf = max_mass**(alpha + 1)/(alpha + 1)
C_imf -= min_mass**(alpha + 1)/(alpha + 1)
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
bound = np.sign(max_mtotal - m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
bound_idx = np.where(bound != 2)  # pairs outside the allowed mass region
p_m1_m2 = np.zeros_like(m1)
idx = np.where(m1 <= max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(m1[idx] - min_mass)
idx = np.where(m1 > max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(max_mass - m1[idx])
p_m1_m2[bound_idx] = 0  # zero out-of-bound pairs, not the branch just computed
return p_m1_m2/2. | Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair | Below is the instruction that describes the task:
### Input:
Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
### Response:
def prob_imf(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
alpha = kwargs.get('alpha', -2.35)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_imf = max_mass**(alpha + 1)/(alpha + 1)
C_imf -= min_mass**(alpha + 1)/(alpha + 1)
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
bound = np.sign(max_mtotal - m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
bound_idx = np.where(bound != 2)  # pairs outside the allowed mass region
p_m1_m2 = np.zeros_like(m1)
idx = np.where(m1 <= max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(m1[idx] - min_mass)
idx = np.where(m1 > max_mtotal/2.)
p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha /(max_mass - m1[idx])
p_m1_m2[bound_idx] = 0  # zero out-of-bound pairs, not the branch just computed
return p_m1_m2/2. |
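A quick usage check (spins are accepted but unused):
import numpy as np
p = prob_imf([10., 40.], [8., 20.], 0., 0., min_mass=5., max_mass=95.)
print(p)   # densities for the (10, 8) and (40, 20) pairs; zero out of bounds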
def RunStateMethod(self,
method_name,
request=None,
responses=None,
event=None,
direct_response=None):
"""Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one.
"""
client_id = None
try:
self.context.current_state = method_name
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debug("%s Running %s with %d responses from %s",
self.session_id, method_name, len(responses), client_id)
else:
logging.debug("%s Running state method %s", self.session_id,
method_name)
# Extend our lease if needed.
self.hunt_obj.HeartBeat()
try:
method = getattr(self.hunt_obj, method_name)
except AttributeError:
raise flow_runner.FlowRunnerError(
"Flow %s has no state method %s" %
(self.hunt_obj.__class__.__name__, method_name))
if direct_response:
method(direct_response)
elif method_name == "Start":
method()
else:
# Prepare a responses object for the state method to use:
responses = flow_responses.Responses.FromLegacyResponses(
request=request, responses=responses)
if responses.status:
self.SaveResourceUsage(request.client_id, responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
method(responses)
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
# TODO(user): Deprecate in favor of 'flow_errors'.
stats_collector_instance.Get().IncrementCounter("grr_flow_errors")
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.hunt_obj.Name()])
logging.exception("Hunt %s raised %s.", self.session_id, e)
self.Error(traceback.format_exc(), client_id=client_id)
finally:
if event:
event.set() | Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one. | Below is the instruction that describes the task:
### Input:
Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one.
### Response:
def RunStateMethod(self,
method_name,
request=None,
responses=None,
event=None,
direct_response=None):
"""Completes the request by calling the state method.
Args:
method_name: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one.
"""
client_id = None
try:
self.context.current_state = method_name
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debug("%s Running %s with %d responses from %s",
self.session_id, method_name, len(responses), client_id)
else:
logging.debug("%s Running state method %s", self.session_id,
method_name)
# Extend our lease if needed.
self.hunt_obj.HeartBeat()
try:
method = getattr(self.hunt_obj, method_name)
except AttributeError:
raise flow_runner.FlowRunnerError(
"Flow %s has no state method %s" %
(self.hunt_obj.__class__.__name__, method_name))
if direct_response:
method(direct_response)
elif method_name == "Start":
method()
else:
# Prepare a responses object for the state method to use:
responses = flow_responses.Responses.FromLegacyResponses(
request=request, responses=responses)
if responses.status:
self.SaveResourceUsage(request.client_id, responses.status)
stats_collector_instance.Get().IncrementCounter("grr_worker_states_run")
method(responses)
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
# TODO(user): Deprecate in favor of 'flow_errors'.
stats_collector_instance.Get().IncrementCounter("grr_flow_errors")
stats_collector_instance.Get().IncrementCounter(
"flow_errors", fields=[self.hunt_obj.Name()])
logging.exception("Hunt %s raised %s.", self.session_id, e)
self.Error(traceback.format_exc(), client_id=client_id)
finally:
if event:
event.set() |
def _utc_datetime_to_epoch(self, activity_datetime):
"""
Convert the specified datetime value to a unix epoch timestamp (seconds since epoch).
:param activity_datetime: A string which may contain tzinfo (offset) or a datetime object (naive datetime will
be considered to be UTC).
:return: Epoch timestamp.
:rtype: int
"""
if isinstance(activity_datetime, str):
activity_datetime = arrow.get(activity_datetime).datetime
assert isinstance(activity_datetime, datetime)
if activity_datetime.tzinfo:
activity_datetime = activity_datetime.astimezone(pytz.utc)
return calendar.timegm(activity_datetime.timetuple()) | Convert the specified datetime value to a unix epoch timestamp (seconds since epoch).
:param activity_datetime: A string which may contain tzinfo (offset) or a datetime object (naive datetime will
be considered to be UTC).
:return: Epoch timestamp.
:rtype: int | Below is the instruction that describes the task:
### Input:
Convert the specified datetime value to a unix epoch timestamp (seconds since epoch).
:param activity_datetime: A string which may contain tzinfo (offset) or a datetime object (naive datetime will
be considered to be UTC).
:return: Epoch timestamp.
:rtype: int
### Response:
def _utc_datetime_to_epoch(self, activity_datetime):
"""
Convert the specified datetime value to a unix epoch timestamp (seconds since epoch).
:param activity_datetime: A string which may contain tzinfo (offset) or a datetime object (naive datetime will
be considered to be UTC).
:return: Epoch timestamp.
:rtype: int
"""
if isinstance(activity_datetime, str):
activity_datetime = arrow.get(activity_datetime).datetime
assert isinstance(activity_datetime, datetime)
if activity_datetime.tzinfo:
activity_datetime = activity_datetime.astimezone(pytz.utc)
return calendar.timegm(activity_datetime.timetuple()) |
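A usage sketch, assuming the module's imports (arrow, calendar, datetime, pytz) are in scope; since `self` is unused the method can be called standalone here.
from datetime import datetime
import pytz
naive = datetime(2020, 1, 1, 12, 0, 0)                       # treated as UTC
aware = pytz.timezone('US/Eastern').localize(datetime(2020, 1, 1, 7, 0, 0))
assert _utc_datetime_to_epoch(None, naive) == _utc_datetime_to_epoch(None, aware)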
def get_request_headers(self, *args, **kwds):
"""
A convenience method for obtaining the headers that were sent to the
S3 server.
The AWS S3 API depends upon setting headers. This method is provided as
a convenience for debugging issues with the S3 communications.
"""
if self.request_headers:
return self._unpack_headers(self.request_headers) | A convenience method for obtaining the headers that were sent to the
S3 server.
The AWS S3 API depends upon setting headers. This method is provided as
a convenience for debugging issues with the S3 communications. | Below is the instruction that describes the task:
### Input:
A convenience method for obtaining the headers that were sent to the
S3 server.
The AWS S3 API depends upon setting headers. This method is provided as
a convenience for debugging issues with the S3 communications.
### Response:
def get_request_headers(self, *args, **kwds):
"""
A convenience method for obtaining the headers that were sent to the
S3 server.
The AWS S3 API depends upon setting headers. This method is provided as
a convenience for debugging issues with the S3 communications.
"""
if self.request_headers:
return self._unpack_headers(self.request_headers) |
def add_cli_options(cli_options, action):
# type: (dict, str) -> None
"""Adds CLI options to the configuration object
:param dict cli_options: CLI options dict
:param TransferAction action: action
"""
cli_options['_action'] = action.name.lower()
# if url is present, convert to constituent options
if blobxfer.util.is_not_empty(cli_options.get('storage_url')):
if (blobxfer.util.is_not_empty(cli_options.get('storage_account')) or
blobxfer.util.is_not_empty(cli_options.get('mode')) or
blobxfer.util.is_not_empty(cli_options.get('endpoint')) or
blobxfer.util.is_not_empty(cli_options.get('remote_path'))):
raise ValueError(
'Specified both --storage-url and --storage-account, '
'--mode, --endpoint, or --remote-path')
cli_options['storage_account'], mode, \
cli_options['endpoint'], \
cli_options['remote_path'], \
sas = blobxfer.util.explode_azure_storage_url(
cli_options['storage_url'])
if blobxfer.util.is_not_empty(sas):
if blobxfer.util.is_not_empty(cli_options['sas']):
raise ValueError(
'Specified both --storage-url with a SAS token and --sas')
cli_options['sas'] = sas
if mode != 'blob' and mode != 'file':
raise ValueError(
'Invalid derived mode from --storage-url: {}'.format(mode))
if mode == 'file':
cli_options['mode'] = mode
del mode
del sas
storage_account = cli_options.get('storage_account')
azstorage = {
'endpoint': cli_options.get('endpoint')
}
if blobxfer.util.is_not_empty(storage_account):
azstorage['accounts'] = {
storage_account: (
cli_options.get('access_key') or cli_options.get('sas')
)
}
sa_rp = {
storage_account: cli_options.get('remote_path')
}
local_resource = cli_options.get('local_resource')
# construct "argument" from cli options
if action == TransferAction.Download:
arg = {
'source': [sa_rp] if sa_rp[storage_account] is not None else None,
'destination': local_resource if local_resource is not None else
None,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'check_file_md5': cli_options.get('file_md5'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'delete_extraneous_destination': cli_options.get('delete'),
'max_single_object_concurrency': cli_options.get(
'max_single_object_concurrency'),
'mode': cli_options.get('mode'),
'overwrite': cli_options.get('overwrite'),
'recursive': cli_options.get('recursive'),
'rename': cli_options.get('rename'),
'rsa_private_key': cli_options.get('rsa_private_key'),
'rsa_private_key_passphrase': cli_options.get(
'rsa_private_key_passphrase'),
'restore_file_properties': {
'attributes': cli_options.get('file_attributes'),
'lmt': cli_options.get('restore_file_lmt'),
'md5': None,
},
'strip_components': cli_options.get('strip_components'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
},
}
elif action == TransferAction.Synccopy:
# if url is present, convert to constituent options
if blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_storage_url')):
if (blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_storage_account')) or
blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_mode')) or
blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_remote_path'))):
raise ValueError(
'Specified both --sync-copy-dest-storage-url and '
'--sync-copy-dest-storage-account, '
'--sync-copy-dest-mode, or '
'--sync-copy-dest-remote-path')
cli_options['sync_copy_dest_storage_account'], mode, _, \
cli_options['sync_copy_dest_remote_path'], sas = \
blobxfer.util.explode_azure_storage_url(
cli_options['sync_copy_dest_storage_url'])
if blobxfer.util.is_not_empty(sas):
if blobxfer.util.is_not_empty(
cli_options['sync_copy_dest_sas']):
raise ValueError(
'Specified both --sync-copy-dest-storage-url with '
'a SAS token and --sync-copy-dest-sas')
cli_options['sync_copy_dest_sas'] = sas
if mode != 'blob' and mode != 'file':
raise ValueError(
'Invalid derived destination mode from '
'--sync-copy-dest-storage-url: {}'.format(mode))
if mode == 'file':
cli_options['dest_mode'] = mode
del mode
del sas
sync_copy_dest_storage_account = cli_options.get(
'sync_copy_dest_storage_account')
sync_copy_dest_remote_path = cli_options.get(
'sync_copy_dest_remote_path')
if (sync_copy_dest_storage_account is not None and
sync_copy_dest_remote_path is not None):
sync_copy_dest = [
{
sync_copy_dest_storage_account:
sync_copy_dest_remote_path
}
]
azstorage['accounts'][sync_copy_dest_storage_account] = (
cli_options.get('sync_copy_dest_access_key') or
cli_options.get('sync_copy_dest_sas')
)
else:
sync_copy_dest = None
arg = {
'source': [sa_rp] if sa_rp[storage_account] is not None else None,
'destination': sync_copy_dest,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'access_tier': cli_options.get('access_tier'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'dest_mode': cli_options.get('sync_copy_dest_mode'),
'mode': cli_options.get('mode'),
'overwrite': cli_options.get('overwrite'),
'rename': cli_options.get('rename'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
},
}
elif action == TransferAction.Upload:
arg = {
'source': [local_resource] if local_resource is not None else None,
'destination': [sa_rp] if sa_rp[storage_account] is not None else
None,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'access_tier': cli_options.get('access_tier'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'delete_extraneous_destination': cli_options.get('delete'),
'mode': cli_options.get('mode'),
'one_shot_bytes': cli_options.get('one_shot_bytes'),
'overwrite': cli_options.get('overwrite'),
'recursive': cli_options.get('recursive'),
'rename': cli_options.get('rename'),
'rsa_private_key': cli_options.get('rsa_private_key'),
'rsa_private_key_passphrase': cli_options.get(
'rsa_private_key_passphrase'),
'rsa_public_key': cli_options.get('rsa_public_key'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
'stdin_as_page_blob_size': cli_options.get(
'stdin_as_page_blob_size'),
'store_file_properties': {
'attributes': cli_options.get('file_attributes'),
'cache_control': cli_options.get('file_cache_control'),
'lmt': None,
'md5': cli_options.get('file_md5'),
},
'strip_components': cli_options.get('strip_components'),
'vectored_io': {
'stripe_chunk_size_bytes': cli_options.get(
'stripe_chunk_size_bytes'),
'distribution_mode': cli_options.get('distribution_mode'),
},
},
}
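    # exactly one of source/destination present is an error below; supplying
    # both or neither is acceptable (neither presumably means a config file
    # provides them)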
count = 0
if arg['source'] is None:
arg.pop('source')
count += 1
if arg['destination'] is None:
arg.pop('destination')
count += 1
if count == 1:
if action == TransferAction.Synccopy:
raise ValueError(
'--remote-path and --sync-copy-dest-remote-path must be '
'specified together through the commandline')
else:
raise ValueError(
'--local-path and --remote-path must be specified together '
'through the commandline')
if 'accounts' in azstorage:
cli_options['azure_storage'] = azstorage
cli_options[action.name.lower()] = arg | Adds CLI options to the configuration object
:param dict cli_options: CLI options dict
:param TransferAction action: action | Below is the instruction that describes the task:
### Input:
Adds CLI options to the configuration object
:param dict cli_options: CLI options dict
:param TransferAction action: action
### Response:
def add_cli_options(cli_options, action):
    # type: (dict, TransferAction) -> None
"""Adds CLI options to the configuration object
:param dict cli_options: CLI options dict
:param TransferAction action: action
"""
cli_options['_action'] = action.name.lower()
# if url is present, convert to constituent options
if blobxfer.util.is_not_empty(cli_options.get('storage_url')):
if (blobxfer.util.is_not_empty(cli_options.get('storage_account')) or
blobxfer.util.is_not_empty(cli_options.get('mode')) or
blobxfer.util.is_not_empty(cli_options.get('endpoint')) or
blobxfer.util.is_not_empty(cli_options.get('remote_path'))):
raise ValueError(
'Specified both --storage-url and --storage-account, '
'--mode, --endpoint, or --remote-path')
cli_options['storage_account'], mode, \
cli_options['endpoint'], \
cli_options['remote_path'], \
sas = blobxfer.util.explode_azure_storage_url(
cli_options['storage_url'])
if blobxfer.util.is_not_empty(sas):
if blobxfer.util.is_not_empty(cli_options['sas']):
raise ValueError(
'Specified both --storage-url with a SAS token and --sas')
cli_options['sas'] = sas
if mode != 'blob' and mode != 'file':
raise ValueError(
'Invalid derived mode from --storage-url: {}'.format(mode))
if mode == 'file':
cli_options['mode'] = mode
del mode
del sas
storage_account = cli_options.get('storage_account')
azstorage = {
'endpoint': cli_options.get('endpoint')
}
if blobxfer.util.is_not_empty(storage_account):
azstorage['accounts'] = {
storage_account: (
cli_options.get('access_key') or cli_options.get('sas')
)
}
sa_rp = {
storage_account: cli_options.get('remote_path')
}
local_resource = cli_options.get('local_resource')
# construct "argument" from cli options
if action == TransferAction.Download:
arg = {
'source': [sa_rp] if sa_rp[storage_account] is not None else None,
'destination': local_resource if local_resource is not None else
None,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'check_file_md5': cli_options.get('file_md5'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'delete_extraneous_destination': cli_options.get('delete'),
'max_single_object_concurrency': cli_options.get(
'max_single_object_concurrency'),
'mode': cli_options.get('mode'),
'overwrite': cli_options.get('overwrite'),
'recursive': cli_options.get('recursive'),
'rename': cli_options.get('rename'),
'rsa_private_key': cli_options.get('rsa_private_key'),
'rsa_private_key_passphrase': cli_options.get(
'rsa_private_key_passphrase'),
'restore_file_properties': {
'attributes': cli_options.get('file_attributes'),
'lmt': cli_options.get('restore_file_lmt'),
'md5': None,
},
'strip_components': cli_options.get('strip_components'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
},
}
elif action == TransferAction.Synccopy:
# if url is present, convert to constituent options
if blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_storage_url')):
if (blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_storage_account')) or
blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_mode')) or
blobxfer.util.is_not_empty(
cli_options.get('sync_copy_dest_remote_path'))):
raise ValueError(
'Specified both --sync-copy-dest-storage-url and '
'--sync-copy-dest-storage-account, '
                    '--sync-copy-dest-mode, or '
'--sync-copy-dest-remote-path')
cli_options['sync_copy_dest_storage_account'], mode, _, \
cli_options['sync_copy_dest_remote_path'], sas = \
blobxfer.util.explode_azure_storage_url(
cli_options['sync_copy_dest_storage_url'])
if blobxfer.util.is_not_empty(sas):
if blobxfer.util.is_not_empty(
cli_options['sync_copy_dest_sas']):
raise ValueError(
'Specified both --sync-copy-dest-storage-url with '
'a SAS token and --sync-copy-dest-sas')
cli_options['sync_copy_dest_sas'] = sas
if mode != 'blob' and mode != 'file':
raise ValueError(
'Invalid derived destination mode from '
'--sync-copy-dest-storage-url: {}'.format(mode))
            if mode == 'file':
                cli_options['sync_copy_dest_mode'] = mode
del mode
del sas
sync_copy_dest_storage_account = cli_options.get(
'sync_copy_dest_storage_account')
sync_copy_dest_remote_path = cli_options.get(
'sync_copy_dest_remote_path')
if (sync_copy_dest_storage_account is not None and
sync_copy_dest_remote_path is not None):
sync_copy_dest = [
{
sync_copy_dest_storage_account:
sync_copy_dest_remote_path
}
]
azstorage['accounts'][sync_copy_dest_storage_account] = (
cli_options.get('sync_copy_dest_access_key') or
cli_options.get('sync_copy_dest_sas')
)
else:
sync_copy_dest = None
arg = {
'source': [sa_rp] if sa_rp[storage_account] is not None else None,
'destination': sync_copy_dest,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'access_tier': cli_options.get('access_tier'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'dest_mode': cli_options.get('sync_copy_dest_mode'),
'mode': cli_options.get('mode'),
'overwrite': cli_options.get('overwrite'),
'rename': cli_options.get('rename'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
},
}
elif action == TransferAction.Upload:
arg = {
'source': [local_resource] if local_resource is not None else None,
'destination': [sa_rp] if sa_rp[storage_account] is not None else
None,
'include': cli_options.get('include'),
'exclude': cli_options.get('exclude'),
'options': {
'access_tier': cli_options.get('access_tier'),
'chunk_size_bytes': cli_options.get('chunk_size_bytes'),
'delete_extraneous_destination': cli_options.get('delete'),
'mode': cli_options.get('mode'),
'one_shot_bytes': cli_options.get('one_shot_bytes'),
'overwrite': cli_options.get('overwrite'),
'recursive': cli_options.get('recursive'),
'rename': cli_options.get('rename'),
'rsa_private_key': cli_options.get('rsa_private_key'),
'rsa_private_key_passphrase': cli_options.get(
'rsa_private_key_passphrase'),
'rsa_public_key': cli_options.get('rsa_public_key'),
'skip_on': {
'filesize_match': cli_options.get(
'skip_on_filesize_match'),
'lmt_ge': cli_options.get('skip_on_lmt_ge'),
'md5_match': cli_options.get('skip_on_md5_match'),
},
'stdin_as_page_blob_size': cli_options.get(
'stdin_as_page_blob_size'),
'store_file_properties': {
'attributes': cli_options.get('file_attributes'),
'cache_control': cli_options.get('file_cache_control'),
'lmt': None,
'md5': cli_options.get('file_md5'),
},
'strip_components': cli_options.get('strip_components'),
'vectored_io': {
'stripe_chunk_size_bytes': cli_options.get(
'stripe_chunk_size_bytes'),
'distribution_mode': cli_options.get('distribution_mode'),
},
},
}
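    # exactly one of source/destination present is an error below; supplying
    # both or neither is acceptable (neither presumably means a config file
    # provides them)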
count = 0
if arg['source'] is None:
arg.pop('source')
count += 1
if arg['destination'] is None:
arg.pop('destination')
count += 1
if count == 1:
if action == TransferAction.Synccopy:
raise ValueError(
'--remote-path and --sync-copy-dest-remote-path must be '
'specified together through the commandline')
else:
raise ValueError(
'--local-path and --remote-path must be specified together '
'through the commandline')
if 'accounts' in azstorage:
cli_options['azure_storage'] = azstorage
cli_options[action.name.lower()] = arg |
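
The Synccopy branch above relies on `blobxfer.util.explode_azure_storage_url` to split a storage URL into the `(account, mode, endpoint, remote_path, sas)` tuple it unpacks. That helper is not shown in this row, so the sketch below is a hypothetical stand-in illustrating the implied decomposition; the function name `explode_azure_storage_url_sketch` and its splitting rules are assumptions for illustration, not blobxfer's actual implementation.

```python
# Hypothetical sketch of decomposing an Azure storage URL into the
# five-tuple consumed by add_cli_options.
from urllib.parse import urlsplit


def explode_azure_storage_url_sketch(url):
    parts = urlsplit(url)
    # assume a host of the form <account>.<mode>.<endpoint>,
    # e.g. myaccount.blob.core.windows.net
    account, mode, endpoint = parts.netloc.split('.', 2)
    remote_path = parts.path.lstrip('/')  # container/share plus object path
    sas = parts.query or None             # a SAS token rides in the query
    return account, mode, endpoint, remote_path, sas


print(explode_azure_storage_url_sketch(
    'https://myaccount.blob.core.windows.net/mycontainer/dir/blob.bin'
    '?sv=2020-08-04&sig=abc'))
# ('myaccount', 'blob', 'core.windows.net', 'mycontainer/dir/blob.bin',
#  'sv=2020-08-04&sig=abc')
```

This also shows why the derived `mode` must be validated against `blob` and `file`: any other host label would be meaningless to the transfer engine.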
def add_remote(name, location):
'''
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
Example:
.. code-block:: yaml
add_flathub:
          flatpak.add_remote:
- name: flathub
- location: https://flathub.org/repo/flathub.flatpakrepo
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
old = __salt__['flatpak.is_remote_added'](name)
if not old:
if __opts__['test']:
ret['comment'] = 'Remote "{0}" would have been added'.format(name)
ret['changes']['new'] = name
ret['changes']['old'] = None
ret['result'] = None
return ret
        install_ret = __salt__['flatpak.add_remote'](name, location)
if __salt__['flatpak.is_remote_added'](name):
ret['comment'] = 'Remote "{0}" was added'.format(name)
ret['changes']['new'] = name
ret['changes']['old'] = None
ret['result'] = True
return ret
ret['comment'] = 'Failed to add remote "{0}"'.format(name)
ret['comment'] += '\noutput:\n' + install_ret['output']
ret['result'] = False
return ret
ret['comment'] = 'Remote "{0}" already exists'.format(name)
if __opts__['test']:
ret['result'] = None
return ret
ret['result'] = True
return ret | Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
Example:
.. code-block:: yaml
add_flathub:
  flatpak.add_remote:
- name: flathub
    - location: https://flathub.org/repo/flathub.flatpakrepo | Below is the instruction that describes the task:
### Input:
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
Example:
.. code-block:: yaml
add_flathub:
  flatpak.add_remote:
- name: flathub
- location: https://flathub.org/repo/flathub.flatpakrepo
### Response:
def add_remote(name, location):
'''
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
Example:
.. code-block:: yaml
add_flathub:
          flatpak.add_remote:
- name: flathub
- location: https://flathub.org/repo/flathub.flatpakrepo
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
old = __salt__['flatpak.is_remote_added'](name)
if not old:
if __opts__['test']:
ret['comment'] = 'Remote "{0}" would have been added'.format(name)
ret['changes']['new'] = name
ret['changes']['old'] = None
ret['result'] = None
return ret
        install_ret = __salt__['flatpak.add_remote'](name, location)
if __salt__['flatpak.is_remote_added'](name):
ret['comment'] = 'Remote "{0}" was added'.format(name)
ret['changes']['new'] = name
ret['changes']['old'] = None
ret['result'] = True
return ret
ret['comment'] = 'Failed to add remote "{0}"'.format(name)
ret['comment'] += '\noutput:\n' + install_ret['output']
ret['result'] = False
return ret
ret['comment'] = 'Remote "{0}" already exists'.format(name)
if __opts__['test']:
ret['result'] = None
return ret
ret['result'] = True
return ret |
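
Because `__salt__` and `__opts__` are dunders injected by Salt's loader at runtime, the state above can be exercised outside Salt by stubbing them at module level. A minimal harness, assuming `add_remote` is pasted into the same module as these stubs (the stubs themselves are illustrative, not Salt's real loader behavior):

```python
# Stand-ins for Salt's loader-injected dunders (illustrative stubs only).
__opts__ = {'test': True}
__salt__ = {
    'flatpak.is_remote_added': lambda name: False,
    'flatpak.add_remote': lambda name, location: {'output': ''},
}

ret = add_remote('flathub', 'https://flathub.org/repo/flathub.flatpakrepo')
assert ret['result'] is None  # test mode reports the pending change only
assert ret['changes'] == {'new': 'flathub', 'old': None}
```

Flipping `__opts__['test']` to `False` drives the harness through the real add-and-verify path instead.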
def cur_space(self, name=None):
"""Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
"""
if name is None:
return self._impl.model.currentspace.interface
else:
self._impl.model.currentspace = self._impl.spaces[name]
return self.cur_space() | Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned. | Below is the instruction that describes the task:
### Input:
Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
### Response:
def cur_space(self, name=None):
"""Set the current space to Space ``name`` and return it.
If called without arguments, the current space is returned.
Otherwise, the current space is set to the space named ``name``
and the space is returned.
"""
if name is None:
return self._impl.model.currentspace.interface
else:
self._impl.model.currentspace = self._impl.spaces[name]
return self.cur_space() |
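
`cur_space` is a combined getter and setter: with no argument it reads the current space, and with a name it switches and then returns the result of re-reading. A hedged usage sketch, assuming modelx's `new_model` and `new_space` API (verify against the modelx documentation for your version):

```python
import modelx as mx

model = mx.new_model()
model.new_space('SpaceA')
model.new_space('SpaceB')

space = model.cur_space('SpaceA')  # switch the current space and return it
print(space.name)                  # 'SpaceA'
print(model.cur_space().name)      # no argument: read the current space
```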
def add_prerequisite(self, prerequisite):
"""Adds prerequisites"""
if self.prerequisites is None:
self.prerequisites = SCons.Util.UniqueList()
self.prerequisites.extend(prerequisite)
    self._children_reset() | Adds prerequisites | Below is the instruction that describes the task:
### Input:
Adds prerequisites
### Response:
def add_prerequisite(self, prerequisite):
"""Adds prerequisites"""
if self.prerequisites is None:
self.prerequisites = SCons.Util.UniqueList()
self.prerequisites.extend(prerequisite)
self._children_reset() |
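
`add_prerequisite` allocates its `SCons.Util.UniqueList` lazily and then calls `extend`, which implies `prerequisite` is an iterable of nodes despite the singular name. A minimal sketch of the same lazy, order-preserving de-duplication (illustrative only, not SCons's implementation):

```python
class UniqueListSketch(list):
    """List that drops duplicates on extend, keeping first-seen order."""

    def extend(self, items):
        for item in items:
            if item not in self:
                self.append(item)


prerequisites = None


def add_prerequisite_sketch(prerequisite):
    global prerequisites
    if prerequisites is None:           # allocate only on first use
        prerequisites = UniqueListSketch()
    prerequisites.extend(prerequisite)  # expects an iterable

add_prerequisite_sketch(['a.h', 'b.h'])
add_prerequisite_sketch(['b.h', 'c.h'])
print(prerequisites)  # ['a.h', 'b.h', 'c.h']
```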