code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def _try_resolving_indirect_jumps(self, sim_successors, cfg_node, func_addr, successors, exception_info, artifacts):
"""
Resolve indirect jumps specified by sim_successors.addr.
:param SimSuccessors sim_successors: The SimSuccessors instance.
:param CFGNode cfg_node: The CFGNode instance.
:param int func_addr: Current function address.
:param list successors: A list of successors.
:param tuple exception_info: The sys.exc_info() of the exception, or None if none occurred.
:param artifacts: A container of collected information.
:return: Resolved successors
:rtype: list
"""
# Try to resolve indirect jumps with advanced backward slicing (if enabled)
if sim_successors.sort == 'IRSB' and \
self._is_indirect_jump(cfg_node, sim_successors):
l.debug('IRSB %#x has an indirect jump as its default exit', cfg_node.addr)
# We need input states to perform backward slicing
if self._advanced_backward_slicing and self._keep_state:
# Optimization: make sure we only try to resolve an indirect jump if any of the following criteria holds
# - It's a jump (Ijk_Boring), and its target is either fully symbolic, or its resolved target is within
# the current binary
# - It's a call (Ijk_Call), and its target is fully symbolic
# TODO: This is very hackish, please refactor this part of code later
should_resolve = True
legit_successors = [suc for suc in successors if suc.history.jumpkind in ('Ijk_Boring', 'Ijk_InvalICache', 'Ijk_Call')]
if legit_successors:
legit_successor = legit_successors[0]
if legit_successor.ip.symbolic:
if legit_successor.history.jumpkind != 'Ijk_Call':
should_resolve = False
else:
if legit_successor.history.jumpkind == 'Ijk_Call':
should_resolve = False
else:
concrete_target = legit_successor.solver.eval(legit_successor.ip)
if self.project.loader.find_object_containing(
concrete_target) is not self.project.loader.main_object:
should_resolve = False
else:
# No interesting successors... skip
should_resolve = False
# TODO: Handle those successors
if not should_resolve:
l.debug("This might not be an indirect jump that has multiple targets. Skipped.")
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
else:
more_successors = self._backward_slice_indirect(cfg_node, sim_successors, func_addr)
if more_successors:
# Remove the symbolic successor
# TODO: Now we are removing all symbolic successors. Is it possible
# TODO: that there is more than one symbolic successor?
all_successors = [suc for suc in successors if not suc.solver.symbolic(suc.ip)]
# Insert new successors
# We insert new successors in the beginning of all_successors list so that we don't break the
# assumption that Ijk_FakeRet is always the last element in the list
for suc_addr in more_successors:
a = sim_successors.all_successors[0].copy()
a.ip = suc_addr
all_successors.insert(0, a)
l.debug('The indirect jump is successfully resolved.')
self.kb.resolved_indirect_jumps.add(cfg_node.addr)
else:
l.debug('Failed to resolve the indirect jump.')
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
else:
if not successors:
l.debug('Cannot resolve the indirect jump without advanced backward slicing enabled: %s',
cfg_node)
# Try to find more successors if we failed to resolve the indirect jump before
if exception_info is None and (cfg_node.is_simprocedure or self._is_indirect_jump(cfg_node, sim_successors)):
has_call_jumps = any(suc_state.history.jumpkind == 'Ijk_Call' for suc_state in successors)
if has_call_jumps:
concrete_successors = [suc_state for suc_state in successors if
suc_state.history.jumpkind != 'Ijk_FakeRet' and not suc_state.solver.symbolic(
suc_state.ip)]
else:
concrete_successors = [suc_state for suc_state in successors if
not suc_state.solver.symbolic(suc_state.ip)]
symbolic_successors = [suc_state for suc_state in successors if suc_state.solver.symbolic(suc_state.ip)]
resolved = not symbolic_successors
if symbolic_successors:
for suc in symbolic_successors:
if o.SYMBOLIC in suc.options:
targets = suc.solver.eval_upto(suc.ip, 32)
if len(targets) < 32:
all_successors = []
resolved = True
for t in targets:
new_ex = suc.copy()
new_ex.ip = suc.solver.BVV(t, suc.ip.size())
all_successors.append(new_ex)
else:
break
if not resolved and (
(symbolic_successors and not concrete_successors) or
(not cfg_node.is_simprocedure and self._is_indirect_jump(cfg_node, sim_successors))
):
l.debug("%s has an indirect jump. See what we can do about it.", cfg_node)
if sim_successors.sort == 'SimProcedure' and \
sim_successors.artifacts['adds_exits']:
# Skip those SimProcedures that don't create new SimExits
l.debug('We got a SimProcedure %s in fastpath mode that creates new exits.', sim_successors.description)
if self._enable_symbolic_back_traversal:
successors = self._symbolically_back_traverse(sim_successors, artifacts, cfg_node)
# mark jump as resolved if we got successors
if successors:
self.kb.resolved_indirect_jumps.add(cfg_node.addr)
else:
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
l.debug("Got %d concrete exits in symbolic mode.", len(successors))
else:
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
# keep fake_rets
successors = [s for s in successors if s.history.jumpkind == "Ijk_FakeRet"]
elif sim_successors.sort == 'IRSB' and \
any(ex.history.jumpkind != 'Ijk_Ret' for ex in successors):
# We cannot properly handle returns, as that would require starting execution from the caller...
l.debug("Try traversal backwards in symbolic mode on %s.", cfg_node)
if self._enable_symbolic_back_traversal:
successors = self._symbolically_back_traverse(sim_successors, artifacts, cfg_node)
# Remove successors whose IP doesn't make sense
successors = [suc for suc in successors
if self._is_address_executable(suc.solver.eval_one(suc.ip))]
# mark jump as resolved if we got successors
if successors:
self.kb.resolved_indirect_jumps.add(cfg_node.addr)
else:
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
l.debug('Got %d concrete exits in symbolic mode', len(successors))
else:
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
successors = []
elif successors and all(ex.history.jumpkind == 'Ijk_Ret' for ex in successors):
l.debug('All exits are returns (Ijk_Ret). They will be handled by pending exits.')
else:
l.debug('Cannot resolve this indirect jump: %s', cfg_node)
self.kb.unresolved_indirect_jumps.add(cfg_node.addr)
return successors | Resolve indirect jumps specified by sim_successors.addr.
:param SimSuccessors sim_successors: The SimSuccessors instance.
:param CFGNode cfg_node: The CFGNode instance.
:param int func_addr: Current function address.
:param list successors: A list of successors.
:param tuple exception_info: The sys.exc_info() of the exception, or None if none occurred.
:param artifacts: A container of collected information.
:return: Resolved successors
:rtype: list |
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback) | The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call). |
def get_optional_env(key):
"""
Return the value of an optional environment variable, and use
the provided default if it's not set.
"""
environment_variable_value = os.environ.get(key)
if environment_variable_value:
return environment_variable_value
elif key in CONSTANTS:
return CONSTANTS[key]
else:
raise Exception("The variable {0} is not set".format(key)) | Return the value of an optional environment variable, and use
the provided default if it's not set. |
def get_i_name(self, num, is_oai=None):
"""
This method is used mainly internally, but it can be handy if you work
with a raw MARC XML object and are not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` property.
"""
if num not in (1, 2):
raise ValueError("`num` parameter has to be 1 or 2!")
if is_oai is None:
is_oai = self.oai_marc
i_name = "ind" if not is_oai else "i"
return i_name + str(num) | This method is used mainly internally, but it can be handy if you work
with a raw MARC XML object and are not using getters.
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` property. |
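A usage sketch based on the docstring, where `record` stands for a hypothetical parsed MARC record exposing this method:

record.oai_marc = False
record.get_i_name(1)  # -> 'ind1'
record.oai_marc = True
record.get_i_name(2)  # -> 'i2'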
def upload(ctx, repo):
"""Upload the package to an index server.
This implies cleaning and re-building the package.
:param repo: Required. Name of the index server to upload to, as specified
in your .pypirc configuration file.
"""
artifacts = ' '.join(
shlex.quote(str(n))
for n in ROOT.joinpath('dist').glob('pipfile[-_]cli-*')
)
ctx.run(f'twine upload --repository="{repo}" {artifacts}') | Upload the package to an index server.
This implies cleaning and re-building the package.
:param repo: Required. Name of the index server to upload to, as specified
in your .pypirc configuration file. |
def get_filename(self, instance):
"""Get the filename
"""
filename = self.field.getFilename(instance)
if filename:
return filename
fieldname = self.get_field_name()
content_type = self.get_content_type(instance)
extension = mimetypes.guess_extension(content_type) or ""  # guess_extension can return None
return fieldname + extension | Get the filename |
def update_country_by_id(cls, country_id, country, **kwargs):
"""Update Country
Update attributes of Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_country_by_id(country_id, country, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to update. (required)
:param Country country: Attributes of country to update. (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_country_by_id_with_http_info(country_id, country, **kwargs)
else:
(data) = cls._update_country_by_id_with_http_info(country_id, country, **kwargs)
return data | Update Country
Update attributes of Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_country_by_id(country_id, country, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to update. (required)
:param Country country: Attributes of country to update. (required)
:return: Country
If the method is called asynchronously,
returns the request thread. |
def import_from_string(value):
"""Copy of rest_framework.settings.import_from_string"""
value = value.replace('-', '_')
try:
module_path, class_name = value.rsplit('.', 1)
module = import_module(module_path)
return getattr(module, class_name)
except (ImportError, AttributeError) as ex:
raise ImportError("Could not import '{}'. {}: {}.".format(
value, ex.__class__.__name__, ex)) | Copy of rest_framework.settings.import_from_string |
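For example, with a standard-library path chosen purely for illustration (and assuming the defining module has `from importlib import import_module`, as the function body requires):

cls = import_from_string('collections.OrderedDict')
assert cls.__name__ == 'OrderedDict'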
def versioned_bucket_lister(bucket, prefix='', delimiter='',
key_marker='', version_id_marker='', headers=None):
"""
A generator function for listing versions in a bucket.
"""
more_results = True
k = None
while more_results:
rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
version_id_marker=version_id_marker,
delimiter=delimiter, headers=headers,
max_keys=999)
for k in rs:
yield k
key_marker = rs.next_key_marker
version_id_marker = rs.next_version_id_marker
more_results = rs.is_truncated | A generator function for listing versions in a bucket. |
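A usage sketch with boto, assuming `bucket` is an existing versioning-enabled Bucket instance:

for version in versioned_bucket_lister(bucket, prefix='logs/'):
    print(version.name, version.version_id)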
def start(config, args):
"""Start Glances."""
# Load mode
global mode
if core.is_standalone():
from glances.standalone import GlancesStandalone as GlancesMode
elif core.is_client():
if core.is_client_browser():
from glances.client_browser import GlancesClientBrowser as GlancesMode
else:
from glances.client import GlancesClient as GlancesMode
elif core.is_server():
from glances.server import GlancesServer as GlancesMode
elif core.is_webserver():
from glances.webserver import GlancesWebServer as GlancesMode
# Init the mode
logger.info("Start {} mode".format(GlancesMode.__name__))
mode = GlancesMode(config=config, args=args)
# Start the main loop
mode.serve_forever()
# Shutdown
mode.end() | Start Glances. |
def __ldap_attr(self, fname, lname, type, group,
group_api): # pragma: no cover
"""User LDAP attributes."""
return {
'uid':
str(self.username).encode(),
'cn':
' '.join([fname, lname]).encode(),
'sn':
str(lname).encode(),
'givenname':
str(fname).encode(),
'homedirectory':
os.path.join(os.path.sep, 'home', self.username).encode(),
'loginshell':
os.path.join(os.path.sep, 'bin', 'bash').encode(),
'mail':
'@'.join([self.username, self.client.mail_domain]).encode(),
'uidnumber':
self.__uidnumber(type),
'gidnumber':
API.__gidnumber(group, group_api),
'userpassword':
str('{SSHA}' + API.__create_password().decode()).encode(),
} | User LDAP attributes. |
def write_point(self, **kw):
"""
Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`)
"""
assert 'type' in kw
self.convert_bool(kw, 'score_exit')
return self.write_tag_with_content('Point', **kw) | Write a task point to the file::
with writer.write_point(type=PointType.TURN):
writer.write_waypoint(...)
writer.write_observation_zone(...)
# <Point type="Turn"> ... </Point>
Inside the with clause the
:meth:`~aerofiles.xcsoar.Writer.write_waypoint` and
:meth:`~aerofiles.xcsoar.Writer.write_observation_zone` methods must be
used to write the details of the task point.
:param type: type of the task point (one of the constants in
:class:`~aerofiles.xcsoar.constants.PointType`) |
def polygon_list(self):
'''return a list of polygons for the waypoints'''
done = set()
ret = []
while len(done) != self.count():
p = self.polygon(done)
if len(p) > 0:
ret.append(p)
return ret | return a list of polygons for the waypoints |
def encode_matrix_parameters(parameters):
"""
Performs encoding of url matrix parameters from dictionary to
a string.
See http://www.w3.org/DesignIssues/MatrixURIs.html for specs.
"""
result = []
for param in iter(sorted(parameters)):
if isinstance(parameters[param], (list, tuple)):
value = (';%s=' % (param)).join(parameters[param])
else:
value = parameters[param]
result.append("%s=%s" % (param, value))
return ';'.join(result) | Performs encoding of url matrix parameters from dictionary to
a string.
See http://www.w3.org/DesignIssues/MatrixURIs.html for specs. |
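Worked example: keys are sorted, and list values repeat the parameter name when joined:

encode_matrix_parameters({'b': ['2', '3'], 'a': '1'})
# -> 'a=1;b=2;b=3'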
def _check_cmd(call):
'''
Check the output of the cmd.run_all function call.
'''
if call['retcode'] != 0:
comment = ''
std_err = call.get('stderr')
std_out = call.get('stdout')
if std_err:
comment += std_err
if std_out:
comment += std_out
raise CommandExecutionError('Error running command: {0}'.format(comment))
return call | Check the output of the cmd.run_all function call. |
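For example, given cmd.run_all-style result dicts (CommandExecutionError is Salt's exception class, as used in the body):

_check_cmd({'retcode': 0, 'stdout': 'ok', 'stderr': ''})    # returns the dict unchanged
_check_cmd({'retcode': 1, 'stdout': '', 'stderr': 'boom'})  # raises CommandExecutionError('Error running command: boom')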
def eggs_clean(context):
"Remove egg directories"
#pylint: disable=unused-argument
dirs = set()
dirs.add('.eggs')
for name in os.listdir(os.curdir):
if name.endswith('.egg-info'):
dirs.add(name)
if name.endswith('.egg'):
dirs.add(name)
rmrf(dirs) | Remove egg directories |
def users(store):
"""Display a list of all users and which institutes they belong to."""
user_objs = list(store.users())
total_events = store.user_events().count()
for user_obj in user_objs:
if user_obj.get('institutes'):
user_obj['institutes'] = [store.institute(inst_id) for inst_id in user_obj.get('institutes')]
else:
user_obj['institutes'] = []
user_obj['events'] = store.user_events(user_obj).count()
user_obj['events_rank'] = event_rank(user_obj['events'])
return dict(
users=sorted(user_objs, key=lambda user: -user['events']),
total_events=total_events,
) | Display a list of all users and which institutes they belong to. |
def list_nodes(call=None, **kwargs):
'''
Return a list of the VMs that are in this location
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
conn = get_conn()
server_list = conn.server_list()
if not server_list:
return {}
for server in server_list:
server_tmp = conn.server_show(server_list[server]['id']).get(server)
# If the server is deleted while looking it up, skip
if server_tmp is None:
continue
private = []
public = []
if 'addresses' not in server_tmp:
server_tmp['addresses'] = {}
for network in server_tmp['addresses']:
for address in server_tmp['addresses'][network]:
if salt.utils.cloud.is_public_ip(address.get('addr', '')):
public.append(address['addr'])
elif ':' in address['addr']:
public.append(address['addr'])
elif '.' in address['addr']:
private.append(address['addr'])
if server_tmp['accessIPv4']:
if salt.utils.cloud.is_public_ip(server_tmp['accessIPv4']):
public.append(server_tmp['accessIPv4'])
else:
private.append(server_tmp['accessIPv4'])
if server_tmp['accessIPv6']:
public.append(server_tmp['accessIPv6'])
ret[server] = {
'id': server_tmp['id'],
'image': server_tmp['image']['id'],
'size': server_tmp['flavor']['id'],
'state': server_tmp['state'],
'private_ips': private,
'public_ips': public,
}
return ret | Return a list of the VMs that are in this location |
def xminvsks(self, **kwargs):
"""
Plot xmin versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function.
"""
pylab.plot(self._xmins,self._xmin_kstest,'.')
pylab.plot(self._xmin,self._ks,'s')
#pylab.errorbar([self._ks],self._alpha,yerr=self._alphaerr,fmt='+')
ax=pylab.gca()
ax.set_ylabel("KS statistic")
ax.set_xlabel("min(x)")
pylab.draw()
return ax | Plot xmin versus the ks value for derived alpha. This plot can be used
as a diagnostic of whether you have derived the 'best' fit: if there are
multiple local minima, your data set may be well suited to a broken
powerlaw or a different function. |
def as_detail(cls, protocol=Protocol.http, *args, **kwargs):
''' returns detail views '''
return cls.as_view('detail', protocol, *args, **kwargs) | returns detail views |
def set_substitution(self, what, rep):
"""Set a substitution.
Equivalent to ``! sub`` in RiveScript code.
:param str what: The original text to replace.
:param str rep: The text to replace it with.
Set this to ``None`` to delete the substitution.
"""
if rep is None:
# Unset the variable.
if what in self._subs:
del self._subs[what]
return
self._subs[what] = rep | Set a substitution.
Equivalent to ``! sub`` in RiveScript code.
:param str what: The original text to replace.
:param str rep: The text to replace it with.
Set this to ``None`` to delete the substitution. |
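A usage sketch, assuming `bot` is a RiveScript instance:

bot.set_substitution("can't", 'can not')  # rewrite "can't" before matching
bot.set_substitution("can't", None)       # remove the substitution again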
def first_frame(obj):
"Only display the first frame of an animated plot"
plot, renderer, fmt = single_frame_plot(obj)
plot.update(0)
return {'text/html': renderer.html(plot, fmt)} | Only display the first frame of an animated plot |
def meyer_penny_program():
"""
Returns the program to simulate the Meyer-Penny Game
The full description is available in docs/source/examples.rst
:return: pyQuil Program
"""
prog = pq.Program()
ro = prog.declare('ro', memory_size=2)
picard_register = ro[1]
answer_register = ro[0]
then_branch = pq.Program(X(0))
else_branch = pq.Program(I(0))
# Prepare Qubits in Heads state or superposition, respectively
prog.inst(X(0), H(1))
# Q puts the coin into a superposition
prog.inst(H(0))
# Picard makes a decision and acts accordingly
prog.measure(1, picard_register)
prog.if_then(picard_register, then_branch, else_branch)
# Q undoes his superposition operation
prog.inst(H(0))
# The outcome is recorded into the answer register
prog.measure(0, answer_register)
return prog | Returns the program to simulate the Meyer-Penny Game
The full description is available in docs/source/examples.rst
:return: pyQuil Program |
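A sketch of one way to execute the game, assuming the legacy pyQuil 2.x QVMConnection API (which matches the `pq.Program` style used here) and a locally running QVM service:

from pyquil.api import QVMConnection
qvm = QVMConnection()
results = qvm.run(meyer_penny_program(), trials=10)  # each trial yields the two readout bits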
def to_serializable(self, use_bytes=False, bytes_type=bytes):
"""Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import dimod
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable`
"""
schema_version = "2.0.0"
record = {name: array2bytes(vector)
for name, vector in self.data_vectors.items()}
record['sample'] = array2bytes(np.packbits(self.record.sample > 0))
if not use_bytes:
for name in record:
record[name] = base64.b64encode(record[name]).decode("UTF-8")
return {"basetype": "SampleSet",
"type": type(self).__name__,
"record": record,
"sample_dtype": str(self.record.sample.dtype), # need this to unpack
"sample_shape": self.record.sample.shape, # need this to unpack
"variable_type": self.vartype.name,
"info": self.info,
"version": {"dimod": __version__,
"sampleset_schema": schema_version},
"variable_labels": list(self.variables),
"use_bytes": bool(use_bytes)} | Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation representing the biases as bytes is used.
bytes_type (class, optional, default=bytes):
This class will be used to wrap the bytes objects in the
serialization if `use_bytes` is true. Useful for when using
Python 2 and using BSON encoding, which will not accept the raw
`bytes` type, so `bson.Binary` can be used instead.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import dimod
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable` |
def GetConsoleOriginalTitle() -> str:
"""
GetConsoleOriginalTitle from Win32.
Return str.
Only available on Windows Vista or higher.
"""
if IsNT6orHigher:
arrayType = ctypes.c_wchar * MAX_PATH
values = arrayType()
ctypes.windll.kernel32.GetConsoleOriginalTitleW(values, MAX_PATH)
return values.value
else:
raise RuntimeError('GetConsoleOriginalTitle is not supported on Windows XP or lower.') | GetConsoleOriginalTitle from Win32.
Return str.
Only available on Windows Vista or higher. |
def word_break(el, max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200b)):
"""
Breaks any long words found in the body of the text (not attributes).
Doesn't affect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting U+200B, the Unicode Zero Width Space
character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
"""
# Character suggestion of U+200B comes from:
# http://www.cs.tut.fi/~jkorpela/html/nobr.html
if el.tag in avoid_elements:
return
class_name = el.get('class')
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if avoid in class_name:
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(child, max_width=max_width,
avoid_elements=avoid_elements,
avoid_classes=avoid_classes,
break_character=break_character)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character) | Breaks any long words found in the body of the text (not attributes).
Doesn't affect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting U+200B, the Unicode Zero Width Space
character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion |
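A usage sketch with lxml.html, assuming the `_break_text` helper referenced above is in scope:

import lxml.html
doc = lxml.html.fromstring('<p>%s</p>' % ('x' * 100))
word_break(doc, max_width=40)
print(lxml.html.tostring(doc))  # the long run of characters now contains zero-width spaces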
def map_vals(val, *names, **extra_opts):
'''
Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list
of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function
provides common code to handle these instances.
'''
fill = extra_opts.pop('fill', NOTSET)
expected_num_elements = len(names)
val = translate_stringlist(val)
for idx, item in enumerate(val):
if not isinstance(item, dict):
elements = [x.strip() for x in item.split(':')]
num_elements = len(elements)
if num_elements < expected_num_elements:
if fill is NOTSET:
raise SaltInvocationError(
'\'{0}\' contains {1} value(s) (expected {2})'.format(
item, num_elements, expected_num_elements
)
)
elements.extend([fill] * (expected_num_elements - num_elements))
elif num_elements > expected_num_elements:
raise SaltInvocationError(
'\'{0}\' contains {1} value(s) (expected {2})'.format(
item,
num_elements,
expected_num_elements if fill is NOTSET
else 'up to {0}'.format(expected_num_elements)
)
)
val[idx] = dict(zip(names, elements))
return val | Many arguments come in as a list of VAL1:VAL2 pairs, but map to a list
of dicts in the format {NAME1: VAL1, NAME2: VAL2}. This function
provides common code to handle these instances. |
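Worked example of the fill behavior, assuming translate_stringlist passes a list of strings through unchanged:

map_vals(['web1:80', 'web2'], 'host', 'port', fill='80')
# -> [{'host': 'web1', 'port': '80'}, {'host': 'web2', 'port': '80'}]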
def int2magic(magic_int):
"""Given a magic int like 62211, compute the corresponding magic byte string
b'\x03\xf3\r\n' using the conversion method that does this.
See also dictionary magic2nt2version which has precomputed these values
for known magic_int's.
"""
if sys.version_info >= (3, 0):
return struct.pack('<Hcc', magic_int, bytes('\r', 'utf-8'), bytes('\n', 'utf-8'))
else:
return struct.pack('<Hcc', magic_int, '\r', '\n') | Given a magic int like 62211, compute the corresponding magic byte string
b'\x03\xf3\r\n' using the conversion method that does this.
See also dictionary magic2nt2version which has precomputed these values
for known magic_int's. |
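Worked example, using the magic number from the docstring itself (62211 = 0xF303, packed little-endian):

assert int2magic(62211) == b'\x03\xf3\r\n'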
def get_remote_host_environment_variable(host, environment_variable):
"""Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param environment_variable: (str) variable to query
:return: (str) value of the environment variable
:raises: TypeError, CommandError
"""
log = logging.getLogger(mod_logger + '.get_remote_host_environment_variable')
if not isinstance(host, basestring):
msg = 'host argument must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(environment_variable, basestring):
msg = 'environment_variable argument must be a string'
log.error(msg)
raise TypeError(msg)
log.info('Checking host {h} for environment variable: {v}...'.format(h=host, v=environment_variable))
command = ['ssh', '{h}'.format(h=host), 'echo ${v}'.format(v=environment_variable)]
try:
result = run_command(command, timeout_sec=5.0)
code = result['code']
except CommandError:
raise
if code != 0:
msg = 'There was a problem checking the remote host {h} over SSH, return code: {c}'.format(
h=host, c=code)
log.error(msg)
raise CommandError(msg)
else:
value = result['output'].strip()
log.info('Environment variable {e} on host {h} value is: {v}'.format(
e=environment_variable, h=host, v=value))
return value | Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param environment_variable: (str) variable to query
:return: (str) value of the environment variable
:raises: TypeError, CommandError | Below is the instruction that describes the task:
### Input:
Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param environment_variable: (str) variable to query
:return: (str) value of the environment variable
:raises: TypeError, CommandError
### Response:
def get_remote_host_environment_variable(host, environment_variable):
"""Retrieves the value of an environment variable of a
remote host over SSH
:param host: (str) host to query
:param environment_variable: (str) variable to query
:return: (str) value of the environment variable
:raises: TypeError, CommandError
"""
log = logging.getLogger(mod_logger + '.get_remote_host_environment_variable')
if not isinstance(host, basestring):
msg = 'host argument must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(environment_variable, basestring):
msg = 'environment_variable argument must be a string'
log.error(msg)
raise TypeError(msg)
log.info('Checking host {h} for environment variable: {v}...'.format(h=host, v=environment_variable))
command = ['ssh', '{h}'.format(h=host), 'echo ${v}'.format(v=environment_variable)]
try:
result = run_command(command, timeout_sec=5.0)
code = result['code']
except CommandError:
raise
if code != 0:
msg = 'There was a problem checking the remote host {h} over SSH, return code: {c}'.format(
h=host, c=code)
log.error(msg)
raise CommandError(msg)
else:
value = result['output'].strip()
log.info('Environment variable {e} on host {h} value is: {v}'.format(
e=environment_variable, h=host, v=value))
return value |
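The shell invocation this builds is just `ssh <host> 'echo $VAR'`; a rough stand-alone equivalent using subprocess directly (the host and variable names are placeholders, and run_command's exact result dict is not reproduced):

```python
import subprocess

# One-off equivalent of the ssh query, without the library's run_command wrapper.
result = subprocess.run(
    ['ssh', 'web01', 'echo $JAVA_HOME'],  # 'web01' / 'JAVA_HOME' are placeholders
    capture_output=True, text=True, timeout=5.0,
)
if result.returncode == 0:
    print(result.stdout.strip())
```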
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message("wrong-import-position", node=node, args=node.as_string()) | Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction | Below is the instruction that describes the task:
### Input:
Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
### Response:
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message("wrong-import-position", node=node, args=node.as_string()) |
def arc_negative(self, xc, yc, radius, angle1, angle2):
"""Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at :obj:`angle1`
and proceeds in the direction of decreasing angles
to end at :obj:`angle2`.
If :obj:`angle2` is greater than :obj:`angle1`
it will be progressively decreased by ``2 * pi``
until it is greater than :obj:`angle1`.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
"""
cairo.cairo_arc_negative(self._pointer, xc, yc, radius, angle1, angle2)
self._check_status() | Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at :obj:`angle1`
and proceeds in the direction of decreasing angles
to end at :obj:`angle2`.
If :obj:`angle2` is greater than :obj:`angle1`
it will be progressively decreased by ``2 * pi``
until it is greater than :obj:`angle1`.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float | Below is the instruction that describes the task:
### Input:
Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at :obj:`angle1`
and proceeds in the direction of decreasing angles
to end at :obj:`angle2`.
If :obj:`angle2` is greater than :obj:`angle1`
it will be progressively decreased by ``2 * pi``
until it is greater than :obj:`angle1`.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
### Response:
def arc_negative(self, xc, yc, radius, angle1, angle2):
"""Adds a circular arc of the given radius to the current path.
The arc is centered at ``(xc, yc)``,
begins at :obj:`angle1`
and proceeds in the direction of decreasing angles
to end at :obj:`angle2`.
If :obj:`angle2` is greater than :obj:`angle1`
it will be progressively decreased by ``2 * pi``
until it is greater than :obj:`angle1`.
See :meth:`arc` for more details.
This method differs only in
the direction of the arc between the two angles.
:param xc: X position of the center of the arc.
:param yc: Y position of the center of the arc.
:param radius: The radius of the arc.
:param angle1: The start angle, in radians.
:param angle2: The end angle, in radians.
:type xc: float
:type yc: float
:type radius: float
:type angle1: float
:type angle2: float
"""
cairo.cairo_arc_negative(self._pointer, xc, yc, radius, angle1, angle2)
self._check_status() |
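A small usage sketch, assuming the surrounding class is cairocffi's `Context` (which this signature matches); it draws a clockwise quarter arc:

```python
import math
import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
# Sweep clockwise from angle 0 down to -pi/2 around the image centre.
ctx.arc_negative(50, 50, 40, 0, -math.pi / 2)
ctx.stroke()
surface.write_to_png('arc.png')
```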
def __read_countries_file(self):
"""Read countries from a CSV file"""
import csv
import pkg_resources
filename = pkg_resources.resource_filename('sortinghat', 'data/countries.csv')
with open(filename, 'r') as f:
reader = csv.DictReader(f, fieldnames=['name', 'code', 'alpha3'])
countries = [Country(**c) for c in reader]
    return countries | Read countries from a CSV file | Below is the instruction that describes the task:
### Input:
Read countries from a CSV file
### Response:
def __read_countries_file(self):
"""Read countries from a CSV file"""
import csv
import pkg_resources
filename = pkg_resources.resource_filename('sortinghat', 'data/countries.csv')
with open(filename, 'r') as f:
reader = csv.DictReader(f, fieldnames=['name', 'code', 'alpha3'])
countries = [Country(**c) for c in reader]
return countries |
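Note that passing explicit `fieldnames` to `csv.DictReader` means the first CSV line is treated as data, not as a header; a self-contained illustration of the same pattern:

```python
import csv
import io

# In-memory stand-in for the countries.csv resource.
data = io.StringIO("Spain,ES,ESP\nFrance,FR,FRA\n")
reader = csv.DictReader(data, fieldnames=['name', 'code', 'alpha3'])
for row in reader:
    print(row['code'], row['name'])
# ES Spain
# FR France
```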
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
ia.do_assert(nb_classes is not None)
ia.do_assert(min(class_indices) >= 0)
ia.do_assert(max(class_indices) < nb_classes)
ia.do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
for heatmap_channel, mapped_channel in enumerate(class_indices):
arr_0to1_full[:, :, mapped_channel] = arr_0to1[:, :, heatmap_channel]
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape) | Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps. | Below is the instruction that describes the task:
### Input:
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
### Response:
def from_heatmaps(heatmaps, class_indices=None, nb_classes=None):
"""
Convert heatmaps to segmentation map.
Assumes that each class is represented as a single heatmap channel.
Parameters
----------
heatmaps : imgaug.HeatmapsOnImage
Heatmaps to convert.
class_indices : None or list of int, optional
List of class indices represented by each heatmap channel. See also the
secondary output of :func:`imgaug.SegmentationMapOnImage.to_heatmap`.
If this is provided, it must have the same length as the number of heatmap channels.
nb_classes : None or int, optional
Number of classes. Must be provided if class_indices is set.
Returns
-------
imgaug.SegmentationMapOnImage
Segmentation map derived from heatmaps.
"""
if class_indices is None:
return SegmentationMapOnImage(heatmaps.arr_0to1, shape=heatmaps.shape)
else:
ia.do_assert(nb_classes is not None)
ia.do_assert(min(class_indices) >= 0)
ia.do_assert(max(class_indices) < nb_classes)
ia.do_assert(len(class_indices) == heatmaps.arr_0to1.shape[2])
arr_0to1 = heatmaps.arr_0to1
arr_0to1_full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
for heatmap_channel, mapped_channel in enumerate(class_indices):
arr_0to1_full[:, :, mapped_channel] = arr_0to1[:, :, heatmap_channel]
return SegmentationMapOnImage(arr_0to1_full, shape=heatmaps.shape) |
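The `class_indices` branch only scatters heatmap channels into their class slots; the core of that remapping in plain NumPy, with illustrative shapes:

```python
import numpy as np

arr_0to1 = np.random.rand(4, 4, 2).astype(np.float32)  # two heatmap channels
class_indices, nb_classes = [1, 3], 5                   # channels map to classes 1 and 3

full = np.zeros((arr_0to1.shape[0], arr_0to1.shape[1], nb_classes), dtype=np.float32)
for heatmap_channel, mapped_channel in enumerate(class_indices):
    full[:, :, mapped_channel] = arr_0to1[:, :, heatmap_channel]
print(full.shape)  # (4, 4, 5); all other class channels stay zero
```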
def panels(self):
"""
Add 2 panels to the figure, top for signal and bottom for gene models
"""
ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
    return (ax2, self.gene_panel), (ax1, self.signal_panel) | Add 2 panels to the figure, top for signal and bottom for gene models | Below is the instruction that describes the task:
### Input:
Add 2 panels to the figure, top for signal and bottom for gene models
### Response:
def panels(self):
"""
Add 2 panels to the figure, top for signal and bottom for gene models
"""
ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
return (ax2, self.gene_panel), (ax1, self.signal_panel) |
def validate_field(self, field_name: str) -> bool:
"""
    Complain if a field is not in the schema
Args:
field_name:
Returns: True if the field is present.
"""
if field_name in {"@id", "@type"}:
return True
result = self.schema.has_field(field_name)
if not result:
# todo: how to comply with our error handling policies?
raise UndefinedFieldError("'{}' should be present in the knowledge graph schema.".format(field_name))
    return result | Complain if a field is not in the schema
Args:
field_name:
Returns: True if the field is present. | Below is the instruction that describes the task:
### Input:
Complain if a field is not in the schema
Args:
field_name:
Returns: True if the field is present.
### Response:
def validate_field(self, field_name: str) -> bool:
"""
    Complain if a field is not in the schema
Args:
field_name:
Returns: True if the field is present.
"""
if field_name in {"@id", "@type"}:
return True
result = self.schema.has_field(field_name)
if not result:
# todo: how to comply with our error handling policies?
raise UndefinedFieldError("'{}' should be present in the knowledge graph schema.".format(field_name))
return result |
def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
'''
Attempt to convert a snapshot ID to a snapshot name. If the snapshot has
no name or if the ID is not found or invalid, an empty string will be returned
:param str name:
Name/ID of VM whose snapshots are inspected
:param str snap_id:
ID of the snapshot
:param bool strict:
Raise an exception if a name cannot be found for the given ``snap_id``
:param str runas:
The user that the prlctl command will be run as
Example data
.. code-block:: yaml
ID: {a5b8999f-5d95-4aff-82de-e515b0101b66}
Name: original
Date: 2016-03-04 10:50:34
Current: yes
State: poweroff
Description: original state
CLI Example:
.. code-block:: bash
salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev
'''
# Validate VM name and snapshot ID
name = salt.utils.data.decode(name)
if not re.match(GUID_REGEX, snap_id):
raise SaltInvocationError(
'Snapshot ID "{0}" is not a GUID'.format(salt.utils.data.decode(snap_id))
)
# Get the snapshot information of the snapshot having the requested ID
info = prlctl('snapshot-list', [name, '--id', snap_id], runas=runas)
# Parallels desktop returned no information for snap_id
if not info:
raise SaltInvocationError(
'No snapshots for VM "{0}" have ID "{1}"'.format(name, snap_id)
)
# Try to interpret the information
try:
data = salt.utils.yaml.safe_load(info)
except salt.utils.yaml.YAMLError as err:
log.warning(
'Could not interpret snapshot data returned from prlctl: %s', err
)
data = {}
# Find the snapshot name
if isinstance(data, dict):
snap_name = data.get('Name', '')
# If snapshot name is of type NoneType, then the snapshot is unnamed
if snap_name is None:
snap_name = ''
else:
log.warning(
'Could not interpret snapshot data returned from prlctl: '
'data is not formed as a dictionary: %s', data
)
snap_name = ''
# Raise or return the result
if not snap_name and strict:
raise SaltInvocationError(
'Could not find a snapshot name for snapshot ID "{0}" of VM '
'"{1}"'.format(snap_id, name)
)
return salt.utils.data.decode(snap_name) | Attempt to convert a snapshot ID to a snapshot name. If the snapshot has
no name or if the ID is not found or invalid, an empty string will be returned
:param str name:
Name/ID of VM whose snapshots are inspected
:param str snap_id:
ID of the snapshot
:param bool strict:
Raise an exception if a name cannot be found for the given ``snap_id``
:param str runas:
The user that the prlctl command will be run as
Example data
.. code-block:: yaml
ID: {a5b8999f-5d95-4aff-82de-e515b0101b66}
Name: original
Date: 2016-03-04 10:50:34
Current: yes
State: poweroff
Description: original state
CLI Example:
.. code-block:: bash
salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev | Below is the instruction that describes the task:
### Input:
Attempt to convert a snapshot ID to a snapshot name. If the snapshot has
no name or if the ID is not found or invalid, an empty string will be returned
:param str name:
Name/ID of VM whose snapshots are inspected
:param str snap_id:
ID of the snapshot
:param bool strict:
Raise an exception if a name cannot be found for the given ``snap_id``
:param str runas:
The user that the prlctl command will be run as
Example data
.. code-block:: yaml
ID: {a5b8999f-5d95-4aff-82de-e515b0101b66}
Name: original
Date: 2016-03-04 10:50:34
Current: yes
State: poweroff
Description: original state
CLI Example:
.. code-block:: bash
salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev
### Response:
def snapshot_id_to_name(name, snap_id, strict=False, runas=None):
'''
Attempt to convert a snapshot ID to a snapshot name. If the snapshot has
no name or if the ID is not found or invalid, an empty string will be returned
:param str name:
Name/ID of VM whose snapshots are inspected
:param str snap_id:
ID of the snapshot
:param bool strict:
Raise an exception if a name cannot be found for the given ``snap_id``
:param str runas:
The user that the prlctl command will be run as
Example data
.. code-block:: yaml
ID: {a5b8999f-5d95-4aff-82de-e515b0101b66}
Name: original
Date: 2016-03-04 10:50:34
Current: yes
State: poweroff
Description: original state
CLI Example:
.. code-block:: bash
salt '*' parallels.snapshot_id_to_name macvm a5b8999f-5d95-4aff-82de-e515b0101b66 runas=macdev
'''
# Validate VM name and snapshot ID
name = salt.utils.data.decode(name)
if not re.match(GUID_REGEX, snap_id):
raise SaltInvocationError(
'Snapshot ID "{0}" is not a GUID'.format(salt.utils.data.decode(snap_id))
)
# Get the snapshot information of the snapshot having the requested ID
info = prlctl('snapshot-list', [name, '--id', snap_id], runas=runas)
# Parallels desktop returned no information for snap_id
if not info:
raise SaltInvocationError(
'No snapshots for VM "{0}" have ID "{1}"'.format(name, snap_id)
)
# Try to interpret the information
try:
data = salt.utils.yaml.safe_load(info)
except salt.utils.yaml.YAMLError as err:
log.warning(
'Could not interpret snapshot data returned from prlctl: %s', err
)
data = {}
# Find the snapshot name
if isinstance(data, dict):
snap_name = data.get('Name', '')
# If snapshot name is of type NoneType, then the snapshot is unnamed
if snap_name is None:
snap_name = ''
else:
log.warning(
'Could not interpret snapshot data returned from prlctl: '
'data is not formed as a dictionary: %s', data
)
snap_name = ''
# Raise or return the result
if not snap_name and strict:
raise SaltInvocationError(
'Could not find a snapshot name for snapshot ID "{0}" of VM '
'"{1}"'.format(snap_id, name)
)
return salt.utils.data.decode(snap_name) |
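The `Name` lookup at the heart of the function is plain YAML parsing; a minimal illustration with PyYAML (which `salt.utils.yaml` wraps; the input is a trimmed stand-in for prlctl output):

```python
import yaml

info = "Name: original\nState: poweroff\n"  # trimmed stand-in for prlctl output
data = yaml.safe_load(info)
print(data.get('Name', ''))  # original
```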
def process_response(self, request, response):
"""Let's handle old-style response processing here, as usual."""
# For debug only.
if not settings.DEBUG:
return response
# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
'gzip' in content_encoding,
content_type not in _HTML_TYPES)):
return response
content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += debug_payload(request, response, self.view_data)
response.content = "</body>".join(bits)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
    return response | Let's handle old-style response processing here, as usual. | Below is the instruction that describes the task:
### Input:
Let's handle old-style response processing here, as usual.
### Response:
def process_response(self, request, response):
"""Let's handle old-style response processing here, as usual."""
# For debug only.
if not settings.DEBUG:
return response
# Check for responses where the data can't be inserted.
content_encoding = response.get('Content-Encoding', '')
content_type = response.get('Content-Type', '').split(';')[0]
if any((getattr(response, 'streaming', False),
'gzip' in content_encoding,
content_type not in _HTML_TYPES)):
return response
content = force_text(response.content, encoding=settings.DEFAULT_CHARSET)
pattern = re.escape('</body>')
bits = re.split(pattern, content, flags=re.IGNORECASE)
if len(bits) > 1:
bits[-2] += debug_payload(request, response, self.view_data)
response.content = "</body>".join(bits)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response |
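The interesting step is the case-insensitive splice just before the closing body tag; isolated, it behaves like this (the comment string stands in for `debug_payload`'s output):

```python
import re

content = "<html><body>Hello</BODY></html>"
bits = re.split(re.escape('</body>'), content, flags=re.IGNORECASE)
if len(bits) > 1:
    bits[-2] += "<!-- debug panel -->"  # stand-in for debug_payload(...)
print("</body>".join(bits))
# <html><body>Hello<!-- debug panel --></body></html>
```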
def formfield(self, **kwargs):
"""
Apply the widget class defined by the
``RICHTEXT_WIDGET_CLASS`` setting.
"""
default = kwargs.get("widget", None) or AdminTextareaWidget
if default is AdminTextareaWidget:
from yacms.conf import settings
richtext_widget_path = settings.RICHTEXT_WIDGET_CLASS
try:
widget_class = import_dotted_path(richtext_widget_path)
except ImportError:
raise ImproperlyConfigured(_("Could not import the value of "
"settings.RICHTEXT_WIDGET_CLASS: "
"%s" % richtext_widget_path))
kwargs["widget"] = widget_class()
kwargs.setdefault("required", False)
formfield = super(RichTextField, self).formfield(**kwargs)
return formfield | Apply the widget class defined by the
``RICHTEXT_WIDGET_CLASS`` setting. | Below is the instruction that describes the task:
### Input:
Apply the widget class defined by the
``RICHTEXT_WIDGET_CLASS`` setting.
### Response:
def formfield(self, **kwargs):
"""
Apply the widget class defined by the
``RICHTEXT_WIDGET_CLASS`` setting.
"""
default = kwargs.get("widget", None) or AdminTextareaWidget
if default is AdminTextareaWidget:
from yacms.conf import settings
richtext_widget_path = settings.RICHTEXT_WIDGET_CLASS
try:
widget_class = import_dotted_path(richtext_widget_path)
except ImportError:
raise ImproperlyConfigured(_("Could not import the value of "
"settings.RICHTEXT_WIDGET_CLASS: "
"%s" % richtext_widget_path))
kwargs["widget"] = widget_class()
kwargs.setdefault("required", False)
formfield = super(RichTextField, self).formfield(**kwargs)
return formfield |
def traverse(self, traverser, **kwargs):
"""
Implementation of mandatory interface for traversing the whole rule tree.
This method will call the implementation of :py:func:`pynspect.rules.RuleTreeTraverser.binary_operation_logical`
method with reference to ``self`` instance as first argument and with the
result of traversing left subtree as second argument. The optional ``kwargs``
are passed down to traverser callback as additional arguments and can be
used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback.
"""
rrt = self.right.traverse(traverser, **kwargs)
return traverser.unary_operation(self, rrt, **kwargs) | Implementation of mandatory interface for traversing the whole rule tree.
This method will call the implementation of :py:func:`pynspect.rules.RuleTreeTraverser.binary_operation_logical`
method with reference to ``self`` instance as first argument and with the
result of traversing left subtree as second argument. The optional ``kwargs``
are passed down to traverser callback as additional arguments and can be
used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback. | Below is the instruction that describes the task:
### Input:
Implementation of mandatory interface for traversing the whole rule tree.
This method will call the implementation of :py:func:`pynspect.rules.RuleTreeTraverser.binary_operation_logical`
method with reference to ``self`` instance as first argument and with the
result of traversing left subtree as second argument. The optional ``kwargs``
are passed down to traverser callback as additional arguments and can be
used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback.
### Response:
def traverse(self, traverser, **kwargs):
"""
Implementation of mandatory interface for traversing the whole rule tree.
This method will call the implementation of :py:func:`pynspect.rules.RuleTreeTraverser.binary_operation_logical`
method with reference to ``self`` instance as first argument and with the
result of traversing left subtree as second argument. The optional ``kwargs``
are passed down to traverser callback as additional arguments and can be
used to provide additional data or context.
:param pynspect.rules.RuleTreeTraverser traverser: Traverser object providing appropriate interface.
:param dict kwargs: Additional optional keyword arguments to be passed down to traverser callback.
"""
rrt = self.right.traverse(traverser, **kwargs)
return traverser.unary_operation(self, rrt, **kwargs) |
def check_update():
"""
Return True if an update is available on pypi
"""
r = requests.get("https://pypi.python.org/pypi/prof/json")
data = r.json()
if versiontuple(data['info']['version']) > versiontuple(__version__):
return True
    return False | Return True if an update is available on pypi | Below is the instruction that describes the task:
### Input:
Return True if an update is available on pypi
### Response:
def check_update():
"""
Return True if an update is available on pypi
"""
r = requests.get("https://pypi.python.org/pypi/prof/json")
data = r.json()
if versiontuple(data['info']['version']) > versiontuple(__version__):
return True
return False |
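`versiontuple` is not shown in this row; a typical implementation (hypothetical, but matching how it is compared above) splits on dots and compares integer tuples:

```python
def versiontuple(v):
    # Hypothetical helper: '1.10.2' -> (1, 10, 2), so comparisons are numeric.
    return tuple(int(part) for part in v.split('.'))

print(versiontuple('1.10.0') > versiontuple('1.9.2'))  # True
print('1.10.0' > '1.9.2')                              # False; why plain strings won't do
```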
def get_fixture_node(self, app_label, fixture_prefix):
"""
Get all fixtures in given app with given prefix.
:param str app_label: App label
:param str fixture_prefix: first part of the fixture name
:return: list of found fixtures.
"""
app_nodes = self.get_app_nodes(app_label=app_label)
nodes = [
node for node in app_nodes if node[1].startswith(fixture_prefix)
]
if len(nodes) > 1:
raise MultipleFixturesFound(
"The following fixtures with prefix '%s' are found in app '%s'"
": %s" % (
fixture_prefix, app_label, ', '.join(
[node[1] for node in nodes]
)
)
)
elif len(nodes) == 0:
raise FixtureNotFound("Fixture with prefix '%s' not found in app "
"'%s'" % (fixture_prefix, app_label))
return nodes | Get all fixtures in given app with given prefix.
:param str app_label: App label
:param str fixture_prefix: first part of the fixture name
:return: list of found fixtures. | Below is the instruction that describes the task:
### Input:
Get all fixtures in given app with given prefix.
:param str app_label: App label
:param str fixture_prefix: first part of the fixture name
:return: list of found fixtures.
### Response:
def get_fixture_node(self, app_label, fixture_prefix):
"""
Get all fixtures in given app with given prefix.
:param str app_label: App label
:param str fixture_prefix: first part of the fixture name
:return: list of found fixtures.
"""
app_nodes = self.get_app_nodes(app_label=app_label)
nodes = [
node for node in app_nodes if node[1].startswith(fixture_prefix)
]
if len(nodes) > 1:
raise MultipleFixturesFound(
"The following fixtures with prefix '%s' are found in app '%s'"
": %s" % (
fixture_prefix, app_label, ', '.join(
[node[1] for node in nodes]
)
)
)
elif len(nodes) == 0:
raise FixtureNotFound("Fixture with prefix '%s' not found in app "
"'%s'" % (fixture_prefix, app_label))
return nodes |
def _set_up_figure(self, x_mins, x_maxs, y_mins, y_maxs):
"""
Prepare the matplotlib figure: make all the subplots; adjust their
    x and y range; plot the data; and plot a putative function.
"""
self.fig = plt.figure()
# Make room for the sliders:
bot = 0.1 + 0.05*len(self.model.params)
self.fig.subplots_adjust(bottom=bot)
# If these are not ints, matplotlib will crash and burn with an utterly
# vague error.
nrows = int(np.ceil(len(self._projections)**0.5))
ncols = int(np.ceil(len(self._projections)/nrows))
# Make all the subplots: set the x and y limits, scatter the data, and
# plot the putative function.
self._plots = {}
for plotnr, proj in enumerate(self._projections, 1):
x, y = proj
if Derivative(y, x) in self.model:
title_format = '$\\frac{{\\partial {dependant}}}{{\\partial {independant}}} = {expression}$'
else:
title_format = '${dependant}({independant}) = {expression}$'
plotlabel = title_format.format(
dependant=latex(y, mode='plain'),
independant=latex(x, mode='plain'),
expression=latex(self.model[y], mode='plain'))
ax = self.fig.add_subplot(ncols, nrows, plotnr,
label=plotlabel)
ax.set_title(ax.get_label())
ax.set_ylim(y_mins[y], y_maxs[y])
ax.set_xlim(x_mins[x], x_maxs[x])
ax.set_xlabel('${}$'.format(x))
ax.set_ylabel('${}$'.format(y))
self._plot_data(proj, ax)
plot = self._plot_model(proj, ax)
self._plots[proj] = plot | Prepare the matplotlib figure: make all the subplots; adjust their
x and y range; plot the data; and plot a putative function. | Below is the instruction that describes the task:
### Input:
Prepare the matplotlib figure: make all the subplots; adjust their
x and y range; plot the data; and plot a putative function.
### Response:
def _set_up_figure(self, x_mins, x_maxs, y_mins, y_maxs):
"""
Prepare the matplotlib figure: make all the subplots; adjust their
    x and y range; plot the data; and plot a putative function.
"""
self.fig = plt.figure()
# Make room for the sliders:
bot = 0.1 + 0.05*len(self.model.params)
self.fig.subplots_adjust(bottom=bot)
# If these are not ints, matplotlib will crash and burn with an utterly
# vague error.
nrows = int(np.ceil(len(self._projections)**0.5))
ncols = int(np.ceil(len(self._projections)/nrows))
# Make all the subplots: set the x and y limits, scatter the data, and
# plot the putative function.
self._plots = {}
for plotnr, proj in enumerate(self._projections, 1):
x, y = proj
if Derivative(y, x) in self.model:
title_format = '$\\frac{{\\partial {dependant}}}{{\\partial {independant}}} = {expression}$'
else:
title_format = '${dependant}({independant}) = {expression}$'
plotlabel = title_format.format(
dependant=latex(y, mode='plain'),
independant=latex(x, mode='plain'),
expression=latex(self.model[y], mode='plain'))
ax = self.fig.add_subplot(ncols, nrows, plotnr,
label=plotlabel)
ax.set_title(ax.get_label())
ax.set_ylim(y_mins[y], y_maxs[y])
ax.set_xlim(x_mins[x], x_maxs[x])
ax.set_xlabel('${}$'.format(x))
ax.set_ylabel('${}$'.format(y))
self._plot_data(proj, ax)
plot = self._plot_model(proj, ax)
self._plots[proj] = plot |
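The grid-sizing arithmetic near the top (and the reason for the `int` casts: `np.ceil` returns a float, which matplotlib's `add_subplot` rejects) reduces to this:

```python
import math

n_projections = 5
nrows = math.ceil(n_projections ** 0.5)   # 3
ncols = math.ceil(n_projections / nrows)  # 2
print(nrows, ncols, nrows * ncols >= n_projections)  # 3 2 True
```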
def execute(self, cmd):
"""Execute a command to modify storage[cmd.filePath]"""
if not cmd.filePath in self._history.keys():
self._history[cmd.filePath] = SubtitleUndoStack(self)
try:
self._history[cmd.filePath].push(cmd)
except:
self._history[cmd.filePath].deleteLater()
del self._history[cmd.filePath]
raise
else:
self._history[cmd.filePath].clear()
else:
        self._history[cmd.filePath].push(cmd) | Execute a command to modify storage[cmd.filePath] | Below is the instruction that describes the task:
### Input:
Execute a command to modify storage[cmd.filePath]
### Response:
def execute(self, cmd):
"""Execute a command to modify storage[cmd.filePath]"""
if not cmd.filePath in self._history.keys():
self._history[cmd.filePath] = SubtitleUndoStack(self)
try:
self._history[cmd.filePath].push(cmd)
except:
self._history[cmd.filePath].deleteLater()
del self._history[cmd.filePath]
raise
else:
self._history[cmd.filePath].clear()
else:
self._history[cmd.filePath].push(cmd) |
def ones(dur=None):
"""
Ones stream generator.
You may multiply your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "1.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 1.0
for x in xrange(int(.5 + dur)):
yield 1.0 | Ones stream generator.
You may multiply your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "1.0" during a given time duration (if any) or
endlessly. | Below is the instruction that describes the task:
### Input:
Ones stream generator.
You may multiply your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "1.0" during a given time duration (if any) or
endlessly.
### Response:
def ones(dur=None):
"""
Ones stream generator.
You may multiply your endless stream by this to enforce an end to it.
Parameters
----------
dur :
Duration, in number of samples; endless if not given.
Returns
-------
Stream that repeats "1.0" during a given time duration (if any) or
endlessly.
"""
if dur is None or (isinf(dur) and dur > 0):
while True:
yield 1.0
for x in xrange(int(.5 + dur)):
yield 1.0 |
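A usage sketch, ported to Python 3 (`range` instead of `xrange`) with an explicit `else` so it stands alone; the behaviour matches the generator above:

```python
from itertools import islice
from math import isinf

def ones(dur=None):
    # Same logic as above: endless when dur is None or +inf, else int(.5 + dur) samples.
    if dur is None or (isinf(dur) and dur > 0):
        while True:
            yield 1.0
    else:
        for _ in range(int(.5 + dur)):
            yield 1.0

print(list(ones(3)))            # [1.0, 1.0, 1.0]
print(list(islice(ones(), 2)))  # endless stream, truncated: [1.0, 1.0]
```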
def get_position(self, focus=None):
"""
Get the position of the UI elements.
Args:
focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details
Returns:
2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system
Raises:
TypeError: raised when unsupported focus type is specified
"""
focus = focus or self._focus or 'anchor'
if focus == 'anchor':
pos = self.attr('pos')
elif focus == 'center':
x, y = self.attr('pos')
w, h = self.get_size()
ap_x, ap_y = self.attr("anchorPoint")
fx, fy = 0.5, 0.5
pos = [x + w * (fx - ap_x), y + h * (fy - ap_y)]
elif type(focus) in (list, tuple):
x, y = self.attr('pos')
w, h = self.get_size()
ap_x, ap_y = self.attr("anchorPoint")
fx, fy = focus
pos = [x + w * (fx - ap_x), y + h * (fy - ap_y)]
else:
raise TypeError('Unsupported focus type {}. '
'Only "anchor/center" or 2-list/2-tuple available.'.format(type(focus)))
return pos | Get the position of the UI elements.
Args:
focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details
Returns:
2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system
Raises:
TypeError: raised when unsupported focus type is specified | Below is the instruction that describes the task:
### Input:
Get the position of the UI elements.
Args:
focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details
Returns:
2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system
Raises:
TypeError: raised when unsupported focus type is specified
### Response:
def get_position(self, focus=None):
"""
Get the position of the UI elements.
Args:
focus: focus point of UI proxy, see :py:meth:`.focus() <poco.proxy.UIObjectProxy.focus>` for more details
Returns:
2-list/2-tuple: coordinates (x, y) in NormalizedCoordinate system
Raises:
TypeError: raised when unsupported focus type is specified
"""
focus = focus or self._focus or 'anchor'
if focus == 'anchor':
pos = self.attr('pos')
elif focus == 'center':
x, y = self.attr('pos')
w, h = self.get_size()
ap_x, ap_y = self.attr("anchorPoint")
fx, fy = 0.5, 0.5
pos = [x + w * (fx - ap_x), y + h * (fy - ap_y)]
elif type(focus) in (list, tuple):
x, y = self.attr('pos')
w, h = self.get_size()
ap_x, ap_y = self.attr("anchorPoint")
fx, fy = focus
pos = [x + w * (fx - ap_x), y + h * (fy - ap_y)]
else:
raise TypeError('Unsupported focus type {}. '
'Only "anchor/center" or 2-list/2-tuple available.'.format(type(focus)))
return pos |
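The focus arithmetic converts an anchor-relative position into any point of the bounding box; with concrete numbers (independent of Poco, all values in normalized coordinates):

```python
# Node anchored at its centre: pos (0.5, 0.5), size 0.2 x 0.1, anchorPoint (0.5, 0.5).
x, y = 0.5, 0.5
w, h = 0.2, 0.1
ap_x, ap_y = 0.5, 0.5

fx, fy = 1.0, 1.0  # focus on the bottom-right corner of the node
pos = [x + w * (fx - ap_x), y + h * (fy - ap_y)]
print(pos)  # [0.6, 0.55]
```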
def options(self, context, module_options):
'''
        THREADS Max number of threads to execute on target (defaults to 20)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target)
'''
self.threads = 3
self.csv_path = 'C:\\'
self.collection_method = 'Default'
self.neo4j_URI = ""
self.neo4j_user = ""
self.neo4j_pass = ""
if module_options and 'THREADS' in module_options:
self.threads = module_options['THREADS']
if module_options and 'CSVPATH' in module_options:
self.csv_path = module_options['CSVPATH']
if module_options and 'COLLECTIONMETHOD' in module_options:
self.collection_method = module_options['COLLECTIONMETHOD']
if module_options and 'NEO4JURI' in module_options:
self.neo4j_URI = module_options['NEO4JURI']
if module_options and 'NEO4JUSER' in module_options:
self.neo4j_user = module_options['NEO4JUSER']
if module_options and 'NEO4JPASS' in module_options:
self.neo4j_pass = module_options['NEO4JPASS']
if self.neo4j_URI != "" and self.neo4j_user != "" and self.neo4j_pass != "" :
        self.opsec_safe = True
    self.ps_script = obfs_ps_script('BloodHound-modified.ps1') | THREADS Max number of threads to execute on target (defaults to 20)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target) | Below is the instruction that describes the task:
### Input:
THREADS Max number of threads to execute on target (defaults to 20)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target)
### Response:
def options(self, context, module_options):
'''
        THREADS Max number of threads to execute on target (defaults to 20)
COLLECTIONMETHOD Method used by BloodHound ingestor to collect data (defaults to 'Default')
CSVPATH (optional) Path where csv files will be written on target (defaults to C:\)
NEO4JURI (optional) URI for direct Neo4j ingestion (defaults to blank)
NEO4JUSER (optional) Username for direct Neo4j ingestion
NEO4JPASS (optional) Pass for direct Neo4j ingestion
Give NEO4J options to perform direct Neo4j ingestion (no CSVs on target)
'''
self.threads = 3
self.csv_path = 'C:\\'
self.collection_method = 'Default'
self.neo4j_URI = ""
self.neo4j_user = ""
self.neo4j_pass = ""
if module_options and 'THREADS' in module_options:
self.threads = module_options['THREADS']
if module_options and 'CSVPATH' in module_options:
self.csv_path = module_options['CSVPATH']
if module_options and 'COLLECTIONMETHOD' in module_options:
self.collection_method = module_options['COLLECTIONMETHOD']
if module_options and 'NEO4JURI' in module_options:
self.neo4j_URI = module_options['NEO4JURI']
if module_options and 'NEO4JUSER' in module_options:
self.neo4j_user = module_options['NEO4JUSER']
if module_options and 'NEO4JPASS' in module_options:
self.neo4j_pass = module_options['NEO4JPASS']
if self.neo4j_URI != "" and self.neo4j_user != "" and self.neo4j_pass != "" :
        self.opsec_safe = True
self.ps_script = obfs_ps_script('BloodHound-modified.ps1') |
def qstat(self, queue_name, return_dict=False):
"""
Return the status of the queue (currently unimplemented).
Future support / testing of QSTAT support in Disque
QSTAT <qname>
Return produced ... consumed ... idle ... sources [...] ctime ...
"""
rtn = self.execute_command('QSTAT', queue_name)
if return_dict:
grouped = self._grouper(rtn, 2)
rtn = dict((a, b) for a, b in grouped)
return rtn | Return the status of the queue (currently unimplemented).
Future support / testing of QSTAT support in Disque
QSTAT <qname>
Return produced ... consumed ... idle ... sources [...] ctime ... | Below is the instruction that describes the task:
### Input:
Return the status of the queue (currently unimplemented).
Future support / testing of QSTAT support in Disque
QSTAT <qname>
Return produced ... consumed ... idle ... sources [...] ctime ...
### Response:
def qstat(self, queue_name, return_dict=False):
"""
Return the status of the queue (currently unimplemented).
Future support / testing of QSTAT support in Disque
QSTAT <qname>
Return produced ... consumed ... idle ... sources [...] ctime ...
"""
rtn = self.execute_command('QSTAT', queue_name)
if return_dict:
grouped = self._grouper(rtn, 2)
rtn = dict((a, b) for a, b in grouped)
return rtn |
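`_grouper` is not shown in this row; the usual itertools-style pairing it implies, and the dict conversion applied to a flat QSTAT-like reply (field names illustrative):

```python
def _grouper(iterable, n):
    # Hypothetical stand-in: chunk a flat sequence into n-sized tuples.
    args = [iter(iterable)] * n
    return zip(*args)

raw = ['produced', 12, 'consumed', 10, 'idle', 2]  # illustrative flat reply
print(dict((a, b) for a, b in _grouper(raw, 2)))
# {'produced': 12, 'consumed': 10, 'idle': 2}
```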
def printdeps(obj, # type: Mapping[Text, Any]
document_loader, # type: Loader
stdout, # type: Union[TextIO, StreamWriter]
relative_deps, # type: bool
uri, # type: Text
basedir=None, # type: Text
nestdirs=True # type: bool
): # type: (...) -> None
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir,
nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(
make_relative, base))
    stdout.write(json_dumps(deps, indent=4)) | Print a JSON representation of the dependencies of the CWL document. | Below is the instruction that describes the task:
### Input:
Print a JSON representation of the dependencies of the CWL document.
### Response:
def printdeps(obj, # type: Mapping[Text, Any]
document_loader, # type: Loader
stdout, # type: Union[TextIO, StreamWriter]
relative_deps, # type: bool
uri, # type: Text
basedir=None, # type: Text
nestdirs=True # type: bool
): # type: (...) -> None
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir,
nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(
make_relative, base))
stdout.write(json_dumps(deps, indent=4)) |
def datetime_stored(self):
"""Returns file's store aware *datetime* in UTC format.
    It might make an API request once because it depends on ``info()``.
"""
if self.info().get('datetime_stored'):
return dateutil.parser.parse(self.info()['datetime_stored']) | Returns file's store aware *datetime* in UTC format.
It might make an API request once because it depends on ``info()``. | Below is the instruction that describes the task:
### Input:
Returns file's store aware *datetime* in UTC format.
It might make an API request once because it depends on ``info()``.
### Response:
def datetime_stored(self):
"""Returns file's store aware *datetime* in UTC format.
    It might make an API request once because it depends on ``info()``.
"""
if self.info().get('datetime_stored'):
return dateutil.parser.parse(self.info()['datetime_stored']) |
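`dateutil` does the heavy lifting on the API's ISO-8601 timestamp; for instance (timestamp illustrative):

```python
import dateutil.parser

dt = dateutil.parser.parse('2014-02-09T10:48:58.611Z')
print(dt.isoformat())         # 2014-02-09T10:48:58.611000+00:00
print(dt.tzinfo is not None)  # True; the result is timezone-aware
```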
def get(self):
    '''Get a task from the queue when the bucket is available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
    return task.taskid | Get a task from the queue when the bucket is available | Below is the instruction that describes the task:
### Input:
Get a task from the queue when the bucket is available
### Response:
def get(self):
    '''Get a task from the queue when the bucket is available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid |
def autoparse(
func=None, *,
description=None,
epilog=None,
add_nos=False,
parser=None):
'''
This decorator converts a function that takes normal arguments into a
function which takes a single optional argument, argv, parses it using an
argparse.ArgumentParser, and calls the underlying function with the parsed
arguments. If it is not given, sys.argv[1:] is used. This is so that the
function can be used as a setuptools entry point, as well as a normal main
function. sys.argv[1:] is not evaluated until the function is called, to
allow injecting different arguments for testing.
It uses the argument signature of the function to create an
ArgumentParser. Parameters without defaults become positional parameters,
while parameters *with* defaults become --options. Use annotations to set
the type of the parameter.
    The `description` and `epilog` parameters correspond to the same respective
    argparse parameters. If no description is given, it defaults to the
    decorated function's docstring, if present.
If add_nos is True, every boolean option (that is, every parameter with a
default of True/False or a type of bool) will have a --no- version created
as well, which inverts the option. For instance, the --verbose option will
have a --no-verbose counterpart. These are not mutually exclusive-
whichever one appears last in the argument list will have precedence.
If a parser is given, it is used instead of one generated from the function
signature. In this case, no parser is created; instead, the given parser is
used to parse the argv argument. The parser's results' argument names must
match up with the parameter names of the decorated function.
The decorated function is attached to the result as the `func` attribute,
and the parser is attached as the `parser` attribute.
'''
# If @autoparse(...) is used instead of @autoparse
if func is None:
return lambda f: autoparse(
f, description=description,
epilog=epilog,
add_nos=add_nos,
parser=parser)
func_sig = signature(func)
docstr_description, docstr_epilog = parse_docstring(getdoc(func))
if parser is None:
parser = make_parser(
func_sig,
description or docstr_description,
epilog or docstr_epilog,
add_nos)
@wraps(func)
def autoparse_wrapper(argv=None):
if argv is None:
argv = sys.argv[1:]
# Get empty argument binding, to fill with parsed arguments. This
# object does all the heavy lifting of turning named arguments into
# into correctly bound *args and **kwargs.
parsed_args = func_sig.bind_partial()
parsed_args.arguments.update(vars(parser.parse_args(argv)))
return func(*parsed_args.args, **parsed_args.kwargs)
# TODO: attach an updated __signature__ to autoparse_wrapper, just in case.
# Attach the wrapped function and parser, and return the wrapper.
autoparse_wrapper.func = func
autoparse_wrapper.parser = parser
return autoparse_wrapper | This decorator converts a function that takes normal arguments into a
function which takes a single optional argument, argv, parses it using an
argparse.ArgumentParser, and calls the underlying function with the parsed
arguments. If it is not given, sys.argv[1:] is used. This is so that the
function can be used as a setuptools entry point, as well as a normal main
function. sys.argv[1:] is not evaluated until the function is called, to
allow injecting different arguments for testing.
It uses the argument signature of the function to create an
ArgumentParser. Parameters without defaults become positional parameters,
while parameters *with* defaults become --options. Use annotations to set
the type of the parameter.
The `description` and `epilog` parameters correspond to the same respective
argparse parameters. If no description is given, it defaults to the
decorated function's docstring, if present.
If add_nos is True, every boolean option (that is, every parameter with a
default of True/False or a type of bool) will have a --no- version created
as well, which inverts the option. For instance, the --verbose option will
have a --no-verbose counterpart. These are not mutually exclusive-
whichever one appears last in the argument list will have precedence.
If a parser is given, it is used instead of one generated from the function
signature. In this case, no parser is created; instead, the given parser is
used to parse the argv argument. The parser's results' argument names must
match up with the parameter names of the decorated function.
The decorated function is attached to the result as the `func` attribute,
and the parser is attached as the `parser` attribute. | Below is the instruction that describes the task:
### Input:
This decorator converts a function that takes normal arguments into a
function which takes a single optional argument, argv, parses it using an
argparse.ArgumentParser, and calls the underlying function with the parsed
arguments. If it is not given, sys.argv[1:] is used. This is so that the
function can be used as a setuptools entry point, as well as a normal main
function. sys.argv[1:] is not evaluated until the function is called, to
allow injecting different arguments for testing.
It uses the argument signature of the function to create an
ArgumentParser. Parameters without defaults become positional parameters,
while parameters *with* defaults become --options. Use annotations to set
the type of the parameter.
The `description` and `epilog` parameters correspond to the same respective
argparse parameters. If no description is given, it defaults to the
decorated function's docstring, if present.
If add_nos is True, every boolean option (that is, every parameter with a
default of True/False or a type of bool) will have a --no- version created
as well, which inverts the option. For instance, the --verbose option will
have a --no-verbose counterpart. These are not mutually exclusive-
whichever one appears last in the argument list will have precedence.
If a parser is given, it is used instead of one generated from the function
signature. In this case, no parser is created; instead, the given parser is
used to parse the argv argument. The parser's results' argument names must
match up with the parameter names of the decorated function.
The decorated function is attached to the result as the `func` attribute,
and the parser is attached as the `parser` attribute.
### Response:
def autoparse(
func=None, *,
description=None,
epilog=None,
add_nos=False,
parser=None):
'''
This decorator converts a function that takes normal arguments into a
function which takes a single optional argument, argv, parses it using an
argparse.ArgumentParser, and calls the underlying function with the parsed
arguments. If it is not given, sys.argv[1:] is used. This is so that the
function can be used as a setuptools entry point, as well as a normal main
function. sys.argv[1:] is not evaluated until the function is called, to
allow injecting different arguments for testing.
It uses the argument signature of the function to create an
ArgumentParser. Parameters without defaults become positional parameters,
while parameters *with* defaults become --options. Use annotations to set
the type of the parameter.
    The `description` and `epilog` parameters correspond to the same respective
    argparse parameters. If no description is given, it defaults to the
    decorated function's docstring, if present.
If add_nos is True, every boolean option (that is, every parameter with a
default of True/False or a type of bool) will have a --no- version created
as well, which inverts the option. For instance, the --verbose option will
have a --no-verbose counterpart. These are not mutually exclusive-
whichever one appears last in the argument list will have precedence.
If a parser is given, it is used instead of one generated from the function
signature. In this case, no parser is created; instead, the given parser is
used to parse the argv argument. The parser's results' argument names must
match up with the parameter names of the decorated function.
The decorated function is attached to the result as the `func` attribute,
and the parser is attached as the `parser` attribute.
'''
# If @autoparse(...) is used instead of @autoparse
if func is None:
return lambda f: autoparse(
f, description=description,
epilog=epilog,
add_nos=add_nos,
parser=parser)
func_sig = signature(func)
docstr_description, docstr_epilog = parse_docstring(getdoc(func))
if parser is None:
parser = make_parser(
func_sig,
description or docstr_description,
epilog or docstr_epilog,
add_nos)
@wraps(func)
def autoparse_wrapper(argv=None):
if argv is None:
argv = sys.argv[1:]
# Get empty argument binding, to fill with parsed arguments. This
# object does all the heavy lifting of turning named arguments into
# into correctly bound *args and **kwargs.
parsed_args = func_sig.bind_partial()
parsed_args.arguments.update(vars(parser.parse_args(argv)))
return func(*parsed_args.args, **parsed_args.kwargs)
# TODO: attach an updated __signature__ to autoparse_wrapper, just in case.
# Attach the wrapped function and parser, and return the wrapper.
autoparse_wrapper.func = func
autoparse_wrapper.parser = parser
return autoparse_wrapper |
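A usage sketch of the decorator's contract (the exact options generated depend on `make_parser`, which is not shown in this row; the names here are illustrative):

```python
@autoparse
def main(filename, count: int = 1):
    """Copy a file several times."""
    print(filename, count)

main(['input.txt', '--count', '3'])  # argv given explicitly: prints "input.txt 3"
# main()  # with no argument, the wrapper parses sys.argv[1:] instead
```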
def prompt_y_or_n(self, prompt):
"""
Wrapper around prompt_input for simple yes/no queries.
"""
ch = self.prompt_input(prompt, key=True)
if ch in (ord('Y'), ord('y')):
return True
elif ch in (ord('N'), ord('n'), None):
return False
else:
self.flash()
        return False | Wrapper around prompt_input for simple yes/no queries. | Below is the instruction that describes the task:
### Input:
Wrapper around prompt_input for simple yes/no queries.
### Response:
def prompt_y_or_n(self, prompt):
"""
Wrapper around prompt_input for simple yes/no queries.
"""
ch = self.prompt_input(prompt, key=True)
if ch in (ord('Y'), ord('y')):
return True
elif ch in (ord('N'), ord('n'), None):
return False
else:
self.flash()
return False |
def database_path(self):
"""
Full database path. Includes the default location + the database filename.
"""
filename = self.database_filename
db_path = ":memory:" if filename == ":memory:" else (
path.abspath(path.join(__file__, "../..", "..", "data", filename)))
    return db_path | Full database path. Includes the default location + the database filename. | Below is the instruction that describes the task:
### Input:
Full database path. Includes the default location + the database filename.
### Response:
def database_path(self):
"""
Full database path. Includes the default location + the database filename.
"""
filename = self.database_filename
db_path = ":memory:" if filename == ":memory:" else (
path.abspath(path.join(__file__, "../..", "..", "data", filename)))
return db_path |
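The `abspath`/`join` chain climbs three levels up from the module file itself before descending into `data/`; isolated, on a POSIX path (the file location is hypothetical):

```python
from os import path

module_file = '/srv/pkg/subdir/module.py'  # stands in for __file__
db = path.abspath(path.join(module_file, '../..', '..', 'data', 'app.db'))
print(db)  # /srv/data/app.db
```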
def _legend(self):
"""Make the legend box"""
if not self.show_legend:
return
truncation = self.truncate_legend
if self.legend_at_bottom:
x = self.margin_box.left + self.spacing
y = (
self.margin_box.top + self.view.height + self._x_title_height +
self._x_labels_height + self.spacing
)
cols = self.legend_at_bottom_columns or ceil(sqrt(self._order)
) or 1
if not truncation:
available_space = self.view.width / cols - (
self.legend_box_size + 5
)
truncation = reverse_text_len(
available_space, self.style.legend_font_size
)
else:
x = self.spacing
y = self.margin_box.top + self.spacing
cols = 1
if not truncation:
truncation = 15
legends = self.svg.node(
self.nodes['graph'],
class_='legends',
transform='translate(%d, %d)' % (x, y)
)
h = max(self.legend_box_size, self.style.legend_font_size)
x_step = self.view.width / cols
if self.legend_at_bottom:
secondary_legends = legends # svg node is the same
else:
# draw secondary axis on right
x = self.margin_box.left + self.view.width + self.spacing
if self._y_2nd_labels:
h, w = get_texts_box(
cut(self._y_2nd_labels), self.style.label_font_size
)
x += self.spacing + max(
w * abs(cos(rad(self.y_label_rotation))), h
)
y = self.margin_box.top + self.spacing
secondary_legends = self.svg.node(
self.nodes['graph'],
class_='legends',
transform='translate(%d, %d)' % (x, y)
)
serie_number = -1
i = 0
for titles, is_secondary in ((self._legends, False),
(self._secondary_legends, True)):
if not self.legend_at_bottom and is_secondary:
i = 0
for title in titles:
serie_number += 1
if title is None:
continue
col = i % cols
row = i // cols
legend = self.svg.node(
secondary_legends if is_secondary else legends,
class_='legend reactive activate-serie',
id="activate-serie-%d" % serie_number
)
self.svg.node(
legend,
'rect',
x=col * x_step,
y=1.5 * row * h + (
self.style.legend_font_size - self.legend_box_size
if self.style.legend_font_size > self.legend_box_size
else 0
) / 2,
width=self.legend_box_size,
height=self.legend_box_size,
class_="color-%d reactive" % serie_number
)
if isinstance(title, dict):
node = decorate(self.svg, legend, title)
title = title['title']
else:
node = legend
truncated = truncate(title, truncation)
self.svg.node(
node,
'text',
x=col * x_step + self.legend_box_size + 5,
y=1.5 * row * h + .5 * h + .3 * self.style.legend_font_size
).text = truncated
if truncated != title:
self.svg.node(legend, 'title').text = title
i += 1 | Make the legend box | Below is the instruction that describes the task:
### Input:
Make the legend box
### Response:
def _legend(self):
"""Make the legend box"""
if not self.show_legend:
return
truncation = self.truncate_legend
if self.legend_at_bottom:
x = self.margin_box.left + self.spacing
y = (
self.margin_box.top + self.view.height + self._x_title_height +
self._x_labels_height + self.spacing
)
cols = self.legend_at_bottom_columns or ceil(sqrt(self._order)) or 1
if not truncation:
available_space = self.view.width / cols - (
self.legend_box_size + 5
)
truncation = reverse_text_len(
available_space, self.style.legend_font_size
)
else:
x = self.spacing
y = self.margin_box.top + self.spacing
cols = 1
if not truncation:
truncation = 15
legends = self.svg.node(
self.nodes['graph'],
class_='legends',
transform='translate(%d, %d)' % (x, y)
)
h = max(self.legend_box_size, self.style.legend_font_size)
x_step = self.view.width / cols
if self.legend_at_bottom:
secondary_legends = legends # svg node is the same
else:
# draw secondary axis on right
x = self.margin_box.left + self.view.width + self.spacing
if self._y_2nd_labels:
h, w = get_texts_box(
cut(self._y_2nd_labels), self.style.label_font_size
)
x += self.spacing + max(
w * abs(cos(rad(self.y_label_rotation))), h
)
y = self.margin_box.top + self.spacing
secondary_legends = self.svg.node(
self.nodes['graph'],
class_='legends',
transform='translate(%d, %d)' % (x, y)
)
serie_number = -1
i = 0
for titles, is_secondary in ((self._legends, False),
(self._secondary_legends, True)):
if not self.legend_at_bottom and is_secondary:
i = 0
for title in titles:
serie_number += 1
if title is None:
continue
col = i % cols
row = i // cols
legend = self.svg.node(
secondary_legends if is_secondary else legends,
class_='legend reactive activate-serie',
id="activate-serie-%d" % serie_number
)
self.svg.node(
legend,
'rect',
x=col * x_step,
y=1.5 * row * h + (
self.style.legend_font_size - self.legend_box_size
if self.style.legend_font_size > self.legend_box_size
else 0
) / 2,
width=self.legend_box_size,
height=self.legend_box_size,
class_="color-%d reactive" % serie_number
)
if isinstance(title, dict):
node = decorate(self.svg, legend, title)
title = title['title']
else:
node = legend
truncated = truncate(title, truncation)
self.svg.node(
node,
'text',
x=col * x_step + self.legend_box_size + 5,
y=1.5 * row * h + .5 * h + .3 * self.style.legend_font_size
).text = truncated
if truncated != title:
self.svg.node(legend, 'title').text = title
i += 1 |
def init_output_formatters(output_verbosity='normal', stderr=sys.stderr, logfile=None, debug_logfile=None):
"""
Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
"""
if output_verbosity not in console_verbosity_options:
raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys())
# Initialize debug log file, 'anchore-debug.log'. This log has stack traces and is expected to be
# human-readable, intended for developers and debugging, not an operational log.
# Configure stderr behavior. All errors go to screen
stderr_handler = logging.StreamHandler(stderr)
if output_verbosity == 'quiet':
stderr_handler.setLevel(level='ERROR')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler
elif output_verbosity == 'normal':
# The specific console logger
stderr_handler.setLevel('INFO')
stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
stderr_handler.setFormatter(stderr_formatter)
stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
logging.root.setLevel('INFO')
elif output_verbosity == 'verbose':
stderr_handler.setLevel('INFO')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('INFO')
elif output_verbosity == 'debug':
stderr_handler.setLevel(level='DEBUG')
stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
logging.root.setLevel('DEBUG')
logging.root.addHandler(stderr_handler)
if debug_logfile:
debug_filehandler = logging.FileHandler(debug_logfile)
debug_filehandler.setLevel('DEBUG')
formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
debug_filehandler.setFormatter(formatter)
logging.root.addHandler(debug_filehandler)
logging.root.setLevel('DEBUG')
if logfile:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel('INFO')
filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
logging.root.addHandler(filehandler) | Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return: | Below is the instruction that describes the task:
### Input:
Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
### Response:
def init_output_formatters(output_verbosity='normal', stderr=sys.stderr, logfile=None, debug_logfile=None):
"""
Initialize the CLI logging scheme.
:param output_verbosity: 'quiet','normal','verbose', or 'debug' controls the output to stdout and its format
:param stderr: stream for stderr output, default=stderr, pass a file path/string to have stderr in a file
:return:
"""
if output_verbosity not in console_verbosity_options:
raise ValueError('output_verbosity must be one of: %s' % console_verbosity_options.keys())
# Initialize debug log file, 'anchore-debug.log'. This log has stack traces and is expected to be
# human-readable, intended for developers and debugging, not an operational log.
# Configure stderr behavior. All errors go to screen
stderr_handler = logging.StreamHandler(stderr)
if output_verbosity == 'quiet':
stderr_handler.setLevel(level='ERROR')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('ERROR') # Allow all at top level, filter specifics for each handler
elif output_verbosity == 'normal':
# The specific console logger
stderr_handler.setLevel('INFO')
stderr_formatter = NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT)
stderr_handler.setFormatter(stderr_formatter)
stderr_handler.addFilter(LoggerNamePrefixFilter(prefix='anchore', non_match_loglevel='ERROR'))
logging.root.setLevel('INFO')
elif output_verbosity == 'verbose':
stderr_handler.setLevel('INFO')
stderr_handler.setFormatter(NoTracebackFormatter(fmt=NORMAL_FORMAT, err_fmt=ERR_FORMAT))
logging.root.setLevel('INFO')
elif output_verbosity == 'debug':
stderr_handler.setLevel(level='DEBUG')
stderr_handler.setFormatter(logging.Formatter(fmt=DEBUG_FORMAT))
logging.root.setLevel('DEBUG')
logging.root.addHandler(stderr_handler)
if debug_logfile:
debug_filehandler = logging.FileHandler(debug_logfile)
debug_filehandler.setLevel('DEBUG')
formatter = logging.Formatter(fmt=DEBUG_LOGFILE_FORMAT)
debug_filehandler.setFormatter(formatter)
logging.root.addHandler(debug_filehandler)
logging.root.setLevel('DEBUG')
if logfile:
filehandler = logging.FileHandler(logfile)
filehandler.setLevel('INFO')
filehandler.setFormatter(NoTracebackFormatter(fmt=LOGFILE_FORMAT, err_fmt=LOGFILE_FORMAT))
logging.root.addHandler(filehandler) |
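A sketch of how the initializer above might be called; the logger name uses the 'anchore' prefix that the normal-verbosity filter expects, and the file names are assumptions.

```python
import logging

# Hypothetical call site: verbose console output plus two log files.
init_output_formatters(output_verbosity='verbose',
                       logfile='anchore.log',
                       debug_logfile='anchore-debug.log')
log = logging.getLogger('anchore.cli')
log.info('shown on stderr and written to anchore.log')
log.debug('captured only by the DEBUG-level debug_logfile handler')
```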
def add_transition(self, source: str, dest: str):
""" Adds a transition from one state to another.
Args:
source (str): the name of the state from where the transition starts
dest (str): the name of the state where the transition ends
"""
self._transitions[source].append(dest) | Adds a transition from one state to another.
Args:
source (str): the name of the state from where the transition starts
dest (str): the name of the state where the transition ends | Below is the instruction that describes the task:
### Input:
Adds a transition from one state to another.
Args:
source (str): the name of the state from where the transition starts
dest (str): the name of the state where the transition ends
### Response:
def add_transition(self, source: str, dest: str):
""" Adds a transition from one state to another.
Args:
source (str): the name of the state from where the transition starts
dest (str): the name of the state where the transition ends
"""
self._transitions[source].append(dest) |
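`add_transition` assumes `self._transitions` maps a state name to a list of destinations; a `collections.defaultdict(list)` gives exactly that behavior without pre-registering states. A minimal self-contained sketch (the class name is an assumption):

```python
from collections import defaultdict

class StateMachine:
    def __init__(self):
        # state name -> list of reachable state names
        self._transitions = defaultdict(list)

    def add_transition(self, source: str, dest: str):
        self._transitions[source].append(dest)

sm = StateMachine()
sm.add_transition('idle', 'running')
sm.add_transition('running', 'idle')
print(dict(sm._transitions))  # {'idle': ['running'], 'running': ['idle']}
```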
def fetch_path(self, name):
"""
Fetch contents from the path retrieved via lookup_path.
No caching will be done.
"""
with codecs.open(self.lookup_path(name), encoding='utf-8') as fd:
return fd.read() | Fetch contents from the path retrieved via lookup_path.
No caching will be done. | Below is the instruction that describes the task:
### Input:
Fetch contents from the path retrieved via lookup_path.
No caching will be done.
### Response:
def fetch_path(self, name):
"""
Fetch contents from the path retrieved via lookup_path.
No caching will be done.
"""
with codecs.open(self.lookup_path(name), encoding='utf-8') as fd:
return fd.read() |
def set_main_fan(self, main_fan):
"""Set the main fan config.
:param main_fan: Value to set the main fan
:type main_fan: int [0-10]
:returns: None
:raises: InvalidInput
"""
if not isinstance(main_fan, int) or main_fan not in range(0, 11):
raise InvalidInput("Main fan value must be int between 0-10")
self._config['main_fan'] = main_fan
self._q.put(self._config) | Set the main fan config.
:param main_fan: Value to set the main fan
:type main_fan: int [0-10]
:returns: None
:raises: InvalidInput | Below is the instruction that describes the task:
### Input:
Set the main fan config.
:param main_fan: Value to set the main fan
:type main_fan: int [0-10]
:returns: None
:raises: InvalidInput
### Response:
def set_main_fan(self, main_fan):
"""Set the main fan config.
:param main_fan: Value to set the main fan
:type main_fan: int [0-10]
:returns: None
:raises: InvalidInput
"""
if not isinstance(main_fan, int) or main_fan not in range(0, 11):
raise InvalidInput("Main fan value must be int between 0-10")
self._config['main_fan'] = main_fan
self._q.put(self._config) |
def on_message(self, message):
""" When we get an event from js, lookup the node and invoke the
action on the enaml node.
"""
change = json.loads(message)
log.debug(f'Update from js: {change}')
# Lookup the node
ref = change.get('ref')
if not ref:
return
nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref)
if not nodes:
return # Unknown node
node = nodes[0]
# Trigger the change on the enaml node
if change.get('type') and change.get('name'):
if change['type'] == 'event':
trigger = getattr(node, change['name'])
trigger()
elif change['type'] == 'update':
# Trigger the update
setattr(node, change['name'], change['value'])
else:
log.warning(f"Unhandled event {self} {node}: {change}") | When we get an event from js, lookup the node and invoke the
action on the enaml node. | Below is the instruction that describes the task:
### Input:
When we get an event from js, lookup the node and invoke the
action on the enaml node.
### Response:
def on_message(self, message):
""" When we get an event from js, lookup the node and invoke the
action on the enaml node.
"""
change = json.loads(message)
log.debug(f'Update from js: {change}')
# Lookup the node
ref = change.get('ref')
if not ref:
return
nodes = self.viewer.xpath('//*[@ref=$ref]', ref=ref)
if not nodes:
return # Unknown node
node = nodes[0]
# Trigger the change on the enaml node
if change.get('type') and change.get('name'):
if change['type'] == 'event':
trigger = getattr(node, change['name'])
trigger()
elif change['type'] == 'update':
# Trigger the update
setattr(node, change['name'], change['value'])
else:
log.warning(f"Unhandled event {self} {node}: {change}") |
def parse(self, msg, name):
"""Parses the message.
We check that the message is properly formatted.
:param msg: a json-encoded value containing a JWS or JWE+JWS token
:raises InvalidMessage: if the message cannot be parsed or validated
:returns: A verified payload
"""
try:
jtok = JWT(jwt=msg)
except Exception as e:
raise InvalidMessage('Failed to parse message: %s' % str(e))
try:
token = jtok.token
if isinstance(token, JWE):
token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC])
# If an encrypted payload is received then there must be
# a nested signed payload to verify the provenance.
payload = token.payload.decode('utf-8')
token = JWS()
token.deserialize(payload)
elif isinstance(token, JWS):
pass
else:
raise TypeError("Invalid Token type: %s" % type(jtok))
# Retrieve client keys for later use
self.client_keys = [
JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)),
JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))]
# verify token and get payload
token.verify(self.client_keys[KEY_USAGE_SIG])
claims = json_decode(token.payload)
except Exception as e:
logger.debug('Failed to validate message', exc_info=True)
raise InvalidMessage('Failed to validate message: %s' % str(e))
check_kem_claims(claims, name)
self.name = name
self.payload = claims.get('value')
self.msg_type = 'kem'
return {'type': self.msg_type,
'value': {'kid': self.client_keys[KEY_USAGE_ENC].key_id,
'claims': claims}} | Parses the message.
We check that the message is properly formatted.
:param msg: a json-encoded value containing a JWS or JWE+JWS token
:raises InvalidMessage: if the message cannot be parsed or validated
:returns: A verified payload | Below is the instruction that describes the task:
### Input:
Parses the message.
We check that the message is properly formatted.
:param msg: a json-encoded value containing a JWS or JWE+JWS token
:raises InvalidMessage: if the message cannot be parsed or validated
:returns: A verified payload
### Response:
def parse(self, msg, name):
"""Parses the message.
We check that the message is properly formatted.
:param msg: a json-encoded value containing a JWS or JWE+JWS token
:raises InvalidMessage: if the message cannot be parsed or validated
:returns: A verified payload
"""
try:
jtok = JWT(jwt=msg)
except Exception as e:
raise InvalidMessage('Failed to parse message: %s' % str(e))
try:
token = jtok.token
if isinstance(token, JWE):
token.decrypt(self.kkstore.server_keys[KEY_USAGE_ENC])
# If an encrypted payload is received then there must be
# a nested signed payload to verify the provenance.
payload = token.payload.decode('utf-8')
token = JWS()
token.deserialize(payload)
elif isinstance(token, JWS):
pass
else:
raise TypeError("Invalid Token type: %s" % type(jtok))
# Retrieve client keys for later use
self.client_keys = [
JWK(**self._get_key(token.jose_header, KEY_USAGE_SIG)),
JWK(**self._get_key(token.jose_header, KEY_USAGE_ENC))]
# verify token and get payload
token.verify(self.client_keys[KEY_USAGE_SIG])
claims = json_decode(token.payload)
except Exception as e:
logger.debug('Failed to validate message', exc_info=True)
raise InvalidMessage('Failed to validate message: %s' % str(e))
check_kem_claims(claims, name)
self.name = name
self.payload = claims.get('value')
self.msg_type = 'kem'
return {'type': self.msg_type,
'value': {'kid': self.client_keys[KEY_USAGE_ENC].key_id,
'claims': claims}} |
def collection(name=None):
"""Render the collection page.
It renders it either with a collection specific template (aka
collection_{collection_name}.html) or with the default collection
template (collection.html).
"""
if name is None:
collection = Collection.query.get_or_404(1)
else:
collection = Collection.query.filter(
Collection.name == name).first_or_404()
# TODO add breadcrumbs
# breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]
return render_template([
'invenio_collections/collection_{0}.html'.format(collection.id),
'invenio_collections/collection_{0}.html'.format(slugify(name, '_')),
current_app.config['COLLECTIONS_DEFAULT_TEMPLATE']
], collection=collection) | Render the collection page.
It renders it either with a collection specific template (aka
collection_{collection_name}.html) or with the default collection
template (collection.html). | Below is the instruction that describes the task:
### Input:
Render the collection page.
It renders it either with a collection specific template (aka
collection_{collection_name}.html) or with the default collection
template (collection.html).
### Response:
def collection(name=None):
"""Render the collection page.
It renders it either with a collection specific template (aka
collection_{collection_name}.html) or with the default collection
template (collection.html).
"""
if name is None:
collection = Collection.query.get_or_404(1)
else:
collection = Collection.query.filter(
Collection.name == name).first_or_404()
# TODO add breadcrumbs
# breadcrumbs = current_breadcrumbs + collection.breadcrumbs(ln=g.ln)[1:]
return render_template([
'invenio_collections/collection_{0}.html'.format(collection.id),
'invenio_collections/collection_{0}.html'.format(slugify(name, '_')),
current_app.config['COLLECTIONS_DEFAULT_TEMPLATE']
], collection=collection) |
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
fmts = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
'%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
# Get date/time object from newdate
dt_obj = _try_parse_datetime(newdate, fmts)
if dt_obj is None:
return False
# Set time using set_system_date_time()
return set_system_date_time(years=dt_obj.year,
months=dt_obj.month,
days=dt_obj.day) | Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13' | Below is the instruction that describes the task:
### Input:
Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
### Response:
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
Args:
newdate (str):
The date to set. Can be any of the following formats
- YYYY-MM-DD
- MM-DD-YYYY
- MM-DD-YY
- MM/DD/YYYY
- MM/DD/YY
- YYYY/MM/DD
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
fmts = ['%Y-%m-%d', '%m-%d-%Y', '%m-%d-%y',
'%m/%d/%Y', '%m/%d/%y', '%Y/%m/%d']
# Get date/time object from newdate
dt_obj = _try_parse_datetime(newdate, fmts)
if dt_obj is None:
return False
# Set time using set_system_date_time()
return set_system_date_time(years=dt_obj.year,
months=dt_obj.month,
days=dt_obj.day) |
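`_try_parse_datetime` is referenced but not shown in this excerpt. Given how it is called, a plausible implementation (an assumption, not the actual Salt helper) tries each format in turn and returns None when nothing matches:

```python
import datetime

def _try_parse_datetime(time_str, fmts):
    # Return the first successful parse, or None if no format matches.
    for fmt in fmts:
        try:
            return datetime.datetime.strptime(time_str, fmt)
        except ValueError:
            continue
    return None
```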
def set_value(self, comp_str):
"""
Set the value of component.
:param string comp_str: value of component
:returns: None
:exception: ValueError - incorrect value of component
"""
self._is_negated = False
self._encoded_value = comp_str
self._standard_value = super(
CPEComponent2_3_URI_edpacked, self)._decode() | Set the value of component.
:param string comp_str: value of component
:returns: None
:exception: ValueError - incorrect value of component | Below is the instruction that describes the task:
### Input:
Set the value of component.
:param string comp_str: value of component
:returns: None
:exception: ValueError - incorrect value of component
### Response:
def set_value(self, comp_str):
"""
Set the value of component.
:param string comp_str: value of component
:returns: None
:exception: ValueError - incorrect value of component
"""
self._is_negated = False
self._encoded_value = comp_str
self._standard_value = super(
CPEComponent2_3_URI_edpacked, self)._decode() |
def successful(self):
"""Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed"""
status = self.status
assert status >= COMPLETED, "status is %s" % status
return (self.status == COMPLETED) | Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed | Below is the instruction that describes the task:
### Input:
Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed
### Response:
def successful(self):
"""Return True if the job finished with a COMPLETED status, False if it
finished with a CANCELLED or FAILED status. Raise an `AssertionError`
if the job has not completed"""
status = self.status
assert status >= COMPLETED, "status is %s" % status
return (self.status == COMPLETED) |
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc doesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value | Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns. | Below is the instruction that describes the task:
### Input:
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
### Response:
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if self.connection._DJANGO_VERSION >= 14 and settings.USE_TZ:
if timezone.is_aware(value):
# pyodbc doesn't support datetimeoffset
value = value.astimezone(timezone.utc)
if not self.connection.features.supports_microsecond_precision:
value = value.replace(microsecond=0)
return value |
def extract_file(self, name, full_path=False, directory="."):
"""Extract a member from the archive to the specified working directory.
Behaviour of name and pull_path is the same as in function
get_content_of_file.
"""
if self.handle:
for member in self.handle.getmembers():
if ((full_path and member.name == name) or
(not full_path and os.path.basename(member.name) == name)):
# TODO handle KeyError exception
self.handle.extract(member, path=directory) | Extract a member from the archive to the specified working directory.
Behaviour of name and full_path is the same as in function
get_content_of_file. | Below is the instruction that describes the task:
### Input:
Extract a member from the archive to the specified working directory.
Behaviour of name and full_path is the same as in function
get_content_of_file.
### Response:
def extract_file(self, name, full_path=False, directory="."):
"""Extract a member from the archive to the specified working directory.
Behaviour of name and full_path is the same as in function
get_content_of_file.
"""
if self.handle:
for member in self.handle.getmembers():
if ((full_path and member.name == name) or
(not full_path and os.path.basename(member.name) == name)):
# TODO handle KeyError exception
self.handle.extract(member, path=directory) |
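`self.handle` behaves like a `tarfile.TarFile`; a standalone sketch of the same basename-matching extraction (the archive and member names are assumptions):

```python
import os
import tarfile

with tarfile.open('archive.tar.gz') as handle:
    for member in handle.getmembers():
        # Match on the basename, mirroring the full_path=False branch above.
        if os.path.basename(member.name) == 'setup.py':
            handle.extract(member, path='.')
```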
def _do_pending_writes(self):
"""Do any pending text writes"""
for text, wrap in self._pending_writes:
# truncate in case of *really* long messages
text = text[-self._n_cols*self._n_rows:]
text = text.split('\n')
text = [t if len(t) > 0 else '' for t in text]
nr, nc = self._n_rows, self._n_cols
for para in text:
para = para[:nc] if not wrap else para
lines = [para[ii:(ii+nc)] for ii in range(0, len(para), nc)]
lines = [''] if len(lines) == 0 else lines
for line in lines:
# Update row and scroll if necessary
self._text_lines.insert(0, line)
self._text_lines = self._text_lines[:nr]
self._bytes_012[1:] = self._bytes_012[:-1]
self._bytes_345[1:] = self._bytes_345[:-1]
self._insert_text_buf(line, 0)
self._pending_writes = [] | Do any pending text writes | Below is the instruction that describes the task:
### Input:
Do any pending text writes
### Response:
def _do_pending_writes(self):
"""Do any pending text writes"""
for text, wrap in self._pending_writes:
# truncate in case of *really* long messages
text = text[-self._n_cols*self._n_rows:]
text = text.split('\n')
text = [t if len(t) > 0 else '' for t in text]
nr, nc = self._n_rows, self._n_cols
for para in text:
para = para[:nc] if not wrap else para
lines = [para[ii:(ii+nc)] for ii in range(0, len(para), nc)]
lines = [''] if len(lines) == 0 else lines
for line in lines:
# Update row and scroll if necessary
self._text_lines.insert(0, line)
self._text_lines = self._text_lines[:nr]
self._bytes_012[1:] = self._bytes_012[:-1]
self._bytes_345[1:] = self._bytes_345[:-1]
self._insert_text_buf(line, 0)
self._pending_writes = [] |
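The wrapping step above is plain fixed-width slicing; a standalone illustration with a 10-column display:

```python
nc = 10  # columns per row
para = 'a fairly long line of text'
lines = [para[ii:ii + nc] for ii in range(0, len(para), nc)]
print(lines)  # ['a fairly l', 'ong line o', 'f text']
```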
def item_afdeling_adapter(obj, request):
"""
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
"""
return {
'id': obj.id,
'naam': obj.naam,
'gemeente': {
'id': obj.gemeente.id,
'naam': obj.gemeente.naam
},
'centroid': obj.centroid,
'bounding_box': obj.bounding_box
} | Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json. | Below is the instruction that describes the task:
### Input:
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
### Response:
def item_afdeling_adapter(obj, request):
"""
Adapter for rendering an object of
:class: `crabpy.gateway.capakey.Afdeling` to json.
"""
return {
'id': obj.id,
'naam': obj.naam,
'gemeente': {
'id': obj.gemeente.id,
'naam': obj.gemeente.naam
},
'centroid': obj.centroid,
'bounding_box': obj.bounding_box
} |
def dot_product_unmasked_attention_local_2d_tpu(
q, k, v, bias, max_relative_position=None, query_shape=(8, 8),
dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False,
dropout_broadcast_dims=None):
"""Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
query_shape: a two tuple indicating query shape
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
[batch, heads, height, width, depth] tensor, the output of attention.
"""
if max_relative_position:
raise ValueError("Relative local 2d attention not implemented")
with tf.variable_scope(
name,
default_name="dot_product_unmasked_attention_local_2d_tpu",
values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
orig_q_shape = common_layers.shape_list(q)
# Pad query, key, value to ensure multiple of corresponding lengths.
memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)]
q = pad_to_multiple_2d(q, query_shape)
k = pad_to_multiple_2d(k, query_shape)
v = pad_to_multiple_2d(v, query_shape)
q_shape = common_layers.shape_list(q)
(height, width) = (q_shape[2], q_shape[3])
_, num_heads, height, width, depth_k = common_layers.shape_list(k)
depth_v = common_layers.shape_list(v)[-1]
num_h_blocks = height//query_shape[0]
num_w_blocks = width//query_shape[1]
# Extract center queries, keys, and values
q = tf.reshape(q, [-1, height, width, depth_k])
queries = _extract_blocks(
q, query_shape[0], query_shape[1])
k = tf.reshape(k, [-1, height, width, depth_k])
keys = get_2d_local_memory_v2(
k, query_shape, memory_flange)
v = tf.reshape(v, [-1, height, width, depth_v])
values = get_2d_local_memory_v2(
v, query_shape, memory_flange)
memory_h = query_shape[0] + 2*memory_flange[0]
memory_w = query_shape[1] + 2*memory_flange[1]
queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks,
query_shape[0]*query_shape[1], depth_k])
keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks,
memory_h*memory_w, depth_k])
values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks,
memory_h*memory_w, depth_v])
logits = tf.matmul(queries, keys, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# Dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
ret = tf.matmul(weights, values)
# we need to get it back to shape [batch, heads, height, width]
ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks,
query_shape[0], query_shape[1], depth_v])
ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6])
ret = tf.reshape(ret, [-1, num_heads, num_h_blocks*query_shape[0],
num_w_blocks*query_shape[1], depth_v])
# slice if padding was introduced
ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2],
orig_q_shape[3], -1])
return ret | Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
query_shape: a two tuple indicating query shape
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
[batch, heads, height, width, depth] tensor, the output of attention. | Below is the instruction that describes the task:
### Input:
Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
query_shape: a two tuple indicating query shape
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
[batch, heads, height, width, depth] tensor, the output of attention.
### Response:
def dot_product_unmasked_attention_local_2d_tpu(
q, k, v, bias, max_relative_position=None, query_shape=(8, 8),
dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False,
dropout_broadcast_dims=None):
"""Calculate unmasked dot-product local self-attention 2d on tpu.
Args:
q: a Tensor with shape [batch, heads, height, width, depth].
k: a Tensor with shape [batch, heads, height, width, depth].
v: a Tensor with shape [batch, heads, height, width, depth].
bias: bias Tensor.
max_relative_position: an integer the max relative embedding considered.
Changing this invalidates checkpoints.
query_shape: a two tuple indicating query shape
dropout_rate: a floating point number.
image_shapes: optional tuple of integer scalars.
name: an optional string.
make_image_summary: Whether to make an attention image summary.
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
[batch, heads, height, width, depth] tensor, the output of attention.
"""
if max_relative_position:
raise ValueError("Relative local 2d attention not implemented")
with tf.variable_scope(
name,
default_name="dot_product_unmasked_attention_local_2d_tpu",
values=[q, k, v]):
# This calculation only works for self attention.
# q, k and v must therefore have the same shape.
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape().assert_is_compatible_with(v.get_shape())
orig_q_shape = common_layers.shape_list(q)
# Pad query, key, value to ensure multiple of corresponding lengths.
memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)]
q = pad_to_multiple_2d(q, query_shape)
k = pad_to_multiple_2d(k, query_shape)
v = pad_to_multiple_2d(v, query_shape)
q_shape = common_layers.shape_list(q)
(height, width) = (q_shape[2], q_shape[3])
_, num_heads, height, width, depth_k = common_layers.shape_list(k)
depth_v = common_layers.shape_list(v)[-1]
num_h_blocks = height//query_shape[0]
num_w_blocks = width//query_shape[1]
# Extract center queries, keys, and values
q = tf.reshape(q, [-1, height, width, depth_k])
queries = _extract_blocks(
q, query_shape[0], query_shape[1])
k = tf.reshape(k, [-1, height, width, depth_k])
keys = get_2d_local_memory_v2(
k, query_shape, memory_flange)
v = tf.reshape(v, [-1, height, width, depth_v])
values = get_2d_local_memory_v2(
v, query_shape, memory_flange)
memory_h = query_shape[0] + 2*memory_flange[0]
memory_w = query_shape[1] + 2*memory_flange[1]
queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks,
query_shape[0]*query_shape[1], depth_k])
keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks,
memory_h*memory_w, depth_k])
values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks,
memory_h*memory_w, depth_v])
logits = tf.matmul(queries, keys, transpose_b=True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# Dropping out the attention links for each of the heads
weights = common_layers.dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
if common_layers.should_generate_summaries() and make_image_summary:
attention_image_summary(weights, image_shapes)
ret = tf.matmul(weights, values)
# we need to get it back to shape [batch, heads, height, width]
ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks,
query_shape[0], query_shape[1], depth_v])
ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6])
ret = tf.reshape(ret, [-1, num_heads, num_h_blocks*query_shape[0],
num_w_blocks*query_shape[1], depth_v])
# slice if padding was introduced
ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2],
orig_q_shape[3], -1])
return ret |
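The block bookkeeping in the attention code above (splitting a height x width grid into query-shaped tiles) comes down to a reshape and a transpose. A NumPy sketch of the idea, not the library's `_extract_blocks` itself:

```python
import numpy as np

def extract_blocks(x, bh, bw):
    """Split [batch, H, W, d] into non-overlapping bh x bw tiles."""
    b, h, w, d = x.shape
    assert h % bh == 0 and w % bw == 0
    x = x.reshape(b, h // bh, bh, w // bw, bw, d)
    # -> [batch, num_h_blocks, num_w_blocks, bh, bw, d]
    return x.transpose(0, 1, 3, 2, 4, 5)

x = np.arange(2 * 8 * 8 * 4, dtype=np.float32).reshape(2, 8, 8, 4)
print(extract_blocks(x, 4, 4).shape)  # (2, 2, 2, 4, 4, 4)
```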
def _add_node(self, agent):
"""Add an Agent as a node to the graph."""
if agent is None:
return
node_label = _get_node_label(agent)
if isinstance(agent, Agent) and agent.bound_conditions:
bound_agents = [bc.agent for bc in agent.bound_conditions if
bc.is_bound]
if bound_agents:
bound_names = [_get_node_label(a) for a in bound_agents]
node_label = _get_node_label(agent) + '/' + \
'/'.join(bound_names)
self._complex_nodes.append([agent] + bound_agents)
else:
node_label = _get_node_label(agent)
node_key = _get_node_key(agent)
if node_key in self.existing_nodes:
return
self.existing_nodes.append(node_key)
self.graph.add_node(node_key,
label=node_label,
**self.node_properties) | Add an Agent as a node to the graph. | Below is the instruction that describes the task:
### Input:
Add an Agent as a node to the graph.
### Response:
def _add_node(self, agent):
"""Add an Agent as a node to the graph."""
if agent is None:
return
node_label = _get_node_label(agent)
if isinstance(agent, Agent) and agent.bound_conditions:
bound_agents = [bc.agent for bc in agent.bound_conditions if
bc.is_bound]
if bound_agents:
bound_names = [_get_node_label(a) for a in bound_agents]
node_label = _get_node_label(agent) + '/' + \
'/'.join(bound_names)
self._complex_nodes.append([agent] + bound_agents)
else:
node_label = _get_node_label(agent)
node_key = _get_node_key(agent)
if node_key in self.existing_nodes:
return
self.existing_nodes.append(node_key)
self.graph.add_node(node_key,
label=node_label,
**self.node_properties) |
def parse_localnamespacepath(self, tup_tree):
"""
Parse a LOCALNAMESPACEPATH element and return the namespace it
represents as a unicode string.
The namespace is formed by joining the namespace components (one from
each NAMESPACE child element) with a slash (e.g. to "root/cimv2").
::
<!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
"""
self.check_node(tup_tree, 'LOCALNAMESPACEPATH', (), (), ('NAMESPACE',))
if not kids(tup_tree):
raise CIMXMLParseError(
_format("Element {0!A} missing child elements (expecting one "
"or more child elements 'NAMESPACE')", name(tup_tree)),
conn_id=self.conn_id)
# self.list_of_various() has the same effect as self.list_of_same()
# when used with a single allowed child element, but is a little
# faster.
ns_list = self.list_of_various(tup_tree, ('NAMESPACE',))
return u'/'.join(ns_list) | Parse a LOCALNAMESPACEPATH element and return the namespace it
represents as a unicode string.
The namespace is formed by joining the namespace components (one from
each NAMESPACE child element) with a slash (e.g. to "root/cimv2").
::
<!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)> | Below is the instruction that describes the task:
### Input:
Parse a LOCALNAMESPACEPATH element and return the namespace it
represents as a unicode string.
The namespace is formed by joining the namespace components (one from
each NAMESPACE child element) with a slash (e.g. to "root/cimv2").
::
<!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
### Response:
def parse_localnamespacepath(self, tup_tree):
"""
Parse a LOCALNAMESPACEPATH element and return the namespace it
represents as a unicode string.
The namespace is formed by joining the namespace components (one from
each NAMESPACE child element) with a slash (e.g. to "root/cimv2").
::
<!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
"""
self.check_node(tup_tree, 'LOCALNAMESPACEPATH', (), (), ('NAMESPACE',))
if not kids(tup_tree):
raise CIMXMLParseError(
_format("Element {0!A} missing child elements (expecting one "
"or more child elements 'NAMESPACE')", name(tup_tree)),
conn_id=self.conn_id)
# self.list_of_various() has the same effect as self.list_of_same()
# when used with a single allowed child element, but is a little
# faster.
ns_list = self.list_of_various(tup_tree, ('NAMESPACE',))
return u'/'.join(ns_list) |
def build_reference_fields(citation_elements, line_marker, raw_ref,
reference_format):
"""Create the final representation of the reference information.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found
piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference
line (e.g. [19])
@param raw_ref: (string) The raw string of this line
@return reference_fields: (list) A list of one dictionary containing the
reference elements
"""
# Begin the datafield element
current_field = create_reference_field(line_marker)
current_field['raw_ref'] = [raw_ref]
reference_fields = [current_field]
for element in citation_elements:
# Before going onto checking 'what' the next element is,
# handle misc text and semi-colons
# Multiple misc text subfields will be compressed later
# This will also be the only part of the code that deals with MISC
# tag_typed elements
misc_txt = element['misc_txt']
if misc_txt.strip("., [](){}"):
misc_txt = misc_txt.lstrip('])} ,.').rstrip('[({ ,.')
add_subfield(current_field, 'misc', misc_txt)
# Now handle the type dependent actions
# JOURNAL
if element['type'] == "JOURNAL":
add_journal_subfield(current_field, element, reference_format)
# REPORT NUMBER
elif element['type'] == "REPORTNUMBER":
add_subfield(current_field, 'reportnumber', element['report_num'])
# URL
elif element['type'] == "URL":
if element['url_string'] == element['url_desc']:
# Build the datafield for the URL segment of the reference
# line:
add_subfield(current_field, 'url', element['url_string'])
# Else, in the case that the url string and the description differ
# in some way, include them both
else:
add_subfield(current_field, 'url', element['url_string'])
add_subfield(current_field, 'urldesc', element['url_desc'])
# DOI
elif element['type'] == "DOI":
add_subfield(current_field, 'doi', 'doi:' + element['doi_string'])
# HDL
elif element['type'] == "HDL":
add_subfield(current_field, 'hdl', 'hdl:' + element['hdl_id'])
# AUTHOR
elif element['type'] == "AUTH":
value = element['auth_txt']
if element['auth_type'] == 'incl':
value = "(%s)" % value
add_subfield(current_field, 'author', value)
elif element['type'] == "QUOTED":
add_subfield(current_field, 'title', element['title'])
elif element['type'] == "ISBN":
add_subfield(current_field, 'isbn', element['ISBN'])
elif element['type'] == "BOOK":
add_subfield(current_field, 'title', element['title'])
elif element['type'] == "PUBLISHER":
add_subfield(current_field, 'publisher', element['publisher'])
elif element['type'] == "YEAR":
add_subfield(current_field, 'year', element['year'])
elif element['type'] == "COLLABORATION":
add_subfield(current_field,
'collaboration',
element['collaboration'])
elif element['type'] == "RECID":
add_subfield(current_field, 'recid', str(element['recid']))
return reference_fields | Create the final representation of the reference information.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found
piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference
line (e.g. [19])
@param raw_ref: (string) The raw string of this line
@return reference_fields: (list) A list of one dictionary containing the
reference elements | Below is the instruction that describes the task:
### Input:
Create the final representation of the reference information.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found
piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference
line (e.g. [19])
@param raw_ref: (string) The raw string of this line
@return reference_fields: (list) A list of one dictionary containing the
reference elements
### Response:
def build_reference_fields(citation_elements, line_marker, raw_ref,
reference_format):
"""Create the final representation of the reference information.
@param citation_elements: (list) an ordered list of dictionary elements,
with each element corresponding to a found
piece of information from a reference line.
@param line_marker: (string) The line marker for this single reference
line (e.g. [19])
@param raw_ref: (string) The raw string of this line
@return reference_fields: (list) A list of one dictionary containing the
reference elements
"""
# Begin the datafield element
current_field = create_reference_field(line_marker)
current_field['raw_ref'] = [raw_ref]
reference_fields = [current_field]
for element in citation_elements:
# Before going onto checking 'what' the next element is,
# handle misc text and semi-colons
# Multiple misc text subfields will be compressed later
# This will also be the only part of the code that deals with MISC
# tag_typed elements
misc_txt = element['misc_txt']
if misc_txt.strip("., [](){}"):
misc_txt = misc_txt.lstrip('])} ,.').rstrip('[({ ,.')
add_subfield(current_field, 'misc', misc_txt)
# Now handle the type dependent actions
# JOURNAL
if element['type'] == "JOURNAL":
add_journal_subfield(current_field, element, reference_format)
# REPORT NUMBER
elif element['type'] == "REPORTNUMBER":
add_subfield(current_field, 'reportnumber', element['report_num'])
# URL
elif element['type'] == "URL":
if element['url_string'] == element['url_desc']:
# Build the datafield for the URL segment of the reference
# line:
add_subfield(current_field, 'url', element['url_string'])
# Else, in the case that the url string and the description differ
# in some way, include them both
else:
add_subfield(current_field, 'url', element['url_string'])
add_subfield(current_field, 'urldesc', element['url_desc'])
# DOI
elif element['type'] == "DOI":
add_subfield(current_field, 'doi', 'doi:' + element['doi_string'])
# HDL
elif element['type'] == "HDL":
add_subfield(current_field, 'hdl', 'hdl:' + element['hdl_id'])
# AUTHOR
elif element['type'] == "AUTH":
value = element['auth_txt']
if element['auth_type'] == 'incl':
value = "(%s)" % value
add_subfield(current_field, 'author', value)
elif element['type'] == "QUOTED":
add_subfield(current_field, 'title', element['title'])
elif element['type'] == "ISBN":
add_subfield(current_field, 'isbn', element['ISBN'])
elif element['type'] == "BOOK":
add_subfield(current_field, 'title', element['title'])
elif element['type'] == "PUBLISHER":
add_subfield(current_field, 'publisher', element['publisher'])
elif element['type'] == "YEAR":
add_subfield(current_field, 'year', element['year'])
elif element['type'] == "COLLABORATION":
add_subfield(current_field,
'collaboration',
element['collaboration'])
elif element['type'] == "RECID":
add_subfield(current_field, 'recid', str(element['recid']))
return reference_fields |
def getWmg(self, normalize = False):
"""
Generate a weighted majority graph that represents the whole profile. The function will
return a two-dimensional dictionary that associates integer representations of each pair of
candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the
number of times cand2 is ranked above cand1.
:ivar bool normalize: If normalize is True, the function will return a normalized graph
where each edge has been divided by the value of the largest edge.
"""
# Initialize a new dictionary for our final weighted majority graph.
wmgMap = dict()
for cand in self.candMap.keys():
wmgMap[cand] = dict()
for cand1, cand2 in itertools.combinations(self.candMap.keys(), 2):
wmgMap[cand1][cand2] = 0
wmgMap[cand2][cand1] = 0
# Go through the wmgMaps and increment the value of each edge in our final graph with the
# edges in each of the wmgMaps. We take into account the number of times that the vote
# occurred.
for i in range(0, len(self.preferences)):
preference = self.preferences[i]
preferenceWmgMap = preference.wmgMap
for cand1, cand2 in itertools.combinations(preferenceWmgMap.keys(), 2):
if cand2 in preferenceWmgMap[cand1].keys():
wmgMap[cand1][cand2] += preferenceWmgMap[cand1][cand2]*preference.count
wmgMap[cand2][cand1] += preferenceWmgMap[cand2][cand1]*preference.count
# By default, we assume that the weighted majority graph should not be normalized. If
# desired, we normalize by dividing each edge by the value of the largest edge.
if (normalize == True):
maxEdge = float('-inf')
for cand in wmgMap.keys():
maxEdge = max(maxEdge, max(wmgMap[cand].values()))
for cand1 in wmgMap.keys():
for cand2 in wmgMap[cand1].keys():
wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2])/maxEdge
return wmgMap | Generate a weighted majority graph that represents the whole profile. The function will
return a two-dimensional dictionary that associates integer representations of each pair of
candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the
number of times cand2 is ranked above cand1.
:ivar bool normalize: If normalize is True, the function will return a normalized graph
where each edge has been divided by the value of the largest edge. | Below is the instruction that describes the task:
### Input:
Generate a weighted majority graph that represents the whole profile. The function will
return a two-dimensional dictionary that associates integer representations of each pair of
candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the
number of times cand2 is ranked above cand1.
:ivar bool normalize: If normalize is True, the function will return a normalized graph
where each edge has been divided by the value of the largest edge.
### Response:
def getWmg(self, normalize = False):
"""
Generate a weighted majority graph that represents the whole profile. The function will
return a two-dimensional dictionary that associates integer representations of each pair of
candidates, cand1 and cand2, with the number of times cand1 is ranked above cand2 minus the
number of times cand2 is ranked above cand1.
:ivar bool normalize: If normalize is True, the function will return a normalized graph
where each edge has been divided by the value of the largest edge.
"""
# Initialize a new dictionary for our final weighted majority graph.
wmgMap = dict()
for cand in self.candMap.keys():
wmgMap[cand] = dict()
for cand1, cand2 in itertools.combinations(self.candMap.keys(), 2):
wmgMap[cand1][cand2] = 0
wmgMap[cand2][cand1] = 0
# Go through the wmgMaps and increment the value of each edge in our final graph with the
# edges in each of the wmgMaps. We take into account the number of times that the vote
# occurred.
for i in range(0, len(self.preferences)):
preference = self.preferences[i]
preferenceWmgMap = preference.wmgMap
for cand1, cand2 in itertools.combinations(preferenceWmgMap.keys(), 2):
if cand2 in preferenceWmgMap[cand1].keys():
wmgMap[cand1][cand2] += preferenceWmgMap[cand1][cand2]*preference.count
wmgMap[cand2][cand1] += preferenceWmgMap[cand2][cand1]*preference.count
# By default, we assume that the weighted majority graph should not be normalized. If
# desired, we normalize by dividing each edge by the value of the largest edge.
if (normalize == True):
maxEdge = float('-inf')
for cand in wmgMap.keys():
maxEdge = max(maxEdge, max(wmgMap[cand].values()))
for cand1 in wmgMap.keys():
for cand2 in wmgMap[cand1].keys():
wmgMap[cand1][cand2] = float(wmgMap[cand1][cand2])/maxEdge
return wmgMap |
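A small worked example of the weighted majority graph computation, using plain ranking tuples in place of the library's Preference objects (an assumption made for illustration): each edge holds the number of rankings placing one candidate above another minus the reverse count.

```python
import itertools

rankings = [(1, 2, 3), (1, 3, 2), (2, 1, 3)]  # three voters, candidates 1-3
cands = {1, 2, 3}
wmg = {a: {b: 0 for b in cands if b != a} for a in cands}
for r in rankings:
    for a, b in itertools.combinations(r, 2):  # a is ranked above b
        wmg[a][b] += 1
        wmg[b][a] -= 1
print(wmg[1][2])  # 1: candidate 1 is above 2 in two of the three rankings
```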
def _get_jvm_opts(out_file, data):
"""Retrieve Java options, adjusting memory for available cores.
"""
resources = config_utils.get_resources("purple", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3500m"])
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
{"direction": "increase",
"maximum": "30000M",
"magnitude": dd.get_cores(data)}}})
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return jvm_opts | Retrieve Java options, adjusting memory for available cores. | Below is the instruction that describes the task:
### Input:
Retrieve Java options, adjusting memory for available cores.
### Response:
def _get_jvm_opts(out_file, data):
"""Retrieve Java options, adjusting memory for available cores.
"""
resources = config_utils.get_resources("purple", data["config"])
jvm_opts = resources.get("jvm_opts", ["-Xms750m", "-Xmx3500m"])
jvm_opts = config_utils.adjust_opts(jvm_opts, {"algorithm": {"memory_adjust":
{"direction": "increase",
"maximum": "30000M",
"magnitude": dd.get_cores(data)}}})
jvm_opts += broad.get_default_jvm_opts(os.path.dirname(out_file))
return jvm_opts |
def rendered(self):
"""Return (generating first if needed) rendered template."""
if not self._rendered:
template_path = get_template_path(self.raw_template_path)
if template_path:
with open(template_path, 'r') as template:
# os.path.splitext always returns a 2-tuple, so checking the extension directly is equivalent
if os.path.splitext(template_path)[1] == '.j2':
self._rendered = Template(template.read()).render(
context=self.context,
mappings=self.mappings,
name=self.name,
variables=self.resolved_variables
)
else:
self._rendered = template.read()
else:
raise InvalidConfig(
'Could not find template %s' % self.raw_template_path
)
return self._rendered | Return (generating first if needed) rendered template. | Below is the instruction that describes the task:
### Input:
Return (generating first if needed) rendered template.
### Response:
def rendered(self):
"""Return (generating first if needed) rendered template."""
if not self._rendered:
template_path = get_template_path(self.raw_template_path)
if template_path:
with open(template_path, 'r') as template:
# os.path.splitext always returns a 2-tuple, so checking the extension directly is equivalent
if os.path.splitext(template_path)[1] == '.j2':
self._rendered = Template(template.read()).render(
context=self.context,
mappings=self.mappings,
name=self.name,
variables=self.resolved_variables
)
else:
self._rendered = template.read()
else:
raise InvalidConfig(
'Could not find template %s' % self.raw_template_path
)
return self._rendered |
def get(self, sid):
"""
Constructs a MediaContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.message.media.MediaContext
:rtype: twilio.rest.api.v2010.account.message.media.MediaContext
"""
return MediaContext(
self._version,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
sid=sid,
) | Constructs a MediaContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.message.media.MediaContext
:rtype: twilio.rest.api.v2010.account.message.media.MediaContext | Below is the instruction that describes the task:
### Input:
Constructs a MediaContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.message.media.MediaContext
:rtype: twilio.rest.api.v2010.account.message.media.MediaContext
### Response:
def get(self, sid):
"""
Constructs a MediaContext
:param sid: The unique string that identifies this resource
:returns: twilio.rest.api.v2010.account.message.media.MediaContext
:rtype: twilio.rest.api.v2010.account.message.media.MediaContext
"""
return MediaContext(
self._version,
account_sid=self._solution['account_sid'],
message_sid=self._solution['message_sid'],
sid=sid,
) |
def _setAxesNames(self, axisNames):
""" Sets the axesnames, combobox lables and updates the headers. Removes old values first.
The comboLables is the axes name + '-axis'
"""
for col, _ in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, '')
self._axisNames = tuple(axisNames)
self._fullAxisNames = tuple([axName + self.AXIS_POST_FIX for axName in axisNames])
for col, label in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, label) | Sets the axesnames, combobox lables and updates the headers. Removes old values first.
The comboLables is the axes name + '-axis' | Below is the the instruction that describes the task:
### Input:
Sets the axes names, combobox labels and updates the headers. Removes old values first.
The combo labels are the axes name + '-axis'
### Response:
def _setAxesNames(self, axisNames):
""" Sets the axesnames, combobox lables and updates the headers. Removes old values first.
The comboLables is the axes name + '-axis'
"""
for col, _ in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, '')
self._axisNames = tuple(axisNames)
self._fullAxisNames = tuple([axName + self.AXIS_POST_FIX for axName in axisNames])
for col, label in enumerate(self._fullAxisNames, self.COL_FIRST_COMBO):
self._setHeaderLabel(col, label) |
def delete_model(self, model):
"""Delete a record."""
try:
if model.json is None:
return True
record = Record(model.json, model=model)
record.delete()
db.session.commit()
except SQLAlchemyError as e:
if not self.handle_view_exception(e):
flash(_('Failed to delete record. %(error)s', error=str(e)),
category='error')
db.session.rollback()
return False
    return True | Delete a record. | Below is the instruction that describes the task:
### Input:
Delete a record.
### Response:
def delete_model(self, model):
"""Delete a record."""
try:
if model.json is None:
return True
record = Record(model.json, model=model)
record.delete()
db.session.commit()
except SQLAlchemyError as e:
if not self.handle_view_exception(e):
flash(_('Failed to delete record. %(error)s', error=str(e)),
category='error')
db.session.rollback()
return False
return True |
def get_data_info(self):
"""
imports er tables and places data into Data_info data structure
    outlined below:
Data_info - {er_samples: {er_samples.txt info}
er_sites: {er_sites.txt info}
er_locations: {er_locations.txt info}
er_ages: {er_ages.txt info}}
"""
Data_info = {}
data_er_samples = {}
data_er_sites = {}
data_er_locations = {}
data_er_ages = {}
if self.data_model == 3.0:
print(("data model: %1.1f" % (self.data_model)))
Data_info["er_samples"] = []
Data_info["er_sites"] = []
Data_info["er_locations"] = []
Data_info["er_ages"] = []
# self.magic_file may have a full path, but this breaks cb.Contribution
# determine if magic_file exists in WD, and if it doesn't, copy it in
magic_file_real = os.path.realpath(self.magic_file)
magic_file_short = os.path.split(self.magic_file)[1]
WD_file_real = os.path.realpath(
os.path.join(self.WD, magic_file_short))
if magic_file_real == WD_file_real:
fnames = {'measurements': magic_file_short}
else:
# copy measurements file to WD, keeping original name
shutil.copy(magic_file_real, WD_file_real)
fnames = {'measurements': magic_file_short}
self.con = cb.Contribution(self.WD, custom_filenames=fnames, read_tables=[
'measurements', 'specimens', 'samples', 'sites', 'locations', 'criteria', 'ages'])
if 'specimens' in self.con.tables:
spec_container = self.con.tables['specimens']
self.spec_data = spec_container.df
else:
self.con.add_empty_magic_table('specimens')
self.spec_data = self.con.tables['specimens'].df
if 'samples' in self.con.tables:
samp_container = self.con.tables['samples']
samp_container.front_and_backfill(['azimuth', 'dip'])
self.samp_data = samp_container.df
samp_data2 = self.samp_data.rename(
columns=map_magic.samp_magic3_2_magic2_map)
data_er_samples = samp_data2.T.to_dict()
else:
self.con.add_empty_magic_table('samples')
self.samp_data = self.con.tables['samples'].df
if 'sites' in self.con.tables:
site_container = self.con.tables['sites']
self.site_data = site_container.df
if 'age' in self.site_data.columns:
self.site_data = self.site_data[self.site_data['age'].notnull(
)]
age_ids = [col for col in self.site_data.columns if col.startswith(
"age") or col == "site"]
age_data = self.site_data[age_ids].rename(
columns=map_magic.site_magic3_2_magic2_map)
# save this in 2.5 format
er_ages = age_data.to_dict('records')
data_er_ages = {}
for s in er_ages:
s = self.convert_ages_to_calendar_year(s)
data_er_ages[s['er_site_name']] = s
sites = self.site_data.rename(
columns=map_magic.site_magic3_2_magic2_map)
# pick out what is needed by thellier_gui and put in 2.5 format
er_sites = sites.to_dict('records')
data_er_sites = {}
for s in er_sites:
data_er_sites[s['er_site_name']] = s
else:
self.con.add_empty_magic_table('sites')
self.site_data = self.con.tables['sites'].df
if 'locations' in self.con.tables:
location_container = self.con.tables["locations"]
self.loc_data = location_container.df # only need this for saving tables
if self.loc_data['location'].isnull().any():
self.loc_data.replace(
{'location': {None: 'unknown'}}, inplace=True)
self.loc_data.set_index('location', inplace=True)
self.loc_data['location'] = self.loc_data.index
loc2_data = self.loc_data.rename(
columns=map_magic.loc_magic3_2_magic2_map)
data_er_locations = loc2_data.to_dict('index')
else:
self.con.add_empty_magic_table('locations')
self.loc_data = self.con.tables['locations'].df
else: # try 2.5 data model
print(("data model: %1.1f" % (self.data_model)))
try:
data_er_samples = self.read_magic_file(
os.path.join(self.WD, "er_samples.txt"), 'er_sample_name')
except:
print("-W- Can't find er_sample.txt in project directory")
try:
data_er_sites = self.read_magic_file(
os.path.join(self.WD, "er_sites.txt"), 'er_site_name')
except:
print("-W- Can't find er_sites.txt in project directory")
try:
data_er_locations = self.read_magic_file(os.path.join(
self.WD, "er_locations.txt"), 'er_location_name')
except:
print("-W- Can't find er_locations.txt in project directory")
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_sample_name')
except:
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_site_name')
except:
print("-W- Can't find er_ages in project directory")
Data_info["er_samples"] = data_er_samples
Data_info["er_sites"] = data_er_sites
Data_info["er_locations"] = data_er_locations
Data_info["er_ages"] = data_er_ages
return(Data_info) | imports er tables and places data into Data_info data structure
outlined below:
Data_info - {er_samples: {er_samples.txt info}
er_sites: {er_sites.txt info}
er_locations: {er_locations.txt info}
er_ages: {er_ages.txt info}} | Below is the instruction that describes the task:
### Input:
imports er tables and places data into Data_info data structure
outlined below:
Data_info - {er_samples: {er_samples.txt info}
er_sites: {er_sites.txt info}
er_locations: {er_locations.txt info}
er_ages: {er_ages.txt info}}
### Response:
def get_data_info(self):
"""
imports er tables and places data into Data_info data structure
    outlined below:
Data_info - {er_samples: {er_samples.txt info}
er_sites: {er_sites.txt info}
er_locations: {er_locations.txt info}
er_ages: {er_ages.txt info}}
"""
Data_info = {}
data_er_samples = {}
data_er_sites = {}
data_er_locations = {}
data_er_ages = {}
if self.data_model == 3.0:
print(("data model: %1.1f" % (self.data_model)))
Data_info["er_samples"] = []
Data_info["er_sites"] = []
Data_info["er_locations"] = []
Data_info["er_ages"] = []
# self.magic_file may have a full path, but this breaks cb.Contribution
# determine if magic_file exists in WD, and if it doesn't, copy it in
magic_file_real = os.path.realpath(self.magic_file)
magic_file_short = os.path.split(self.magic_file)[1]
WD_file_real = os.path.realpath(
os.path.join(self.WD, magic_file_short))
if magic_file_real == WD_file_real:
fnames = {'measurements': magic_file_short}
else:
# copy measurements file to WD, keeping original name
shutil.copy(magic_file_real, WD_file_real)
fnames = {'measurements': magic_file_short}
self.con = cb.Contribution(self.WD, custom_filenames=fnames, read_tables=[
'measurements', 'specimens', 'samples', 'sites', 'locations', 'criteria', 'ages'])
if 'specimens' in self.con.tables:
spec_container = self.con.tables['specimens']
self.spec_data = spec_container.df
else:
self.con.add_empty_magic_table('specimens')
self.spec_data = self.con.tables['specimens'].df
if 'samples' in self.con.tables:
samp_container = self.con.tables['samples']
samp_container.front_and_backfill(['azimuth', 'dip'])
self.samp_data = samp_container.df
samp_data2 = self.samp_data.rename(
columns=map_magic.samp_magic3_2_magic2_map)
data_er_samples = samp_data2.T.to_dict()
else:
self.con.add_empty_magic_table('samples')
self.samp_data = self.con.tables['samples'].df
if 'sites' in self.con.tables:
site_container = self.con.tables['sites']
self.site_data = site_container.df
if 'age' in self.site_data.columns:
self.site_data = self.site_data[self.site_data['age'].notnull(
)]
age_ids = [col for col in self.site_data.columns if col.startswith(
"age") or col == "site"]
age_data = self.site_data[age_ids].rename(
columns=map_magic.site_magic3_2_magic2_map)
# save this in 2.5 format
er_ages = age_data.to_dict('records')
data_er_ages = {}
for s in er_ages:
s = self.convert_ages_to_calendar_year(s)
data_er_ages[s['er_site_name']] = s
sites = self.site_data.rename(
columns=map_magic.site_magic3_2_magic2_map)
# pick out what is needed by thellier_gui and put in 2.5 format
er_sites = sites.to_dict('records')
data_er_sites = {}
for s in er_sites:
data_er_sites[s['er_site_name']] = s
else:
self.con.add_empty_magic_table('sites')
self.site_data = self.con.tables['sites'].df
if 'locations' in self.con.tables:
location_container = self.con.tables["locations"]
self.loc_data = location_container.df # only need this for saving tables
if self.loc_data['location'].isnull().any():
self.loc_data.replace(
{'location': {None: 'unknown'}}, inplace=True)
self.loc_data.set_index('location', inplace=True)
self.loc_data['location'] = self.loc_data.index
loc2_data = self.loc_data.rename(
columns=map_magic.loc_magic3_2_magic2_map)
data_er_locations = loc2_data.to_dict('index')
else:
self.con.add_empty_magic_table('locations')
self.loc_data = self.con.tables['locations'].df
else: # try 2.5 data model
print(("data model: %1.1f" % (self.data_model)))
try:
data_er_samples = self.read_magic_file(
os.path.join(self.WD, "er_samples.txt"), 'er_sample_name')
except:
print("-W- Can't find er_sample.txt in project directory")
try:
data_er_sites = self.read_magic_file(
os.path.join(self.WD, "er_sites.txt"), 'er_site_name')
except:
print("-W- Can't find er_sites.txt in project directory")
try:
data_er_locations = self.read_magic_file(os.path.join(
self.WD, "er_locations.txt"), 'er_location_name')
except:
print("-W- Can't find er_locations.txt in project directory")
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_sample_name')
except:
try:
data_er_ages = self.read_magic_file(
os.path.join(self.WD, "er_ages.txt"), 'er_site_name')
except:
print("-W- Can't find er_ages in project directory")
Data_info["er_samples"] = data_er_samples
Data_info["er_sites"] = data_er_sites
Data_info["er_locations"] = data_er_locations
Data_info["er_ages"] = data_er_ages
return(Data_info) |
def release():
"check release before upload to PyPI"
sh("paver bdist_wheel")
wheels = path("dist").files("*.whl")
if not wheels:
error("\n*** ERROR: No release wheel was built!")
sys.exit(1)
if any(".dev" in i for i in wheels):
error("\n*** ERROR: You're still using a 'dev' version!")
sys.exit(1)
# Check that source distribution can be built and is complete
print('')
print("~" * 78)
print("TESTING SOURCE BUILD")
sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/"
" && /usr/bin/python setup.py sdist >/dev/null"
" && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"
" | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then"
" echo '^^^ Difference in file lists! ^^^'; false;"
" else true; fi; } 2>&1"
% tuple([project["name"], version] * 4)
)
path("dist/%s-%s" % (project["name"], version)).rmtree()
print("~" * 78)
print('')
print("Created", " ".join([str(i) for i in path("dist").listdir()]))
print("Use 'paver sdist bdist_wheel' to build the release and")
print(" 'twine upload dist/*.{zip,whl}' to upload to PyPI")
print("Use 'paver dist_docs' to prepare an API documentation upload") | check release before upload to PyPI | Below is the the instruction that describes the task:
### Input:
check release before upload to PyPI
### Response:
def release():
"check release before upload to PyPI"
sh("paver bdist_wheel")
wheels = path("dist").files("*.whl")
if not wheels:
error("\n*** ERROR: No release wheel was built!")
sys.exit(1)
if any(".dev" in i for i in wheels):
error("\n*** ERROR: You're still using a 'dev' version!")
sys.exit(1)
# Check that source distribution can be built and is complete
print('')
print("~" * 78)
print("TESTING SOURCE BUILD")
sh( "{ command cd dist/ && unzip -q %s-%s.zip && command cd %s-%s/"
" && /usr/bin/python setup.py sdist >/dev/null"
" && if { unzip -ql ../%s-%s.zip; unzip -ql dist/%s-%s.zip; }"
" | cut -b26- | sort | uniq -c| egrep -v '^ +2 +' ; then"
" echo '^^^ Difference in file lists! ^^^'; false;"
" else true; fi; } 2>&1"
% tuple([project["name"], version] * 4)
)
path("dist/%s-%s" % (project["name"], version)).rmtree()
print("~" * 78)
print('')
print("Created", " ".join([str(i) for i in path("dist").listdir()]))
print("Use 'paver sdist bdist_wheel' to build the release and")
print(" 'twine upload dist/*.{zip,whl}' to upload to PyPI")
print("Use 'paver dist_docs' to prepare an API documentation upload") |
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
interval=1, reloader=False, **kargs):
""" Runs bottle as a web server. """
app = app if app else default_app()
quiet = bool(kargs.get('quiet', False))
# Instantiate server, if it is a class instead of an instance
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of WSGIAdapter")
if not quiet and isinstance(server, ServerAdapter): # pragma: no cover
if not reloader or os.environ.get('BOTTLE_CHILD') == 'true':
print("Bottle server starting up (using %s)..." % repr(server))
print("Listening on http://%s:%d/" % (server.host, server.port))
print("Use Ctrl-C to quit.")
print()
else:
print("Bottle auto reloader starting up...")
try:
if reloader and interval:
reloader_run(server, app, interval)
else:
server.run(app)
except KeyboardInterrupt:
if not quiet: # pragma: no cover
print("Shutting Down...") | Runs bottle as a web server. | Below is the the instruction that describes the task:
### Input:
Runs bottle as a web server.
### Response:
def run(app=None, server=WSGIRefServer, host='127.0.0.1', port=8080,
interval=1, reloader=False, **kargs):
""" Runs bottle as a web server. """
app = app if app else default_app()
quiet = bool(kargs.get('quiet', False))
# Instantiate server, if it is a class instead of an instance
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise RuntimeError("Server must be a subclass of WSGIAdapter")
if not quiet and isinstance(server, ServerAdapter): # pragma: no cover
if not reloader or os.environ.get('BOTTLE_CHILD') == 'true':
print("Bottle server starting up (using %s)..." % repr(server))
print("Listening on http://%s:%d/" % (server.host, server.port))
print("Use Ctrl-C to quit.")
print()
else:
print("Bottle auto reloader starting up...")
try:
if reloader and interval:
reloader_run(server, app, interval)
else:
server.run(app)
except KeyboardInterrupt:
if not quiet: # pragma: no cover
print("Shutting Down...") |
def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment) | An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist. | Below is the instruction that describes the task:
### Input:
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
### Response:
def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment) |
def _dumps(self, obj):
""" If :prop:serialized is True, @obj will be serialized
using :prop:serializer
"""
if not self.serialized:
return obj
return self.serializer.dumps(obj) | If :prop:serialized is True, @obj will be serialized
using :prop:serializer | Below is the instruction that describes the task:
### Input:
If :prop:serialized is True, @obj will be serialized
using :prop:serializer
### Response:
def _dumps(self, obj):
""" If :prop:serialized is True, @obj will be serialized
using :prop:serializer
"""
if not self.serialized:
return obj
return self.serializer.dumps(obj) |
def gridOn(self): # noqa: N802
"""Control whether the gridline is drawn for this tick."""
return (self._gridOn and (self._has_default_loc()
            or transforms.interval_contains(self.get_view_interval(), self.get_loc()))) | Control whether the gridline is drawn for this tick. | Below is the instruction that describes the task:
### Input:
Control whether the gridline is drawn for this tick.
### Response:
def gridOn(self): # noqa: N802
"""Control whether the gridline is drawn for this tick."""
return (self._gridOn and (self._has_default_loc()
or transforms.interval_contains(self.get_view_interval(), self.get_loc()))) |
def decode(self, bytes, index=None, raw=False):
"""decode(bytes[[, index], raw=False]) -> value1, ..., valueN
Decodes the given sequence of bytes according to this Array's
element type.
If the optional `index` parameter is an integer or slice, then
only the element(s) at the specified position(s) will be
decoded and returned.
"""
if index is None:
index = slice(0, self.nelems)
if type(index) is slice:
step = 1 if index.step is None else index.step
indices = xrange(index.start, index.stop, step)
result = [ self.decodeElem(bytes, n, raw) for n in indices ]
else:
result = self.decodeElem(bytes, index, raw)
return result | decode(bytes[[, index], raw=False]) -> value1, ..., valueN
Decodes the given sequence of bytes according to this Array's
element type.
If the optional `index` parameter is an integer or slice, then
only the element(s) at the specified position(s) will be
decoded and returned. | Below is the instruction that describes the task:
### Input:
decode(bytes[[, index], raw=False]) -> value1, ..., valueN
Decodes the given sequence of bytes according to this Array's
element type.
If the optional `index` parameter is an integer or slice, then
only the element(s) at the specified position(s) will be
decoded and returned.
### Response:
def decode(self, bytes, index=None, raw=False):
"""decode(bytes[[, index], raw=False]) -> value1, ..., valueN
Decodes the given sequence of bytes according to this Array's
element type.
If the optional `index` parameter is an integer or slice, then
only the element(s) at the specified position(s) will be
decoded and returned.
"""
if index is None:
index = slice(0, self.nelems)
if type(index) is slice:
step = 1 if index.step is None else index.step
indices = xrange(index.start, index.stop, step)
result = [ self.decodeElem(bytes, n, raw) for n in indices ]
else:
result = self.decodeElem(bytes, index, raw)
return result |
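The index handling above (None for everything, a slice for a range, an int for one element) can be seen in a self-contained analogue; this sketch assumes hypothetical big-endian uint16 elements rather than the class's real element type:

import struct

def decode_elems(data, nelems, index=None):
    # Simplified stand-in for Array.decode with '>H' (uint16) elements.
    if index is None:
        index = slice(0, nelems)
    if isinstance(index, slice):
        return [struct.unpack_from('>H', data, 2 * n)[0]
                for n in range(*index.indices(nelems))]
    return struct.unpack_from('>H', data, 2 * index)[0]

buf = struct.pack('>4H', 10, 20, 30, 40)
print(decode_elems(buf, 4))               # [10, 20, 30, 40]
print(decode_elems(buf, 4, slice(1, 3)))  # [20, 30]
print(decode_elems(buf, 4, 2))            # 30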
def ask_bool(question: str, default: bool = True) -> bool:
"""Asks a question yes no style"""
default_q = "Y/n" if default else "y/N"
answer = input("{0} [{1}]: ".format(question, default_q))
lower = answer.lower()
if not lower:
return default
    return lower == "y" | Asks a question, yes/no style | Below is the instruction that describes the task:
### Input:
Asks a question, yes/no style
### Response:
def ask_bool(question: str, default: bool = True) -> bool:
"""Asks a question yes no style"""
default_q = "Y/n" if default else "y/N"
answer = input("{0} [{1}]: ".format(question, default_q))
lower = answer.lower()
if not lower:
return default
return lower == "y" |
def c(*args):
r"""Imitates the ``c`` function from R.
Since this whole library is aimed at re-creating in
Python what R has already done so well, the ``c`` function was created to
wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R,
this works with scalars, iterables, and any mix therein.
Note that using the ``c`` function on multi-nested lists or iterables
will fail!
Examples
--------
Using ``c`` with varargs will yield a single array:
>>> c(1, 2, 3, 4)
array([1, 2, 3, 4])
Using ``c`` with nested lists and scalars will also yield a single array:
>>> c([1, 2], 4, c(5, 4))
array([1, 2, 4, 5, 4])
However, using ``c`` with multi-level lists will fail!
>>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP
ValueError: all the input arrays must have same number of dimensions
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
"""
# R returns NULL for this
if not args:
return None
# just an array of len 1
if len(args) == 1:
element = args[0]
# if it's iterable, make it an array
if is_iterable(element):
return np.asarray(element)
# otherwise it's not iterable, put it in an array
return np.asarray([element])
# np.concat all. This can be slow, as noted by numerous threads on
# numpy concat efficiency, however an alternative using recursive
# yields was tested and performed far worse:
#
# >>> def timeit(func, ntimes, *args):
# ... times = []
# ... for i in range(ntimes):
# ... start = time.time()
# ... func(*args)
# ... times.append(time.time() - start)
# ... arr = np.asarray(times)
# ... print("%s (%i times) - Mean: %.5f sec, "
# ... "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes,
# ... arr.mean(), arr.min(),
# ... arr.max()))
# >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)]
# >>> timeit(c1, 100, *y)
# c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065
# >>> timeit(c2, 100, *y)
# c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115
#
# So we stick with c1, which is this variant.
return np.concatenate([a if is_iterable(a) else [a] for a in args]) | r"""Imitates the ``c`` function from R.
Since this whole library is aimed at re-creating in
Python what R has already done so well, the ``c`` function was created to
wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R,
this works with scalars, iterables, and any mix therein.
Note that using the ``c`` function on multi-nested lists or iterables
will fail!
Examples
--------
Using ``c`` with varargs will yield a single array:
>>> c(1, 2, 3, 4)
array([1, 2, 3, 4])
Using ``c`` with nested lists and scalars will also yield a single array:
>>> c([1, 2], 4, c(5, 4))
array([1, 2, 4, 5, 4])
However, using ``c`` with multi-level lists will fail!
>>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP
ValueError: all the input arrays must have same number of dimensions
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html | Below is the instruction that describes the task:
### Input:
r"""Imitates the ``c`` function from R.
Since this whole library is aimed at re-creating in
Python what R has already done so well, the ``c`` function was created to
wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R,
this works with scalars, iterables, and any mix therein.
Note that using the ``c`` function on multi-nested lists or iterables
will fail!
Examples
--------
Using ``c`` with varargs will yield a single array:
>>> c(1, 2, 3, 4)
array([1, 2, 3, 4])
Using ``c`` with nested lists and scalars will also yield a single array:
>>> c([1, 2], 4, c(5, 4))
array([1, 2, 4, 5, 4])
However, using ``c`` with multi-level lists will fail!
>>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP
ValueError: all the input arrays must have same number of dimensions
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
### Response:
def c(*args):
r"""Imitates the ``c`` function from R.
Since this whole library is aimed at re-creating in
Python what R has already done so well, the ``c`` function was created to
wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R,
this works with scalars, iterables, and any mix therein.
Note that using the ``c`` function on multi-nested lists or iterables
will fail!
Examples
--------
Using ``c`` with varargs will yield a single array:
>>> c(1, 2, 3, 4)
array([1, 2, 3, 4])
Using ``c`` with nested lists and scalars will also yield a single array:
>>> c([1, 2], 4, c(5, 4))
array([1, 2, 4, 5, 4])
However, using ``c`` with multi-level lists will fail!
>>> c([1, 2, 3], [[1, 2]]) # doctest: +SKIP
ValueError: all the input arrays must have same number of dimensions
References
----------
.. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
"""
# R returns NULL for this
if not args:
return None
# just an array of len 1
if len(args) == 1:
element = args[0]
# if it's iterable, make it an array
if is_iterable(element):
return np.asarray(element)
# otherwise it's not iterable, put it in an array
return np.asarray([element])
# np.concat all. This can be slow, as noted by numerous threads on
# numpy concat efficiency, however an alternative using recursive
# yields was tested and performed far worse:
#
# >>> def timeit(func, ntimes, *args):
# ... times = []
# ... for i in range(ntimes):
# ... start = time.time()
# ... func(*args)
# ... times.append(time.time() - start)
# ... arr = np.asarray(times)
# ... print("%s (%i times) - Mean: %.5f sec, "
# ... "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes,
# ... arr.mean(), arr.min(),
# ... arr.max()))
# >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)]
# >>> timeit(c1, 100, *y)
# c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065
# >>> timeit(c2, 100, *y)
# c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115
#
# So we stick with c1, which is this variant.
return np.concatenate([a if is_iterable(a) else [a] for a in args]) |
def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
"""Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg | Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict | Below is the instruction that describes the task:
### Input:
Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
### Response:
def get_links_for_a_scheduler(self, pollers, reactionners, brokers):
"""Get a configuration dictionary with pollers, reactionners and brokers links
for a scheduler
:return: dict containing pollers, reactionners and brokers links (key is satellite id)
:rtype: dict
"""
# Create void satellite links
cfg = {
'pollers': {},
'reactionners': {},
'brokers': {},
}
# Our self.daemons are only identifiers... that we use to fill the satellite links
try:
for poller in self.pollers + self.get_potential_satellites_by_type(pollers, "poller"):
if poller in pollers:
poller = pollers[poller]
cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
for reactionner in self.reactionners + self.get_potential_satellites_by_type(
reactionners, "reactionner"):
if reactionner in reactionners:
reactionner = reactionners[reactionner]
cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
for broker in self.brokers + self.get_potential_satellites_by_type(brokers, "broker"):
if broker in brokers:
broker = brokers[broker]
cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
except Exception as exp: # pylint: disable=broad-except
logger.exception("realm.get_links_for_a_scheduler: %s", exp)
# for poller in self.get_potential_satellites_by_type(pollers, "poller"):
# logger.info("Poller: %s", poller)
# cfg['pollers'][poller.uuid] = poller.give_satellite_cfg()
#
# for reactionner in self.get_potential_satellites_by_type(reactionners, "reactionner"):
# cfg['reactionners'][reactionner.uuid] = reactionner.give_satellite_cfg()
#
# for broker in self.get_potential_satellites_by_type(brokers, "broker"):
# cfg['brokers'][broker.uuid] = broker.give_satellite_cfg()
return cfg |
def _listdir(self, path):
"""Return the list of files in a directory, assuming that our user can read it."""
if self._user is None:
return os.listdir(path)
else:
args = self._build_cmdline(['/bin/ls', '-1A', path])
        return subprocess.check_output(args, stderr=DEVNULL).decode('utf-8', errors='ignore').split('\n') | Return the list of files in a directory, assuming that our user can read it. | Below is the instruction that describes the task:
### Input:
Return the list of files in a directory, assuming that our user can read it.
### Response:
def _listdir(self, path):
"""Return the list of files in a directory, assuming that our user can read it."""
if self._user is None:
return os.listdir(path)
else:
args = self._build_cmdline(['/bin/ls', '-1A', path])
return subprocess.check_output(args, stderr=DEVNULL).decode('utf-8', errors='ignore').split('\n') |
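`_build_cmdline` is not shown in this row; a hedged sketch of what the privileged branch might look like, assuming it is a plain `sudo -u <user>` prefix (the real helper may differ):

import subprocess

def build_cmdline(user, args):
    # Hypothetical: prefix the command so it runs as the target user.
    return ['sudo', '-u', user] + args if user else list(args)

cmd = build_cmdline('backup', ['/bin/ls', '-1A', '/var/backups'])
out = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
names = out.decode('utf-8', errors='ignore').split('\n')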
def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs): # pragma: no cover
"""
Wraps a target with queues replacing stdout and stderr
"""
import sys
sys.stdout = IOQueue(q_stdout)
sys.stderr = IOQueue(q_stderr)
try:
target(*args, **kwargs)
    except Exception:
        if not robust:
            s = 'Error in tab\n' + traceback.format_exc()
            logger = daiquiri.getLogger(name)
            logger.error(s)
            q_error.put(name)
        # Always re-raise so the parent process sees the failure.
        raise | Wraps a target with queues replacing stdout and stderr | Below is the instruction that describes the task:
### Input:
Wraps a target with queues replacing stdout and stderr
### Response:
def wrapped_target(target, q_stdout, q_stderr, q_error, robust, name, *args, **kwargs): # pragma: no cover
"""
Wraps a target with queues replacing stdout and stderr
"""
import sys
sys.stdout = IOQueue(q_stdout)
sys.stderr = IOQueue(q_stderr)
try:
target(*args, **kwargs)
    except Exception:
        if not robust:
            s = 'Error in tab\n' + traceback.format_exc()
            logger = daiquiri.getLogger(name)
            logger.error(s)
            q_error.put(name)
        # Always re-raise so the parent process sees the failure.
        raise |
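The `IOQueue` class used above is not part of this row; a minimal sketch of the file-like adapter it would need to be, forwarding writes to a `multiprocessing.Queue` so the parent process can collect child output:

import multiprocessing

class IOQueue(object):
    """Hypothetical file-like wrapper that pushes writes onto a queue."""
    def __init__(self, queue):
        self._queue = queue

    def write(self, text):
        if text:
            self._queue.put(text)

    def flush(self):
        pass  # nothing buffered locally

q = multiprocessing.Queue()
stream = IOQueue(q)
stream.write('hello from a child process\n')
print(q.get())  # 'hello from a child process\n'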
def group_dict_by_value(d: dict) -> dict:
"""
Group a dictionary by values.
Parameters
----------
d : dict
Input dictionary
Returns
-------
dict
Output dictionary. The keys are the values of the initial dictionary
and the values are given by a list of keys corresponding to the value.
>>> group_dict_by_value({2: 3, 1: 2, 3: 1})
{3: [2], 2: [1], 1: [3]}
>>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3})
{3: [2, 12], 2: [1], 1: [3, 10]}
"""
d_out = {}
for k, v in d.items():
if v in d_out:
d_out[v].append(k)
else:
d_out[v] = [k]
return d_out | Group a dictionary by values.
Parameters
----------
d : dict
Input dictionary
Returns
-------
dict
Output dictionary. The keys are the values of the initial dictionary
and the values are given by a list of keys corresponding to the value.
>>> group_dict_by_value({2: 3, 1: 2, 3: 1})
{3: [2], 2: [1], 1: [3]}
>>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3})
{3: [2, 12], 2: [1], 1: [3, 10]} | Below is the instruction that describes the task:
### Input:
Group a dictionary by values.
Parameters
----------
d : dict
Input dictionary
Returns
-------
dict
Output dictionary. The keys are the values of the initial dictionary
and the values are given by a list of keys corresponding to the value.
>>> group_dict_by_value({2: 3, 1: 2, 3: 1})
{3: [2], 2: [1], 1: [3]}
>>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3})
{3: [2, 12], 2: [1], 1: [3, 10]}
### Response:
def group_dict_by_value(d: dict) -> dict:
"""
Group a dictionary by values.
Parameters
----------
d : dict
Input dictionary
Returns
-------
dict
Output dictionary. The keys are the values of the initial dictionary
and the values are given by a list of keys corresponding to the value.
>>> group_dict_by_value({2: 3, 1: 2, 3: 1})
{3: [2], 2: [1], 1: [3]}
>>> group_dict_by_value({2: 3, 1: 2, 3: 1, 10:1, 12: 3})
{3: [2, 12], 2: [1], 1: [3, 10]}
"""
d_out = {}
for k, v in d.items():
if v in d_out:
d_out[v].append(k)
else:
d_out[v] = [k]
return d_out |
def get_prices(self, date: str, currency: str) -> List[PriceModel]:
""" Fetches all the prices for the given arguments """
from .repositories import PriceRepository
session = self.session
repo = PriceRepository(session)
query = repo.query
if date:
query = query.filter(dal.Price.date == date)
if currency:
query = query.filter(dal.Price.currency == currency)
# Sort by symbol.
query = query.order_by(dal.Price.namespace, dal.Price.symbol)
price_entities = query.all()
mapper = mappers.PriceMapper()
result = []
for entity in price_entities:
model = mapper.map_entity(entity)
result.append(model)
    return result | Fetches all the prices for the given arguments | Below is the instruction that describes the task:
### Input:
Fetches all the prices for the given arguments
### Response:
def get_prices(self, date: str, currency: str) -> List[PriceModel]:
""" Fetches all the prices for the given arguments """
from .repositories import PriceRepository
session = self.session
repo = PriceRepository(session)
query = repo.query
if date:
query = query.filter(dal.Price.date == date)
if currency:
query = query.filter(dal.Price.currency == currency)
# Sort by symbol.
query = query.order_by(dal.Price.namespace, dal.Price.symbol)
price_entities = query.all()
mapper = mappers.PriceMapper()
result = []
for entity in price_entities:
model = mapper.map_entity(entity)
result.append(model)
return result |
def frequency_measurement(shell_ctx):
""" measures how many times a user has used this program in the last calendar week """
freq = update_frequency(shell_ctx)
count = 0
base = datetime.datetime.utcnow()
date_list = [base - datetime.timedelta(days=x) for x in range(0, DAYS_AGO)]
for day in date_list:
count += 1 if freq.get(day_format(day), 0) > 0 else 0
    return count | measures how many times a user has used this program in the last calendar week | Below is the instruction that describes the task:
### Input:
measures how many times a user has used this program in the last calendar week
### Response:
def frequency_measurement(shell_ctx):
""" measures how many times a user has used this program in the last calendar week """
freq = update_frequency(shell_ctx)
count = 0
base = datetime.datetime.utcnow()
date_list = [base - datetime.timedelta(days=x) for x in range(0, DAYS_AGO)]
for day in date_list:
count += 1 if freq.get(day_format(day), 0) > 0 else 0
return count |
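The counting logic above reduces to "on how many of the last `DAYS_AGO` days was usage recorded at least once". A self-contained sketch with a hypothetical `day_format` and an in-memory frequency map (the real helpers are defined elsewhere):

import datetime

DAYS_AGO = 7

def day_format(day):
    # Hypothetical key format; the real helper may differ.
    return day.strftime('%Y-%m-%d')

base = datetime.datetime.utcnow()
freq = {day_format(base): 3}  # used three times today
date_list = [base - datetime.timedelta(days=x) for x in range(0, DAYS_AGO)]
count = sum(1 for day in date_list if freq.get(day_format(day), 0) > 0)
print(count)  # 1 -> active on one of the last 7 days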
def set_highlighter(self, highlighter):
"""
Sets given highlighter as the current document highlighter.
:param highlighter: Highlighter.
:type highlighter: QSyntaxHighlighter
:return: Method success.
:rtype: bool
"""
if not issubclass(highlighter.__class__, QSyntaxHighlighter):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' is not a 'QSyntaxHighlighter' subclass!".format(
self.__class__.__name__, highlighter))
if self.__highlighter:
self.remove_highlighter()
LOGGER.debug("> Setting '{0}' highlighter.".format(highlighter))
self.__highlighter = highlighter
return True | Sets given highlighter as the current document highlighter.
:param highlighter: Highlighter.
:type highlighter: QSyntaxHighlighter
:return: Method success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Sets given highlighter as the current document highlighter.
:param highlighter: Highlighter.
:type highlighter: QSyntaxHighlighter
:return: Method success.
:rtype: bool
### Response:
def set_highlighter(self, highlighter):
"""
Sets given highlighter as the current document highlighter.
:param highlighter: Highlighter.
:type highlighter: QSyntaxHighlighter
:return: Method success.
:rtype: bool
"""
if not issubclass(highlighter.__class__, QSyntaxHighlighter):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' is not a 'QSyntaxHighlighter' subclass!".format(
self.__class__.__name__, highlighter))
if self.__highlighter:
self.remove_highlighter()
LOGGER.debug("> Setting '{0}' highlighter.".format(highlighter))
self.__highlighter = highlighter
return True |
def loadlabelfont(self):
"""Auxiliary method to load font if not yet done."""
    if self.labelfont is None:
        self.labelfont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) | Auxiliary method to load font if not yet done. | Below is the instruction that describes the task:
### Input:
Auxiliary method to load font if not yet done.
### Response:
def loadlabelfont(self):
"""Auxiliary method to load font if not yet done."""
    if self.labelfont is None:
self.labelfont = imft.load_path(os.path.join(fontsdir, "courR10.pil")) |
def has_textonly_pdf():
"""Does Tesseract have textonly_pdf capability?
Available in v4.00.00alpha since January 2017. Best to
parse the parameter list
"""
args_tess = ['tesseract', '--print-parameters', 'pdf']
params = ''
try:
params = check_output(args_tess, universal_newlines=True, stderr=STDOUT)
except CalledProcessError as e:
print("Could not --print-parameters from tesseract", file=sys.stderr)
raise MissingDependencyError from e
if 'textonly_pdf' in params:
return True
return False | Does Tesseract have textonly_pdf capability?
Available in v4.00.00alpha since January 2017. Best to
parse the parameter list | Below is the instruction that describes the task:
### Input:
Does Tesseract have textonly_pdf capability?
Available in v4.00.00alpha since January 2017. Best to
parse the parameter list
### Response:
def has_textonly_pdf():
"""Does Tesseract have textonly_pdf capability?
Available in v4.00.00alpha since January 2017. Best to
parse the parameter list
"""
args_tess = ['tesseract', '--print-parameters', 'pdf']
params = ''
try:
params = check_output(args_tess, universal_newlines=True, stderr=STDOUT)
except CalledProcessError as e:
print("Could not --print-parameters from tesseract", file=sys.stderr)
raise MissingDependencyError from e
if 'textonly_pdf' in params:
return True
return False |
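Typical use of the probe above is to add the config variable only when supported; `-c var=value` is Tesseract's general way of setting parameters on the command line. A sketch with hypothetical file names, relying on the `has_textonly_pdf` function defined in this row:

args = ['tesseract', 'page.png', 'page', 'pdf']
if has_textonly_pdf():
    # Insert the config variable before the 'pdf' output spec.
    args[3:3] = ['-c', 'textonly_pdf=1']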
def fasta_motif_scan( fasta_fname, input_tuples, regex_ready=False, allow_overlaps=True, file_buffer=False, molecule='dna' ):
"""
fasta_fname = string path to FASTA file
input_tuples = tuple containing (1) motif sequence, (2) contig name, (3) start position*, (4) end position, (5) strand to search
*start is expected to be 0-base, end is expected to be 1-base
"""
###################
# validity checks #
###################
if not isinstance( fasta_fname, six.string_types ):
raise TypeError( "In fasta_motif_scan, fasta_fname must be a string!" )
elif not isinstance( molecule, six.string_types ):
raise TypeError( "In fasta_motif_scan, molecule must be a string!" )
elif isinstance( input_tuples, six.string_types ) or not isinstance( input_tuples, tuple ):
raise( TypeError( "In fasta_motif_scan, input_tuples should be a tuple!" ) )
elif not type( regex_ready ) is bool:
raise( TypeError( "In fasta_motif_scan, regex_ready should be a bool!" ) )
elif not type( file_buffer ) is bool:
raise( TypeError( "In fasta_motif_scan, file_buffer should be a bool!" ) )
#######################################
# deal with temporary files if needed #
#######################################
if file_buffer == True:
TMPFILE = tempfile.NamedTemporaryFile( delete = False )
return_name = TMPFILE.name
#########################
# setup some local vars #
#########################
motif_seq, contig, start, end, strand = input_tuples
if regex_ready == False:
if strand == '+':
regex_compiled = regex.compile( make_degenerate_regex( motif_seq ) )
elif strand == '-':
regex_compiled = regex.compile( make_degenerate_regex( rev_comp( motif_seq, molecule ) ) )
else:
if strand == '+':
regex_compiled = regex.compile( motif_seq )
elif strand == '-':
regex_compiled = regex.compile( rev_comp( motif_seq, molecule ) )
#######
# run #
#######
site_count = 0
if file_buffer == True:
site_list = None
else:
site_list = []
with pyfaidx.Fasta( fasta_fname, as_raw=True ) as FAIDX:
sequence = str( FAIDX[contig][start:end] ).upper()
for m in regex_compiled.finditer( sequence, overlapped=allow_overlaps ):
# self, motif, contig, positionStart, strand, regexMatch, molecule='dna'
tmp = SequenceMotif( motif_seq, contig, start, strand, m, molecule )
site_count += 1
if file_buffer == True:
TMPFILE.write( "%s\n" % ( tmp ) )
else:
site_list.append( tmp )
if file_buffer == True:
return( input_tuples, None, return_name, site_count )
else:
return ( input_tuples, site_list, None, site_count ) | fasta_fname = string path to FASTA file
input_tuples = tuple containing (1) motif sequence, (2) contig name, (3) start position*, (4) end position, (5) strand to search
*start is expected to be 0-base, end is expected to be 1-base | Below is the instruction that describes the task:
### Input:
fasta_fname = string path to FASTA file
input_tuples = tuple containing (1) motif sequence, (2) contig name, (3) start position*, (4) end position, (5) strand to search
*start is expected to be 0-base, end is expected to be 1-base
### Response:
def fasta_motif_scan( fasta_fname, input_tuples, regex_ready=False, allow_overlaps=True, file_buffer=False, molecule='dna' ):
"""
fasta_fname = string path to FASTA file
input_tuples = tuple containing (1) motif sequence, (2) contig name, (3) start position*, (4) end position, (5) strand to search
*start is expected to be 0-base, end is expected to be 1-base
"""
###################
# validity checks #
###################
if not isinstance( fasta_fname, six.string_types ):
raise TypeError( "In fasta_motif_scan, fasta_fname must be a string!" )
elif not isinstance( molecule, six.string_types ):
raise TypeError( "In fasta_motif_scan, molecule must be a string!" )
elif isinstance( input_tuples, six.string_types ) or not isinstance( input_tuples, tuple ):
raise( TypeError( "In fasta_motif_scan, input_tuples should be a tuple!" ) )
elif not type( regex_ready ) is bool:
raise( TypeError( "In fasta_motif_scan, regex_ready should be a bool!" ) )
elif not type( file_buffer ) is bool:
raise( TypeError( "In fasta_motif_scan, file_buffer should be a bool!" ) )
#######################################
# deal with temporary files if needed #
#######################################
if file_buffer == True:
TMPFILE = tempfile.NamedTemporaryFile( delete = False )
return_name = TMPFILE.name
#########################
# setup some local vars #
#########################
motif_seq, contig, start, end, strand = input_tuples
if regex_ready == False:
if strand == '+':
regex_compiled = regex.compile( make_degenerate_regex( motif_seq ) )
elif strand == '-':
regex_compiled = regex.compile( make_degenerate_regex( rev_comp( motif_seq, molecule ) ) )
else:
if strand == '+':
regex_compiled = regex.compile( motif_seq )
elif strand == '-':
regex_compiled = regex.compile( rev_comp( motif_seq, molecule ) )
#######
# run #
#######
site_count = 0
if file_buffer == True:
site_list = None
else:
site_list = []
with pyfaidx.Fasta( fasta_fname, as_raw=True ) as FAIDX:
sequence = str( FAIDX[contig][start:end] ).upper()
for m in regex_compiled.finditer( sequence, overlapped=allow_overlaps ):
# self, motif, contig, positionStart, strand, regexMatch, molecule='dna'
tmp = SequenceMotif( motif_seq, contig, start, strand, m, molecule )
site_count += 1
if file_buffer == True:
TMPFILE.write( "%s\n" % ( tmp ) )
else:
site_list.append( tmp )
if file_buffer == True:
return( input_tuples, None, return_name, site_count )
else:
return ( input_tuples, site_list, None, site_count ) |
def principal_direction_extents(neurites, neurite_type=NeuriteType.all, direction=0):
'''Principal direction extent of neurites in neurons'''
def _pde(neurite):
'''Get the PDE of a single neurite'''
# Get the X, Y,Z coordinates of the points in each section
points = neurite.points[:, :3]
return morphmath.principal_direction_extent(points)[direction]
    return map(_pde, iter_neurites(neurites, filt=is_type(neurite_type))) | Principal direction extent of neurites in neurons | Below is the instruction that describes the task:
### Input:
Principal direction extent of neurites in neurons
### Response:
def principal_direction_extents(neurites, neurite_type=NeuriteType.all, direction=0):
'''Principal direction extent of neurites in neurons'''
def _pde(neurite):
'''Get the PDE of a single neurite'''
# Get the X, Y,Z coordinates of the points in each section
points = neurite.points[:, :3]
return morphmath.principal_direction_extent(points)[direction]
return map(_pde, iter_neurites(neurites, filt=is_type(neurite_type))) |
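`morphmath.principal_direction_extent` is not included in this row. One plausible implementation, sketched under the assumption that the extent is the spread of the points projected onto the eigenvectors of their covariance matrix (the library's version may order the directions differently):

import numpy as np

def principal_direction_extent(points):
    """Extent of a point cloud along each principal (eigen)direction."""
    centered = points - points.mean(axis=0)
    # Eigenvectors of the covariance matrix give the principal directions.
    _, eigenvectors = np.linalg.eigh(np.cov(centered.T))
    projections = centered.dot(eigenvectors)
    extents = projections.max(axis=0) - projections.min(axis=0)
    return np.sort(extents)[::-1]  # index 0 = dominant direction

pts = np.array([[0., 0., 0.], [10., 1., 0.], [5., 0.5, 0.2]])
print(principal_direction_extent(pts))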
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = request.build_absolute_uri(callback)
args = {
'client_id': self.provider.consumer_key,
'redirect_uri': callback,
'response_type': 'code',
}
state = self.get_application_state(request, callback)
if state is not None:
args['state'] = state
request.session[self.session_key] = state
    return args | Get request parameters for redirect url. | Below is the instruction that describes the task:
### Input:
Get request parameters for redirect url.
### Response:
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = request.build_absolute_uri(callback)
args = {
'client_id': self.provider.consumer_key,
'redirect_uri': callback,
'response_type': 'code',
}
state = self.get_application_state(request, callback)
if state is not None:
args['state'] = state
request.session[self.session_key] = state
return args |
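The returned dict is typically urlencoded onto the provider's authorization endpoint; a minimal sketch with hypothetical credentials and endpoint:

from urllib.parse import urlencode

args = {
    'client_id': 'my-client-id',                     # hypothetical
    'redirect_uri': 'https://example.com/callback',  # hypothetical
    'response_type': 'code',
    'state': 'opaque-csrf-token',
}
authorization_url = 'https://provider.example.com/oauth/authorize?' + urlencode(args)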
def is_installed(gemname, version=None):
"""Check if a gem is installed."""
cmdline = ['gem', 'list', '-i', gemname]
if version:
cmdline.extend(['-v', version])
try:
subprocess.check_output(cmdline, shell=False)
return True
except (OSError, subprocess.CalledProcessError) as err:
        if getattr(err, 'returncode', None) == 1:
return False
else:
raise error.ButcherError(
'Failure running gem. Error was: %s. Output: %s', err,
                err.output) | Check if a gem is installed. | Below is the instruction that describes the task:
### Input:
Check if a gem is installed.
### Response:
def is_installed(gemname, version=None):
"""Check if a gem is installed."""
cmdline = ['gem', 'list', '-i', gemname]
if version:
cmdline.extend(['-v', version])
try:
subprocess.check_output(cmdline, shell=False)
return True
except (OSError, subprocess.CalledProcessError) as err:
        if getattr(err, 'returncode', None) == 1:
return False
else:
raise error.ButcherError(
'Failure running gem. Error was: %s. Output: %s', err,
err.output) |