code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def create_environment(preamble):
"""
Create a dictionary of variables obtained from the preamble of
the task file and the environment the program is running on.
"""
environment = copy.deepcopy(os.environ)
for line in preamble:
logging.debug(line)
if "=" in line and not line.startswith("#"):
tmp = line.split("=")
key = tmp[0].strip()
value = tmp[1].strip()
logging.debug(
"Found variable {} with value {}".format(key, value))
environment.update({key: value})
logging.debug("Env {}".format(environment))
return environment | Create a dictionary of variables obtained from the preamble of
the task file and the environment the program is running on. | Below is the the instruction that describes the task:
### Input:
Create a dictionary of variables obtained from the preamble of
the task file and the environment the program is running on.
### Response:
def create_environment(preamble):
"""
Create a dictionary of variables obtained from the preamble of
the task file and the environment the program is running on.
"""
environment = copy.deepcopy(os.environ)
for line in preamble:
logging.debug(line)
if "=" in line and not line.startswith("#"):
tmp = line.split("=")
key = tmp[0].strip()
value = tmp[1].strip()
logging.debug(
"Found variable {} with value {}".format(key, value))
environment.update({key: value})
logging.debug("Env {}".format(environment))
return environment |
def contains_index(self):
"""Returns the *line number* that has the CONTAINS keyword separating the
member and type definitions from the subroutines and functions."""
if self._contains_index is None:
max_t = 0
for tkey in self.types:
if self.types[tkey].end > max_t and not self.types[tkey].embedded:
max_t = self.types[tkey].end
#Now we have a good first guess. Continue to iterate the next few lines
#of the the refstring until we find a solid "CONTAINS" keyword. If there
#are no types in the module, then max_t will be zero and we don't have
#the danger of running into a contains keyword as part of a type. In that
#case we can just keep going until we find it.
i = 0
start = self.linenum(max_t)[0]
max_i = 10 if max_t > 0 else len(self._lines)
while (self._contains_index is None and i < max_i
and start + i < len(self._lines)):
iline = self._lines[start + i].lower()
if "contains" in iline:
if '!' not in iline or (iline.index('!') > iline.index("contains")):
self._contains_index = start + i
i += 1
if self._contains_index is None:
#There must not be a CONTAINS keyword in the module
self._contains_index = len(self._lines)-1
return self._contains_index | Returns the *line number* that has the CONTAINS keyword separating the
member and type definitions from the subroutines and functions. | Below is the the instruction that describes the task:
### Input:
Returns the *line number* that has the CONTAINS keyword separating the
member and type definitions from the subroutines and functions.
### Response:
def contains_index(self):
"""Returns the *line number* that has the CONTAINS keyword separating the
member and type definitions from the subroutines and functions."""
if self._contains_index is None:
max_t = 0
for tkey in self.types:
if self.types[tkey].end > max_t and not self.types[tkey].embedded:
max_t = self.types[tkey].end
#Now we have a good first guess. Continue to iterate the next few lines
#of the the refstring until we find a solid "CONTAINS" keyword. If there
#are no types in the module, then max_t will be zero and we don't have
#the danger of running into a contains keyword as part of a type. In that
#case we can just keep going until we find it.
i = 0
start = self.linenum(max_t)[0]
max_i = 10 if max_t > 0 else len(self._lines)
while (self._contains_index is None and i < max_i
and start + i < len(self._lines)):
iline = self._lines[start + i].lower()
if "contains" in iline:
if '!' not in iline or (iline.index('!') > iline.index("contains")):
self._contains_index = start + i
i += 1
if self._contains_index is None:
#There must not be a CONTAINS keyword in the module
self._contains_index = len(self._lines)-1
return self._contains_index |
def options(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to OPTIONS"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='OPTIONS', **overrides) | Sets the acceptable HTTP method to OPTIONS | Below is the the instruction that describes the task:
### Input:
Sets the acceptable HTTP method to OPTIONS
### Response:
def options(self, urls=None, **overrides):
"""Sets the acceptable HTTP method to OPTIONS"""
if urls is not None:
overrides['urls'] = urls
return self.where(accept='OPTIONS', **overrides) |
def create_consumer_group(self, project, logstore, consumer_group, timeout, in_order=False):
""" create consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: time-out
:type in_order: bool
:param in_order: if consume in order, default is False
:return: CreateConsumerGroupResponse
"""
request = CreateConsumerGroupRequest(project, logstore, ConsumerGroupEntity(consumer_group, timeout, in_order))
consumer_group = request.consumer_group
body_str = consumer_group.to_request_json()
headers = {
"x-log-bodyrawsize": '0',
"Content-Type": "application/json"
}
params = {}
project = request.get_project()
resource = "/logstores/" + request.get_logstore() + "/consumergroups"
(resp, header) = self._send("POST", project, body_str, resource, params, headers)
return CreateConsumerGroupResponse(header, resp) | create consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: time-out
:type in_order: bool
:param in_order: if consume in order, default is False
:return: CreateConsumerGroupResponse | Below is the the instruction that describes the task:
### Input:
create consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: time-out
:type in_order: bool
:param in_order: if consume in order, default is False
:return: CreateConsumerGroupResponse
### Response:
def create_consumer_group(self, project, logstore, consumer_group, timeout, in_order=False):
""" create consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:type timeout: int
:param timeout: time-out
:type in_order: bool
:param in_order: if consume in order, default is False
:return: CreateConsumerGroupResponse
"""
request = CreateConsumerGroupRequest(project, logstore, ConsumerGroupEntity(consumer_group, timeout, in_order))
consumer_group = request.consumer_group
body_str = consumer_group.to_request_json()
headers = {
"x-log-bodyrawsize": '0',
"Content-Type": "application/json"
}
params = {}
project = request.get_project()
resource = "/logstores/" + request.get_logstore() + "/consumergroups"
(resp, header) = self._send("POST", project, body_str, resource, params, headers)
return CreateConsumerGroupResponse(header, resp) |
def get_example():
"""Make an example for training and testing. Outputs a tuple
(label, features) where label is +1 if capital letters are the majority,
and -1 otherwise; and features is a list of letters.
"""
features = random.sample(string.ascii_letters, NUM_SAMPLES)
num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ])
num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ])
if num_capitalized > num_lowercase:
label = 1
else:
label = -1
return (label, features) | Make an example for training and testing. Outputs a tuple
(label, features) where label is +1 if capital letters are the majority,
and -1 otherwise; and features is a list of letters. | Below is the the instruction that describes the task:
### Input:
Make an example for training and testing. Outputs a tuple
(label, features) where label is +1 if capital letters are the majority,
and -1 otherwise; and features is a list of letters.
### Response:
def get_example():
"""Make an example for training and testing. Outputs a tuple
(label, features) where label is +1 if capital letters are the majority,
and -1 otherwise; and features is a list of letters.
"""
features = random.sample(string.ascii_letters, NUM_SAMPLES)
num_capitalized = len([ letter for letter in features if letter in string.ascii_uppercase ])
num_lowercase = len([ letter for letter in features if letter in string.ascii_lowercase ])
if num_capitalized > num_lowercase:
label = 1
else:
label = -1
return (label, features) |
def setparents(self):
"""Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`"""
for c in self:
if isinstance(c, AbstractElement):
c.parent = self
c.setparents() | Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy` | Below is the the instruction that describes the task:
### Input:
Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`
### Response:
def setparents(self):
"""Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`"""
for c in self:
if isinstance(c, AbstractElement):
c.parent = self
c.setparents() |
def receive_one(self):
"""return a pair of a package index and result of a task
This method waits until a tasks finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no tasks
is running.
"""
if not self.runid_pkgidx_map:
return None
while True:
if not self.runid_to_return:
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_next_finished_pkgidx_result_pair()
if ret is not None:
break
if self.runid_pkgidx_map:
time.sleep(self.sleep)
return ret | return a pair of a package index and result of a task
This method waits until a tasks finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no tasks
is running. | Below is the the instruction that describes the task:
### Input:
return a pair of a package index and result of a task
This method waits until a tasks finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no tasks
is running.
### Response:
def receive_one(self):
"""return a pair of a package index and result of a task
This method waits until a tasks finishes. It returns `None` if
no task is running.
Returns
-------
tuple or None
A pair of a package index and result. `None` if no tasks
is running.
"""
if not self.runid_pkgidx_map:
return None
while True:
if not self.runid_to_return:
self.runid_to_return.extend(self.dispatcher.poll())
ret = self._collect_next_finished_pkgidx_result_pair()
if ret is not None:
break
if self.runid_pkgidx_map:
time.sleep(self.sleep)
return ret |
def find_config_file():
"""
Returns the path to the config file if found in either the current working
directory, or the user's home directory. If a config file is not found,
the function will return None.
"""
local_config_name = path(CONFIG_FILE_NAME)
if os.path.isfile(local_config_name):
return local_config_name
else:
user_config_name = path(CONFIG_FILE_NAME, root=USER)
if os.path.isfile(user_config_name):
return user_config_name
else:
return None | Returns the path to the config file if found in either the current working
directory, or the user's home directory. If a config file is not found,
the function will return None. | Below is the the instruction that describes the task:
### Input:
Returns the path to the config file if found in either the current working
directory, or the user's home directory. If a config file is not found,
the function will return None.
### Response:
def find_config_file():
"""
Returns the path to the config file if found in either the current working
directory, or the user's home directory. If a config file is not found,
the function will return None.
"""
local_config_name = path(CONFIG_FILE_NAME)
if os.path.isfile(local_config_name):
return local_config_name
else:
user_config_name = path(CONFIG_FILE_NAME, root=USER)
if os.path.isfile(user_config_name):
return user_config_name
else:
return None |
def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
strength = self.strengths[index1, index2]
distance = self.distances[index1, index2]
yield strength*distance**(-6), 1 | Yields pairs ((s(r_ij), v(bar{r}_ij)) | Below is the the instruction that describes the task:
### Input:
Yields pairs ((s(r_ij), v(bar{r}_ij))
### Response:
def yield_pair_energies(self, index1, index2):
"""Yields pairs ((s(r_ij), v(bar{r}_ij))"""
strength = self.strengths[index1, index2]
distance = self.distances[index1, index2]
yield strength*distance**(-6), 1 |
def visit_import(self, node, parent):
"""visit a Import node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.Import(
names,
getattr(node, "lineno", None),
getattr(node, "col_offset", None),
parent,
)
# save import names in parent's locals:
for (name, asname) in newnode.names:
name = asname or name
parent.set_local(name.split(".")[0], newnode)
return newnode | visit a Import node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a Import node by returning a fresh instance of it
### Response:
def visit_import(self, node, parent):
"""visit a Import node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.Import(
names,
getattr(node, "lineno", None),
getattr(node, "col_offset", None),
parent,
)
# save import names in parent's locals:
for (name, asname) in newnode.names:
name = asname or name
parent.set_local(name.split(".")[0], newnode)
return newnode |
def _EncodeUnknownFields(message):
"""Remap unknown fields in message out of message.source."""
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
raise exceptions.InvalidUserInputError(
'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
for pair in pairs:
encoded_value = codec.encode_field(value_field, pair.value)
result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result | Remap unknown fields in message out of message.source. | Below is the the instruction that describes the task:
### Input:
Remap unknown fields in message out of message.source.
### Response:
def _EncodeUnknownFields(message):
"""Remap unknown fields in message out of message.source."""
source = _UNRECOGNIZED_FIELD_MAPPINGS.get(type(message))
if source is None:
return message
# CopyProtoMessage uses _ProtoJsonApiTools, which uses this message. Use
# the vanilla protojson-based copy function to avoid infinite recursion.
result = _CopyProtoMessageVanillaProtoJson(message)
pairs_field = message.field_by_name(source)
if not isinstance(pairs_field, messages.MessageField):
raise exceptions.InvalidUserInputError(
'Invalid pairs field %s' % pairs_field)
pairs_type = pairs_field.message_type
value_field = pairs_type.field_by_name('value')
value_variant = value_field.variant
pairs = getattr(message, source)
codec = _ProtoJsonApiTools.Get()
for pair in pairs:
encoded_value = codec.encode_field(value_field, pair.value)
result.set_unrecognized_field(pair.key, encoded_value, value_variant)
setattr(result, source, [])
return result |
def url(self, key, includeToken=None):
""" Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'.
"""
if self._token and (includeToken or self._showSecrets):
delim = '&' if '?' in key else '?'
return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token)
return '%s%s' % (self._baseurl, key) | Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'. | Below is the the instruction that describes the task:
### Input:
Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'.
### Response:
def url(self, key, includeToken=None):
""" Build a URL string with proper token argument. Token will be appended to the URL
if either includeToken is True or CONFIG.log.show_secrets is 'true'.
"""
if self._token and (includeToken or self._showSecrets):
delim = '&' if '?' in key else '?'
return '%s%s%sX-Plex-Token=%s' % (self._baseurl, key, delim, self._token)
return '%s%s' % (self._baseurl, key) |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(PRC, self).fix_config(options)
opt = "class_index"
if opt not in options:
options[opt] = [0]
if opt not in self.help:
self.help[opt] = "The list of 0-based class-label indices to display (list)."
opt = "key_loc"
if opt not in options:
options[opt] = "lower center"
if opt not in self.help:
self.help[opt] = "The location of the key in the plot (str)."
opt = "title"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The title for the plot (str)."
opt = "outfile"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The file to store the plot in (str)."
opt = "wait"
if opt not in options:
options[opt] = True
if opt not in self.help:
self.help[opt] = "Whether to wait for user to close the plot window (bool)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
### Response:
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(PRC, self).fix_config(options)
opt = "class_index"
if opt not in options:
options[opt] = [0]
if opt not in self.help:
self.help[opt] = "The list of 0-based class-label indices to display (list)."
opt = "key_loc"
if opt not in options:
options[opt] = "lower center"
if opt not in self.help:
self.help[opt] = "The location of the key in the plot (str)."
opt = "title"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The title for the plot (str)."
opt = "outfile"
if opt not in options:
options[opt] = None
if opt not in self.help:
self.help[opt] = "The file to store the plot in (str)."
opt = "wait"
if opt not in options:
options[opt] = True
if opt not in self.help:
self.help[opt] = "Whether to wait for user to close the plot window (bool)."
return options |
def timing(self, stat, value, tags=None):
"""Measure a timing for statistical distribution."""
self.client.timing(metric=stat, value=value, tags=tags) | Measure a timing for statistical distribution. | Below is the the instruction that describes the task:
### Input:
Measure a timing for statistical distribution.
### Response:
def timing(self, stat, value, tags=None):
"""Measure a timing for statistical distribution."""
self.client.timing(metric=stat, value=value, tags=tags) |
def get_pmid_by_keyword(keyword: str,
graph: Optional[BELGraph] = None,
pubmed_identifiers: Optional[Set[str]] = None,
) -> Set[str]:
"""Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string
"""
if pubmed_identifiers is not None:
return {
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier.startswith(keyword)
}
if graph is None:
raise ValueError('Graph not supplied')
return {
pubmed_identifier
for pubmed_identifier in iterate_pubmed_identifiers(graph)
if pubmed_identifier.startswith(keyword)
} | Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string | Below is the the instruction that describes the task:
### Input:
Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string
### Response:
def get_pmid_by_keyword(keyword: str,
graph: Optional[BELGraph] = None,
pubmed_identifiers: Optional[Set[str]] = None,
) -> Set[str]:
"""Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string
"""
if pubmed_identifiers is not None:
return {
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier.startswith(keyword)
}
if graph is None:
raise ValueError('Graph not supplied')
return {
pubmed_identifier
for pubmed_identifier in iterate_pubmed_identifiers(graph)
if pubmed_identifier.startswith(keyword)
} |
def respond_to_contact_info(self, message):
"""contact info: Show everyone's emergency contact info."""
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message) | contact info: Show everyone's emergency contact info. | Below is the the instruction that describes the task:
### Input:
contact info: Show everyone's emergency contact info.
### Response:
def respond_to_contact_info(self, message):
"""contact info: Show everyone's emergency contact info."""
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message) |
def get_all_cpv_use(cp):
'''
.. versionadded:: 2015.8.0
Uses portage to determine final USE flags and settings for an emerge.
@type cp: string
@param cp: eg cat/pkg
@rtype: lists
@return use, use_expand_hidden, usemask, useforce
'''
cpv = _get_cpv(cp)
portage = _get_portage()
use = None
_porttree().dbapi.settings.unlock()
try:
_porttree().dbapi.settings.setcpv(cpv, mydb=portage.portdb)
use = portage.settings['PORTAGE_USE'].split()
use_expand_hidden = portage.settings["USE_EXPAND_HIDDEN"].split()
usemask = list(_porttree().dbapi.settings.usemask)
useforce = list(_porttree().dbapi.settings.useforce)
except KeyError:
_porttree().dbapi.settings.reset()
_porttree().dbapi.settings.lock()
return [], [], [], []
# reset cpv filter
_porttree().dbapi.settings.reset()
_porttree().dbapi.settings.lock()
return use, use_expand_hidden, usemask, useforce | .. versionadded:: 2015.8.0
Uses portage to determine final USE flags and settings for an emerge.
@type cp: string
@param cp: eg cat/pkg
@rtype: lists
@return use, use_expand_hidden, usemask, useforce | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
Uses portage to determine final USE flags and settings for an emerge.
@type cp: string
@param cp: eg cat/pkg
@rtype: lists
@return use, use_expand_hidden, usemask, useforce
### Response:
def get_all_cpv_use(cp):
'''
.. versionadded:: 2015.8.0
Uses portage to determine final USE flags and settings for an emerge.
@type cp: string
@param cp: eg cat/pkg
@rtype: lists
@return use, use_expand_hidden, usemask, useforce
'''
cpv = _get_cpv(cp)
portage = _get_portage()
use = None
_porttree().dbapi.settings.unlock()
try:
_porttree().dbapi.settings.setcpv(cpv, mydb=portage.portdb)
use = portage.settings['PORTAGE_USE'].split()
use_expand_hidden = portage.settings["USE_EXPAND_HIDDEN"].split()
usemask = list(_porttree().dbapi.settings.usemask)
useforce = list(_porttree().dbapi.settings.useforce)
except KeyError:
_porttree().dbapi.settings.reset()
_porttree().dbapi.settings.lock()
return [], [], [], []
# reset cpv filter
_porttree().dbapi.settings.reset()
_porttree().dbapi.settings.lock()
return use, use_expand_hidden, usemask, useforce |
def build_response(headers: Headers, key: str) -> None:
"""
Build a handshake response to send to the client.
``key`` comes from :func:`check_request`.
"""
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept(key) | Build a handshake response to send to the client.
``key`` comes from :func:`check_request`. | Below is the the instruction that describes the task:
### Input:
Build a handshake response to send to the client.
``key`` comes from :func:`check_request`.
### Response:
def build_response(headers: Headers, key: str) -> None:
"""
Build a handshake response to send to the client.
``key`` comes from :func:`check_request`.
"""
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept(key) |
def validate_proxy_args(cls, *args):
"""No more than 1 of the following can be specified: tor_routing, proxy, proxy_list"""
supplied_proxies = Counter((not arg for arg in (*args,))).get(False)
if not supplied_proxies:
return
elif supplied_proxies > 1:
raise RaccoonException("Must specify only one of the following:\n"
"--tor-routing, --proxy-list, --proxy") | No more than 1 of the following can be specified: tor_routing, proxy, proxy_list | Below is the the instruction that describes the task:
### Input:
No more than 1 of the following can be specified: tor_routing, proxy, proxy_list
### Response:
def validate_proxy_args(cls, *args):
"""No more than 1 of the following can be specified: tor_routing, proxy, proxy_list"""
supplied_proxies = Counter((not arg for arg in (*args,))).get(False)
if not supplied_proxies:
return
elif supplied_proxies > 1:
raise RaccoonException("Must specify only one of the following:\n"
"--tor-routing, --proxy-list, --proxy") |
def index_document(self, text, url):
"Index the text of a document."
## For now, use first line for title
title = text[:text.index('\n')].strip()
docwords = words(text)
docid = len(self.documents)
self.documents.append(Document(title, url, len(docwords)))
for word in docwords:
if word not in self.stopwords:
self.index[word][docid] += 1 | Index the text of a document. | Below is the the instruction that describes the task:
### Input:
Index the text of a document.
### Response:
def index_document(self, text, url):
"Index the text of a document."
## For now, use first line for title
title = text[:text.index('\n')].strip()
docwords = words(text)
docid = len(self.documents)
self.documents.append(Document(title, url, len(docwords)))
for word in docwords:
if word not in self.stopwords:
self.index[word][docid] += 1 |
def _visit_or_none(node, attr, visitor, parent, visit="visit", **kws):
"""If the given node has an attribute, visits the attribute, and
otherwise returns None.
"""
value = getattr(node, attr, None)
if value:
return getattr(visitor, visit)(value, parent, **kws)
return None | If the given node has an attribute, visits the attribute, and
otherwise returns None. | Below is the the instruction that describes the task:
### Input:
If the given node has an attribute, visits the attribute, and
otherwise returns None.
### Response:
def _visit_or_none(node, attr, visitor, parent, visit="visit", **kws):
"""If the given node has an attribute, visits the attribute, and
otherwise returns None.
"""
value = getattr(node, attr, None)
if value:
return getattr(visitor, visit)(value, parent, **kws)
return None |
def _get_event_handlers():
"""
Gets dictionary of event handlers and the modules that define them
Returns:
event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error"
"""
import os
import importlib
event_handlers = {
"on_ready": [],
"on_resume": [],
"on_error": [],
"on_message": [],
"on_socket_raw_receive": [],
"on_socket_raw_send": [],
"on_message_delete": [],
"on_message_edit": [],
"on_reaction_add": [],
"on_reaction_remove": [],
"on_reaction_clear": [],
"on_channel_delete": [],
"on_channel_create": [],
"on_channel_update": [],
"on_member_join": [],
"on_member_remove": [],
"on_member_update": [],
"on_server_join": [],
"on_server_remove": [],
"on_server_update": [],
"on_server_role_create": [],
"on_server_role_delete": [],
"on_server_role_update": [],
"on_server_emojis_update": [],
"on_server_available": [],
"on_server_unavailable": [],
"on_voice_state_update": [],
"on_member_ban": [],
"on_member_unban": [],
"on_typing": [],
"on_group_join": [],
"on_group_remove": []
}
# Iterate through module folders
database_dir = "{}/modules".format(
os.path.dirname(os.path.realpath(__file__)))
for module_name in os.listdir(database_dir):
module_dir = "{}/{}".format(database_dir, module_name)
# Iterate through files in module
if os.path.isdir(module_dir) and not module_name.startswith("_"):
# Add all defined event handlers in module files
module_event_handlers = os.listdir(module_dir)
for event_handler in event_handlers.keys():
if "{}.py".format(event_handler) in module_event_handlers:
import_name = ".discord_modis.modules.{}.{}".format(
module_name, event_handler)
logger.debug("Found event handler {}".format(import_name[23:]))
try:
event_handlers[event_handler].append(
importlib.import_module(import_name, "modis"))
except Exception as e:
# Log errors in modules
logger.exception(e)
return event_handlers | Gets dictionary of event handlers and the modules that define them
Returns:
event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error" | Below is the the instruction that describes the task:
### Input:
Gets dictionary of event handlers and the modules that define them
Returns:
event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error"
### Response:
def _get_event_handlers():
"""
Gets dictionary of event handlers and the modules that define them
Returns:
event_handlers (dict): Contains "all", "on_ready", "on_message", "on_reaction_add", "on_error"
"""
import os
import importlib
event_handlers = {
"on_ready": [],
"on_resume": [],
"on_error": [],
"on_message": [],
"on_socket_raw_receive": [],
"on_socket_raw_send": [],
"on_message_delete": [],
"on_message_edit": [],
"on_reaction_add": [],
"on_reaction_remove": [],
"on_reaction_clear": [],
"on_channel_delete": [],
"on_channel_create": [],
"on_channel_update": [],
"on_member_join": [],
"on_member_remove": [],
"on_member_update": [],
"on_server_join": [],
"on_server_remove": [],
"on_server_update": [],
"on_server_role_create": [],
"on_server_role_delete": [],
"on_server_role_update": [],
"on_server_emojis_update": [],
"on_server_available": [],
"on_server_unavailable": [],
"on_voice_state_update": [],
"on_member_ban": [],
"on_member_unban": [],
"on_typing": [],
"on_group_join": [],
"on_group_remove": []
}
# Iterate through module folders
database_dir = "{}/modules".format(
os.path.dirname(os.path.realpath(__file__)))
for module_name in os.listdir(database_dir):
module_dir = "{}/{}".format(database_dir, module_name)
# Iterate through files in module
if os.path.isdir(module_dir) and not module_name.startswith("_"):
# Add all defined event handlers in module files
module_event_handlers = os.listdir(module_dir)
for event_handler in event_handlers.keys():
if "{}.py".format(event_handler) in module_event_handlers:
import_name = ".discord_modis.modules.{}.{}".format(
module_name, event_handler)
logger.debug("Found event handler {}".format(import_name[23:]))
try:
event_handlers[event_handler].append(
importlib.import_module(import_name, "modis"))
except Exception as e:
# Log errors in modules
logger.exception(e)
return event_handlers |
def resolve_base_href(self, handle_failures=None):
"""
Find any ``<base href>`` tag in the document, and apply its
values to all links found in the document. Also remove the
tag once it has been applied.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
"""
base_href = None
basetags = self.xpath('//base[@href]|//x:base[@href]',
namespaces={'x': XHTML_NAMESPACE})
for b in basetags:
base_href = b.get('href')
b.drop_tree()
if not base_href:
return
self.make_links_absolute(base_href, resolve_base_href=False,
handle_failures=handle_failures) | Find any ``<base href>`` tag in the document, and apply its
values to all links found in the document. Also remove the
tag once it has been applied.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed. | Below is the the instruction that describes the task:
### Input:
Find any ``<base href>`` tag in the document, and apply its
values to all links found in the document. Also remove the
tag once it has been applied.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
### Response:
def resolve_base_href(self, handle_failures=None):
"""
Find any ``<base href>`` tag in the document, and apply its
values to all links found in the document. Also remove the
tag once it has been applied.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
"""
base_href = None
basetags = self.xpath('//base[@href]|//x:base[@href]',
namespaces={'x': XHTML_NAMESPACE})
for b in basetags:
base_href = b.get('href')
b.drop_tree()
if not base_href:
return
self.make_links_absolute(base_href, resolve_base_href=False,
handle_failures=handle_failures) |
def SetHasherNames(self, hasher_names_string):
"""Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
"""
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
hasher_names_string)
debug_hasher_names = ', '.join(hasher_names)
logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
self._hasher_names_string = hasher_names_string | Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable. | Below is the the instruction that describes the task:
### Input:
Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
### Response:
def SetHasherNames(self, hasher_names_string):
"""Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
"""
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(
hasher_names_string)
debug_hasher_names = ', '.join(hasher_names)
logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
self._hasher_names_string = hasher_names_string |
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
""" Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
"""
# Use rid if provided
if rid is not None:
assert type(rid) == list, "rid must be a list. rid: {}".format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info("{} rids were not found in the GCT.".format(num_missing_rids))
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], (
"row_bool must have length equal to gctoo.data_df.shape[0]. " +
"len(row_bool): {}, gctoo.data_df.shape[0]: {}".format(
len(row_bool), gctoo.data_df.shape[0]))
rows_to_keep = gctoo.data_df.index[row_bool].values
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, (
"ridx must be a list of integers. ridx[0]: {}, " +
"type(ridx[0]): {}").format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], (
"ridx contains an integer larger than the number of rows in " +
"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format(
max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values
# If rid, row_bool, and ridx are all None, return all rows
else:
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid]
return rows_to_keep | Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept | Below is the the instruction that describes the task:
### Input:
Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
### Response:
def get_rows_to_keep(gctoo, rid=None, row_bool=None, ridx=None, exclude_rid=None):
""" Figure out based on the possible row inputs which rows to keep.
Args:
gctoo (GCToo object):
rid (list of strings):
row_bool (boolean array):
ridx (list of integers):
exclude_rid (list of strings):
Returns:
rows_to_keep (list of strings): row ids to be kept
"""
# Use rid if provided
if rid is not None:
assert type(rid) == list, "rid must be a list. rid: {}".format(rid)
rows_to_keep = [gctoo_row for gctoo_row in gctoo.data_df.index if gctoo_row in rid]
# Tell user if some rids not found
num_missing_rids = len(rid) - len(rows_to_keep)
if num_missing_rids != 0:
logger.info("{} rids were not found in the GCT.".format(num_missing_rids))
# Use row_bool if provided
elif row_bool is not None:
assert len(row_bool) == gctoo.data_df.shape[0], (
"row_bool must have length equal to gctoo.data_df.shape[0]. " +
"len(row_bool): {}, gctoo.data_df.shape[0]: {}".format(
len(row_bool), gctoo.data_df.shape[0]))
rows_to_keep = gctoo.data_df.index[row_bool].values
# Use ridx if provided
elif ridx is not None:
assert type(ridx[0]) is int, (
"ridx must be a list of integers. ridx[0]: {}, " +
"type(ridx[0]): {}").format(ridx[0], type(ridx[0]))
assert max(ridx) <= gctoo.data_df.shape[0], (
"ridx contains an integer larger than the number of rows in " +
"the GCToo. max(ridx): {}, gctoo.data_df.shape[0]: {}").format(
max(ridx), gctoo.data_df.shape[0])
rows_to_keep = gctoo.data_df.index[ridx].values
# If rid, row_bool, and ridx are all None, return all rows
else:
rows_to_keep = gctoo.data_df.index.values
# Use exclude_rid if provided
if exclude_rid is not None:
# Keep only those rows that are not in exclude_rid
rows_to_keep = [row_to_keep for row_to_keep in rows_to_keep if row_to_keep not in exclude_rid]
return rows_to_keep |
def configure_crossover(self, genome1, genome2, config):
""" Configure a new genome by crossover from two parent genomes. """
assert isinstance(genome1.fitness, (int, float))
assert isinstance(genome2.fitness, (int, float))
if genome1.fitness > genome2.fitness:
parent1, parent2 = genome1, genome2
else:
parent1, parent2 = genome2, genome1
# Inherit connection genes
for key, cg1 in iteritems(parent1.connections):
cg2 = parent2.connections.get(key)
if cg2 is None:
# Excess or disjoint gene: copy from the fittest parent.
self.connections[key] = cg1.copy()
else:
# Homologous gene: combine genes from both parents.
self.connections[key] = cg1.crossover(cg2)
# Inherit node genes
parent1_set = parent1.nodes
parent2_set = parent2.nodes
for key, ng1 in iteritems(parent1_set):
ng2 = parent2_set.get(key)
assert key not in self.nodes
if ng2 is None:
# Extra gene: copy from the fittest parent
self.nodes[key] = ng1.copy()
else:
# Homologous gene: combine genes from both parents.
self.nodes[key] = ng1.crossover(ng2) | Configure a new genome by crossover from two parent genomes. | Below is the the instruction that describes the task:
### Input:
Configure a new genome by crossover from two parent genomes.
### Response:
def configure_crossover(self, genome1, genome2, config):
""" Configure a new genome by crossover from two parent genomes. """
assert isinstance(genome1.fitness, (int, float))
assert isinstance(genome2.fitness, (int, float))
if genome1.fitness > genome2.fitness:
parent1, parent2 = genome1, genome2
else:
parent1, parent2 = genome2, genome1
# Inherit connection genes
for key, cg1 in iteritems(parent1.connections):
cg2 = parent2.connections.get(key)
if cg2 is None:
# Excess or disjoint gene: copy from the fittest parent.
self.connections[key] = cg1.copy()
else:
# Homologous gene: combine genes from both parents.
self.connections[key] = cg1.crossover(cg2)
# Inherit node genes
parent1_set = parent1.nodes
parent2_set = parent2.nodes
for key, ng1 in iteritems(parent1_set):
ng2 = parent2_set.get(key)
assert key not in self.nodes
if ng2 is None:
# Extra gene: copy from the fittest parent
self.nodes[key] = ng1.copy()
else:
# Homologous gene: combine genes from both parents.
self.nodes[key] = ng1.crossover(ng2) |
def tick(self):
""" Activity tick handler; services all activities
Returns: True if controlling iterator says it's okay to keep going;
False to stop
"""
# Run activities whose time has come
for act in self.__activities:
if not act.iteratorHolder[0]:
continue
try:
next(act.iteratorHolder[0])
except StopIteration:
act.cb()
if act.repeating:
act.iteratorHolder[0] = iter(xrange(act.period))
else:
act.iteratorHolder[0] = None
return True | Activity tick handler; services all activities
Returns: True if controlling iterator says it's okay to keep going;
False to stop | Below is the the instruction that describes the task:
### Input:
Activity tick handler; services all activities
Returns: True if controlling iterator says it's okay to keep going;
False to stop
### Response:
def tick(self):
""" Activity tick handler; services all activities
Returns: True if controlling iterator says it's okay to keep going;
False to stop
"""
# Run activities whose time has come
for act in self.__activities:
if not act.iteratorHolder[0]:
continue
try:
next(act.iteratorHolder[0])
except StopIteration:
act.cb()
if act.repeating:
act.iteratorHolder[0] = iter(xrange(act.period))
else:
act.iteratorHolder[0] = None
return True |
def get_vulnerability_functions_04(node, fname):
"""
:param node:
a vulnerabilityModel node
:param fname:
path to the vulnerability file
:returns:
a dictionary imt, vf_id -> vulnerability function
"""
logging.warning('Please upgrade %s to NRML 0.5', fname)
# NB: the IMTs can be duplicated and with different levels, each
# vulnerability function in a set will get its own levels
imts = set()
vf_ids = set()
# imt, vf_id -> vulnerability function
vmodel = scientific.VulnerabilityModel(**node.attrib)
for vset in node:
imt_str = vset.IML['IMT']
imls = ~vset.IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
vf_id = vfun['vulnerabilityFunctionID']
if vf_id in vf_ids:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(vf_id, fname, vfun.lineno))
vf_ids.add(vf_id)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vmodel[imt_str, vf_id] = scientific.VulnerabilityFunction(
vf_id, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
return vmodel | :param node:
a vulnerabilityModel node
:param fname:
path to the vulnerability file
:returns:
a dictionary imt, vf_id -> vulnerability function | Below is the the instruction that describes the task:
### Input:
:param node:
a vulnerabilityModel node
:param fname:
path to the vulnerability file
:returns:
a dictionary imt, vf_id -> vulnerability function
### Response:
def get_vulnerability_functions_04(node, fname):
"""
:param node:
a vulnerabilityModel node
:param fname:
path to the vulnerability file
:returns:
a dictionary imt, vf_id -> vulnerability function
"""
logging.warning('Please upgrade %s to NRML 0.5', fname)
# NB: the IMTs can be duplicated and with different levels, each
# vulnerability function in a set will get its own levels
imts = set()
vf_ids = set()
# imt, vf_id -> vulnerability function
vmodel = scientific.VulnerabilityModel(**node.attrib)
for vset in node:
imt_str = vset.IML['IMT']
imls = ~vset.IML
imts.add(imt_str)
for vfun in vset.getnodes('discreteVulnerability'):
vf_id = vfun['vulnerabilityFunctionID']
if vf_id in vf_ids:
raise InvalidFile(
'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
(vf_id, fname, vfun.lineno))
vf_ids.add(vf_id)
with context(fname, vfun):
loss_ratios = ~vfun.lossRatio
coefficients = ~vfun.coefficientsVariation
if len(loss_ratios) != len(imls):
raise InvalidFile(
'There are %d loss ratios, but %d imls: %s, line %d' %
(len(loss_ratios), len(imls), fname,
vfun.lossRatio.lineno))
if len(coefficients) != len(imls):
raise InvalidFile(
'There are %d coefficients, but %d imls: %s, line %d' %
(len(coefficients), len(imls), fname,
vfun.coefficientsVariation.lineno))
with context(fname, vfun):
vmodel[imt_str, vf_id] = scientific.VulnerabilityFunction(
vf_id, imt_str, imls, loss_ratios, coefficients,
vfun['probabilisticDistribution'])
return vmodel |
def get_context_data(self, **kwargs):
"""
Adds ``is_rendered`` to the context and the widget's context data.
``is_rendered`` signals that the AJAX view has been called and that
we are displaying the full widget now. When ``is_rendered`` is not
found in the widget template it means that we are seeing the first
page load and all widgets still have to get their real data from
this AJAX view.
"""
ctx = super(RenderWidgetMixin, self).get_context_data(**kwargs)
ctx.update({
'is_rendered': True,
'widget': self.widget,
})
ctx.update(self.widget.get_context_data())
return ctx | Adds ``is_rendered`` to the context and the widget's context data.
``is_rendered`` signals that the AJAX view has been called and that
we are displaying the full widget now. When ``is_rendered`` is not
found in the widget template it means that we are seeing the first
page load and all widgets still have to get their real data from
this AJAX view. | Below is the the instruction that describes the task:
### Input:
Adds ``is_rendered`` to the context and the widget's context data.
``is_rendered`` signals that the AJAX view has been called and that
we are displaying the full widget now. When ``is_rendered`` is not
found in the widget template it means that we are seeing the first
page load and all widgets still have to get their real data from
this AJAX view.
### Response:
def get_context_data(self, **kwargs):
"""
Adds ``is_rendered`` to the context and the widget's context data.
``is_rendered`` signals that the AJAX view has been called and that
we are displaying the full widget now. When ``is_rendered`` is not
found in the widget template it means that we are seeing the first
page load and all widgets still have to get their real data from
this AJAX view.
"""
ctx = super(RenderWidgetMixin, self).get_context_data(**kwargs)
ctx.update({
'is_rendered': True,
'widget': self.widget,
})
ctx.update(self.widget.get_context_data())
return ctx |
def _do_request(self, method, path, params=None, data=None):
"""Query Marathon server."""
headers = {
'Content-Type': 'application/json', 'Accept': 'application/json'}
if self.auth_token:
headers['Authorization'] = "token={}".format(self.auth_token)
response = None
servers = list(self.servers)
while servers and response is None:
server = servers.pop(0)
url = ''.join([server.rstrip('/'), path])
try:
response = self.session.request(
method, url, params=params, data=data, headers=headers,
auth=self.auth, timeout=self.timeout, verify=self.verify)
marathon.log.info('Got response from %s', server)
except requests.exceptions.RequestException as e:
marathon.log.error(
'Error while calling %s: %s', url, str(e))
if response is None:
raise NoResponseError('No remaining Marathon servers to try')
if response.status_code >= 500:
marathon.log.error('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
raise InternalServerError(response)
elif response.status_code >= 400:
marathon.log.error('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
if response.status_code == 404:
raise NotFoundError(response)
elif response.status_code == 409:
raise ConflictError(response)
else:
raise MarathonHttpError(response)
elif response.status_code >= 300:
marathon.log.warn('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
else:
marathon.log.debug('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
return response | Query Marathon server. | Below is the the instruction that describes the task:
### Input:
Query Marathon server.
### Response:
def _do_request(self, method, path, params=None, data=None):
"""Query Marathon server."""
headers = {
'Content-Type': 'application/json', 'Accept': 'application/json'}
if self.auth_token:
headers['Authorization'] = "token={}".format(self.auth_token)
response = None
servers = list(self.servers)
while servers and response is None:
server = servers.pop(0)
url = ''.join([server.rstrip('/'), path])
try:
response = self.session.request(
method, url, params=params, data=data, headers=headers,
auth=self.auth, timeout=self.timeout, verify=self.verify)
marathon.log.info('Got response from %s', server)
except requests.exceptions.RequestException as e:
marathon.log.error(
'Error while calling %s: %s', url, str(e))
if response is None:
raise NoResponseError('No remaining Marathon servers to try')
if response.status_code >= 500:
marathon.log.error('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
raise InternalServerError(response)
elif response.status_code >= 400:
marathon.log.error('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
if response.status_code == 404:
raise NotFoundError(response)
elif response.status_code == 409:
raise ConflictError(response)
else:
raise MarathonHttpError(response)
elif response.status_code >= 300:
marathon.log.warn('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
else:
marathon.log.debug('Got HTTP {code}: {body}'.format(
code=response.status_code, body=response.text.encode('utf-8')))
return response |
def add_enrollment(db, uuid, organization, from_date=None, to_date=None):
"""Enroll a unique identity to an organization.
The function adds a new relationship between the unique identity
identified by 'uuid' and the given 'organization'. The given
identity and organization must exist prior to add this enrollment
in the registry. Otherwise, a 'NotFoundError' exception will be raised.
The period of the enrollment can be given with the parameters 'from_date'
and 'to_date', where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
If the given enrollment data is already in the registry, the function
will raise a 'AlreadyExistsError' exception.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:raises NotFoundError: when either 'uuid' or 'organization' are not
found in the registry.
:raises InvalidValeError: raised in three cases, when either identity or
organization are None or empty strings; when "from_date" < 1900-01-01 or
"to_date" > 2100-01-01; when "from_date > to_date"
:raises AlreadyExistsError: raised when given enrollment already exists
in the registry.
"""
if uuid is None:
raise InvalidValueError('uuid cannot be None')
if uuid == '':
raise InvalidValueError('uuid cannot be an empty string')
if organization is None:
raise InvalidValueError('organization cannot be None')
if organization == '':
raise InvalidValueError('organization cannot be an empty string')
if not from_date:
from_date = MIN_PERIOD_DATE
if not to_date:
to_date = MAX_PERIOD_DATE
with db.connect() as session:
uidentity = find_unique_identity(session, uuid)
if not uidentity:
raise NotFoundError(entity=uuid)
org = find_organization(session, organization)
if not org:
raise NotFoundError(entity=organization)
try:
enroll_db(session, uidentity, org,
from_date=from_date, to_date=to_date)
except ValueError as e:
raise InvalidValueError(e) | Enroll a unique identity to an organization.
The function adds a new relationship between the unique identity
identified by 'uuid' and the given 'organization'. The given
identity and organization must exist prior to add this enrollment
in the registry. Otherwise, a 'NotFoundError' exception will be raised.
The period of the enrollment can be given with the parameters 'from_date'
and 'to_date', where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
If the given enrollment data is already in the registry, the function
will raise a 'AlreadyExistsError' exception.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:raises NotFoundError: when either 'uuid' or 'organization' are not
found in the registry.
:raises InvalidValeError: raised in three cases, when either identity or
organization are None or empty strings; when "from_date" < 1900-01-01 or
"to_date" > 2100-01-01; when "from_date > to_date"
:raises AlreadyExistsError: raised when given enrollment already exists
in the registry. | Below is the the instruction that describes the task:
### Input:
Enroll a unique identity to an organization.
The function adds a new relationship between the unique identity
identified by 'uuid' and the given 'organization'. The given
identity and organization must exist prior to add this enrollment
in the registry. Otherwise, a 'NotFoundError' exception will be raised.
The period of the enrollment can be given with the parameters 'from_date'
and 'to_date', where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
If the given enrollment data is already in the registry, the function
will raise a 'AlreadyExistsError' exception.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:raises NotFoundError: when either 'uuid' or 'organization' are not
found in the registry.
:raises InvalidValeError: raised in three cases, when either identity or
organization are None or empty strings; when "from_date" < 1900-01-01 or
"to_date" > 2100-01-01; when "from_date > to_date"
:raises AlreadyExistsError: raised when given enrollment already exists
in the registry.
### Response:
def add_enrollment(db, uuid, organization, from_date=None, to_date=None):
"""Enroll a unique identity to an organization.
The function adds a new relationship between the unique identity
identified by 'uuid' and the given 'organization'. The given
identity and organization must exist prior to add this enrollment
in the registry. Otherwise, a 'NotFoundError' exception will be raised.
The period of the enrollment can be given with the parameters 'from_date'
and 'to_date', where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
If the given enrollment data is already in the registry, the function
will raise a 'AlreadyExistsError' exception.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:raises NotFoundError: when either 'uuid' or 'organization' are not
found in the registry.
:raises InvalidValeError: raised in three cases, when either identity or
organization are None or empty strings; when "from_date" < 1900-01-01 or
"to_date" > 2100-01-01; when "from_date > to_date"
:raises AlreadyExistsError: raised when given enrollment already exists
in the registry.
"""
if uuid is None:
raise InvalidValueError('uuid cannot be None')
if uuid == '':
raise InvalidValueError('uuid cannot be an empty string')
if organization is None:
raise InvalidValueError('organization cannot be None')
if organization == '':
raise InvalidValueError('organization cannot be an empty string')
if not from_date:
from_date = MIN_PERIOD_DATE
if not to_date:
to_date = MAX_PERIOD_DATE
with db.connect() as session:
uidentity = find_unique_identity(session, uuid)
if not uidentity:
raise NotFoundError(entity=uuid)
org = find_organization(session, organization)
if not org:
raise NotFoundError(entity=organization)
try:
enroll_db(session, uidentity, org,
from_date=from_date, to_date=to_date)
except ValueError as e:
raise InvalidValueError(e) |
def surface_area(self):
r"""Calculate all atomic surface area.
:rtype: [float]
"""
return [self.atomic_sa(i) for i in range(len(self.rads))] | r"""Calculate all atomic surface area.
:rtype: [float] | Below is the the instruction that describes the task:
### Input:
r"""Calculate all atomic surface area.
:rtype: [float]
### Response:
def surface_area(self):
r"""Calculate all atomic surface area.
:rtype: [float]
"""
return [self.atomic_sa(i) for i in range(len(self.rads))] |
def get_single_score(self, point, centroids=None, sd=None):
"""
Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \
Attributes:
observation_score (dict): Original received point and normalised point.
:Example:
>>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]}
nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \
wins.
distances (list (float)): List of distances from the Point to each cluster centroid. E.g:
>>> [2.38086238, 0.12382605, 2.0362993, 1.43195021]
centroids (list (list (float))): A list of the current centroidswhen queried. E.g:
>>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \
[0.39421099, 2.36783733] ]
:param point: the point to classify
:type point: pandas.DataFrame
:param centroids: the centroids
:type centroids: np.array
:param sd: the standard deviation
:type sd: np.array
:return score: the score for a given observation
:rtype score: int
"""
normalised_point = array(point) / array(sd)
observation_score = {
'original': point,
'normalised': normalised_point.tolist(),
}
distances = [
euclidean(normalised_point, centroid)
for centroid in centroids
]
return int(distances.index(min(distances))) | Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \
Attributes:
observation_score (dict): Original received point and normalised point.
:Example:
>>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]}
nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \
wins.
distances (list (float)): List of distances from the Point to each cluster centroid. E.g:
>>> [2.38086238, 0.12382605, 2.0362993, 1.43195021]
centroids (list (list (float))): A list of the current centroidswhen queried. E.g:
>>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \
[0.39421099, 2.36783733] ]
:param point: the point to classify
:type point: pandas.DataFrame
:param centroids: the centroids
:type centroids: np.array
:param sd: the standard deviation
:type sd: np.array
:return score: the score for a given observation
:rtype score: int | Below is the the instruction that describes the task:
### Input:
Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \
Attributes:
observation_score (dict): Original received point and normalised point.
:Example:
>>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]}
nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \
wins.
distances (list (float)): List of distances from the Point to each cluster centroid. E.g:
>>> [2.38086238, 0.12382605, 2.0362993, 1.43195021]
centroids (list (list (float))): A list of the current centroidswhen queried. E.g:
>>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \
[0.39421099, 2.36783733] ]
:param point: the point to classify
:type point: pandas.DataFrame
:param centroids: the centroids
:type centroids: np.array
:param sd: the standard deviation
:type sd: np.array
:return score: the score for a given observation
:rtype score: int
### Response:
def get_single_score(self, point, centroids=None, sd=None):
"""
Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \
Attributes:
observation_score (dict): Original received point and normalised point.
:Example:
>>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]}
nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \
wins.
distances (list (float)): List of distances from the Point to each cluster centroid. E.g:
>>> [2.38086238, 0.12382605, 2.0362993, 1.43195021]
centroids (list (list (float))): A list of the current centroidswhen queried. E.g:
>>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \
[0.39421099, 2.36783733] ]
:param point: the point to classify
:type point: pandas.DataFrame
:param centroids: the centroids
:type centroids: np.array
:param sd: the standard deviation
:type sd: np.array
:return score: the score for a given observation
:rtype score: int
"""
normalised_point = array(point) / array(sd)
observation_score = {
'original': point,
'normalised': normalised_point.tolist(),
}
distances = [
euclidean(normalised_point, centroid)
for centroid in centroids
]
return int(distances.index(min(distances))) |
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
"""Return the end coordinate for a variant
Args:
pos(int)
alt(str)
category(str)
snvend(str)
svend(int)
svlen(int)
Returns:
end(int)
"""
# If nothing is known we set end to be same as start
end = pos
# If variant is snv or indel we know that cyvcf2 can handle end pos
if category in ('snv', 'indel', 'cancer'):
end = snvend
# With SVs we have to be a bit more careful
elif category == 'sv':
# The END field from INFO usually works fine
end = svend
# For some cases like insertions the callers set end to same as pos
# In those cases we can hope that there is a svlen...
if svend == pos:
if svlen:
end = pos + svlen
# If variant is 'BND' they have ':' in alt field
# Information about other end is in the alt field
if ':' in alt:
match = BND_ALT_PATTERN.match(alt)
if match:
end = int(match.group(2))
return end | Return the end coordinate for a variant
Args:
pos(int)
alt(str)
category(str)
snvend(str)
svend(int)
svlen(int)
Returns:
end(int) | Below is the the instruction that describes the task:
### Input:
Return the end coordinate for a variant
Args:
pos(int)
alt(str)
category(str)
snvend(str)
svend(int)
svlen(int)
Returns:
end(int)
### Response:
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
"""Return the end coordinate for a variant
Args:
pos(int)
alt(str)
category(str)
snvend(str)
svend(int)
svlen(int)
Returns:
end(int)
"""
# If nothing is known we set end to be same as start
end = pos
# If variant is snv or indel we know that cyvcf2 can handle end pos
if category in ('snv', 'indel', 'cancer'):
end = snvend
# With SVs we have to be a bit more careful
elif category == 'sv':
# The END field from INFO usually works fine
end = svend
# For some cases like insertions the callers set end to same as pos
# In those cases we can hope that there is a svlen...
if svend == pos:
if svlen:
end = pos + svlen
# If variant is 'BND' they have ':' in alt field
# Information about other end is in the alt field
if ':' in alt:
match = BND_ALT_PATTERN.match(alt)
if match:
end = int(match.group(2))
return end |
def login_with_password(self, params):
"""Authenticate using credentials supplied in params."""
# never allow insecure login
self.check_secure()
username = self.get_username(params['user'])
password = self.get_password(params['password'])
user = auth.authenticate(username=username, password=password)
if user is not None:
# the password verified for the user
if user.is_active:
self.do_login(user)
return get_user_token(
user=user, purpose=HashPurpose.RESUME_LOGIN,
minutes_valid=HASH_MINUTES_VALID[HashPurpose.RESUME_LOGIN],
)
# Call to `authenticate` couldn't verify the username and password.
# It will have sent the `user_login_failed` signal, no need to pass the
# `username` argument to auth_failed().
self.auth_failed() | Authenticate using credentials supplied in params. | Below is the the instruction that describes the task:
### Input:
Authenticate using credentials supplied in params.
### Response:
def login_with_password(self, params):
"""Authenticate using credentials supplied in params."""
# never allow insecure login
self.check_secure()
username = self.get_username(params['user'])
password = self.get_password(params['password'])
user = auth.authenticate(username=username, password=password)
if user is not None:
# the password verified for the user
if user.is_active:
self.do_login(user)
return get_user_token(
user=user, purpose=HashPurpose.RESUME_LOGIN,
minutes_valid=HASH_MINUTES_VALID[HashPurpose.RESUME_LOGIN],
)
# Call to `authenticate` couldn't verify the username and password.
# It will have sent the `user_login_failed` signal, no need to pass the
# `username` argument to auth_failed().
self.auth_failed() |
def user_add_link(self):
'''
Create link by user.
'''
if self.check_post_role()['ADD']:
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.get_current_user()
cur_uid = tools.get_uudd(2)
while MLink.get_by_uid(cur_uid):
cur_uid = tools.get_uudd(2)
MLink.create_link(cur_uid, post_data)
self.redirect('/link/list') | Create link by user. | Below is the the instruction that describes the task:
### Input:
Create link by user.
### Response:
def user_add_link(self):
'''
Create link by user.
'''
if self.check_post_role()['ADD']:
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.get_current_user()
cur_uid = tools.get_uudd(2)
while MLink.get_by_uid(cur_uid):
cur_uid = tools.get_uudd(2)
MLink.create_link(cur_uid, post_data)
self.redirect('/link/list') |
def sents(self, fileids=None) -> Generator[str, str, None]:
"""
:param fileids:
:return: A generator of sentences
"""
for para in self.paras(fileids):
sentences = self._sent_tokenizer.tokenize(para)
for sentence in sentences:
yield sentence | :param fileids:
:return: A generator of sentences | Below is the the instruction that describes the task:
### Input:
:param fileids:
:return: A generator of sentences
### Response:
def sents(self, fileids=None) -> Generator[str, str, None]:
"""
:param fileids:
:return: A generator of sentences
"""
for para in self.paras(fileids):
sentences = self._sent_tokenizer.tokenize(para)
for sentence in sentences:
yield sentence |
def resolve_polytomies(self):
'''Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.'''
q = deque(); q.append(self)
while len(q) != 0:
node = q.popleft()
while len(node.children) > 2:
c1 = node.children.pop(); c2 = node.children.pop()
nn = Node(edge_length=0); node.add_child(nn)
nn.add_child(c1); nn.add_child(c2)
q.extend(node.children) | Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges. | Below is the the instruction that describes the task:
### Input:
Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.
### Response:
def resolve_polytomies(self):
'''Arbitrarily resolve polytomies below this ``Node`` with 0-lengthed edges.'''
q = deque(); q.append(self)
while len(q) != 0:
node = q.popleft()
while len(node.children) > 2:
c1 = node.children.pop(); c2 = node.children.pop()
nn = Node(edge_length=0); node.add_child(nn)
nn.add_child(c1); nn.add_child(c2)
q.extend(node.children) |
def get_urls(self):
"""Manages not only TreeAdmin URLs but also TreeItemAdmin URLs."""
urls = super(TreeAdmin, self).get_urls()
prefix_change = 'change/' if DJANGO_POST_19 else ''
sitetree_urls = [
url(r'^change/$', redirects_handler, name=get_tree_item_url_name('changelist')),
url(r'^((?P<tree_id>\d+)/)?%sitem_add/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_add), name=get_tree_item_url_name('add')),
url(r'^(?P<tree_id>\d+)/%sitem_(?P<item_id>\d+)/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('change')),
url(r'^%sitem_(?P<item_id>\d+)/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('change')),
url(r'^((?P<tree_id>\d+)/)?%sitem_(?P<item_id>\d+)/delete/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_delete), name=get_tree_item_url_name('delete')),
url(r'^((?P<tree_id>\d+)/)?%sitem_(?P<item_id>\d+)/history/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_history), name=get_tree_item_url_name('history')),
url(r'^(?P<tree_id>\d+)/%sitem_(?P<item_id>\d+)/move_(?P<direction>(up|down))/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_move), name=get_tree_item_url_name('move')),
]
if not DJANGO_POST_19:
sitetree_urls = patterns_func('', *sitetree_urls)
if SMUGGLER_INSTALLED:
sitetree_urls += (url(r'^dump_all/$', self.admin_site.admin_view(self.dump_view), name='sitetree_dump'),)
return sitetree_urls + urls | Manages not only TreeAdmin URLs but also TreeItemAdmin URLs. | Below is the the instruction that describes the task:
### Input:
Manages not only TreeAdmin URLs but also TreeItemAdmin URLs.
### Response:
def get_urls(self):
"""Manages not only TreeAdmin URLs but also TreeItemAdmin URLs."""
urls = super(TreeAdmin, self).get_urls()
prefix_change = 'change/' if DJANGO_POST_19 else ''
sitetree_urls = [
url(r'^change/$', redirects_handler, name=get_tree_item_url_name('changelist')),
url(r'^((?P<tree_id>\d+)/)?%sitem_add/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_add), name=get_tree_item_url_name('add')),
url(r'^(?P<tree_id>\d+)/%sitem_(?P<item_id>\d+)/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('change')),
url(r'^%sitem_(?P<item_id>\d+)/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('change')),
url(r'^((?P<tree_id>\d+)/)?%sitem_(?P<item_id>\d+)/delete/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_delete), name=get_tree_item_url_name('delete')),
url(r'^((?P<tree_id>\d+)/)?%sitem_(?P<item_id>\d+)/history/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_history), name=get_tree_item_url_name('history')),
url(r'^(?P<tree_id>\d+)/%sitem_(?P<item_id>\d+)/move_(?P<direction>(up|down))/$' % prefix_change,
self.admin_site.admin_view(self.tree_admin.item_move), name=get_tree_item_url_name('move')),
]
if not DJANGO_POST_19:
sitetree_urls = patterns_func('', *sitetree_urls)
if SMUGGLER_INSTALLED:
sitetree_urls += (url(r'^dump_all/$', self.admin_site.admin_view(self.dump_view), name='sitetree_dump'),)
return sitetree_urls + urls |
def _validate_indices(num_qubits: int, indices: List[int]) -> None:
"""Validates that the indices have values within range of num_qubits."""
if any(index < 0 for index in indices):
raise IndexError('Negative index in indices: {}'.format(indices))
if any(index >= num_qubits for index in indices):
raise IndexError('Out of range indices, must be less than number of '
'qubits but was {}'.format(indices)) | Validates that the indices have values within range of num_qubits. | Below is the the instruction that describes the task:
### Input:
Validates that the indices have values within range of num_qubits.
### Response:
def _validate_indices(num_qubits: int, indices: List[int]) -> None:
"""Validates that the indices have values within range of num_qubits."""
if any(index < 0 for index in indices):
raise IndexError('Negative index in indices: {}'.format(indices))
if any(index >= num_qubits for index in indices):
raise IndexError('Out of range indices, must be less than number of '
'qubits but was {}'.format(indices)) |
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__))) | Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description. | Below is the the instruction that describes the task:
### Input:
Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
### Response:
def supported_fixes():
"""Yield pep8 error codes that autopep8 fixes.
Each item we yield is a tuple of the code followed by its
description.
"""
yield ('E101', docstring_summary(reindent.__doc__))
instance = FixPEP8(filename=None, options=None, contents='')
for attribute in dir(instance):
code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
if code:
yield (
code.group(1).upper(),
re.sub(r'\s+', ' ',
docstring_summary(getattr(instance, attribute).__doc__))
)
for (code, function) in sorted(global_fixes()):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
for code in sorted(CODE_TO_2TO3):
yield (code.upper() + (4 - len(code)) * ' ',
re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__))) |
def connect(self):
"""Create the low-level AMQP connection to RabbitMQ.
:rtype: pika.adapters.tornado_connection.TornadoConnection
"""
self.set_state(self.STATE_CONNECTING)
self.handle = tornado_connection.TornadoConnection(
self._connection_parameters,
on_open_callback=self.on_open,
on_open_error_callback=self.on_open_error,
stop_ioloop_on_close=False,
custom_ioloop=self.io_loop) | Create the low-level AMQP connection to RabbitMQ.
:rtype: pika.adapters.tornado_connection.TornadoConnection | Below is the the instruction that describes the task:
### Input:
Create the low-level AMQP connection to RabbitMQ.
:rtype: pika.adapters.tornado_connection.TornadoConnection
### Response:
def connect(self):
"""Create the low-level AMQP connection to RabbitMQ.
:rtype: pika.adapters.tornado_connection.TornadoConnection
"""
self.set_state(self.STATE_CONNECTING)
self.handle = tornado_connection.TornadoConnection(
self._connection_parameters,
on_open_callback=self.on_open,
on_open_error_callback=self.on_open_error,
stop_ioloop_on_close=False,
custom_ioloop=self.io_loop) |
def redraw(self, whence=0):
"""Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`.
"""
with self._defer_lock:
whence = min(self._defer_whence, whence)
if not self.defer_redraw:
if self._hold_redraw_cnt == 0:
self._defer_whence = self._defer_whence_reset
self.redraw_now(whence=whence)
else:
self._defer_whence = whence
return
elapsed = time.time() - self.time_last_redraw
# If there is no redraw scheduled, or we are overdue for one:
if (not self._defer_flag) or (elapsed > self.defer_lagtime):
# If more time than defer_lagtime has passed since the
# last redraw then just do the redraw immediately
if elapsed > self.defer_lagtime:
if self._hold_redraw_cnt > 0:
#self._defer_flag = True
self._defer_whence = whence
return
self._defer_whence = self._defer_whence_reset
self.logger.debug("lagtime expired--forced redraw")
self.redraw_now(whence=whence)
return
# Indicate that a redraw is necessary and record whence
self._defer_flag = True
self._defer_whence = whence
# schedule a redraw by the end of the defer_lagtime
secs = self.defer_lagtime - elapsed
self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (
whence, secs))
self.reschedule_redraw(secs)
else:
# A redraw is already scheduled. Just record whence.
self._defer_whence = whence
self.logger.debug("update whence=%.2f" % (whence)) | Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`. | Below is the the instruction that describes the task:
### Input:
Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`.
### Response:
def redraw(self, whence=0):
"""Redraw the canvas.
Parameters
----------
whence
See :meth:`get_rgb_object`.
"""
with self._defer_lock:
whence = min(self._defer_whence, whence)
if not self.defer_redraw:
if self._hold_redraw_cnt == 0:
self._defer_whence = self._defer_whence_reset
self.redraw_now(whence=whence)
else:
self._defer_whence = whence
return
elapsed = time.time() - self.time_last_redraw
# If there is no redraw scheduled, or we are overdue for one:
if (not self._defer_flag) or (elapsed > self.defer_lagtime):
# If more time than defer_lagtime has passed since the
# last redraw then just do the redraw immediately
if elapsed > self.defer_lagtime:
if self._hold_redraw_cnt > 0:
#self._defer_flag = True
self._defer_whence = whence
return
self._defer_whence = self._defer_whence_reset
self.logger.debug("lagtime expired--forced redraw")
self.redraw_now(whence=whence)
return
# Indicate that a redraw is necessary and record whence
self._defer_flag = True
self._defer_whence = whence
# schedule a redraw by the end of the defer_lagtime
secs = self.defer_lagtime - elapsed
self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (
whence, secs))
self.reschedule_redraw(secs)
else:
# A redraw is already scheduled. Just record whence.
self._defer_whence = whence
self.logger.debug("update whence=%.2f" % (whence)) |
def split_camel(word: str) -> str:
"""
Separate any words joined in Camel case fashion using a single space.
>>> split_camel('esseCarthaginienses')
'esse Carthaginienses'
>>> split_camel('urbemCertimam')
'urbem Certimam'
"""
m = re.match('[a-z]+[A-Z][a-z]', word)
if m:
_, end = m.span()
return word[:end - 2] + ' ' + word[end - 2:]
return word | Separate any words joined in Camel case fashion using a single space.
>>> split_camel('esseCarthaginienses')
'esse Carthaginienses'
>>> split_camel('urbemCertimam')
'urbem Certimam' | Below is the the instruction that describes the task:
### Input:
Separate any words joined in Camel case fashion using a single space.
>>> split_camel('esseCarthaginienses')
'esse Carthaginienses'
>>> split_camel('urbemCertimam')
'urbem Certimam'
### Response:
def split_camel(word: str) -> str:
"""
Separate any words joined in Camel case fashion using a single space.
>>> split_camel('esseCarthaginienses')
'esse Carthaginienses'
>>> split_camel('urbemCertimam')
'urbem Certimam'
"""
m = re.match('[a-z]+[A-Z][a-z]', word)
if m:
_, end = m.span()
return word[:end - 2] + ' ' + word[end - 2:]
return word |
def namespaced_query_string(d, prefix=""):
"""Transform a nested dict into a string with namespaced query params.
>>> namespaced_query_string({
... 'oauth_token': 'foo',
... 'track': {'title': 'bar', 'sharing': 'private'}}) == {
... 'track[sharing]': 'private',
... 'oauth_token': 'foo',
... 'track[title]': 'bar'} # doctest:+ELLIPSIS
True
"""
qs = {}
prefixed = lambda k: prefix and "%s[%s]" % (prefix, k) or k
for key, value in six.iteritems(d):
if isinstance(value, dict):
qs.update(namespaced_query_string(value, prefix=key))
else:
qs[prefixed(key)] = value
return qs | Transform a nested dict into a string with namespaced query params.
>>> namespaced_query_string({
... 'oauth_token': 'foo',
... 'track': {'title': 'bar', 'sharing': 'private'}}) == {
... 'track[sharing]': 'private',
... 'oauth_token': 'foo',
... 'track[title]': 'bar'} # doctest:+ELLIPSIS
True | Below is the the instruction that describes the task:
### Input:
Transform a nested dict into a string with namespaced query params.
>>> namespaced_query_string({
... 'oauth_token': 'foo',
... 'track': {'title': 'bar', 'sharing': 'private'}}) == {
... 'track[sharing]': 'private',
... 'oauth_token': 'foo',
... 'track[title]': 'bar'} # doctest:+ELLIPSIS
True
### Response:
def namespaced_query_string(d, prefix=""):
"""Transform a nested dict into a string with namespaced query params.
>>> namespaced_query_string({
... 'oauth_token': 'foo',
... 'track': {'title': 'bar', 'sharing': 'private'}}) == {
... 'track[sharing]': 'private',
... 'oauth_token': 'foo',
... 'track[title]': 'bar'} # doctest:+ELLIPSIS
True
"""
qs = {}
prefixed = lambda k: prefix and "%s[%s]" % (prefix, k) or k
for key, value in six.iteritems(d):
if isinstance(value, dict):
qs.update(namespaced_query_string(value, prefix=key))
else:
qs[prefixed(key)] = value
return qs |
def setAccessPolicyResponse(
self, pid, accessPolicy, serialVersion, vendorSpecific=None
):
"""CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) →
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'serialVersion': str(serialVersion),
'accessPolicy': ('accessPolicy.xml', accessPolicy.toxml('utf-8')),
}
return self.PUT(['accessRules', pid], fields=mmp_dict, headers=vendorSpecific) | CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) →
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns: | Below is the the instruction that describes the task:
### Input:
CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) →
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns:
### Response:
def setAccessPolicyResponse(
self, pid, accessPolicy, serialVersion, vendorSpecific=None
):
"""CNAuthorization.setAccessPolicy(session, pid, accessPolicy, serialVersion) →
boolean https://releases.dataone.org/online/api-
documentation-v2.0.1/apis/CN_APIs.html#CNAuthorization.setAccessPolicy.
Args:
pid:
accessPolicy:
serialVersion:
vendorSpecific:
Returns:
"""
mmp_dict = {
'serialVersion': str(serialVersion),
'accessPolicy': ('accessPolicy.xml', accessPolicy.toxml('utf-8')),
}
return self.PUT(['accessRules', pid], fields=mmp_dict, headers=vendorSpecific) |
def _validate_frow(self, frow):
"""Validate frow argument."""
is_int = isinstance(frow, int) and (not isinstance(frow, bool))
pexdoc.exh.addai("frow", not (is_int and (frow >= 0)))
return frow | Validate frow argument. | Below is the the instruction that describes the task:
### Input:
Validate frow argument.
### Response:
def _validate_frow(self, frow):
"""Validate frow argument."""
is_int = isinstance(frow, int) and (not isinstance(frow, bool))
pexdoc.exh.addai("frow", not (is_int and (frow >= 0)))
return frow |
def _GetClientLib(service_class_names, language, output_path, build_system,
hostname=None, application_path=None):
"""Fetch client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
build_system: The target build system for the client library language.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specificied in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of paths to client libraries.
"""
client_libs = []
service_configs = GenApiConfig(
service_class_names, hostname=hostname,
config_string_generator=discovery_generator.DiscoveryGenerator(),
application_path=application_path)
for api_name_version, config in service_configs.iteritems():
client_name = api_name_version + '.zip'
client_libs.append(
_GenClientLibFromContents(config, language, output_path,
build_system, client_name))
return client_libs | Fetch client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
build_system: The target build system for the client library language.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specificied in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of paths to client libraries. | Below is the the instruction that describes the task:
### Input:
Fetch client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
build_system: The target build system for the client library language.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specificied in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of paths to client libraries.
### Response:
def _GetClientLib(service_class_names, language, output_path, build_system,
hostname=None, application_path=None):
"""Fetch client libraries from a cloud service.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
language: The client library language to generate. (java)
output_path: The directory to output the discovery docs to.
build_system: The target build system for the client library language.
hostname: A string hostname which will be used as the default version
hostname. If no hostname is specificied in the @endpoints.api decorator,
this value is the fallback. Defaults to None.
application_path: A string containing the path to the AppEngine app.
Returns:
A list of paths to client libraries.
"""
client_libs = []
service_configs = GenApiConfig(
service_class_names, hostname=hostname,
config_string_generator=discovery_generator.DiscoveryGenerator(),
application_path=application_path)
for api_name_version, config in service_configs.iteritems():
client_name = api_name_version + '.zip'
client_libs.append(
_GenClientLibFromContents(config, language, output_path,
build_system, client_name))
return client_libs |
def write(filename, headers, dcols, data, headerlines=[],
header_char='H', sldir='.', sep=' ', trajectory=False,
download=False):
'''
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
'''
if sldir.endswith(os.sep):
filename = str(sldir)+str(filename)
else:
filename = str(sldir)+os.sep+str(filename)
tmp=[] #temp variable
lines=[]#list of the data lines
lengthList=[]# list of the longest element (data or column name)
# in each column
if os.path.exists(filename):
print('Warning this method will overwrite '+ filename)
print('Would you like to continue? (y)es or (n)no?')
s = input('--> ')
if s=='Y' or s=='y' or s=='Yes' or s=='yes':
print('Yes selected')
print('Continuing as normal')
else:
print('No Selected')
print('Returning None')
return None
if len(data)!=len(dcols):
print('The number of data columns does not equal the number of Data attributes')
print('returning none')
return None
if trajectory:
sep=' '
for i in range(len(headers)):
if not trajectory:
tmp.append(header_char+' '+headers[i]+'\n')
else:
tmp.append(headers[i]+'\n')
headers=tmp
tmp=''
for i in range(len(data)): #Line length stuff
length=len(dcols[i])
for j in range(len(data[dcols[i]])): #len(data[i]) throws error as type(data)=dict, not list
if len(str(data[dcols[i]][j]))>length: #data[i][j] throws error as type(data)=dict, not list
length=len(str(data[dcols[i]][j]))
lengthList.append(length)
print(lengthList)
tmp=''
tmp1='9'
if trajectory:
tmp='#'
for i in range(len(dcols)):
tmp1=dcols[i]
if not trajectory:
if len(dcols[i]) < lengthList[i]:
j=lengthList[i]-len(dcols[i])
for k in range(j):
tmp1+=' '
tmp+=sep+tmp1
else:
tmp+=' '+dcols[i]
tmp+='\n'
dcols=tmp
tmp=''
for i in range(len(data[0])):
for j in range(len(data)):
tmp1=str(data[j][i])
if len(str(data[j][i])) < lengthList[j]:
l=lengthList[j]-len(str(data[j][i]))
for k in range(l):
tmp1+=' '
tmp+=sep+tmp1
lines.append(tmp+'\n')
tmp=''
f=open(filename,'w')
if not trajectory:
for i in range(len(headers)):
f.write(headers[i])
f.write(dcols)
else:
f.write(dcols)
for i in range(len(headerlines)):
f.write('# '+headerlines[i]+'\n')
for i in range(len(headers)):
f.write(headers[i])
for i in range(len(lines)):
f.write(lines[i])
f.close()
if download:
from IPython.display import FileLink, FileLinks
return FileLink(filename)
else:
return None | Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False. | Below is the the instruction that describes the task:
### Input:
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
### Response:
def write(filename, headers, dcols, data, headerlines=[],
header_char='H', sldir='.', sep=' ', trajectory=False,
download=False):
'''
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
'''
if sldir.endswith(os.sep):
filename = str(sldir)+str(filename)
else:
filename = str(sldir)+os.sep+str(filename)
tmp=[] #temp variable
lines=[]#list of the data lines
lengthList=[]# list of the longest element (data or column name)
# in each column
if os.path.exists(filename):
print('Warning this method will overwrite '+ filename)
print('Would you like to continue? (y)es or (n)no?')
s = input('--> ')
if s=='Y' or s=='y' or s=='Yes' or s=='yes':
print('Yes selected')
print('Continuing as normal')
else:
print('No Selected')
print('Returning None')
return None
if len(data)!=len(dcols):
print('The number of data columns does not equal the number of Data attributes')
print('returning none')
return None
if trajectory:
sep=' '
for i in range(len(headers)):
if not trajectory:
tmp.append(header_char+' '+headers[i]+'\n')
else:
tmp.append(headers[i]+'\n')
headers=tmp
tmp=''
for i in range(len(data)): #Line length stuff
length=len(dcols[i])
for j in range(len(data[dcols[i]])): #len(data[i]) throws error as type(data)=dict, not list
if len(str(data[dcols[i]][j]))>length: #data[i][j] throws error as type(data)=dict, not list
length=len(str(data[dcols[i]][j]))
lengthList.append(length)
print(lengthList)
tmp=''
tmp1='9'
if trajectory:
tmp='#'
for i in range(len(dcols)):
tmp1=dcols[i]
if not trajectory:
if len(dcols[i]) < lengthList[i]:
j=lengthList[i]-len(dcols[i])
for k in range(j):
tmp1+=' '
tmp+=sep+tmp1
else:
tmp+=' '+dcols[i]
tmp+='\n'
dcols=tmp
tmp=''
for i in range(len(data[0])):
for j in range(len(data)):
tmp1=str(data[j][i])
if len(str(data[j][i])) < lengthList[j]:
l=lengthList[j]-len(str(data[j][i]))
for k in range(l):
tmp1+=' '
tmp+=sep+tmp1
lines.append(tmp+'\n')
tmp=''
f=open(filename,'w')
if not trajectory:
for i in range(len(headers)):
f.write(headers[i])
f.write(dcols)
else:
f.write(dcols)
for i in range(len(headerlines)):
f.write('# '+headerlines[i]+'\n')
for i in range(len(headers)):
f.write(headers[i])
for i in range(len(lines)):
f.write(lines[i])
f.close()
if download:
from IPython.display import FileLink, FileLinks
return FileLink(filename)
else:
return None |
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
os.unlink(self._file_path)
self.remove_all_properties(True)
self.remove_all_locks(True) | Remove this resource or collection (recursive).
See DAVResource.delete() | Below is the the instruction that describes the task:
### Input:
Remove this resource or collection (recursive).
See DAVResource.delete()
### Response:
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
os.unlink(self._file_path)
self.remove_all_properties(True)
self.remove_all_locks(True) |
def generate_harvestable_catalogs(self, catalogs, harvest='all',
report=None, export_path=None):
"""Filtra los catálogos provistos según el criterio determinado en
`harvest`.
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets conservar de
cada catálogo ('all', 'none', 'valid' o 'report').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`.
export_path (str): Path a un archivo JSON o directorio donde
exportar los catálogos filtrados. Si termina en ".json" se
exportará la lista de catálogos a un único archivo. Si es un
directorio, se guardará en él un JSON por catálogo. Si se
especifica `export_path`, el método no devolverá nada.
Returns:
list of dicts: Lista de catálogos.
"""
assert isinstance(catalogs, string_types + (dict, list))
# Si se pasa un único catálogo, genero una lista que lo contenga
if isinstance(catalogs, string_types + (dict,)):
catalogs = [catalogs]
harvestable_catalogs = [readers.read_catalog(c) for c in catalogs]
catalogs_urls = [catalog if isinstance(catalog, string_types)
else None for catalog in catalogs]
# aplica los criterios de cosecha
if harvest == 'all':
pass
elif harvest == 'none':
for catalog in harvestable_catalogs:
catalog["dataset"] = []
elif harvest == 'valid':
report = self.generate_datasets_report(catalogs, harvest)
return self.generate_harvestable_catalogs(
catalogs=catalogs, harvest='report', report=report,
export_path=export_path)
elif harvest == 'report':
if not report:
raise ValueError("""
Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para
el argumento 'report'. Por favor, intentelo nuevamente.""")
datasets_to_harvest = self._extract_datasets_to_harvest(report)
for idx_cat, catalog in enumerate(harvestable_catalogs):
catalog_url = catalogs_urls[idx_cat]
if ("dataset" in catalog and
isinstance(catalog["dataset"], list)):
catalog["dataset"] = [
dataset for dataset in catalog["dataset"]
if (catalog_url, dataset.get("title")) in
datasets_to_harvest
]
else:
catalog["dataset"] = []
else:
raise ValueError("""
{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o
'report'.""".format(harvest))
# devuelve los catálogos harvesteables
if export_path and os.path.isdir(export_path):
# Creo un JSON por catálogo
for idx, catalog in enumerate(harvestable_catalogs):
filename = os.path.join(export_path, "catalog_{}".format(idx))
writers.write_json(catalog, filename)
elif export_path:
# Creo un único JSON con todos los catálogos
writers.write_json(harvestable_catalogs, export_path)
else:
return harvestable_catalogs | Filtra los catálogos provistos según el criterio determinado en
`harvest`.
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets conservar de
cada catálogo ('all', 'none', 'valid' o 'report').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`.
export_path (str): Path a un archivo JSON o directorio donde
exportar los catálogos filtrados. Si termina en ".json" se
exportará la lista de catálogos a un único archivo. Si es un
directorio, se guardará en él un JSON por catálogo. Si se
especifica `export_path`, el método no devolverá nada.
Returns:
list of dicts: Lista de catálogos. | Below is the the instruction that describes the task:
### Input:
Filtra los catálogos provistos según el criterio determinado en
`harvest`.
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets conservar de
cada catálogo ('all', 'none', 'valid' o 'report').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`.
export_path (str): Path a un archivo JSON o directorio donde
exportar los catálogos filtrados. Si termina en ".json" se
exportará la lista de catálogos a un único archivo. Si es un
directorio, se guardará en él un JSON por catálogo. Si se
especifica `export_path`, el método no devolverá nada.
Returns:
list of dicts: Lista de catálogos.
### Response:
def generate_harvestable_catalogs(self, catalogs, harvest='all',
report=None, export_path=None):
"""Filtra los catálogos provistos según el criterio determinado en
`harvest`.
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets conservar de
cada catálogo ('all', 'none', 'valid' o 'report').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`.
export_path (str): Path a un archivo JSON o directorio donde
exportar los catálogos filtrados. Si termina en ".json" se
exportará la lista de catálogos a un único archivo. Si es un
directorio, se guardará en él un JSON por catálogo. Si se
especifica `export_path`, el método no devolverá nada.
Returns:
list of dicts: Lista de catálogos.
"""
assert isinstance(catalogs, string_types + (dict, list))
# Si se pasa un único catálogo, genero una lista que lo contenga
if isinstance(catalogs, string_types + (dict,)):
catalogs = [catalogs]
harvestable_catalogs = [readers.read_catalog(c) for c in catalogs]
catalogs_urls = [catalog if isinstance(catalog, string_types)
else None for catalog in catalogs]
# aplica los criterios de cosecha
if harvest == 'all':
pass
elif harvest == 'none':
for catalog in harvestable_catalogs:
catalog["dataset"] = []
elif harvest == 'valid':
report = self.generate_datasets_report(catalogs, harvest)
return self.generate_harvestable_catalogs(
catalogs=catalogs, harvest='report', report=report,
export_path=export_path)
elif harvest == 'report':
if not report:
raise ValueError("""
Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para
el argumento 'report'. Por favor, intentelo nuevamente.""")
datasets_to_harvest = self._extract_datasets_to_harvest(report)
for idx_cat, catalog in enumerate(harvestable_catalogs):
catalog_url = catalogs_urls[idx_cat]
if ("dataset" in catalog and
isinstance(catalog["dataset"], list)):
catalog["dataset"] = [
dataset for dataset in catalog["dataset"]
if (catalog_url, dataset.get("title")) in
datasets_to_harvest
]
else:
catalog["dataset"] = []
else:
raise ValueError("""
{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o
'report'.""".format(harvest))
# devuelve los catálogos harvesteables
if export_path and os.path.isdir(export_path):
# Creo un JSON por catálogo
for idx, catalog in enumerate(harvestable_catalogs):
filename = os.path.join(export_path, "catalog_{}".format(idx))
writers.write_json(catalog, filename)
elif export_path:
# Creo un único JSON con todos los catálogos
writers.write_json(harvestable_catalogs, export_path)
else:
return harvestable_catalogs |
def _operator_handling(self, cursor):
"""Returns a string with the literal that are part of the operation."""
values = self._literal_handling(cursor)
retval = ''.join([str(val) for val in values])
return retval | Returns a string with the literal that are part of the operation. | Below is the the instruction that describes the task:
### Input:
Returns a string with the literal that are part of the operation.
### Response:
def _operator_handling(self, cursor):
"""Returns a string with the literal that are part of the operation."""
values = self._literal_handling(cursor)
retval = ''.join([str(val) for val in values])
return retval |
def update(self, equipments):
"""
Method to update equipments
:param equipments: List containing equipments desired to updated
:return: None
"""
data = {'equipments': equipments}
equipments_ids = [str(env.get('id')) for env in equipments]
return super(ApiV4Equipment, self).put('api/v4/equipment/%s/' %
';'.join(equipments_ids), data) | Method to update equipments
:param equipments: List containing equipments desired to updated
:return: None | Below is the the instruction that describes the task:
### Input:
Method to update equipments
:param equipments: List containing equipments desired to updated
:return: None
### Response:
def update(self, equipments):
"""
Method to update equipments
:param equipments: List containing equipments desired to updated
:return: None
"""
data = {'equipments': equipments}
equipments_ids = [str(env.get('id')) for env in equipments]
return super(ApiV4Equipment, self).put('api/v4/equipment/%s/' %
';'.join(equipments_ids), data) |
def ls(self,
directory,
note=None,
loglevel=logging.DEBUG):
"""Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.is_shutit_installed(directory,note=note,loglevel=loglevel) | Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings | Below is the the instruction that describes the task:
### Input:
Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
### Response:
def ls(self,
directory,
note=None,
loglevel=logging.DEBUG):
"""Helper proc to list files in a directory
@param directory: directory to list. If the directory doesn't exist, shutit.fail() is called (i.e. the build fails.)
@param note: See send()
@type directory: string
@rtype: list of strings
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_session = self.get_current_shutit_pexpect_session()
return shutit_pexpect_session.is_shutit_installed(directory,note=note,loglevel=loglevel) |
def _PopulateQuantilesHistogram(self, hist, nums):
"""Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
"""
if not nums:
return
num_quantile_buckets = 10
quantiles_to_get = [
x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
]
quantiles = np.percentile(nums, quantiles_to_get)
hist.type = self.histogram_proto.QUANTILES
quantiles_sample_count = float(len(nums)) / num_quantile_buckets
for low, high in zip(quantiles, quantiles[1:]):
hist.buckets.add(
low_value=low, high_value=high, sample_count=quantiles_sample_count) | Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from. | Below is the the instruction that describes the task:
### Input:
Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
### Response:
def _PopulateQuantilesHistogram(self, hist, nums):
"""Fills in the histogram with quantile information from the provided array.
Args:
hist: A Histogram proto message to fill in.
nums: A list of numbers to create a quantiles histogram from.
"""
if not nums:
return
num_quantile_buckets = 10
quantiles_to_get = [
x * 100 / num_quantile_buckets for x in range(num_quantile_buckets + 1)
]
quantiles = np.percentile(nums, quantiles_to_get)
hist.type = self.histogram_proto.QUANTILES
quantiles_sample_count = float(len(nums)) / num_quantile_buckets
for low, high in zip(quantiles, quantiles[1:]):
hist.buckets.add(
low_value=low, high_value=high, sample_count=quantiles_sample_count) |
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
"""Run the given function in an independent python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass | Run the given function in an independent python interpreter. | Below is the the instruction that describes the task:
### Input:
Run the given function in an independent python interpreter.
### Response:
def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
"""Run the given function in an independent python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass |
def u_string_check(self, original, loc, tokens):
"""Check for Python2-style unicode strings."""
return self.check_strict("Python-2-style unicode string", original, loc, tokens) | Check for Python2-style unicode strings. | Below is the the instruction that describes the task:
### Input:
Check for Python2-style unicode strings.
### Response:
def u_string_check(self, original, loc, tokens):
"""Check for Python2-style unicode strings."""
return self.check_strict("Python-2-style unicode string", original, loc, tokens) |
def policy_assignment_delete(name, scope, **kwargs):
'''
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
'''
result = False
polconn = __utils__['azurearm.get_client']('policy', **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_assignments.delete(
policy_assignment_name=name,
scope=scope
)
result = True
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
return result | .. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852 | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
### Response:
def policy_assignment_delete(name, scope, **kwargs):
'''
.. versionadded:: 2019.2.0
Delete a policy assignment.
:param name: The name of the policy assignment to delete.
:param scope: The scope of the policy assignment.
CLI Example:
.. code-block:: bash
salt-call azurearm_resource.policy_assignment_delete testassign \
/subscriptions/bc75htn-a0fhsi-349b-56gh-4fghti-f84852
'''
result = False
polconn = __utils__['azurearm.get_client']('policy', **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_assignments.delete(
policy_assignment_name=name,
scope=scope
)
result = True
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('resource', str(exc), **kwargs)
return result |
def _is_somatic(rec):
"""Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan
"""
if _has_somatic_flag(rec):
return True
if _is_mutect2_somatic(rec):
return True
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "2":
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["somatic", "likelysomatic", "strongsomatic", "samplespecific"]:
return True
epr = rec.INFO.get("EPR", "").split(",")
if epr and all([p == "pass" for p in epr]):
return True
return False | Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan | Below is the the instruction that describes the task:
### Input:
Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan
### Response:
def _is_somatic(rec):
"""Handle somatic classifications from MuTect, MuTect2, VarDict and VarScan
"""
if _has_somatic_flag(rec):
return True
if _is_mutect2_somatic(rec):
return True
ss_flag = rec.INFO.get("SS")
if ss_flag is not None:
if str(ss_flag) == "2":
return True
status_flag = rec.INFO.get("STATUS")
if status_flag is not None:
if str(status_flag).lower() in ["somatic", "likelysomatic", "strongsomatic", "samplespecific"]:
return True
epr = rec.INFO.get("EPR", "").split(",")
if epr and all([p == "pass" for p in epr]):
return True
return False |
def close(self):
"""Commit changes and close the database."""
import sys, os
for store in self.stores:
if hasattr(store, 'save'):
store.save(reimport=False)
path, filename = os.path.split(store._filename)
modname = filename[:-3]
if modname in sys.modules:
del sys.modules[modname]
super().close() | Commit changes and close the database. | Below is the the instruction that describes the task:
### Input:
Commit changes and close the database.
### Response:
def close(self):
"""Commit changes and close the database."""
import sys, os
for store in self.stores:
if hasattr(store, 'save'):
store.save(reimport=False)
path, filename = os.path.split(store._filename)
modname = filename[:-3]
if modname in sys.modules:
del sys.modules[modname]
super().close() |
def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
tmp.append(lines[i])
return tmp | Removes leading empty lines from a list of lines.
:param list[str] lines: The lines. | Below is the the instruction that describes the task:
### Input:
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
### Response:
def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
tmp.append(lines[i])
return tmp |
def range(self, start, stop=None, step=1):
"""
Get the correctly distributed parallel chunks.
This corresponds to using the OpenMP 'static' schedule.
"""
self._assert_active()
if stop is None:
start, stop = 0, start
full_list = range(start, stop, step)
per_worker = len(full_list) // self._num_threads
rem = len(full_list) % self._num_threads
schedule = [
per_worker + 1 if thread_idx < rem else per_worker
for thread_idx in range(self._num_threads)
]
# pylint: disable=undefined-variable
start_idx = _functools.reduce(
lambda x, y: x + y, schedule[: self.thread_num], 0
)
end_idx = start_idx + schedule[self._thread_num]
return full_list[start_idx:end_idx] | Get the correctly distributed parallel chunks.
This corresponds to using the OpenMP 'static' schedule. | Below is the the instruction that describes the task:
### Input:
Get the correctly distributed parallel chunks.
This corresponds to using the OpenMP 'static' schedule.
### Response:
def range(self, start, stop=None, step=1):
"""
Get the correctly distributed parallel chunks.
This corresponds to using the OpenMP 'static' schedule.
"""
self._assert_active()
if stop is None:
start, stop = 0, start
full_list = range(start, stop, step)
per_worker = len(full_list) // self._num_threads
rem = len(full_list) % self._num_threads
schedule = [
per_worker + 1 if thread_idx < rem else per_worker
for thread_idx in range(self._num_threads)
]
# pylint: disable=undefined-variable
start_idx = _functools.reduce(
lambda x, y: x + y, schedule[: self.thread_num], 0
)
end_idx = start_idx + schedule[self._thread_num]
return full_list[start_idx:end_idx] |
def get_isoquant_fields(pqdb=False, poolnames=False):
"""Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet"""
# FIXME when is a None database passed?
if pqdb is None:
return {}
try:
channels_psms = pqdb.get_isoquant_amountpsms_channels()
except OperationalError:
# FIXME what does this catch?
return {}
quantheader, psmsheader = OrderedDict(), OrderedDict()
for chan_name, amnt_psms_name in channels_psms:
quantheader[chan_name] = poolnames
if amnt_psms_name:
psmsheader[amnt_psms_name] = poolnames
quantheader.update(psmsheader)
return quantheader | Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet | Below is the the instruction that describes the task:
### Input:
Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet
### Response:
def get_isoquant_fields(pqdb=False, poolnames=False):
"""Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet"""
# FIXME when is a None database passed?
if pqdb is None:
return {}
try:
channels_psms = pqdb.get_isoquant_amountpsms_channels()
except OperationalError:
# FIXME what does this catch?
return {}
quantheader, psmsheader = OrderedDict(), OrderedDict()
for chan_name, amnt_psms_name in channels_psms:
quantheader[chan_name] = poolnames
if amnt_psms_name:
psmsheader[amnt_psms_name] = poolnames
quantheader.update(psmsheader)
return quantheader |
def p_port_event_generation(self, p):
'''statement : SEND namespace DOUBLECOLON identifier LPAREN parameter_list RPAREN TO expression'''
p[0] = GeneratePortEventNode(port_name=p[2],
action_name=p[4],
parameter_list=p[6],
expression=p[9]) | statement : SEND namespace DOUBLECOLON identifier LPAREN parameter_list RPAREN TO expression | Below is the the instruction that describes the task:
### Input:
statement : SEND namespace DOUBLECOLON identifier LPAREN parameter_list RPAREN TO expression
### Response:
def p_port_event_generation(self, p):
'''statement : SEND namespace DOUBLECOLON identifier LPAREN parameter_list RPAREN TO expression'''
p[0] = GeneratePortEventNode(port_name=p[2],
action_name=p[4],
parameter_list=p[6],
expression=p[9]) |
def receiveData(self, connection, data):
"""
Receives some data for the given protocol.
"""
try:
protocol = self._protocols[connection]
except KeyError:
raise NoSuchConnection()
protocol.dataReceived(data)
return {} | Receives some data for the given protocol. | Below is the the instruction that describes the task:
### Input:
Receives some data for the given protocol.
### Response:
def receiveData(self, connection, data):
"""
Receives some data for the given protocol.
"""
try:
protocol = self._protocols[connection]
except KeyError:
raise NoSuchConnection()
protocol.dataReceived(data)
return {} |
def setup_parser():
"""Setup an ArgumentParser."""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=5005)
parser.add_argument('-i', '--interval', type=int, default=480)
parser.add_argument('host', type=str, help='hostname')
return parser | Setup an ArgumentParser. | Below is the the instruction that describes the task:
### Input:
Setup an ArgumentParser.
### Response:
def setup_parser():
"""Setup an ArgumentParser."""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=5005)
parser.add_argument('-i', '--interval', type=int, default=480)
parser.add_argument('host', type=str, help='hostname')
return parser |
def remove_obsolete_items(self):
"""Removing obsolete items"""
self.rdata = [(filename, data) for filename, data in self.rdata
if is_module_or_package(filename)] | Removing obsolete items | Below is the the instruction that describes the task:
### Input:
Removing obsolete items
### Response:
def remove_obsolete_items(self):
"""Removing obsolete items"""
self.rdata = [(filename, data) for filename, data in self.rdata
if is_module_or_package(filename)] |
def validate_config(self):
'''validate_config
High-level api: Validate config against models. ConfigError is raised
if the config has issues.
Returns
-------
None
There is no return of this method.
Raises
------
ConfigError
If config contains error.
'''
self.roots
for child in self.ele.getchildren():
self._validate_node(child) | validate_config
High-level api: Validate config against models. ConfigError is raised
if the config has issues.
Returns
-------
None
There is no return of this method.
Raises
------
ConfigError
If config contains error. | Below is the the instruction that describes the task:
### Input:
validate_config
High-level api: Validate config against models. ConfigError is raised
if the config has issues.
Returns
-------
None
There is no return of this method.
Raises
------
ConfigError
If config contains error.
### Response:
def validate_config(self):
'''validate_config
High-level api: Validate config against models. ConfigError is raised
if the config has issues.
Returns
-------
None
There is no return of this method.
Raises
------
ConfigError
If config contains error.
'''
self.roots
for child in self.ele.getchildren():
self._validate_node(child) |
def access_vlan(self, inter_type, inter, vlan_id):
"""
Add a L2 Interface to a specific VLAN.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
"""
config = ET.Element('config')
interface = ET.SubElement(config, 'interface',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
int_type = ET.SubElement(interface, inter_type)
name = ET.SubElement(int_type, 'name')
name.text = inter
switchport = ET.SubElement(int_type, 'switchport')
access = ET.SubElement(switchport, 'access')
accessvlan = ET.SubElement(access, 'accessvlan')
accessvlan.text = vlan_id
try:
self._callback(config)
return True
# TODO add logging and narrow exception window.
except Exception as error:
logging.error(error)
return False | Add a L2 Interface to a specific VLAN.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None | Below is the the instruction that describes the task:
### Input:
Add a L2 Interface to a specific VLAN.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
### Response:
def access_vlan(self, inter_type, inter, vlan_id):
"""
Add a L2 Interface to a specific VLAN.
Args:
inter_type: The type of interface you want to configure. Ex.
tengigabitethernet, gigabitethernet, fortygigabitethernet.
inter: The ID for the interface you want to configure. Ex. 1/0/1
vlan_id: ID for the VLAN interface being modified. Value of 2-4096.
Returns:
True if command completes successfully or False if not.
Raises:
None
"""
config = ET.Element('config')
interface = ET.SubElement(config, 'interface',
xmlns=("urn:brocade.com:mgmt:"
"brocade-interface"))
int_type = ET.SubElement(interface, inter_type)
name = ET.SubElement(int_type, 'name')
name.text = inter
switchport = ET.SubElement(int_type, 'switchport')
access = ET.SubElement(switchport, 'access')
accessvlan = ET.SubElement(access, 'accessvlan')
accessvlan.text = vlan_id
try:
self._callback(config)
return True
# TODO add logging and narrow exception window.
except Exception as error:
logging.error(error)
return False |
def raise_unresolved_targets(build_context, conf, unknown_seeds, seed_refs):
"""Raise error about unresolved targets during graph parsing."""
def format_target(target_name):
# TODO: suggest similar known target names
build_module = split_build_module(target_name)
return '{} (in {})'.format(target_name,
conf.get_build_file_path(build_module))
def format_unresolved(seed):
if seed not in seed_refs:
return seed
seed_ref = seed_refs[seed]
reasons = []
if seed_ref.on_cli:
reasons.append('seen on command line')
if seed_ref.from_default:
reasons.append('specified as default target in {}'
.format(conf.get_project_build_file))
if seed_ref.dep_of:
reasons.append(
'dependency of ' +
', '.join(format_target(target_name)
for target_name in sorted(seed_ref.dep_of)))
if seed_ref.buildenv_of:
reasons.append(
'buildenv of ' +
', '.join(format_target(target_name)
for target_name in sorted(seed_ref.buildenv_of)))
return '{} - {}'.format(seed, ', '.join(reasons))
unresolved_str = '\n'.join(format_unresolved(target_name)
for target_name in sorted(unknown_seeds))
num_target_str = '{} target'.format(len(unknown_seeds))
if len(unknown_seeds) > 1:
num_target_str += 's'
raise ValueError('Could not resolve {}:\n{}'
.format(num_target_str, unresolved_str)) | Raise error about unresolved targets during graph parsing. | Below is the the instruction that describes the task:
### Input:
Raise error about unresolved targets during graph parsing.
### Response:
def raise_unresolved_targets(build_context, conf, unknown_seeds, seed_refs):
"""Raise error about unresolved targets during graph parsing."""
def format_target(target_name):
# TODO: suggest similar known target names
build_module = split_build_module(target_name)
return '{} (in {})'.format(target_name,
conf.get_build_file_path(build_module))
def format_unresolved(seed):
if seed not in seed_refs:
return seed
seed_ref = seed_refs[seed]
reasons = []
if seed_ref.on_cli:
reasons.append('seen on command line')
if seed_ref.from_default:
reasons.append('specified as default target in {}'
.format(conf.get_project_build_file))
if seed_ref.dep_of:
reasons.append(
'dependency of ' +
', '.join(format_target(target_name)
for target_name in sorted(seed_ref.dep_of)))
if seed_ref.buildenv_of:
reasons.append(
'buildenv of ' +
', '.join(format_target(target_name)
for target_name in sorted(seed_ref.buildenv_of)))
return '{} - {}'.format(seed, ', '.join(reasons))
unresolved_str = '\n'.join(format_unresolved(target_name)
for target_name in sorted(unknown_seeds))
num_target_str = '{} target'.format(len(unknown_seeds))
if len(unknown_seeds) > 1:
num_target_str += 's'
raise ValueError('Could not resolve {}:\n{}'
.format(num_target_str, unresolved_str)) |
def bovy_end_print(filename,**kwargs):
"""
NAME:
bovy_end_print
PURPOSE:
saves the current figure(s) to filename
INPUT:
filename - filename for plot (with extension)
OPTIONAL INPUTS:
format - file-format
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
"""
if 'format' in kwargs:
pyplot.savefig(filename,**kwargs)
else:
pyplot.savefig(filename,format=re.split(r'\.',filename)[-1],**kwargs)
pyplot.close() | NAME:
bovy_end_print
PURPOSE:
saves the current figure(s) to filename
INPUT:
filename - filename for plot (with extension)
OPTIONAL INPUTS:
format - file-format
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
bovy_end_print
PURPOSE:
saves the current figure(s) to filename
INPUT:
filename - filename for plot (with extension)
OPTIONAL INPUTS:
format - file-format
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
### Response:
def bovy_end_print(filename,**kwargs):
"""
NAME:
bovy_end_print
PURPOSE:
saves the current figure(s) to filename
INPUT:
filename - filename for plot (with extension)
OPTIONAL INPUTS:
format - file-format
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
"""
if 'format' in kwargs:
pyplot.savefig(filename,**kwargs)
else:
pyplot.savefig(filename,format=re.split(r'\.',filename)[-1],**kwargs)
pyplot.close() |
def mnest_basename(self):
"""Full path to basename
"""
if not hasattr(self, '_mnest_basename'):
s = self.labelstring
if s=='0_0':
s = 'single'
elif s=='0_0-0_1':
s = 'binary'
elif s=='0_0-0_1-0_2':
s = 'triple'
s = '{}-{}'.format(self.ic.name, s)
self._mnest_basename = os.path.join('chains', s+'-')
if os.path.isabs(self._mnest_basename):
return self._mnest_basename
else:
return os.path.join(self.directory, self._mnest_basename) | Full path to basename | Below is the the instruction that describes the task:
### Input:
Full path to basename
### Response:
def mnest_basename(self):
"""Full path to basename
"""
if not hasattr(self, '_mnest_basename'):
s = self.labelstring
if s=='0_0':
s = 'single'
elif s=='0_0-0_1':
s = 'binary'
elif s=='0_0-0_1-0_2':
s = 'triple'
s = '{}-{}'.format(self.ic.name, s)
self._mnest_basename = os.path.join('chains', s+'-')
if os.path.isabs(self._mnest_basename):
return self._mnest_basename
else:
return os.path.join(self.directory, self._mnest_basename) |
def lookup_cert(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
cert = consumer.get('cert', None)
return cert | Search through keys | Below is the the instruction that describes the task:
### Input:
Search through keys
### Response:
def lookup_cert(self, key):
"""
Search through keys
"""
if not self.consumers:
log.critical(("No consumers defined in settings."
"Have you created a configuration file?"))
return None
consumer = self.consumers.get(key)
if not consumer:
log.info("Did not find consumer, using key: %s ", key)
return None
cert = consumer.get('cert', None)
return cert |
def _iostat_fbsd(interval, count, disks):
'''
Tested on FreeBSD, quite likely other BSD's only need small changes in cmd syntax
'''
if disks is None:
iostat_cmd = 'iostat -xC -w {0} -c {1} '.format(interval, count)
elif isinstance(disks, six.string_types):
iostat_cmd = 'iostat -x -w {0} -c {1} {2}'.format(interval, count, disks)
else:
iostat_cmd = 'iostat -x -w {0} -c {1} {2}'.format(interval, count, ' '.join(disks))
sys_stats = []
dev_stats = collections.defaultdict(list)
sys_header = []
dev_header = []
h_len = 1000 # randomly absurdly high
ret = iter(__salt__['cmd.run_stdout'](iostat_cmd, output_loglevel='quiet').splitlines())
for line in ret:
if not line.startswith('device'):
continue
elif not dev_header:
dev_header = line.split()[1:]
while line is not False:
line = next(ret, False)
if not line or not line[0].isalnum():
break
line = line.split()
disk = line[0]
stats = [decimal.Decimal(x) for x in line[1:]]
# h_len will become smallest number of fields in stat lines
if len(stats) < h_len:
h_len = len(stats)
dev_stats[disk].append(stats)
iostats = {}
# The header was longer than the smallest number of fields
# Therefore the sys stats are hidden in there
if h_len < len(dev_header):
sys_header = dev_header[h_len:]
dev_header = dev_header[0:h_len]
for disk, stats in dev_stats.items():
if len(stats[0]) > h_len:
sys_stats = [stat[h_len:] for stat in stats]
dev_stats[disk] = [stat[0:h_len] for stat in stats]
iostats['sys'] = _iostats_dict(sys_header, sys_stats)
for disk, stats in dev_stats.items():
iostats[disk] = _iostats_dict(dev_header, stats)
return iostats | Tested on FreeBSD, quite likely other BSD's only need small changes in cmd syntax | Below is the the instruction that describes the task:
### Input:
Tested on FreeBSD, quite likely other BSD's only need small changes in cmd syntax
### Response:
def _iostat_fbsd(interval, count, disks):
'''
Tested on FreeBSD, quite likely other BSD's only need small changes in cmd syntax
'''
if disks is None:
iostat_cmd = 'iostat -xC -w {0} -c {1} '.format(interval, count)
elif isinstance(disks, six.string_types):
iostat_cmd = 'iostat -x -w {0} -c {1} {2}'.format(interval, count, disks)
else:
iostat_cmd = 'iostat -x -w {0} -c {1} {2}'.format(interval, count, ' '.join(disks))
sys_stats = []
dev_stats = collections.defaultdict(list)
sys_header = []
dev_header = []
h_len = 1000 # randomly absurdly high
ret = iter(__salt__['cmd.run_stdout'](iostat_cmd, output_loglevel='quiet').splitlines())
for line in ret:
if not line.startswith('device'):
continue
elif not dev_header:
dev_header = line.split()[1:]
while line is not False:
line = next(ret, False)
if not line or not line[0].isalnum():
break
line = line.split()
disk = line[0]
stats = [decimal.Decimal(x) for x in line[1:]]
# h_len will become smallest number of fields in stat lines
if len(stats) < h_len:
h_len = len(stats)
dev_stats[disk].append(stats)
iostats = {}
# The header was longer than the smallest number of fields
# Therefore the sys stats are hidden in there
if h_len < len(dev_header):
sys_header = dev_header[h_len:]
dev_header = dev_header[0:h_len]
for disk, stats in dev_stats.items():
if len(stats[0]) > h_len:
sys_stats = [stat[h_len:] for stat in stats]
dev_stats[disk] = [stat[0:h_len] for stat in stats]
iostats['sys'] = _iostats_dict(sys_header, sys_stats)
for disk, stats in dev_stats.items():
iostats[disk] = _iostats_dict(dev_header, stats)
return iostats |
def create(self, workflow_id, email_id, data):
"""
Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
}
"""
self.workflow_id = workflow_id
self.email_id = email_id
if 'email_address' not in data:
raise KeyError('The automation email queue must have an email_address')
check_email(data['email_address'])
response = self._mc_client._post(
url=self._build_path(workflow_id, 'emails', email_id, 'queue'),
data=data
)
if response is not None:
self.subscriber_hash = response['id']
else:
self.subscriber_hash = None
return response | Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
} | Below is the the instruction that describes the task:
### Input:
Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
}
### Response:
def create(self, workflow_id, email_id, data):
"""
Manually add a subscriber to a workflow, bypassing the default trigger
settings. You can also use this endpoint to trigger a series of
automated emails in an API 3.0 workflow type or add subscribers to an
automated email queue that uses the API request delay type.
:param workflow_id: The unique id for the Automation workflow.
:type workflow_id: :py:class:`str`
:param email_id: The unique id for the Automation workflow email.
:type email_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"email_address": string*
}
"""
self.workflow_id = workflow_id
self.email_id = email_id
if 'email_address' not in data:
raise KeyError('The automation email queue must have an email_address')
check_email(data['email_address'])
response = self._mc_client._post(
url=self._build_path(workflow_id, 'emails', email_id, 'queue'),
data=data
)
if response is not None:
self.subscriber_hash = response['id']
else:
self.subscriber_hash = None
return response |
def rUpdate(original, updates):
"""Recursively updates the values in original with the values from updates."""
# Keep a list of the sub-dictionaries that need to be updated to avoid having
# to use recursion (which could fail for dictionaries with a lot of nesting.
dictPairs = [(original, updates)]
while len(dictPairs) > 0:
original, updates = dictPairs.pop()
for k, v in updates.iteritems():
if k in original and isinstance(original[k], dict) and isinstance(v, dict):
dictPairs.append((original[k], v))
else:
original[k] = v | Recursively updates the values in original with the values from updates. | Below is the the instruction that describes the task:
### Input:
Recursively updates the values in original with the values from updates.
### Response:
def rUpdate(original, updates):
"""Recursively updates the values in original with the values from updates."""
# Keep a list of the sub-dictionaries that need to be updated to avoid having
# to use recursion (which could fail for dictionaries with a lot of nesting.
dictPairs = [(original, updates)]
while len(dictPairs) > 0:
original, updates = dictPairs.pop()
for k, v in updates.iteritems():
if k in original and isinstance(original[k], dict) and isinstance(v, dict):
dictPairs.append((original[k], v))
else:
original[k] = v |
def influx_init(self, url, port, user, pwd, db):
"""
Initialize an Influxdb database client
"""
try:
cli = DataFrameClient(url, port, user, pwd, db)
self.influx_cli = cli
except Exception as e:
self.err(e, self.influx_init,
"Can not initialize Influxdb database") | Initialize an Influxdb database client | Below is the the instruction that describes the task:
### Input:
Initialize an Influxdb database client
### Response:
def influx_init(self, url, port, user, pwd, db):
"""
Initialize an Influxdb database client
"""
try:
cli = DataFrameClient(url, port, user, pwd, db)
self.influx_cli = cli
except Exception as e:
self.err(e, self.influx_init,
"Can not initialize Influxdb database") |
def _get_kwsdag(self, goids, go2obj, **kws_all):
"""Get keyword args for a GoSubDag."""
kws_dag = {}
# Term Counts for GO Term information score
tcntobj = self._get_tcntobj(goids, go2obj, **kws_all) # TermCounts or None
if tcntobj is not None:
kws_dag['tcntobj'] = tcntobj
# GO letters specified by the user
if 'go_aliases' in kws_all:
fin_go_aliases = kws_all['go_aliases']
if os.path.exists(fin_go_aliases):
go2letter = read_d1_letter(fin_go_aliases)
if go2letter:
kws_dag['go2letter'] = go2letter
return kws_dag | Get keyword args for a GoSubDag. | Below is the the instruction that describes the task:
### Input:
Get keyword args for a GoSubDag.
### Response:
def _get_kwsdag(self, goids, go2obj, **kws_all):
"""Get keyword args for a GoSubDag."""
kws_dag = {}
# Term Counts for GO Term information score
tcntobj = self._get_tcntobj(goids, go2obj, **kws_all) # TermCounts or None
if tcntobj is not None:
kws_dag['tcntobj'] = tcntobj
# GO letters specified by the user
if 'go_aliases' in kws_all:
fin_go_aliases = kws_all['go_aliases']
if os.path.exists(fin_go_aliases):
go2letter = read_d1_letter(fin_go_aliases)
if go2letter:
kws_dag['go2letter'] = go2letter
return kws_dag |
def load(self, **kwargs):
"""Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return:
"""
kwargs['transform_name'] = True
kwargs = self._mutate_name(kwargs)
return self._load(**kwargs) | Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return:
### Response:
def load(self, **kwargs):
"""Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return:
"""
kwargs['transform_name'] = True
kwargs = self._mutate_name(kwargs)
return self._load(**kwargs) |
def show_category(self, category, args):
"""Show short help for all commands in `category'."""
n2cmd = self.proc.commands
names = list(n2cmd.keys())
if len(args) == 1 and args[0] == '*':
self.section("Commands in class %s:" % category)
cmds = [cmd for cmd in names if category == n2cmd[cmd].category]
cmds.sort()
self.msg_nocr(self.columnize_commands(cmds))
return
self.msg("%s.\n" % categories[category])
self.section("List of commands:")
names.sort()
for name in names: # Foo! iteritems() doesn't do sorting
if category != n2cmd[name].category: continue
self.msg("%-13s -- %s" % (name, n2cmd[name].short_help,))
pass
return | Show short help for all commands in `category'. | Below is the the instruction that describes the task:
### Input:
Show short help for all commands in `category'.
### Response:
def show_category(self, category, args):
"""Show short help for all commands in `category'."""
n2cmd = self.proc.commands
names = list(n2cmd.keys())
if len(args) == 1 and args[0] == '*':
self.section("Commands in class %s:" % category)
cmds = [cmd for cmd in names if category == n2cmd[cmd].category]
cmds.sort()
self.msg_nocr(self.columnize_commands(cmds))
return
self.msg("%s.\n" % categories[category])
self.section("List of commands:")
names.sort()
for name in names: # Foo! iteritems() doesn't do sorting
if category != n2cmd[name].category: continue
self.msg("%-13s -- %s" % (name, n2cmd[name].short_help,))
pass
return |
def process_raw_file(self, raw_file_name, field_names):
"""
takes the filename to be read and uses the maps setup
on class instantiation to process the file.
This is a top level function and uses self.maps which
should be the column descriptions (in order).
"""
#num_outouts = 0
dist_vals = []
group_dat = []
events = []
#facts = []
with open(raw_file_name) as csvfile:
reader = csv.DictReader(csvfile, fieldnames = field_names)
for num_lines, row in enumerate(reader):
#print('row = =',row)
for col_num, fld in enumerate(field_names):
try:
#print('self.maps[', col_num, '] = ', self.maps[col_num])
if self.maps[col_num].val == 'group_distinct':
group_dat.append(str(row[fld]))
elif self.maps[col_num].val == 'event_date':
events.append(str(row[fld]))
except Exception as ex:
print('parsing error - shouldnt really be splitting using a comma anyway!', str(ex))
dist_vals = sorted(list(set(group_dat)))
return num_lines, dist_vals, group_dat, sorted(list(set(events))) | takes the filename to be read and uses the maps setup
on class instantiation to process the file.
This is a top level function and uses self.maps which
should be the column descriptions (in order). | Below is the the instruction that describes the task:
### Input:
takes the filename to be read and uses the maps setup
on class instantiation to process the file.
This is a top level function and uses self.maps which
should be the column descriptions (in order).
### Response:
def process_raw_file(self, raw_file_name, field_names):
"""
takes the filename to be read and uses the maps setup
on class instantiation to process the file.
This is a top level function and uses self.maps which
should be the column descriptions (in order).
"""
#num_outouts = 0
dist_vals = []
group_dat = []
events = []
#facts = []
with open(raw_file_name) as csvfile:
reader = csv.DictReader(csvfile, fieldnames = field_names)
for num_lines, row in enumerate(reader):
#print('row = =',row)
for col_num, fld in enumerate(field_names):
try:
#print('self.maps[', col_num, '] = ', self.maps[col_num])
if self.maps[col_num].val == 'group_distinct':
group_dat.append(str(row[fld]))
elif self.maps[col_num].val == 'event_date':
events.append(str(row[fld]))
except Exception as ex:
print('parsing error - shouldnt really be splitting using a comma anyway!', str(ex))
dist_vals = sorted(list(set(group_dat)))
return num_lines, dist_vals, group_dat, sorted(list(set(events))) |
def runCommandSplits(splits, silent=False, shell=False):
"""
Run a shell command given the command's parsed command line
"""
try:
if silent:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(
splits, stdout=devnull, stderr=devnull, shell=shell)
else:
subprocess.check_call(splits, shell=shell)
except OSError as exception:
if exception.errno == 2: # cmd not found
raise Exception(
"Can't find command while trying to run {}".format(splits))
else:
raise | Run a shell command given the command's parsed command line | Below is the the instruction that describes the task:
### Input:
Run a shell command given the command's parsed command line
### Response:
def runCommandSplits(splits, silent=False, shell=False):
"""
Run a shell command given the command's parsed command line
"""
try:
if silent:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(
splits, stdout=devnull, stderr=devnull, shell=shell)
else:
subprocess.check_call(splits, shell=shell)
except OSError as exception:
if exception.errno == 2: # cmd not found
raise Exception(
"Can't find command while trying to run {}".format(splits))
else:
raise |
def right(self):
"""
Entry is right sibling of current directory entry
"""
return self.source.directory[self.right_sibling_id] \
if self.right_sibling_id != NOSTREAM else None | Entry is right sibling of current directory entry | Below is the the instruction that describes the task:
### Input:
Entry is right sibling of current directory entry
### Response:
def right(self):
"""
Entry is right sibling of current directory entry
"""
return self.source.directory[self.right_sibling_id] \
if self.right_sibling_id != NOSTREAM else None |
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None | Removes a temporary copy of the MAGICC version shipped with Pymagicc. | Below is the the instruction that describes the task:
### Input:
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
### Response:
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None |
def on_mouse_move(self, event):
"""Mouse move handler
Parameters
----------
event : instance of Event
The event.
"""
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
dxy = self.canvas_tr.map(dxy)
o = self.canvas_tr.map([0, 0])
t = dxy - o
self.move(t)
elif button == 2:
center = self.canvas_tr.map(event.press_event.pos)
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
self.shader_map() | Mouse move handler
Parameters
----------
event : instance of Event
The event. | Below is the the instruction that describes the task:
### Input:
Mouse move handler
Parameters
----------
event : instance of Event
The event.
### Response:
def on_mouse_move(self, event):
"""Mouse move handler
Parameters
----------
event : instance of Event
The event.
"""
if event.is_dragging:
dxy = event.pos - event.last_event.pos
button = event.press_event.button
if button == 1:
dxy = self.canvas_tr.map(dxy)
o = self.canvas_tr.map([0, 0])
t = dxy - o
self.move(t)
elif button == 2:
center = self.canvas_tr.map(event.press_event.pos)
if self._aspect is None:
self.zoom(np.exp(dxy * (0.01, -0.01)), center)
else:
s = dxy[1] * -0.01
self.zoom(np.exp(np.array([s, s])), center)
self.shader_map() |
def _reverse_to_source(self, target, group1):
"""
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
"""
new_group = []
for dst in group1:
new_group += target[dst]
return set(new_group) | Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group | Below is the the instruction that describes the task:
### Input:
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
### Response:
def _reverse_to_source(self, target, group1):
"""
Args:
target (dict): A table containing the reverse transitions for each state
group1 (list): A group of states
Return:
Set: A set of states for which there is a transition with the states of the group
"""
new_group = []
for dst in group1:
new_group += target[dst]
return set(new_group) |
def post(self, url, data=None, **kwargs):
"""Encapsulte requests.post to use this class instance header"""
return requests.post(url, data=data, headers=self.add_headers(**kwargs)) | Encapsulte requests.post to use this class instance header | Below is the the instruction that describes the task:
### Input:
Encapsulte requests.post to use this class instance header
### Response:
def post(self, url, data=None, **kwargs):
"""Encapsulte requests.post to use this class instance header"""
return requests.post(url, data=data, headers=self.add_headers(**kwargs)) |
def so2(df):
"""
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
"""
polluant = 'SO2'
# Le DataFrame doit être en heure
if not isinstance(df.index.freq, pdoffset.Hour):
raise FreqException("df doit être en heure.")
res = {"Seuil de RI en moyenne H: 300u": depassement(df, valeur=300),
"Seuil d'Alerte sur 3H consécutives: 500u": depassement(df, valeur=500),
"Valeur limite pour la santé humaine 24H/A: 350u": depassement(df, valeur=350),
"Valeur limite pour la santé humaine 3J/A: 125u": depassement(df.resample('D', how='mean'), valeur=125),
"Objectif de qualité en moyenne A: 50u": depassement(df.resample('A', how='mean'), valeur=50),
"Protection de la végétation en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
"Protection de la végétation du 01/10 au 31/03: 20u": depassement(
df[(df.index.month <= 3) | (df.index.month >= 10)], valeur=20),
}
return polluant, res | Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement | Below is the the instruction that describes the task:
### Input:
Calculs réglementaires pour le dioxyde de soufre
Paramètres:
df: DataFrame contenant les mesures, avec un index temporel
(voir xair.get_mesure)
Retourne:
Une série de résultats dans un DataFrame :
******
unité (u): µg/m3 (microgramme par mètre cube)
Seuil de RI en moyenne H: 300u
Seuil d'Alerte sur 3H consécutives: 500u
Valeur limite pour la santé humaine 24H/A: 350u
Valeur limite pour la santé humaine 3J/A: 125u
Objectif de qualité en moyenne A: 50u
Protection de la végétation en moyenne A: 20u
Protection de la végétation du 01/10 au 31/03: 20u
Les résultats sont donnés en terme d'heure de dépassement
### Response:
def so2(df):
    """Regulatory computations for sulphur dioxide (SO2).

    Parameters:
    df: DataFrame holding the measurements, with a datetime index
        (see xair.get_mesure)

    Returns:
    A ``(polluant, res)`` tuple where ``res`` maps each regulatory
    threshold label (in French, as used downstream) to its exceedance
    results:
    ******
    unit (u): µg/m3 (microgram per cubic metre)
    Hourly-mean information threshold ("RI"): 300u
    Alert threshold over 3 consecutive hours: 500u
    Human-health limit value, 24H/year: 350u
    Human-health limit value, 3 days/year: 125u
    Quality objective, annual mean: 50u
    Vegetation protection, annual mean: 20u
    Vegetation protection from 01/10 to 31/03: 20u
    Results are expressed as hours of exceedance.
    """
    polluant = 'SO2'
    # The DataFrame must carry an hourly frequency index.
    if not isinstance(df.index.freq, pdoffset.Hour):
        raise FreqException("df doit être en heure.")
    # NOTE(review): resample(..., how='mean') is the legacy pandas API
    # (deprecated in 0.18, removed later) — confirm the pinned pandas
    # version supports it.
    res = {"Seuil de RI en moyenne H: 300u": depassement(df, valeur=300),
           "Seuil d'Alerte sur 3H consécutives: 500u": depassement(df, valeur=500),
           "Valeur limite pour la santé humaine 24H/A: 350u": depassement(df, valeur=350),
           "Valeur limite pour la santé humaine 3J/A: 125u": depassement(df.resample('D', how='mean'), valeur=125),
           "Objectif de qualité en moyenne A: 50u": depassement(df.resample('A', how='mean'), valeur=50),
           "Protection de la végétation en moyenne A: 20u": depassement(df.resample('A', how='mean'), valeur=20),
           "Protection de la végétation du 01/10 au 31/03: 20u": depassement(
               df[(df.index.month <= 3) | (df.index.month >= 10)], valeur=20),
           }
    return polluant, res
def cget(self, k):
"""cget redirect to support additional options"""
if k == "themebg":
return self._themebg
elif k == "toplevel":
return self._toplevel
elif k == "theme":
return self.current_theme
return tk.Tk.cget(self, k) | cget redirect to support additional options | Below is the instruction that describes the task:
### Input:
cget redirect to support additional options
### Response:
def cget(self, k):
    """cget redirect to support additional options.

    The extra options ``themebg``, ``toplevel`` and ``theme`` are served
    from this widget's own attributes; any other option key is delegated
    to the stock ``tk.Tk.cget``.
    """
    # Map custom option keys to the attributes that back them; the
    # attribute is only looked up when its key actually matches.
    extra_options = {
        "themebg": "_themebg",
        "toplevel": "_toplevel",
        "theme": "current_theme",
    }
    attr_name = extra_options.get(k)
    if attr_name is not None:
        return getattr(self, attr_name)
    return tk.Tk.cget(self, k)
def transform_generator(fn):
"""A decorator that marks transform pipes that should be called to create the real transform"""
if six.PY2:
fn.func_dict['is_transform_generator'] = True
else:
# py3
fn.__dict__['is_transform_generator'] = True
return fn | A decorator that marks transform pipes that should be called to create the real transform | Below is the instruction that describes the task:
### Input:
A decorator that marks transform pipes that should be called to create the real transform
### Response:
def transform_generator(fn):
    """A decorator that marks transform pipes that should be called to create the real transform.

    Sets the ``is_transform_generator`` attribute on ``fn`` so the
    pipeline machinery can detect it, and returns ``fn`` unchanged.
    """
    # Plain attribute assignment writes into the function's __dict__ on
    # both Python 2 and 3 (``func_dict`` was only ever an alias for
    # ``__dict__``), so no six/version branching is needed.
    fn.is_transform_generator = True
    return fn
def make_url(self, container=None, resource=None, query_items=None):
"""Create a URL from the specified parts."""
pth = [self._base_url]
if container:
pth.append(container.strip('/'))
if resource:
pth.append(resource)
else:
pth.append('')
url = '/'.join(pth)
if isinstance(query_items, (list, tuple, set)):
url += RestHttp._list_query_str(query_items)
query_items = None
p = requests.PreparedRequest()
p.prepare_url(url, query_items)
return p.url | Create a URL from the specified parts. | Below is the instruction that describes the task:
### Input:
Create a URL from the specified parts.
### Response:
def make_url(self, container=None, resource=None, query_items=None):
    """Create a URL from the specified parts.

    :param container: optional path segment appended (with surrounding
        slashes stripped) after ``self._base_url``.
    :param resource: optional final path segment; when omitted an empty
        segment is appended, which yields a trailing slash.
    :param query_items: query parameters; a list/tuple/set is serialized
        via ``RestHttp._list_query_str``, anything else is handed to
        ``requests`` for encoding.
    :return: the fully prepared URL string.
    """
    pth = [self._base_url]
    if container:
        pth.append(container.strip('/'))
    if resource:
        pth.append(resource)
    else:
        # Empty trailing segment => resulting URL ends with '/'.
        pth.append('')
    url = '/'.join(pth)
    if isinstance(query_items, (list, tuple, set)):
        # Pre-serialize sequence-style query items ourselves, then clear
        # them so PreparedRequest does not encode them a second time.
        url += RestHttp._list_query_str(query_items)
        query_items = None
    # Let requests normalize/percent-encode the final URL (and any
    # remaining dict-style query items).
    p = requests.PreparedRequest()
    p.prepare_url(url, query_items)
    return p.url
def as_categorical(self):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
) | Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports. | Below is the instruction that describes the task:
### Input:
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
### Response:
def as_categorical(self):
    """
    Coerce self into a pandas categorical.

    This is only defined on 1D arrays, since that's all pandas supports.

    :raises ValueError: if this array has more than one dimension.
    """
    if len(self.shape) > 1:
        raise ValueError("Can't convert a 2D array to a categorical.")
    # Suppress the pandas warning about NaN categories; the integer
    # codes from as_int_array() index into self.categories.
    with ignore_pandas_nan_categorical_warning():
        return pd.Categorical.from_codes(
            self.as_int_array(),
            # We need to make a copy because pandas >= 0.17 fails if this
            # buffer isn't writeable.
            self.categories.copy(),
            ordered=False,
        )
def makeLabel(self, value):
"""Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem.
"""
value, prefix = format_units(value, self.step,
system=self.unitSystem)
span, spanPrefix = format_units(self.span, self.step,
system=self.unitSystem)
if prefix:
prefix += " "
if value < 0.1:
return "%g %s" % (float(value), prefix)
elif value < 1.0:
return "%.2f %s" % (float(value), prefix)
if span > 10 or spanPrefix != prefix:
if type(value) is float:
return "%.1f %s" % (value, prefix)
else:
return "%d %s" % (int(value), prefix)
elif span > 3:
return "%.1f %s" % (float(value), prefix)
elif span > 0.1:
return "%.2f %s" % (float(value), prefix)
else:
return "%g %s" % (float(value), prefix) | Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem. | Below is the instruction that describes the task:
### Input:
Create a label for the specified value.
Create a label string containing the value and its units (if any),
based on the values of self.step, self.span, and self.unitSystem.
### Response:
def makeLabel(self, value):
    """Create a label for the specified value.

    Create a label string containing the value and its units (if any),
    based on the values of self.step, self.span, and self.unitSystem.
    """
    # Scale both the value and the full axis span into the same unit
    # system so the precision chosen below is consistent across labels.
    value, prefix = format_units(value, self.step,
                                 system=self.unitSystem)
    span, spanPrefix = format_units(self.span, self.step,
                                    system=self.unitSystem)
    if prefix:
        prefix += " "
    # Small magnitudes get fixed precision rules regardless of the span.
    if value < 0.1:
        return "%g %s" % (float(value), prefix)
    elif value < 1.0:
        return "%.2f %s" % (float(value), prefix)
    if span > 10 or spanPrefix != prefix:
        # isinstance() rather than an exact type() check, so float
        # subclasses (e.g. numpy scalars) format the same way as floats.
        if isinstance(value, float):
            return "%.1f %s" % (value, prefix)
        else:
            return "%d %s" % (int(value), prefix)
    elif span > 3:
        return "%.1f %s" % (float(value), prefix)
    elif span > 0.1:
        return "%.2f %s" % (float(value), prefix)
    else:
        return "%g %s" % (float(value), prefix)
def setChartType( self, chartType ):
"""
Sets the chart type for this scene to the inputed type.
:param chartType | <XChartScene.Type>
"""
self._chartType = chartType
self.setDirty()
# setup default options
if ( chartType == XChartScene.Type.Pie ):
self.setShowGrid(False)
self.horizontalRuler().setPadStart(0)
self.horizontalRuler().setPadEnd(0)
elif ( chartType == XChartScene.Type.Bar ):
self.setShowGrid(True)
self.setShowColumns(False)
self.setShowRows(True)
self.horizontalRuler().setPadStart(1)
self.horizontalRuler().setPadEnd(1)
else:
self.setShowGrid(True)
self.setShowColumns(True)
self.setShowRows(True)
self.horizontalRuler().setPadStart(0)
self.horizontalRuler().setPadEnd(0)
if ( not self.signalsBlocked() ):
self.chartTypeChanged.emit() | Sets the chart type for this scene to the inputed type.
:param chartType | <XChartScene.Type> | Below is the instruction that describes the task:
### Input:
Sets the chart type for this scene to the inputed type.
:param chartType | <XChartScene.Type>
### Response:
def setChartType( self, chartType ):
    """
    Sets the chart type for this scene to the inputed type.

    Switching the type also resets grid/column/row visibility and the
    horizontal ruler padding to defaults suited to that chart style, and
    emits ``chartTypeChanged`` unless signals are blocked.

    :param chartType | <XChartScene.Type>
    """
    self._chartType = chartType
    self.setDirty()
    # setup default options
    if ( chartType == XChartScene.Type.Pie ):
        # Pie charts have no axes: hide the grid and remove ruler padding.
        self.setShowGrid(False)
        self.horizontalRuler().setPadStart(0)
        self.horizontalRuler().setPadEnd(0)
    elif ( chartType == XChartScene.Type.Bar ):
        # Bar charts: rows only, with one unit of padding so the
        # outermost bars are not clipped at the chart edges.
        self.setShowGrid(True)
        self.setShowColumns(False)
        self.setShowRows(True)
        self.horizontalRuler().setPadStart(1)
        self.horizontalRuler().setPadEnd(1)
    else:
        # Line/scatter style charts: full grid, no extra padding.
        self.setShowGrid(True)
        self.setShowColumns(True)
        self.setShowRows(True)
        self.horizontalRuler().setPadStart(0)
        self.horizontalRuler().setPadEnd(0)
    if ( not self.signalsBlocked() ):
        self.chartTypeChanged.emit()
def cli(ctx):
""" CLI for maildirs content analysis and deletion. """
level = logger.level
try:
level_to_name = logging._levelToName
# Fallback to pre-Python 3.4 internals.
except AttributeError:
level_to_name = logging._levelNames
level_name = level_to_name.get(level, level)
logger.debug('Verbosity set to {}.'.format(level_name))
# Print help screen and exit if no sub-commands provided.
if ctx.invoked_subcommand is None:
click.echo(ctx.get_help())
ctx.exit()
# Load up global options to the context.
ctx.obj = {} | CLI for maildirs content analysis and deletion. | Below is the instruction that describes the task:
### Input:
CLI for maildirs content analysis and deletion.
### Response:
def cli(ctx):
    """ CLI for maildirs content analysis and deletion. """
    level = logger.level
    try:
        # Python >= 3.4 exposes the level -> name mapping here.
        level_to_name = logging._levelToName
    # Fallback to pre-Python 3.4 internals.
    except AttributeError:
        level_to_name = logging._levelNames
    # Fall back to the raw numeric level if it has no registered name.
    level_name = level_to_name.get(level, level)
    logger.debug('Verbosity set to {}.'.format(level_name))
    # Print help screen and exit if no sub-commands provided.
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
        ctx.exit()
    # Load up global options to the context.
    ctx.obj = {}
def getSampleTypeTitles(self):
"""Returns a list of sample type titles
"""
sample_types = self.getSampleTypes()
sample_type_titles = map(lambda obj: obj.Title(), sample_types)
# N.B. This is used only for search purpose, because the catalog does
# not add an entry to the Keywordindex for an empty list.
#
# => This "empty" category allows to search for values with a certain
# sample type set OR with no sample type set.
# (see bika.lims.browser.analysisrequest.add2.get_sampletype_info)
if not sample_type_titles:
return [""]
return sample_type_titles | Returns a list of sample type titles | Below is the the instruction that describes the task:
### Input:
Returns a list of sample type titles
### Response:
def getSampleTypeTitles(self):
    """Returns a list of sample type titles
    """
    sample_types = self.getSampleTypes()
    # Use a list comprehension rather than map() so the result is a real
    # list on Python 3 as well: a map object is always truthy, which
    # would make the empty-list fallback below unreachable.
    sample_type_titles = [obj.Title() for obj in sample_types]
    # N.B. This is used only for search purpose, because the catalog does
    # not add an entry to the Keywordindex for an empty list.
    #
    # => This "empty" category allows to search for values with a certain
    #    sample type set OR with no sample type set.
    #    (see bika.lims.browser.analysisrequest.add2.get_sampletype_info)
    if not sample_type_titles:
        return [""]
    return sample_type_titles
def sum_string(amount, gender, items=None):
"""
Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative
"""
if isinstance(items, six.text_type):
items = split_values(items)
if items is None:
items = (u"", u"", u"")
try:
one_item, two_items, five_items = items
except ValueError:
raise ValueError("Items must be 3-element sequence")
check_positive(amount)
if amount == 0:
if five_items:
return u"ноль %s" % five_items
else:
return u"ноль"
into = u''
tmp_val = amount
# единицы
into, tmp_val = _sum_string_fn(into, tmp_val, gender, items)
# тысячи
into, tmp_val = _sum_string_fn(into, tmp_val, FEMALE,
(u"тысяча", u"тысячи", u"тысяч"))
# миллионы
into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
(u"миллион", u"миллиона", u"миллионов"))
# миллиарды
into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
(u"миллиард", u"миллиарда", u"миллиардов"))
if tmp_val == 0:
return into
else:
raise ValueError("Cannot operand with numbers bigger than 10**11") | Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative | Below is the instruction that describes the task:
### Input:
Get sum in words
@param amount: amount of objects
@type amount: C{integer types}
@param gender: gender of object (MALE, FEMALE or NEUTER)
@type gender: C{int}
@param items: variants of object in three forms:
for one object, for two objects and for five objects
@type items: 3-element C{sequence} of C{unicode} or
just C{unicode} (three variants with delimeter ',')
@return: in-words representation objects' amount
@rtype: C{unicode}
@raise ValueError: items isn't 3-element C{sequence} or C{unicode}
@raise ValueError: amount bigger than 10**11
@raise ValueError: amount is negative
### Response:
def sum_string(amount, gender, items=None):
    """
    Get sum in words
    @param amount: amount of objects
    @type amount: C{integer types}
    @param gender: gender of object (MALE, FEMALE or NEUTER)
    @type gender: C{int}
    @param items: variants of object in three forms:
        for one object, for two objects and for five objects
    @type items: 3-element C{sequence} of C{unicode} or
        just C{unicode} (three variants with delimeter ',')
    @return: in-words representation objects' amount
    @rtype: C{unicode}
    @raise ValueError: items isn't 3-element C{sequence} or C{unicode}
    @raise ValueError: amount bigger than 10**11
    @raise ValueError: amount is negative
    """
    if isinstance(items, six.text_type):
        # A single comma-delimited string: split it into the three forms.
        items = split_values(items)
    if items is None:
        items = (u"", u"", u"")
    try:
        one_item, two_items, five_items = items
    except ValueError:
        raise ValueError("Items must be 3-element sequence")
    check_positive(amount)
    if amount == 0:
        # Zero is spelled out directly ("nol'"), with the plural-like
        # five-items form of the object name if one was given.
        if five_items:
            return u"ноль %s" % five_items
        else:
            return u"ноль"
    into = u''
    tmp_val = amount
    # Each _sum_string_fn call consumes the three lowest decimal digits
    # of tmp_val and prepends their spelled-out form to `into`.
    # units (the objects themselves)
    into, tmp_val = _sum_string_fn(into, tmp_val, gender, items)
    # thousands
    into, tmp_val = _sum_string_fn(into, tmp_val, FEMALE,
                                   (u"тысяча", u"тысячи", u"тысяч"))
    # millions
    into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
                                   (u"миллион", u"миллиона", u"миллионов"))
    # billions
    into, tmp_val = _sum_string_fn(into, tmp_val, MALE,
                                   (u"миллиард", u"миллиарда", u"миллиардов"))
    if tmp_val == 0:
        return into
    else:
        raise ValueError("Cannot operand with numbers bigger than 10**11")
def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values | Parses a line in LIBSVM format into (label, indices, values). | Below is the instruction that describes the task:
### Input:
Parses a line in LIBSVM format into (label, indices, values).
### Response:
def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values |
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
"""Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not.
"""
try:
certs_chain_get = requests.get(signature_chain_url)
except requests.exceptions.ConnectionError as e:
log.error(f'Amazon signature chain get error: {e}')
return None
certs_chain_txt = certs_chain_get.text
certs_chain = extract_certs(certs_chain_txt)
amazon_cert: crypto.X509 = certs_chain.pop(0)
# verify signature chain url
sc_url_verification = verify_sc_url(signature_chain_url)
if not sc_url_verification:
log.error(f'Amazon signature url {signature_chain_url} was not verified')
# verify not expired
expired_verification = not amazon_cert.has_expired()
if not expired_verification:
log.error(f'Amazon certificate ({signature_chain_url}) expired')
# verify subject alternative names
sans_verification = verify_sans(amazon_cert)
if not sans_verification:
log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')
# verify certs chain
chain_verification = verify_certs_chain(certs_chain, amazon_cert)
if not chain_verification:
log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')
result = (sc_url_verification and expired_verification and sans_verification and chain_verification)
return amazon_cert if result else None | Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not. | Below is the instruction that describes the task:
### Input:
Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.
Args:
signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.
Returns:
result: Amazon certificate if verification was successful, None if not.
### Response:
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
    """Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.

    Args:
        signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: Amazon certificate if verification was successful, None if not.
    """
    try:
        certs_chain_get = requests.get(signature_chain_url)
    except requests.exceptions.ConnectionError as e:
        log.error(f'Amazon signature chain get error: {e}')
        return None
    certs_chain_txt = certs_chain_get.text
    certs_chain = extract_certs(certs_chain_txt)
    # The first certificate in the fetched chain is Amazon's signing
    # certificate; the remainder is the chain used to validate it.
    amazon_cert: crypto.X509 = certs_chain.pop(0)
    # verify signature chain url
    sc_url_verification = verify_sc_url(signature_chain_url)
    if not sc_url_verification:
        log.error(f'Amazon signature url {signature_chain_url} was not verified')
    # verify not expired
    expired_verification = not amazon_cert.has_expired()
    if not expired_verification:
        log.error(f'Amazon certificate ({signature_chain_url}) expired')
    # verify subject alternative names
    sans_verification = verify_sans(amazon_cert)
    if not sans_verification:
        log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')
    # verify certs chain
    chain_verification = verify_certs_chain(certs_chain, amazon_cert)
    if not chain_verification:
        log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')
    # All checks are evaluated (no short-circuiting) so every failure is
    # logged; the combined result then decides the return value.
    result = (sc_url_verification and expired_verification and sans_verification and chain_verification)
    return amazon_cert if result else None
def list_networks(**kwargs):
'''
List all virtual networks.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_networks
'''
conn = __get_conn(**kwargs)
try:
return [net.name() for net in conn.listAllNetworks()]
finally:
conn.close() | List all virtual networks.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_networks | Below is the instruction that describes the task:
### Input:
List all virtual networks.
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.list_networks
### Response:
def list_networks(**kwargs):
    '''
    List all virtual networks.

    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_networks
    '''
    connection = __get_conn(**kwargs)
    try:
        # Collect the name of every network known to the hypervisor.
        networks = connection.listAllNetworks()
        return [network.name() for network in networks]
    finally:
        # Always release the libvirt connection, even on failure.
        connection.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.