code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def load_labeled_events(filename, delimiter=r'\s+'):
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
"""
# Use our universal function to load in the events
events, labels = load_delimited(filename, [float, str], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events, labels | r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels | Below is the the instruction that describes the task:
### Input:
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
### Response:
def load_labeled_events(filename, delimiter=r'\s+'):
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
"""
# Use our universal function to load in the events
events, labels = load_delimited(filename, [float, str], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events, labels |
def add_prefix_actions(self, prefix_actions_proxy):
"""
By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
"""
prefix_s = "prefix_"
prefix_pos = len(prefix_s)
def is_method(t):
return callable(t[1])
def is_prefix_action(t):
return t[0].startswith(prefix_s)
def drop_prefix(k, w):
return (k[prefix_pos:], w)
members_t = inspect.getmembers(prefix_actions_proxy)
methods_t = filter(is_method, members_t)
prefix_actions_t = filter(is_prefix_action, methods_t)
prefix_actions_d = dict(map(drop_prefix, prefix_actions_t))
for widget in self.get_widgets():
prefixes = gtk.Widget.get_data(widget, "prefixes")
if prefixes:
for prefix in prefixes:
if prefix in prefix_actions_d:
prefix_action = prefix_actions_d[prefix]
prefix_action(widget) | By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc. | Below is the the instruction that describes the task:
### Input:
By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
### Response:
def add_prefix_actions(self, prefix_actions_proxy):
"""
By using a gui designer (glade-2, gazpacho, etc)
widgets can have a prefix in theirs names
like foo:entry1 or foo:label3
It means entry1 and label3 has a prefix action named foo.
Then, prefix_actions_proxy must have a method named prefix_foo which
is called everytime a widget with prefix foo is found, using the found widget
as argument.
prefix_actions_proxy:
An instance with methods as prefix actions.
It means it has methods like prefix_foo, prefix_bar, etc.
"""
prefix_s = "prefix_"
prefix_pos = len(prefix_s)
def is_method(t):
return callable(t[1])
def is_prefix_action(t):
return t[0].startswith(prefix_s)
def drop_prefix(k, w):
return (k[prefix_pos:], w)
members_t = inspect.getmembers(prefix_actions_proxy)
methods_t = filter(is_method, members_t)
prefix_actions_t = filter(is_prefix_action, methods_t)
prefix_actions_d = dict(map(drop_prefix, prefix_actions_t))
for widget in self.get_widgets():
prefixes = gtk.Widget.get_data(widget, "prefixes")
if prefixes:
for prefix in prefixes:
if prefix in prefix_actions_d:
prefix_action = prefix_actions_d[prefix]
prefix_action(widget) |
def publish(self, exchange, routing_key, message, properties=None, mandatory=False):
"""
Publish a message to an AMQP exchange.
Parameters
----------
exchange: string
Exchange to publish to.
routing_key: string
Routing key to use for this message.
message: string
Message to publish.
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE .
mandatory: boolean
If set to True, the mandatory bit will be set on the published message.
"""
return publish_message(self.channel, exchange, routing_key, message, properties, mandatory) | Publish a message to an AMQP exchange.
Parameters
----------
exchange: string
Exchange to publish to.
routing_key: string
Routing key to use for this message.
message: string
Message to publish.
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE .
mandatory: boolean
If set to True, the mandatory bit will be set on the published message. | Below is the the instruction that describes the task:
### Input:
Publish a message to an AMQP exchange.
Parameters
----------
exchange: string
Exchange to publish to.
routing_key: string
Routing key to use for this message.
message: string
Message to publish.
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE .
mandatory: boolean
If set to True, the mandatory bit will be set on the published message.
### Response:
def publish(self, exchange, routing_key, message, properties=None, mandatory=False):
"""
Publish a message to an AMQP exchange.
Parameters
----------
exchange: string
Exchange to publish to.
routing_key: string
Routing key to use for this message.
message: string
Message to publish.
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
delivery_mode: int - what delivery_mode to use. By default message are not persistent, but this can be
set by specifying PERSISTENT_MESSAGE .
mandatory: boolean
If set to True, the mandatory bit will be set on the published message.
"""
return publish_message(self.channel, exchange, routing_key, message, properties, mandatory) |
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning) | Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler. | Below is the the instruction that describes the task:
### Input:
Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
### Response:
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning) |
def notify_peer_message(self, message, sender_id):
"""A new message was received from a peer"""
payload = message.SerializeToString()
self._notify(
"consensus_notifier_notify_peer_message",
payload,
len(payload),
sender_id,
len(sender_id)) | A new message was received from a peer | Below is the the instruction that describes the task:
### Input:
A new message was received from a peer
### Response:
def notify_peer_message(self, message, sender_id):
"""A new message was received from a peer"""
payload = message.SerializeToString()
self._notify(
"consensus_notifier_notify_peer_message",
payload,
len(payload),
sender_id,
len(sender_id)) |
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
for lim in self.limits.values():
lim._reset_usage()
elb_usage = self._find_usage_elbv1()
alb_usage = self._find_usage_elbv2()
logger.debug('ELBs in use: %d, ALBs in use: %d', elb_usage, alb_usage)
self.limits['Active load balancers']._add_current_usage(
(elb_usage + alb_usage),
aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
)
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. | Below is the the instruction that describes the task:
### Input:
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
### Response:
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
for lim in self.limits.values():
lim._reset_usage()
elb_usage = self._find_usage_elbv1()
alb_usage = self._find_usage_elbv2()
logger.debug('ELBs in use: %d, ALBs in use: %d', elb_usage, alb_usage)
self.limits['Active load balancers']._add_current_usage(
(elb_usage + alb_usage),
aws_type='AWS::ElasticLoadBalancing::LoadBalancer',
)
self._have_usage = True
logger.debug("Done checking usage.") |
def _bse_cli_get_refs(args):
'''Handles the get-refs subcommand'''
return api.get_references(
basis_name=args.basis, elements=args.elements, version=args.version, fmt=args.reffmt, data_dir=args.data_dir) | Handles the get-refs subcommand | Below is the the instruction that describes the task:
### Input:
Handles the get-refs subcommand
### Response:
def _bse_cli_get_refs(args):
'''Handles the get-refs subcommand'''
return api.get_references(
basis_name=args.basis, elements=args.elements, version=args.version, fmt=args.reffmt, data_dir=args.data_dir) |
def split(self, sep=None, maxsplit=None, regex=False):
"""Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr."""
if maxsplit is not None:
raise NotImplementedError('no maxsplit yet')
s = self.s
if sep is None:
sep = r'\s+'
elif not regex:
sep = re.escape(sep)
matches = list(re.finditer(sep, s))
return [self[start:end] for start, end in zip(
[0] + [m.end() for m in matches],
[m.start() for m in matches] + [len(s)])] | Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr. | Below is the the instruction that describes the task:
### Input:
Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr.
### Response:
def split(self, sep=None, maxsplit=None, regex=False):
"""Split based on seperator, optionally using a regex
Capture groups are ignored in regex, the whole pattern is matched
and used to split the original FmtStr."""
if maxsplit is not None:
raise NotImplementedError('no maxsplit yet')
s = self.s
if sep is None:
sep = r'\s+'
elif not regex:
sep = re.escape(sep)
matches = list(re.finditer(sep, s))
return [self[start:end] for start, end in zip(
[0] + [m.end() for m in matches],
[m.start() for m in matches] + [len(s)])] |
def get_client_id(self):
""" Attempt to get client_id from soundcloud homepage. """
# FIXME: This method doesn't works
id = re.search(
"\"clientID\":\"([a-z0-9]*)\"",
self.send_request(self.SC_HOME).read().decode("utf-8"))
if not id:
raise serror("Cannot retrieve client_id.")
return id.group(1) | Attempt to get client_id from soundcloud homepage. | Below is the the instruction that describes the task:
### Input:
Attempt to get client_id from soundcloud homepage.
### Response:
def get_client_id(self):
""" Attempt to get client_id from soundcloud homepage. """
# FIXME: This method doesn't works
id = re.search(
"\"clientID\":\"([a-z0-9]*)\"",
self.send_request(self.SC_HOME).read().decode("utf-8"))
if not id:
raise serror("Cannot retrieve client_id.")
return id.group(1) |
def read(self, size=None):
"""Read a chunk from rfile buffer and return it.
Args:
size (int): amount of data to read
Returns:
bytes: Chunk from rfile, limited by size if specified.
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
self.buffer = EMPTY | Read a chunk from rfile buffer and return it.
Args:
size (int): amount of data to read
Returns:
bytes: Chunk from rfile, limited by size if specified. | Below is the the instruction that describes the task:
### Input:
Read a chunk from rfile buffer and return it.
Args:
size (int): amount of data to read
Returns:
bytes: Chunk from rfile, limited by size if specified.
### Response:
def read(self, size=None):
"""Read a chunk from rfile buffer and return it.
Args:
size (int): amount of data to read
Returns:
bytes: Chunk from rfile, limited by size if specified.
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
self.buffer = EMPTY |
def from_json_file(cls, path):
"""
Return a template from a json allocated in a path.
:param path: string
:return: ServiceAgreementTemplate
"""
with open(path) as jsf:
template_json = json.load(jsf)
return cls(template_json=template_json) | Return a template from a json allocated in a path.
:param path: string
:return: ServiceAgreementTemplate | Below is the the instruction that describes the task:
### Input:
Return a template from a json allocated in a path.
:param path: string
:return: ServiceAgreementTemplate
### Response:
def from_json_file(cls, path):
"""
Return a template from a json allocated in a path.
:param path: string
:return: ServiceAgreementTemplate
"""
with open(path) as jsf:
template_json = json.load(jsf)
return cls(template_json=template_json) |
def doTransaction(conn, start=True, startSQL='START TRANSACTION'):
'''
wrap a connection in a transaction. starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server.
'''
try:
if start:
executeSQL(conn, startSQL)
yield conn
except:
if conn is not None:
conn.rollback()
raise
else:
conn.commit() | wrap a connection in a transaction. starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server. | Below is the the instruction that describes the task:
### Input:
wrap a connection in a transaction. starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server.
### Response:
def doTransaction(conn, start=True, startSQL='START TRANSACTION'):
'''
wrap a connection in a transaction. starts a transaction, yields the conn, and then if an exception occurs, calls rollback(). otherwise calls commit().
start: if True, executes 'START TRANSACTION' sql before yielding conn. Useful for connections that are autocommit by default.
startSQL: override if 'START TRANSACTION' does not work for your db server.
'''
try:
if start:
executeSQL(conn, startSQL)
yield conn
except:
if conn is not None:
conn.rollback()
raise
else:
conn.commit() |
def collections(self, username, page=1, per_page=10):
"""
Get a list of collections created by the user.
:param username [string]: The user’s username. Required.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the Collection list.
"""
url = "/users/{username}/collections".format(username=username)
params = {
"page": page,
"per_page": per_page
}
result = self._get(url, params=params)
return CollectionModel.parse_list(result) | Get a list of collections created by the user.
:param username [string]: The user’s username. Required.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the Collection list. | Below is the the instruction that describes the task:
### Input:
Get a list of collections created by the user.
:param username [string]: The user’s username. Required.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the Collection list.
### Response:
def collections(self, username, page=1, per_page=10):
"""
Get a list of collections created by the user.
:param username [string]: The user’s username. Required.
:param page [integer]: Page number to retrieve. (Optional; default: 1)
:param per_page [integer]: Number of items per page. (Optional; default: 10)
:return: [Array]: A single page of the Collection list.
"""
url = "/users/{username}/collections".format(username=username)
params = {
"page": page,
"per_page": per_page
}
result = self._get(url, params=params)
return CollectionModel.parse_list(result) |
def fit(self, X, y, weights=None):
"""Fit the generalized additive model.
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values,
ie integers in classification, real numbers in
regression)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
Returns
-------
self : object
Returns fitted GAM object
"""
# validate parameters
self._validate_params()
# validate data
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
X = check_X(X, verbose=self.verbose)
check_X_y(X, y)
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
check_lengths(y, weights)
else:
weights = np.ones_like(y).astype('float64')
# validate data-dependent parameters
self._validate_data_dep_params(X)
# set up logging
if not hasattr(self, 'logs_'):
self.logs_ = defaultdict(list)
# begin capturing statistics
self.statistics_ = {}
self.statistics_['n_samples'] = len(y)
self.statistics_['m_features'] = X.shape[1]
# optimize
self._pirls(X, y, weights)
# if self._opt == 0:
# self._pirls(X, y, weights)
# if self._opt == 1:
# self._pirls_naive(X, y)
return self | Fit the generalized additive model.
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values,
ie integers in classification, real numbers in
regression)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
Returns
-------
self : object
Returns fitted GAM object | Below is the the instruction that describes the task:
### Input:
Fit the generalized additive model.
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values,
ie integers in classification, real numbers in
regression)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
Returns
-------
self : object
Returns fitted GAM object
### Response:
def fit(self, X, y, weights=None):
"""Fit the generalized additive model.
Parameters
----------
X : array-like, shape (n_samples, m_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values,
ie integers in classification, real numbers in
regression)
weights : array-like shape (n_samples,) or None, optional
Sample weights.
if None, defaults to array of ones
Returns
-------
self : object
Returns fitted GAM object
"""
# validate parameters
self._validate_params()
# validate data
y = check_y(y, self.link, self.distribution, verbose=self.verbose)
X = check_X(X, verbose=self.verbose)
check_X_y(X, y)
if weights is not None:
weights = np.array(weights).astype('f').ravel()
weights = check_array(weights, name='sample weights',
ndim=1, verbose=self.verbose)
check_lengths(y, weights)
else:
weights = np.ones_like(y).astype('float64')
# validate data-dependent parameters
self._validate_data_dep_params(X)
# set up logging
if not hasattr(self, 'logs_'):
self.logs_ = defaultdict(list)
# begin capturing statistics
self.statistics_ = {}
self.statistics_['n_samples'] = len(y)
self.statistics_['m_features'] = X.shape[1]
# optimize
self._pirls(X, y, weights)
# if self._opt == 0:
# self._pirls(X, y, weights)
# if self._opt == 1:
# self._pirls_naive(X, y)
return self |
def p_bound_terminal(self, p):
"""bound_terminal : unbound_terminal"""
if p[1][0].literal in ['*', '**']:
p[0] = [_Segment(_BINDING, '$%d' % self.binding_var_count),
p[1][0],
_Segment(_END_BINDING, '')]
self.binding_var_count += 1
else:
p[0] = p[1] | bound_terminal : unbound_terminal | Below is the the instruction that describes the task:
### Input:
bound_terminal : unbound_terminal
### Response:
def p_bound_terminal(self, p):
"""bound_terminal : unbound_terminal"""
if p[1][0].literal in ['*', '**']:
p[0] = [_Segment(_BINDING, '$%d' % self.binding_var_count),
p[1][0],
_Segment(_END_BINDING, '')]
self.binding_var_count += 1
else:
p[0] = p[1] |
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None):
'''Loads channel information into recording extractor. If a .prb file is given,
then 'location' and 'group' information for each channel is stored. If a .csv
file is given, then it will only store 'location'
Parameters
----------
recording: RecordingExtractor
The recording extractor to channel information
probe_file: str
Path to probe file. Either .prb or .csv
Returns
---------
subRecordingExtractor
'''
probe_file = Path(probe_file)
if probe_file.suffix == '.prb':
probe_dict = read_python(probe_file)
if 'channel_groups' in probe_dict.keys():
ordered_channels = np.array([], dtype=int)
groups = sorted(probe_dict['channel_groups'].keys())
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
ordered_channels = np.concatenate((ordered_channels, prop_val))
if list(ordered_channels) == recording.get_channel_ids():
subrecording = recording
else:
if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]):
print('Some channel in PRB file are in original recording')
present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()]
subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels)
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
if 'channels' not in cgroup.keys() and len(groups) > 1:
raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field"
"for each channel group is required")
elif 'channels' not in cgroup.keys():
channels_in_group = subrecording.get_num_channels()
else:
channels_in_group = len(cgroup['channels'])
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
for i_ch, prop in enumerate(prop_val):
if prop in subrecording.get_channel_ids():
subrecording.set_channel_property(prop, 'group', int(cgroup_id))
elif key_prop == 'geometry' or key_prop == 'location':
if isinstance(prop_val, dict):
if len(prop_val.keys()) == channels_in_group:
print('geometry in PRB have not the same length as channel in group')
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
else:
if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group:
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
# create dummy locations
if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys():
for i, chan in enumerate(subrecording.get_channel_ids()):
subrecording.set_channel_property(chan, 'location', [i, 0])
else:
raise AttributeError("'.prb' file should contain the 'channel_groups' field")
elif probe_file.suffix == '.csv':
if channel_map is not None:
assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \
"all channel_ids in 'channel_map' must be in the original recording channel ids"
subrecording = SubRecordingExtractor(recording, channel_ids=channel_map)
else:
subrecording = recording
with probe_file.open() as csvfile:
posreader = csv.reader(csvfile)
row_count = 0
loaded_pos = []
for pos in (posreader):
row_count += 1
loaded_pos.append(pos)
assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \
"rows as the number of channels in the recordings"
for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float)))
if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()):
for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'group', chg)
else:
raise NotImplementedError("Only .csv and .prb probe files can be loaded.")
return subrecording | Loads channel information into recording extractor. If a .prb file is given,
then 'location' and 'group' information for each channel is stored. If a .csv
file is given, then it will only store 'location'
Parameters
----------
recording: RecordingExtractor
The recording extractor to channel information
probe_file: str
Path to probe file. Either .prb or .csv
Returns
---------
subRecordingExtractor | Below is the the instruction that describes the task:
### Input:
Loads channel information into recording extractor. If a .prb file is given,
then 'location' and 'group' information for each channel is stored. If a .csv
file is given, then it will only store 'location'
Parameters
----------
recording: RecordingExtractor
The recording extractor to channel information
probe_file: str
Path to probe file. Either .prb or .csv
Returns
---------
subRecordingExtractor
### Response:
def load_probe_file(recording, probe_file, channel_map=None, channel_groups=None):
'''Loads channel information into recording extractor. If a .prb file is given,
then 'location' and 'group' information for each channel is stored. If a .csv
file is given, then it will only store 'location'
Parameters
----------
recording: RecordingExtractor
The recording extractor to channel information
probe_file: str
Path to probe file. Either .prb or .csv
Returns
---------
subRecordingExtractor
'''
probe_file = Path(probe_file)
if probe_file.suffix == '.prb':
probe_dict = read_python(probe_file)
if 'channel_groups' in probe_dict.keys():
ordered_channels = np.array([], dtype=int)
groups = sorted(probe_dict['channel_groups'].keys())
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
ordered_channels = np.concatenate((ordered_channels, prop_val))
if list(ordered_channels) == recording.get_channel_ids():
subrecording = recording
else:
if not np.all([chan in recording.get_channel_ids() for chan in ordered_channels]):
print('Some channel in PRB file are in original recording')
present_ordered_channels = [chan for chan in ordered_channels if chan in recording.get_channel_ids()]
subrecording = SubRecordingExtractor(recording, channel_ids=present_ordered_channels)
for cgroup_id in groups:
cgroup = probe_dict['channel_groups'][cgroup_id]
if 'channels' not in cgroup.keys() and len(groups) > 1:
raise Exception("If more than one 'channel_group' is in the probe file, the 'channels' field"
"for each channel group is required")
elif 'channels' not in cgroup.keys():
channels_in_group = subrecording.get_num_channels()
else:
channels_in_group = len(cgroup['channels'])
for key_prop, prop_val in cgroup.items():
if key_prop == 'channels':
for i_ch, prop in enumerate(prop_val):
if prop in subrecording.get_channel_ids():
subrecording.set_channel_property(prop, 'group', int(cgroup_id))
elif key_prop == 'geometry' or key_prop == 'location':
if isinstance(prop_val, dict):
if len(prop_val.keys()) == channels_in_group:
print('geometry in PRB have not the same length as channel in group')
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', prop)
else:
if isinstance(prop_val, dict) and len(prop_val.keys()) == channels_in_group:
for (i_ch, prop) in prop_val.items():
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
elif isinstance(prop_val, (list, np.ndarray)) and len(prop_val) == channels_in_group:
for (i_ch, prop) in zip(subrecording.get_channel_ids(), prop_val):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, key_prop, prop)
# create dummy locations
if 'geometry' not in cgroup.keys() and 'location' not in cgroup.keys():
for i, chan in enumerate(subrecording.get_channel_ids()):
subrecording.set_channel_property(chan, 'location', [i, 0])
else:
raise AttributeError("'.prb' file should contain the 'channel_groups' field")
elif probe_file.suffix == '.csv':
if channel_map is not None:
assert np.all([chan in channel_map for chan in recording.get_channel_ids()]), \
"all channel_ids in 'channel_map' must be in the original recording channel ids"
subrecording = SubRecordingExtractor(recording, channel_ids=channel_map)
else:
subrecording = recording
with probe_file.open() as csvfile:
posreader = csv.reader(csvfile)
row_count = 0
loaded_pos = []
for pos in (posreader):
row_count += 1
loaded_pos.append(pos)
assert len(subrecording.get_channel_ids()) == row_count, "The .csv file must contain as many " \
"rows as the number of channels in the recordings"
for i_ch, pos in zip(subrecording.get_channel_ids(), loaded_pos):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'location', list(np.array(pos).astype(float)))
if channel_groups is not None and len(channel_groups) == len(subrecording.get_channel_ids()):
for i_ch, chg in zip(subrecording.get_channel_ids(), channel_groups):
if i_ch in subrecording.get_channel_ids():
subrecording.set_channel_property(i_ch, 'group', chg)
else:
raise NotImplementedError("Only .csv and .prb probe files can be loaded.")
return subrecording |
def _pic(self):
"""Return the new `p:pic` element referencing the video."""
return CT_Picture.new_video_pic(
self._shape_id, self._shape_name, self._video_rId,
self._media_rId, self._poster_frame_rId, self._x, self._y,
self._cx, self._cy
) | Return the new `p:pic` element referencing the video. | Below is the the instruction that describes the task:
### Input:
Return the new `p:pic` element referencing the video.
### Response:
def _pic(self):
"""Return the new `p:pic` element referencing the video."""
return CT_Picture.new_video_pic(
self._shape_id, self._shape_name, self._video_rId,
self._media_rId, self._poster_frame_rId, self._x, self._y,
self._cx, self._cy
) |
def import_keys(hsm, args):
"""
The main stdin iteration loop.
"""
res = True
# ykksm 1
#123456,ftftftcccc,534543524554,fcacd309a20ce1809c2db257f0e8d6ea,000000000000,,,
for line in sys.stdin:
if line[0] == '#':
continue
l = line.split(',')
modhex_id = l[1]
uid = l[2].decode('hex')
key = l[3].decode('hex')
if modhex_id and uid and key:
public_id = pyhsm.yubikey.modhex_decode(modhex_id)
padded_id = modhex_id.rjust(args.public_id_chars, 'c')
if int(public_id, 16) == 0:
print "WARNING: Skipping import of key with public ID: %s" % (padded_id)
print "This public ID is unsupported by the YubiHSM.\n"
continue
if args.verbose:
print " %s" % (padded_id)
secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(key, uid)
hsm.load_secret(secret)
for kh in args.key_handles.keys():
if(args.random_nonce):
nonce = ""
else:
nonce = public_id.decode('hex')
aead = hsm.generate_aead(nonce, kh)
if args.internal_db:
if not store_in_internal_db(
args, hsm, modhex_id, public_id, kh, aead):
res = False
continue
filename = output_filename(
args.output_dir, args.key_handles[kh], padded_id)
if args.verbose:
print " %4s, %i bytes (%s) -> %s" % \
(args.key_handles[kh], len(aead.data), shorten_aead(aead), filename)
aead.save(filename)
if args.verbose:
print ""
if res:
print "\nDone\n"
else:
print "\nDone (one or more entries rejected)"
return res | The main stdin iteration loop. | Below is the the instruction that describes the task:
### Input:
The main stdin iteration loop.
### Response:
def import_keys(hsm, args):
"""
The main stdin iteration loop.
"""
res = True
# ykksm 1
#123456,ftftftcccc,534543524554,fcacd309a20ce1809c2db257f0e8d6ea,000000000000,,,
for line in sys.stdin:
if line[0] == '#':
continue
l = line.split(',')
modhex_id = l[1]
uid = l[2].decode('hex')
key = l[3].decode('hex')
if modhex_id and uid and key:
public_id = pyhsm.yubikey.modhex_decode(modhex_id)
padded_id = modhex_id.rjust(args.public_id_chars, 'c')
if int(public_id, 16) == 0:
print "WARNING: Skipping import of key with public ID: %s" % (padded_id)
print "This public ID is unsupported by the YubiHSM.\n"
continue
if args.verbose:
print " %s" % (padded_id)
secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(key, uid)
hsm.load_secret(secret)
for kh in args.key_handles.keys():
if(args.random_nonce):
nonce = ""
else:
nonce = public_id.decode('hex')
aead = hsm.generate_aead(nonce, kh)
if args.internal_db:
if not store_in_internal_db(
args, hsm, modhex_id, public_id, kh, aead):
res = False
continue
filename = output_filename(
args.output_dir, args.key_handles[kh], padded_id)
if args.verbose:
print " %4s, %i bytes (%s) -> %s" % \
(args.key_handles[kh], len(aead.data), shorten_aead(aead), filename)
aead.save(filename)
if args.verbose:
print ""
if res:
print "\nDone\n"
else:
print "\nDone (one or more entries rejected)"
return res |
def _fit_strel_to_im_3d(im, strel, r, x, y, z):
r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit.
"""
elem = strel.copy()
x_dim, y_dim, z_dim = im.shape
x_min = x-r
x_max = x+r+1
y_min = y-r
y_max = y+r+1
z_min = z-r
z_max = z+r+1
if x_min < 0:
x_adj = -x_min
elem = elem[x_adj:, :, :]
x_min = 0
elif x_max > x_dim:
x_adj = x_max - x_dim
elem = elem[:-x_adj, :, :]
if y_min < 0:
y_adj = -y_min
elem = elem[:, y_adj:, :]
y_min = 0
elif y_max > y_dim:
y_adj = y_max - y_dim
elem = elem[:, :-y_adj, :]
if z_min < 0:
z_adj = -z_min
elem = elem[:, :, z_adj:]
z_min = 0
elif z_max > z_dim:
z_adj = z_max - z_dim
elem = elem[:, :, :-z_adj]
ex, ey, ez = elem.shape
im[x_min:x_min+ex, y_min:y_min+ey, z_min:z_min+ez] += elem
return im | r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit. | Below is the the instruction that describes the task:
### Input:
r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit.
### Response:
def _fit_strel_to_im_3d(im, strel, r, x, y, z):
r"""
Helper function to add a structuring element to a 2D image.
Used by RSA. Makes sure if center is less than r pixels from edge of image
that the strel is sliced to fit.
"""
elem = strel.copy()
x_dim, y_dim, z_dim = im.shape
x_min = x-r
x_max = x+r+1
y_min = y-r
y_max = y+r+1
z_min = z-r
z_max = z+r+1
if x_min < 0:
x_adj = -x_min
elem = elem[x_adj:, :, :]
x_min = 0
elif x_max > x_dim:
x_adj = x_max - x_dim
elem = elem[:-x_adj, :, :]
if y_min < 0:
y_adj = -y_min
elem = elem[:, y_adj:, :]
y_min = 0
elif y_max > y_dim:
y_adj = y_max - y_dim
elem = elem[:, :-y_adj, :]
if z_min < 0:
z_adj = -z_min
elem = elem[:, :, z_adj:]
z_min = 0
elif z_max > z_dim:
z_adj = z_max - z_dim
elem = elem[:, :, :-z_adj]
ex, ey, ez = elem.shape
im[x_min:x_min+ex, y_min:y_min+ey, z_min:z_min+ez] += elem
return im |
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None, ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None):
"""
Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
"""
pass | Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types. | Below is the the instruction that describes the task:
### Input:
Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
### Response:
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None, ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None):
"""
Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled):T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
"""
pass |
def _get_switchports(profile):
"""Return list of (switch_ip, interface) tuples from local_link_info"""
switchports = []
if profile.get('local_link_information'):
for link in profile['local_link_information']:
if 'switch_info' in link and 'port_id' in link:
switch = link['switch_info']
interface = link['port_id']
switchports.append((switch, interface))
else:
LOG.warning("Incomplete link information: %s", link)
return switchports | Return list of (switch_ip, interface) tuples from local_link_info | Below is the the instruction that describes the task:
### Input:
Return list of (switch_ip, interface) tuples from local_link_info
### Response:
def _get_switchports(profile):
"""Return list of (switch_ip, interface) tuples from local_link_info"""
switchports = []
if profile.get('local_link_information'):
for link in profile['local_link_information']:
if 'switch_info' in link and 'port_id' in link:
switch = link['switch_info']
interface = link['port_id']
switchports.append((switch, interface))
else:
LOG.warning("Incomplete link information: %s", link)
return switchports |
def _init_random_gaussians(self, X):
"""Initialize gaussian randomly"""
n_samples = np.shape(X)[0]
self.priors = (1 / self.k) * np.ones(self.k)
for _ in range(self.k):
params = {}
params["mean"] = X[np.random.choice(range(n_samples))]
params["cov"] = calculate_covariance_matrix(X)
self.parameters.append(params) | Initialize gaussian randomly | Below is the the instruction that describes the task:
### Input:
Initialize gaussian randomly
### Response:
def _init_random_gaussians(self, X):
"""Initialize gaussian randomly"""
n_samples = np.shape(X)[0]
self.priors = (1 / self.k) * np.ones(self.k)
for _ in range(self.k):
params = {}
params["mean"] = X[np.random.choice(range(n_samples))]
params["cov"] = calculate_covariance_matrix(X)
self.parameters.append(params) |
def exit_cleanly(errnum=None):
"""exit_cleanly
Exits the runtime cleanly using SystemExit. This is the official, handling
exception here as this is mostly meant to be a standalone script.
"""
default = "An Unknown Error has Occurred!"
cases = {errno.EINVAL: "Improper or invalid input value",
errno.ESPIPE: "Could not complete action due to a broken" +
" dependency",
errno.ENOSYS: "Could not complete action due to unexpected setup",
errno.EIO: "Could not access an expected file",
errno.EPERM: "Could not access an item due to permissions issues",
-1: default}
help_stmt = """
%s [--opt [option]]
With opts:
working_directory - ONLY use this if you're overwriting `pwd`!
""" % (sys.argv[0])
if not errnum:
errnum = 0
elif not isinstance(errnum, int) and hasattr(errno, errnum):
errnum = getattr(errno, errnum)
try:
errnum = int(errnum)
except TypeError:
errnum = -1
if errnum == 0:
help_stmt = ''
stmt = "Successful in configuration!"
elif errnum in cases:
stmt = cases[errnum]
else:
stmt = default
print("%s\n\n%s" % (stmt, help_stmt)) | exit_cleanly
Exits the runtime cleanly using SystemExit. This is the official, handling
exception here as this is mostly meant to be a standalone script. | Below is the the instruction that describes the task:
### Input:
exit_cleanly
Exits the runtime cleanly using SystemExit. This is the official, handling
exception here as this is mostly meant to be a standalone script.
### Response:
def exit_cleanly(errnum=None):
"""exit_cleanly
Exits the runtime cleanly using SystemExit. This is the official, handling
exception here as this is mostly meant to be a standalone script.
"""
default = "An Unknown Error has Occurred!"
cases = {errno.EINVAL: "Improper or invalid input value",
errno.ESPIPE: "Could not complete action due to a broken" +
" dependency",
errno.ENOSYS: "Could not complete action due to unexpected setup",
errno.EIO: "Could not access an expected file",
errno.EPERM: "Could not access an item due to permissions issues",
-1: default}
help_stmt = """
%s [--opt [option]]
With opts:
working_directory - ONLY use this if you're overwriting `pwd`!
""" % (sys.argv[0])
if not errnum:
errnum = 0
elif not isinstance(errnum, int) and hasattr(errno, errnum):
errnum = getattr(errno, errnum)
try:
errnum = int(errnum)
except TypeError:
errnum = -1
if errnum == 0:
help_stmt = ''
stmt = "Successful in configuration!"
elif errnum in cases:
stmt = cases[errnum]
else:
stmt = default
print("%s\n\n%s" % (stmt, help_stmt)) |
def netengine(self):
""" access netengine instance """
# return None if no backend chosen yet
if not self.backend:
return None
# init instance of the netengine backend if not already done
if not self.__netengine:
NetengineBackend = self.backend_class
arguments = self._build_netengine_arguments()
self.__netengine = NetengineBackend(**arguments)
# return netengine instance
return self.__netengine | access netengine instance | Below is the the instruction that describes the task:
### Input:
access netengine instance
### Response:
def netengine(self):
""" access netengine instance """
# return None if no backend chosen yet
if not self.backend:
return None
# init instance of the netengine backend if not already done
if not self.__netengine:
NetengineBackend = self.backend_class
arguments = self._build_netengine_arguments()
self.__netengine = NetengineBackend(**arguments)
# return netengine instance
return self.__netengine |
def sort_versions(versions=(), reverse=False, sep=u'.'):
"""Sort a list of version number strings.
This function ensures that the package sorting based on number name is
performed correctly when including alpha, dev rc1 etc...
"""
if versions == []:
return []
digits = u'0123456789'
def toint(x):
try:
n = int(x)
except:
n = x
return n
versions = list(versions)
new_versions, alpha, sizes = [], set(), set()
for item in versions:
it = item.split(sep)
temp = []
for i in it:
x = toint(i)
if not isinstance(x, int):
x = u(x)
middle = x.lstrip(digits).rstrip(digits)
tail = toint(x.lstrip(digits).replace(middle, u''))
head = toint(x.rstrip(digits).replace(middle, u''))
middle = toint(middle)
res = [head, middle, tail]
while u'' in res:
res.remove(u'')
for r in res:
if is_unicode(r):
alpha.add(r)
else:
res = [x]
temp += res
sizes.add(len(temp))
new_versions.append(temp)
# replace letters found by a negative number
replace_dic = {}
alpha = sorted(alpha, reverse=True)
if len(alpha):
replace_dic = dict(zip(alpha, list(range(-1, -(len(alpha)+1), -1))))
# Complete with zeros based on longest item and replace alphas with number
nmax = max(sizes)
for i in range(len(new_versions)):
item = []
for z in new_versions[i]:
if z in replace_dic:
item.append(replace_dic[z])
else:
item.append(z)
nzeros = nmax - len(item)
item += [0]*nzeros
item += [versions[i]]
new_versions[i] = item
new_versions = sorted(new_versions, reverse=reverse)
return [n[-1] for n in new_versions] | Sort a list of version number strings.
This function ensures that the package sorting based on number name is
performed correctly when including alpha, dev rc1 etc... | Below is the the instruction that describes the task:
### Input:
Sort a list of version number strings.
This function ensures that the package sorting based on number name is
performed correctly when including alpha, dev rc1 etc...
### Response:
def sort_versions(versions=(), reverse=False, sep=u'.'):
"""Sort a list of version number strings.
This function ensures that the package sorting based on number name is
performed correctly when including alpha, dev rc1 etc...
"""
if versions == []:
return []
digits = u'0123456789'
def toint(x):
try:
n = int(x)
except:
n = x
return n
versions = list(versions)
new_versions, alpha, sizes = [], set(), set()
for item in versions:
it = item.split(sep)
temp = []
for i in it:
x = toint(i)
if not isinstance(x, int):
x = u(x)
middle = x.lstrip(digits).rstrip(digits)
tail = toint(x.lstrip(digits).replace(middle, u''))
head = toint(x.rstrip(digits).replace(middle, u''))
middle = toint(middle)
res = [head, middle, tail]
while u'' in res:
res.remove(u'')
for r in res:
if is_unicode(r):
alpha.add(r)
else:
res = [x]
temp += res
sizes.add(len(temp))
new_versions.append(temp)
# replace letters found by a negative number
replace_dic = {}
alpha = sorted(alpha, reverse=True)
if len(alpha):
replace_dic = dict(zip(alpha, list(range(-1, -(len(alpha)+1), -1))))
# Complete with zeros based on longest item and replace alphas with number
nmax = max(sizes)
for i in range(len(new_versions)):
item = []
for z in new_versions[i]:
if z in replace_dic:
item.append(replace_dic[z])
else:
item.append(z)
nzeros = nmax - len(item)
item += [0]*nzeros
item += [versions[i]]
new_versions[i] = item
new_versions = sorted(new_versions, reverse=reverse)
return [n[-1] for n in new_versions] |
def get_storyline(self, timezone_offset, first_date, start=0.0, end=0.0, track_points=False):
''' a method to retrieve storyline details for a period of time
NOTE: start and end must be no more than 30 days, 1 second apart
NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart
:param timezone_offset: integer with timezone offset from user profile details
:param first_date: string with ISO date from user profile details firstDate
:param start: [optional] float with starting datetime for daily summaries
:param end: [optional] float with ending datetime for daily summaries
:param track_points: [optional] boolean to provide detailed tracking of user movement
:return: dictionary of response details with storyline list inside json key
{
'headers': { ... },
'code': 200,
'error': '',
'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily'
'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ]
}
'''
title = '%s.get_storyline' % self.__class__.__name__
# validate scope
if {'location', 'activity'} - set(self.service_scope):
raise ValueError('%s requires service scope to contain "location" and "activity".' % title)
# construct request fields
url_string = '%s/user/storyline/daily' % self.endpoint
parameters = self._process_dates(timezone_offset, first_date, start, end, title, track_points)
if track_points:
parameters['trackPoints'] = 'true'
# send request
storyline_details = self._get_request(url_string, params=parameters)
return storyline_details | a method to retrieve storyline details for a period of time
NOTE: start and end must be no more than 30 days, 1 second apart
NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart
:param timezone_offset: integer with timezone offset from user profile details
:param first_date: string with ISO date from user profile details firstDate
:param start: [optional] float with starting datetime for daily summaries
:param end: [optional] float with ending datetime for daily summaries
:param track_points: [optional] boolean to provide detailed tracking of user movement
:return: dictionary of response details with storyline list inside json key
{
'headers': { ... },
'code': 200,
'error': '',
'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily'
'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ]
} | Below is the the instruction that describes the task:
### Input:
a method to retrieve storyline details for a period of time
NOTE: start and end must be no more than 30 days, 1 second apart
NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart
:param timezone_offset: integer with timezone offset from user profile details
:param first_date: string with ISO date from user profile details firstDate
:param start: [optional] float with starting datetime for daily summaries
:param end: [optional] float with ending datetime for daily summaries
:param track_points: [optional] boolean to provide detailed tracking of user movement
:return: dictionary of response details with storyline list inside json key
{
'headers': { ... },
'code': 200,
'error': '',
'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily'
'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ]
}
### Response:
def get_storyline(self, timezone_offset, first_date, start=0.0, end=0.0, track_points=False):
''' a method to retrieve storyline details for a period of time
NOTE: start and end must be no more than 30 days, 1 second apart
NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart
:param timezone_offset: integer with timezone offset from user profile details
:param first_date: string with ISO date from user profile details firstDate
:param start: [optional] float with starting datetime for daily summaries
:param end: [optional] float with ending datetime for daily summaries
:param track_points: [optional] boolean to provide detailed tracking of user movement
:return: dictionary of response details with storyline list inside json key
{
'headers': { ... },
'code': 200,
'error': '',
'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily'
'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ]
}
'''
title = '%s.get_storyline' % self.__class__.__name__
# validate scope
if {'location', 'activity'} - set(self.service_scope):
raise ValueError('%s requires service scope to contain "location" and "activity".' % title)
# construct request fields
url_string = '%s/user/storyline/daily' % self.endpoint
parameters = self._process_dates(timezone_offset, first_date, start, end, title, track_points)
if track_points:
parameters['trackPoints'] = 'true'
# send request
storyline_details = self._get_request(url_string, params=parameters)
return storyline_details |
def get_season_points(self, season_key):
"""
Calling Season Points API.
Arg:
season_key: key of the season
Return:
json data
"""
season_points_url = self.api_path + "season/" + season_key + "/points/"
response = self.get_response(season_points_url)
return response | Calling Season Points API.
Arg:
season_key: key of the season
Return:
json data | Below is the the instruction that describes the task:
### Input:
Calling Season Points API.
Arg:
season_key: key of the season
Return:
json data
### Response:
def get_season_points(self, season_key):
"""
Calling Season Points API.
Arg:
season_key: key of the season
Return:
json data
"""
season_points_url = self.api_path + "season/" + season_key + "/points/"
response = self.get_response(season_points_url)
return response |
def _clear(self):
"""
Clear the current canvas.
"""
# It's orders of magnitude faster to reset with a print like this
# instead of recreating the screen buffers.
(colour, attr, bg) = self.palette["background"]
self._canvas.clear_buffer(colour, attr, bg) | Clear the current canvas. | Below is the the instruction that describes the task:
### Input:
Clear the current canvas.
### Response:
def _clear(self):
"""
Clear the current canvas.
"""
# It's orders of magnitude faster to reset with a print like this
# instead of recreating the screen buffers.
(colour, attr, bg) = self.palette["background"]
self._canvas.clear_buffer(colour, attr, bg) |
def plot_daily_turnover_hist(transactions, positions,
ax=None, **kwargs):
"""
Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions)
sns.distplot(turnover, ax=ax, **kwargs)
ax.set_title('Distribution of daily turnover rates')
ax.set_xlabel('Turnover rate')
return ax | Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on. | Below is the the instruction that describes the task:
### Input:
Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
### Response:
def plot_daily_turnover_hist(transactions, positions,
ax=None, **kwargs):
"""
Plots a histogram of daily turnover rates.
Parameters
----------
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in tears.create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to seaborn plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
turnover = txn.get_turnover(positions, transactions)
sns.distplot(turnover, ax=ax, **kwargs)
ax.set_title('Distribution of daily turnover rates')
ax.set_xlabel('Turnover rate')
return ax |
def additions_install(**kwargs):
'''
Install VirtualBox Guest Additions. Uses the CD, connected by VirtualBox.
To connect VirtualBox Guest Additions via VirtualBox graphical interface
press 'Host+D' ('Host' is usually 'Right Ctrl').
See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_install
salt '*' vbox_guest.additions_install reboot=True
salt '*' vbox_guest.additions_install upgrade_os=True
:param reboot: reboot computer to complete installation
:type reboot: bool
:param upgrade_os: upgrade OS (to ensure the latests version of kernel and developer tools are installed)
:type upgrade_os: bool
:return: version of VirtualBox Guest Additions or string with error
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_install_linux(mount_point, **kwargs) | Install VirtualBox Guest Additions. Uses the CD, connected by VirtualBox.
To connect VirtualBox Guest Additions via VirtualBox graphical interface
press 'Host+D' ('Host' is usually 'Right Ctrl').
See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_install
salt '*' vbox_guest.additions_install reboot=True
salt '*' vbox_guest.additions_install upgrade_os=True
:param reboot: reboot computer to complete installation
:type reboot: bool
:param upgrade_os: upgrade OS (to ensure the latests version of kernel and developer tools are installed)
:type upgrade_os: bool
:return: version of VirtualBox Guest Additions or string with error | Below is the the instruction that describes the task:
### Input:
Install VirtualBox Guest Additions. Uses the CD, connected by VirtualBox.
To connect VirtualBox Guest Additions via VirtualBox graphical interface
press 'Host+D' ('Host' is usually 'Right Ctrl').
See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_install
salt '*' vbox_guest.additions_install reboot=True
salt '*' vbox_guest.additions_install upgrade_os=True
:param reboot: reboot computer to complete installation
:type reboot: bool
:param upgrade_os: upgrade OS (to ensure the latests version of kernel and developer tools are installed)
:type upgrade_os: bool
:return: version of VirtualBox Guest Additions or string with error
### Response:
def additions_install(**kwargs):
'''
Install VirtualBox Guest Additions. Uses the CD, connected by VirtualBox.
To connect VirtualBox Guest Additions via VirtualBox graphical interface
press 'Host+D' ('Host' is usually 'Right Ctrl').
See https://www.virtualbox.org/manual/ch04.html#idp52733088 for more details.
CLI Example:
.. code-block:: bash
salt '*' vbox_guest.additions_install
salt '*' vbox_guest.additions_install reboot=True
salt '*' vbox_guest.additions_install upgrade_os=True
:param reboot: reboot computer to complete installation
:type reboot: bool
:param upgrade_os: upgrade OS (to ensure the latests version of kernel and developer tools are installed)
:type upgrade_os: bool
:return: version of VirtualBox Guest Additions or string with error
'''
with _additions_mounted() as mount_point:
kernel = __grains__.get('kernel', '')
if kernel == 'Linux':
return _additions_install_linux(mount_point, **kwargs) |
def _structure_unicode(self, obj, cl):
"""Just call ``cl`` with the given ``obj``"""
if not isinstance(obj, (bytes, unicode)):
return cl(str(obj))
else:
return obj | Just call ``cl`` with the given ``obj`` | Below is the the instruction that describes the task:
### Input:
Just call ``cl`` with the given ``obj``
### Response:
def _structure_unicode(self, obj, cl):
"""Just call ``cl`` with the given ``obj``"""
if not isinstance(obj, (bytes, unicode)):
return cl(str(obj))
else:
return obj |
def type(self):
'''returns kind of stim ("column" or "times"), based on what parameters are set'''
if self.column!=None or self.column_file:
return "column"
if self.times!=None or self.times_file:
return "times"
return None | returns kind of stim ("column" or "times"), based on what parameters are set | Below is the the instruction that describes the task:
### Input:
returns kind of stim ("column" or "times"), based on what parameters are set
### Response:
def type(self):
'''returns kind of stim ("column" or "times"), based on what parameters are set'''
if self.column!=None or self.column_file:
return "column"
if self.times!=None or self.times_file:
return "times"
return None |
def range(cls, start=None, stop=None, step=None, inclusive=False):
"""Generator of a date range
Args:
start (Date):
stop (Date or datetime.timedelta)!
step (timedelta):
Keyword Args:
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
Yield:
Date:
"""
def sign(x):
"""Inner function for determining the sign of a float
"""
return (-1, 1)[x >= 0]
if not step:
raise ValueError("Null step")
# Convert stop from timedelta to Date object
if isinstance(stop, timedelta):
stop = start + stop
if sign((stop - start).total_seconds()) != sign(step.total_seconds()):
raise ValueError("start/stop order not coherent with step")
date = start
if step.total_seconds() > 0:
oper = "__le__" if inclusive else "__lt__"
else:
oper = "__ge__" if inclusive else "__gt__"
while getattr(date, oper)(stop):
yield date
date += step | Generator of a date range
Args:
start (Date):
stop (Date or datetime.timedelta)!
step (timedelta):
Keyword Args:
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
Yield:
Date: | Below is the the instruction that describes the task:
### Input:
Generator of a date range
Args:
start (Date):
stop (Date or datetime.timedelta)!
step (timedelta):
Keyword Args:
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
Yield:
Date:
### Response:
def range(cls, start=None, stop=None, step=None, inclusive=False):
"""Generator of a date range
Args:
start (Date):
stop (Date or datetime.timedelta)!
step (timedelta):
Keyword Args:
inclusive (bool): If ``False``, the stopping date is not included.
This is the same behavior as the built-in :py:func:`range`.
Yield:
Date:
"""
def sign(x):
"""Inner function for determining the sign of a float
"""
return (-1, 1)[x >= 0]
if not step:
raise ValueError("Null step")
# Convert stop from timedelta to Date object
if isinstance(stop, timedelta):
stop = start + stop
if sign((stop - start).total_seconds()) != sign(step.total_seconds()):
raise ValueError("start/stop order not coherent with step")
date = start
if step.total_seconds() > 0:
oper = "__le__" if inclusive else "__lt__"
else:
oper = "__ge__" if inclusive else "__gt__"
while getattr(date, oper)(stop):
yield date
date += step |
def enable_log(level=logging.DEBUG):
"""Enable console logging.
This is a utils method for try run with storops.
:param level: log level, default to DEBUG
"""
logger = logging.getLogger(__name__)
logger.setLevel(level)
if not logger.handlers:
logger.info('enabling logging to console.')
logger.addHandler(logging.StreamHandler(sys.stdout)) | Enable console logging.
This is a utils method for try run with storops.
:param level: log level, default to DEBUG | Below is the instruction that describes the task:
### Input:
Enable console logging.
This is a utils method for try run with storops.
:param level: log level, default to DEBUG
### Response:
def enable_log(level=logging.DEBUG):
"""Enable console logging.
This is a utils method for try run with storops.
:param level: log level, default to DEBUG
"""
logger = logging.getLogger(__name__)
logger.setLevel(level)
if not logger.handlers:
logger.info('enabling logging to console.')
logger.addHandler(logging.StreamHandler(sys.stdout)) |
def removedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['puntedIssuesEstimateSum']['value'] | Return the total incompleted points this sprint. | Below is the instruction that describes the task:
### Input:
Return the total incompleted points this sprint.
### Response:
def removedIssuesEstimateSum(self, board_id, sprint_id):
"""Return the total incompleted points this sprint."""
return self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)['contents']['puntedIssuesEstimateSum']['value'] |
def save_nff(self, dest, _strategy="auto"):
"""
Save Frame in binary NFF/Jay format.
:param dest: destination where the Frame should be saved.
:param _strategy: one of "mmap", "write" or "auto"
"""
if _strategy not in ("auto", "write", "mmap"):
raise TValueError("Invalid parameter _strategy: only 'write' / 'mmap' "
"/ 'auto' are allowed")
dest = os.path.expanduser(dest)
if not os.path.exists(dest):
os.makedirs(dest)
self.materialize()
mins = self.min().to_list()
maxs = self.max().to_list()
metafile = os.path.join(dest, "_meta.nff")
with _builtin_open(metafile, "w", encoding="utf-8") as out:
out.write("# NFF2\n")
out.write("# nrows = %d\n" % self.nrows)
out.write('filename,stype,meta,colname,min,max\n')
l = len(str(self.ncols))
for i in range(self.ncols):
filename = "c%0*d" % (l, i + 1)
colname = self.names[i].replace('"', '""')
stype = self.stypes[i]
if stype == dt.stype.obj64:
dtwarn("Column %r of type obj64 was not saved" % self.names[i])
continue
smin = _stringify(mins[i][0])
smax = _stringify(maxs[i][0])
out.write('%s,%s,,"%s",%s,%s\n'
% (filename, stype.code, colname, smin, smax))
filename = os.path.join(dest, filename)
core._column_save_to_disk(self, i, filename, _strategy) | Save Frame in binary NFF/Jay format.
:param dest: destination where the Frame should be saved.
:param _strategy: one of "mmap", "write" or "auto" | Below is the instruction that describes the task:
### Input:
Save Frame in binary NFF/Jay format.
:param dest: destination where the Frame should be saved.
:param _strategy: one of "mmap", "write" or "auto"
### Response:
def save_nff(self, dest, _strategy="auto"):
"""
Save Frame in binary NFF/Jay format.
:param dest: destination where the Frame should be saved.
:param _strategy: one of "mmap", "write" or "auto"
"""
if _strategy not in ("auto", "write", "mmap"):
raise TValueError("Invalid parameter _strategy: only 'write' / 'mmap' "
"/ 'auto' are allowed")
dest = os.path.expanduser(dest)
if not os.path.exists(dest):
os.makedirs(dest)
self.materialize()
mins = self.min().to_list()
maxs = self.max().to_list()
metafile = os.path.join(dest, "_meta.nff")
with _builtin_open(metafile, "w", encoding="utf-8") as out:
out.write("# NFF2\n")
out.write("# nrows = %d\n" % self.nrows)
out.write('filename,stype,meta,colname,min,max\n')
l = len(str(self.ncols))
for i in range(self.ncols):
filename = "c%0*d" % (l, i + 1)
colname = self.names[i].replace('"', '""')
stype = self.stypes[i]
if stype == dt.stype.obj64:
dtwarn("Column %r of type obj64 was not saved" % self.names[i])
continue
smin = _stringify(mins[i][0])
smax = _stringify(maxs[i][0])
out.write('%s,%s,,"%s",%s,%s\n'
% (filename, stype.code, colname, smin, smax))
filename = os.path.join(dest, filename)
core._column_save_to_disk(self, i, filename, _strategy) |
def new(cls, alias, sealed_obj, algorithm, key, key_size):
"""
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
raise NotImplementedError("Creating Secret Keys not implemented") | Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore. | Below is the instruction that describes the task:
### Input:
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
### Response:
def new(cls, alias, sealed_obj, algorithm, key, key_size):
"""
Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore.
"""
timestamp = int(time.time()) * 1000
raise NotImplementedError("Creating Secret Keys not implemented") |
def setPluginPath(cls, pluginpath):
"""
Sets the plugin path for this class to the given path. The inputted
pluginpath value can either be a list of strings, or a string
containing paths separated by the OS specific path separator (':' on
Mac & Linux, ';' on Windows)
:param pluginpath | [<str>, ..] || <str>
"""
setattr(cls, '_%s__pluginpath' % cls.__name__, None)
cls.addPluginPath(pluginpath) | Sets the plugin path for this class to the given path. The inputted
pluginpath value can either be a list of strings, or a string
containing paths separated by the OS specific path separator (':' on
Mac & Linux, ';' on Windows)
:param pluginpath | [<str>, ..] || <str> | Below is the instruction that describes the task:
### Input:
Sets the plugin path for this class to the given path. The inputted
pluginpath value can either be a list of strings, or a string
containing paths separated by the OS specific path separator (':' on
Mac & Linux, ';' on Windows)
:param pluginpath | [<str>, ..] || <str>
### Response:
def setPluginPath(cls, pluginpath):
"""
Sets the plugin path for this class to the given path. The inputted
pluginpath value can either be a list of strings, or a string
containing paths separated by the OS specific path separator (':' on
Mac & Linux, ';' on Windows)
:param pluginpath | [<str>, ..] || <str>
"""
setattr(cls, '_%s__pluginpath' % cls.__name__, None)
cls.addPluginPath(pluginpath) |
def render_field_description(field):
"""
Render a field description as HTML.
"""
if hasattr(field, 'description') and field.description != '':
html = """<p class="help-block">{field.description}</p>"""
html = html.format(
field=field
)
return HTMLString(html)
return '' | Render a field description as HTML. | Below is the instruction that describes the task:
### Input:
Render a field description as HTML.
### Response:
def render_field_description(field):
"""
Render a field description as HTML.
"""
if hasattr(field, 'description') and field.description != '':
html = """<p class="help-block">{field.description}</p>"""
html = html.format(
field=field
)
return HTMLString(html)
return '' |
def return_values(self):
"""
list(Return Values): List of the return values
"""
from slither.core.cfg.node import NodeType
from slither.slithir.operations import Return
from slither.slithir.variables import Constant
if self._return_values is None:
return_values = list()
returns = [n for n in self.nodes if n.type == NodeType.RETURN]
[return_values.extend(ir.values) for node in returns for ir in node.irs if isinstance(ir, Return)]
self._return_values = list(set([x for x in return_values if not isinstance(x, Constant)]))
return self._return_values | list(Return Values): List of the return values | Below is the instruction that describes the task:
### Input:
list(Return Values): List of the return values
### Response:
def return_values(self):
"""
list(Return Values): List of the return values
"""
from slither.core.cfg.node import NodeType
from slither.slithir.operations import Return
from slither.slithir.variables import Constant
if self._return_values is None:
return_values = list()
returns = [n for n in self.nodes if n.type == NodeType.RETURN]
[return_values.extend(ir.values) for node in returns for ir in node.irs if isinstance(ir, Return)]
self._return_values = list(set([x for x in return_values if not isinstance(x, Constant)]))
return self._return_values |
def random(magnitude=1):
""" Create a unit vector pointing in a random direction. """
theta = random.uniform(0, 2 * math.pi)
return magnitude * Vector(math.cos(theta), math.sin(theta)) | Create a unit vector pointing in a random direction. | Below is the instruction that describes the task:
### Input:
Create a unit vector pointing in a random direction.
### Response:
def random(magnitude=1):
""" Create a unit vector pointing in a random direction. """
theta = random.uniform(0, 2 * math.pi)
return magnitude * Vector(math.cos(theta), math.sin(theta)) |
def unzip(x, split_dim, current_length, num_splits=2, name=None):
"""Splits a tensor by unzipping along the split_dim.
For example the following array split into 2 would be:
[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
and by 3:
[1, 2, 3, 4] -> [1, 4], [2], [3]
Args:
x: The tensor to split.
split_dim: The dimension to split along.
current_length: Current length along the split_dim.
num_splits: The number of splits.
name: Optional name for this op.
Returns:
A length num_splits sequence.
"""
with tf.name_scope(name, 'unzip', [x]) as scope:
x = tf.convert_to_tensor(x, name='x')
# There is probably a more efficient way to do this.
all_splits = tf.split(
value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)
splits = [[] for _ in xrange(num_splits)]
for i in xrange(current_length):
splits[i % num_splits].append(all_splits[i])
return [tf.concat(s, split_dim) for s in splits] | Splits a tensor by unzipping along the split_dim.
For example the following array split into 2 would be:
[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
and by 3:
[1, 2, 3, 4] -> [1, 4], [2], [3]
Args:
x: The tensor to split.
split_dim: The dimension to split along.
current_length: Current length along the split_dim.
num_splits: The number of splits.
name: Optional name for this op.
Returns:
A length num_splits sequence. | Below is the instruction that describes the task:
### Input:
Splits a tensor by unzipping along the split_dim.
For example the following array split into 2 would be:
[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
and by 3:
[1, 2, 3, 4] -> [1, 4], [2], [3]
Args:
x: The tensor to split.
split_dim: The dimension to split along.
current_length: Current length along the split_dim.
num_splits: The number of splits.
name: Optional name for this op.
Returns:
A length num_splits sequence.
### Response:
def unzip(x, split_dim, current_length, num_splits=2, name=None):
"""Splits a tensor by unzipping along the split_dim.
For example the following array split into 2 would be:
[1, 2, 3, 4, 5, 6] -> [1, 3, 5], [2, 4, 6]
and by 3:
[1, 2, 3, 4] -> [1, 4], [2], [3]
Args:
x: The tensor to split.
split_dim: The dimension to split along.
current_length: Current length along the split_dim.
num_splits: The number of splits.
name: Optional name for this op.
Returns:
A length num_splits sequence.
"""
with tf.name_scope(name, 'unzip', [x]) as scope:
x = tf.convert_to_tensor(x, name='x')
# There is probably a more efficient way to do this.
all_splits = tf.split(
value=x, num_or_size_splits=current_length, axis=split_dim, name=scope)
splits = [[] for _ in xrange(num_splits)]
for i in xrange(current_length):
splits[i % num_splits].append(all_splits[i])
return [tf.concat(s, split_dim) for s in splits] |
def _save_account(self, account, username):
""" Called when account is created/updated. With username override. """
# retrieve default project, or use null project if none
default_project_name = self._null_project
if account.default_project is not None:
default_project_name = account.default_project.pid
# account created
# account updated
ds_user = self.get_user(username)
if account.date_deleted is None:
# date_deleted is not set, user should exist
logger.debug("account is active")
if ds_user is None:
# create user if doesn't exist
self._create_user(username, default_project_name)
else:
# or just set default project
self._set_user_default_project(username, default_project_name)
# update user meta information
self._call([
"gchuser",
"-n", self._filter_string(account.person.get_full_name()),
"-u", username])
self._call([
"gchuser",
"-E", self._filter_string(account.person.email),
"-u", username])
# add rest of projects user belongs to
mam_projects = self.get_projects_in_user(username)
for project in account.person.projects.all():
if project.pid not in mam_projects:
self.add_account_to_project(account, project)
else:
# date_deleted is not set, user should not exist
logger.debug("account is not active")
if ds_user is not None:
# delete MAM user if account marked as deleted
self._call(["grmuser", "-u", username], ignore_errors=[8])
return | Called when account is created/updated. With username override. | Below is the instruction that describes the task:
### Input:
Called when account is created/updated. With username override.
### Response:
def _save_account(self, account, username):
""" Called when account is created/updated. With username override. """
# retrieve default project, or use null project if none
default_project_name = self._null_project
if account.default_project is not None:
default_project_name = account.default_project.pid
# account created
# account updated
ds_user = self.get_user(username)
if account.date_deleted is None:
# date_deleted is not set, user should exist
logger.debug("account is active")
if ds_user is None:
# create user if doesn't exist
self._create_user(username, default_project_name)
else:
# or just set default project
self._set_user_default_project(username, default_project_name)
# update user meta information
self._call([
"gchuser",
"-n", self._filter_string(account.person.get_full_name()),
"-u", username])
self._call([
"gchuser",
"-E", self._filter_string(account.person.email),
"-u", username])
# add rest of projects user belongs to
mam_projects = self.get_projects_in_user(username)
for project in account.person.projects.all():
if project.pid not in mam_projects:
self.add_account_to_project(account, project)
else:
# date_deleted is not set, user should not exist
logger.debug("account is not active")
if ds_user is not None:
# delete MAM user if account marked as deleted
self._call(["grmuser", "-u", username], ignore_errors=[8])
return |
def create_new_dispatch(self, dispatch):
"""
Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create
"""
self._validate_uuid(dispatch.dispatch_id)
# Create new dispatch
url = "/notification/v1/dispatch"
post_response = NWS_DAO().postURL(
url, self._write_headers(), self._json_body(dispatch.json_data()))
if post_response.status != 200:
raise DataFailureException(
url, post_response.status, post_response.data)
return post_response.status | Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create | Below is the instruction that describes the task:
### Input:
Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create
### Response:
def create_new_dispatch(self, dispatch):
"""
Create a new dispatch
:param dispatch:
is the new dispatch that the client wants to create
"""
self._validate_uuid(dispatch.dispatch_id)
# Create new dispatch
url = "/notification/v1/dispatch"
post_response = NWS_DAO().postURL(
url, self._write_headers(), self._json_body(dispatch.json_data()))
if post_response.status != 200:
raise DataFailureException(
url, post_response.status, post_response.data)
return post_response.status |
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000}) | Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30) | Below is the instruction that describes the task:
### Input:
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
### Response:
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
::
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000}) |
def validate_regions_schema(self, data):
"""Performs field validation for regions. This should be
a dict with region names as the key and RegionSchema as the value
"""
region_schema = RegionSchema()
supplied_regions = data.get('regions', {})
for region in supplied_regions.keys():
result = region_schema.validate(supplied_regions[region])
if len(result.keys()) > 0:
raise ValidationError(result) | Performs field validation for regions. This should be
a dict with region names as the key and RegionSchema as the value | Below is the instruction that describes the task:
### Input:
Performs field validation for regions. This should be
a dict with region names as the key and RegionSchema as the value
### Response:
def validate_regions_schema(self, data):
"""Performs field validation for regions. This should be
a dict with region names as the key and RegionSchema as the value
"""
region_schema = RegionSchema()
supplied_regions = data.get('regions', {})
for region in supplied_regions.keys():
result = region_schema.validate(supplied_regions[region])
if len(result.keys()) > 0:
raise ValidationError(result) |
def simulated_data_iter(
self, n=None, pexact=None, add_priornoise=False, bootstrap=None
):
""" Iterator that returns simulated data based upon a fit's data.
Simulated data is generated from a fit's data ``fit.y`` by
replacing the mean values in that data with random numbers
drawn from a distribution whose mean is ``self.fcn(pexact)``
and whose covariance matrix is the same as that of ``self.y``.
Each iteration of the iterator returns new simulated data,
with different random numbers for the means and a covariance
matrix equal to that of ``self.y``. This iterator is used by
``self.simulated_fit_iter``.
Typical usage::
fit = nonlinear_fit(data=(x,y), prior=prior, fcn=fcn)
...
for ysim, priorsim in fit.simulate_data_iter(n=10):
fitsim = nonlinear_fit(data=(x, ysim), prior=priorsim, fcn=fcn)
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
This code tests the fitting protocol on simulated data, comparing the
best fit parameters in each case with the correct values (``fit.p``).
The loop in this code is functionally the same as (but probably not
as fast as)::
for fitsim in fit.simulated_fit_iter(n=10):
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
Args:
n (int or None): Maximum number of iterations (equals
infinity if ``None``).
pexact (None or dict/array of numbers): Fit-parameter values for
the underlying distribution used to generate simulated data;
replaced by ``self.pmean`` if is ``None`` (default).
add_priornoise (bool): Vary prior means if ``True``; otherwise
vary only the means in ``self.y`` (default).
Returns:
An iterator that returns a 2-tuple containing simulated
versions of self.y and self.prior: ``(ysim, priorsim)``.
"""
pexact = self.pmean if pexact is None else pexact
# bootstrap is old name for add_priornoise; keep for legacy code
if bootstrap is not None:
add_priornoise = bootstrap
f = self.fcn(pexact) if self.x is False else self.fcn(self.x, pexact)
y = copy.deepcopy(self.y)
if isinstance(y, _gvar.BufferDict):
# y,f dictionaries; fresh copy of y, reorder f
tmp_f = _gvar.BufferDict([(k, f[k]) for k in y])
y.buf += tmp_f.buf - _gvar.mean(y.buf)
else:
# y,f arrays; fresh copy of y
y += numpy.asarray(f) - _gvar.mean(y)
prior = copy.deepcopy(self.prior)
if prior is None or not add_priornoise:
yiter = _gvar.bootstrap_iter(y, n)
for ysim in _gvar.bootstrap_iter(y, n):
yield ysim, prior
else:
yp = numpy.empty(y.size + prior.size, object)
yp[:y.size] = y.flat
yp[y.size:] = prior.flat
for ypsim in _gvar.bootstrap_iter(yp, n):
y.flat = ypsim[:y.size]
prior.flat = ypsim[y.size:]
yield y, prior | Iterator that returns simulated data based upon a fit's data.
Simulated data is generated from a fit's data ``fit.y`` by
replacing the mean values in that data with random numbers
drawn from a distribution whose mean is ``self.fcn(pexact)``
and whose covariance matrix is the same as that of ``self.y``.
Each iteration of the iterator returns new simulated data,
with different random numbers for the means and a covariance
matrix equal to that of ``self.y``. This iterator is used by
``self.simulated_fit_iter``.
Typical usage::
fit = nonlinear_fit(data=(x,y), prior=prior, fcn=fcn)
...
for ysim, priorsim in fit.simulate_data_iter(n=10):
fitsim = nonlinear_fit(data=(x, ysim), prior=priorsim, fcn=fcn)
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
This code tests the fitting protocol on simulated data, comparing the
best fit parameters in each case with the correct values (``fit.p``).
The loop in this code is functionally the same as (but probably not
as fast as)::
for fitsim in fit.simulated_fit_iter(n=10):
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
Args:
n (int or None): Maximum number of iterations (equals
infinity if ``None``).
pexact (None or dict/array of numbers): Fit-parameter values for
the underlying distribution used to generate simulated data;
replaced by ``self.pmean`` if is ``None`` (default).
add_priornoise (bool): Vary prior means if ``True``; otherwise
vary only the means in ``self.y`` (default).
Returns:
An iterator that returns a 2-tuple containing simulated
versions of self.y and self.prior: ``(ysim, priorsim)``. | Below is the instruction that describes the task:
### Input:
Iterator that returns simulated data based upon a fit's data.
Simulated data is generated from a fit's data ``fit.y`` by
replacing the mean values in that data with random numbers
drawn from a distribution whose mean is ``self.fcn(pexact)``
and whose covariance matrix is the same as that of ``self.y``.
Each iteration of the iterator returns new simulated data,
with different random numbers for the means and a covariance
matrix equal to that of ``self.y``. This iterator is used by
``self.simulated_fit_iter``.
Typical usage::
fit = nonlinear_fit(data=(x,y), prior=prior, fcn=fcn)
...
for ysim, priorsim in fit.simulate_data_iter(n=10):
fitsim = nonlinear_fit(data=(x, ysim), prior=priorsim, fcn=fcn)
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
This code tests the fitting protocol on simulated data, comparing the
best fit parameters in each case with the correct values (``fit.p``).
The loop in this code is functionally the same as (but probably not
as fast as)::
for fitsim in fit.simulated_fit_iter(n=10):
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
Args:
n (int or None): Maximum number of iterations (equals
infinity if ``None``).
pexact (None or dict/array of numbers): Fit-parameter values for
the underlying distribution used to generate simulated data;
replaced by ``self.pmean`` if is ``None`` (default).
add_priornoise (bool): Vary prior means if ``True``; otherwise
vary only the means in ``self.y`` (default).
Returns:
An iterator that returns a 2-tuple containing simulated
versions of self.y and self.prior: ``(ysim, priorsim)``.
### Response:
def simulated_data_iter(
self, n=None, pexact=None, add_priornoise=False, bootstrap=None
):
""" Iterator that returns simulated data based upon a fit's data.
Simulated data is generated from a fit's data ``fit.y`` by
replacing the mean values in that data with random numbers
drawn from a distribution whose mean is ``self.fcn(pexact)``
and whose covariance matrix is the same as that of ``self.y``.
Each iteration of the iterator returns new simulated data,
with different random numbers for the means and a covariance
matrix equal to that of ``self.y``. This iterator is used by
``self.simulated_fit_iter``.
Typical usage::
fit = nonlinear_fit(data=(x,y), prior=prior, fcn=fcn)
...
for ysim, priorsim in fit.simulate_data_iter(n=10):
fitsim = nonlinear_fit(data=(x, ysim), prior=priorsim, fcn=fcn)
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
This code tests the fitting protocol on simulated data, comparing the
best fit parameters in each case with the correct values (``fit.p``).
The loop in this code is functionally the same as (but probably not
as fast as)::
for fitsim in fit.simulated_fit_iter(n=10):
print(fitsim)
print('chi2 =', gv.chi2(fit.p, fitsim.p))
Args:
n (int or None): Maximum number of iterations (equals
infinity if ``None``).
pexact (None or dict/array of numbers): Fit-parameter values for
the underlying distribution used to generate simulated data;
replaced by ``self.pmean`` if is ``None`` (default).
add_priornoise (bool): Vary prior means if ``True``; otherwise
vary only the means in ``self.y`` (default).
Returns:
An iterator that returns a 2-tuple containing simulated
versions of self.y and self.prior: ``(ysim, priorsim)``.
"""
pexact = self.pmean if pexact is None else pexact
# bootstrap is old name for add_priornoise; keep for legacy code
if bootstrap is not None:
add_priornoise = bootstrap
f = self.fcn(pexact) if self.x is False else self.fcn(self.x, pexact)
y = copy.deepcopy(self.y)
if isinstance(y, _gvar.BufferDict):
# y,f dictionaries; fresh copy of y, reorder f
tmp_f = _gvar.BufferDict([(k, f[k]) for k in y])
y.buf += tmp_f.buf - _gvar.mean(y.buf)
else:
# y,f arrays; fresh copy of y
y += numpy.asarray(f) - _gvar.mean(y)
prior = copy.deepcopy(self.prior)
if prior is None or not add_priornoise:
yiter = _gvar.bootstrap_iter(y, n)
for ysim in _gvar.bootstrap_iter(y, n):
yield ysim, prior
else:
yp = numpy.empty(y.size + prior.size, object)
yp[:y.size] = y.flat
yp[y.size:] = prior.flat
for ypsim in _gvar.bootstrap_iter(yp, n):
y.flat = ypsim[:y.size]
prior.flat = ypsim[y.size:]
yield y, prior |
def install_trigger(connection: connection, table: str, schema: str='public', overwrite: bool=False) -> None:
"""Install a psycopg2-pgevents trigger against a table.
Parameters
----------
connection: psycopg2.extensions.connection
Active connection to a PostGreSQL database.
table: str
Table for which the trigger should be installed.
schema: str
Schema to which the table belongs.
overwrite: bool
Whether or not to overwrite existing installation of trigger for the
given table, if existing installation is found.
Returns
-------
None
"""
prior_install = False
if not overwrite:
prior_install = trigger_installed(connection, table, schema)
if not prior_install:
log('Installing {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME)
statement = INSTALL_TRIGGER_STATEMENT.format(
schema=schema,
table=table
)
execute(connection, statement)
else:
log('{}.{} trigger already installed; skipping...'.format(schema, table), logger_name=_LOGGER_NAME) | Install a psycopg2-pgevents trigger against a table.
Parameters
----------
connection: psycopg2.extensions.connection
Active connection to a PostGreSQL database.
table: str
Table for which the trigger should be installed.
schema: str
Schema to which the table belongs.
overwrite: bool
Whether or not to overwrite existing installation of trigger for the
given table, if existing installation is found.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Install a psycopg2-pgevents trigger against a table.
Parameters
----------
connection: psycopg2.extensions.connection
Active connection to a PostGreSQL database.
table: str
Table for which the trigger should be installed.
schema: str
Schema to which the table belongs.
overwrite: bool
Whether or not to overwrite existing installation of trigger for the
given table, if existing installation is found.
Returns
-------
None
### Response:
def install_trigger(connection: connection, table: str, schema: str='public', overwrite: bool=False) -> None:
"""Install a psycopg2-pgevents trigger against a table.
Parameters
----------
connection: psycopg2.extensions.connection
Active connection to a PostGreSQL database.
table: str
Table for which the trigger should be installed.
schema: str
Schema to which the table belongs.
overwrite: bool
Whether or not to overwrite existing installation of trigger for the
given table, if existing installation is found.
Returns
-------
None
"""
prior_install = False
if not overwrite:
prior_install = trigger_installed(connection, table, schema)
if not prior_install:
log('Installing {}.{} trigger...'.format(schema, table), logger_name=_LOGGER_NAME)
statement = INSTALL_TRIGGER_STATEMENT.format(
schema=schema,
table=table
)
execute(connection, statement)
else:
log('{}.{} trigger already installed; skipping...'.format(schema, table), logger_name=_LOGGER_NAME) |
def current_arg_text(self) -> str:
"""
Plain text part in the current argument, without any CQ codes.
"""
if self._current_arg_text is None:
self._current_arg_text = Message(
self.current_arg).extract_plain_text()
return self._current_arg_text | Plain text part in the current argument, without any CQ codes. | Below is the instruction that describes the task:
### Input:
Plain text part in the current argument, without any CQ codes.
### Response:
def current_arg_text(self) -> str:
"""
Plain text part in the current argument, without any CQ codes.
"""
if self._current_arg_text is None:
self._current_arg_text = Message(
self.current_arg).extract_plain_text()
return self._current_arg_text |
def transit_verify_signed_data(self, name, input_data, algorithm=None, signature=None, hmac=None, context=None,
prehashed=None, mount_point='transit', signature_algorithm='pss'):
"""POST /<mount_point>/verify/<name>(/<algorithm>)
:param name:
:type name:
:param input_data:
:type input_data:
:param algorithm:
:type algorithm:
:param signature:
:type signature:
:param hmac:
:type hmac:
:param context:
:type context:
:param prehashed:
:type prehashed:
:param mount_point:
:type mount_point:
:param signature_algorithm:
:type signature_algorithm:
:return:
:rtype:
"""
if algorithm is not None:
url = '/v1/{0}/verify/{1}/{2}'.format(mount_point, name, algorithm)
else:
url = '/v1/{0}/verify/{1}'.format(mount_point, name)
params = {
'input': input_data
}
if signature is not None:
params['signature'] = signature
if hmac is not None:
params['hmac'] = hmac
if context is not None:
params['context'] = context
if prehashed is not None:
params['prehashed'] = prehashed
params['signature_algorithm'] = signature_algorithm
return self._adapter.post(url, json=params).json() | POST /<mount_point>/verify/<name>(/<algorithm>)
:param name:
:type name:
:param input_data:
:type input_data:
:param algorithm:
:type algorithm:
:param signature:
:type signature:
:param hmac:
:type hmac:
:param context:
:type context:
:param prehashed:
:type prehashed:
:param mount_point:
:type mount_point:
:param signature_algorithm:
:type signature_algorithm:
:return:
    :rtype: | Below is the instruction that describes the task:
### Input:
POST /<mount_point>/verify/<name>(/<algorithm>)
:param name:
:type name:
:param input_data:
:type input_data:
:param algorithm:
:type algorithm:
:param signature:
:type signature:
:param hmac:
:type hmac:
:param context:
:type context:
:param prehashed:
:type prehashed:
:param mount_point:
:type mount_point:
:param signature_algorithm:
:type signature_algorithm:
:return:
:rtype:
### Response:
def transit_verify_signed_data(self, name, input_data, algorithm=None, signature=None, hmac=None, context=None,
prehashed=None, mount_point='transit', signature_algorithm='pss'):
"""POST /<mount_point>/verify/<name>(/<algorithm>)
:param name:
:type name:
:param input_data:
:type input_data:
:param algorithm:
:type algorithm:
:param signature:
:type signature:
:param hmac:
:type hmac:
:param context:
:type context:
:param prehashed:
:type prehashed:
:param mount_point:
:type mount_point:
:param signature_algorithm:
:type signature_algorithm:
:return:
:rtype:
"""
if algorithm is not None:
url = '/v1/{0}/verify/{1}/{2}'.format(mount_point, name, algorithm)
else:
url = '/v1/{0}/verify/{1}'.format(mount_point, name)
params = {
'input': input_data
}
if signature is not None:
params['signature'] = signature
if hmac is not None:
params['hmac'] = hmac
if context is not None:
params['context'] = context
if prehashed is not None:
params['prehashed'] = prehashed
params['signature_algorithm'] = signature_algorithm
return self._adapter.post(url, json=params).json() |
def last_bed_temp(self):
"""Return avg bed temperature for last session."""
try:
bedtemps = self.intervals[1]['timeseries']['tempBedC']
except KeyError:
return None
tmp = 0
num_temps = len(bedtemps)
if num_temps == 0:
return None
for temp in bedtemps:
tmp += temp[1]
bedtemp = tmp/num_temps
    return bedtemp | Return avg bed temperature for last session. | Below is the instruction that describes the task:
### Input:
Return avg bed temperature for last session.
### Response:
def last_bed_temp(self):
"""Return avg bed temperature for last session."""
try:
bedtemps = self.intervals[1]['timeseries']['tempBedC']
except KeyError:
return None
tmp = 0
num_temps = len(bedtemps)
if num_temps == 0:
return None
for temp in bedtemps:
tmp += temp[1]
bedtemp = tmp/num_temps
return bedtemp |
def add_tag(
self,
tag):
"""*Add a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to add to the object
**Usage:**
.. code-block:: python
aTask.add_tag("@due")
"""
if tag.replace("@", "") in self.tags:
return
self.refresh
oldContent = self.to_string(indentLevel=1)
self.tags += [tag.replace("@", "")]
newContent = self.to_string(indentLevel=1)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.refresh
return None | *Add a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to add to the object
**Usage:**
.. code-block:: python
        aTask.add_tag("@due") | Below is the instruction that describes the task:
### Input:
*Add a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to add to the object
**Usage:**
.. code-block:: python
aTask.add_tag("@due")
### Response:
def add_tag(
self,
tag):
"""*Add a tag this taskpaper object*
**Key Arguments:**
- ``tag`` -- the tag to add to the object
**Usage:**
.. code-block:: python
aTask.add_tag("@due")
"""
if tag.replace("@", "") in self.tags:
return
self.refresh
oldContent = self.to_string(indentLevel=1)
self.tags += [tag.replace("@", "")]
newContent = self.to_string(indentLevel=1)
# ADD DIRECTLY TO CONTENT IF THE PROJECT IS BEING ADDED SPECIFICALLY TO
# THIS OBJECT
self.parent._update_document_tree(
oldContent=oldContent,
newContent=newContent
)
self.refresh
return None |
def list_abundance_cartesian_expansion(graph: BELGraph) -> None:
"""Expand all list abundances to simple subject-predicate-object networks."""
for u, v, k, d in list(graph.edges(keys=True, data=True)):
if CITATION not in d:
continue
if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):
for u_member, v_member in itt.product(u.members, v.members):
graph.add_qualified_edge(
u_member, v_member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(u, ListAbundance):
for member in u.members:
graph.add_qualified_edge(
member, v,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(v, ListAbundance):
for member in v.members:
graph.add_qualified_edge(
u, member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
    _remove_list_abundance_nodes(graph) | Expand all list abundances to simple subject-predicate-object networks. | Below is the instruction that describes the task:
### Input:
Expand all list abundances to simple subject-predicate-object networks.
### Response:
def list_abundance_cartesian_expansion(graph: BELGraph) -> None:
"""Expand all list abundances to simple subject-predicate-object networks."""
for u, v, k, d in list(graph.edges(keys=True, data=True)):
if CITATION not in d:
continue
if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):
for u_member, v_member in itt.product(u.members, v.members):
graph.add_qualified_edge(
u_member, v_member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(u, ListAbundance):
for member in u.members:
graph.add_qualified_edge(
member, v,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(v, ListAbundance):
for member in v.members:
graph.add_qualified_edge(
u, member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
_remove_list_abundance_nodes(graph) |
def _find_countour_yaml(start, checked, names=None):
"""Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the countour.yaml file or None if it is not found
"""
extensions = []
if names:
for name in names:
if not os.path.splitext(name)[1]:
extensions.append(name + ".yaml")
extensions.append(name + ".yml")
yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions
directory = start
while directory not in checked:
checked.add(directory)
for fs_yaml_name in yaml_names:
yaml_path = os.path.join(directory, fs_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
directory = os.path.dirname(directory)
return | Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
        the path of the countour.yaml file or None if it is not found | Below is the instruction that describes the task:
### Input:
Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the countour.yaml file or None if it is not found
### Response:
def _find_countour_yaml(start, checked, names=None):
"""Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of countour.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the countour.yaml file or None if it is not found
"""
extensions = []
if names:
for name in names:
if not os.path.splitext(name)[1]:
extensions.append(name + ".yaml")
extensions.append(name + ".yml")
yaml_names = (names or []) + CONTOUR_YAML_NAMES + extensions
directory = start
while directory not in checked:
checked.add(directory)
for fs_yaml_name in yaml_names:
yaml_path = os.path.join(directory, fs_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
directory = os.path.dirname(directory)
return |
def get_revision(self, id, rev, **kwargs):
"""
Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_revision_with_http_info(id, rev, **kwargs)
else:
(data) = self.get_revision_with_http_info(id, rev, **kwargs)
return data | Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
returns the request thread.
### Response:
def get_revision(self, id, rev, **kwargs):
"""
Get specific audited revision of this build configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_revision(id, rev, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration id (required)
:param int rev: Build configuration rev (required)
:return: BuildConfigurationAuditedSingleton
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_revision_with_http_info(id, rev, **kwargs)
else:
(data) = self.get_revision_with_http_info(id, rev, **kwargs)
return data |
def wrap_aside(self, block, aside, view, frag, context): # pylint: disable=unused-argument
"""
Creates a div which identifies the aside, points to the original block,
and writes out the json_init_args into a script tag.
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have
javascript, you'll need to override this impl
"""
return self._wrap_ele(
aside, view, frag, {
'block_id': block.scope_ids.usage_id,
'url_selector': 'asideBaseUrl',
}) | Creates a div which identifies the aside, points to the original block,
and writes out the json_init_args into a script tag.
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have
    javascript, you'll need to override this impl | Below is the instruction that describes the task:
### Input:
Creates a div which identifies the aside, points to the original block,
and writes out the json_init_args into a script tag.
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have
javascript, you'll need to override this impl
### Response:
def wrap_aside(self, block, aside, view, frag, context): # pylint: disable=unused-argument
"""
Creates a div which identifies the aside, points to the original block,
and writes out the json_init_args into a script tag.
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have
javascript, you'll need to override this impl
"""
return self._wrap_ele(
aside, view, frag, {
'block_id': block.scope_ids.usage_id,
'url_selector': 'asideBaseUrl',
}) |
async def get_tournament(self, t_id: int = None, url: str = None, subdomain: str = None, force_update=False) -> Tournament:
""" gets a tournament with its id or url or url+subdomain
Note: from the API, it can't be known if the retrieved tournament was made from this user.
Thus, any tournament is added to the local list of tournaments, but some functions (updates/destroy...) cannot be used for tournaments not owned by this user.
|methcoro|
Args:
t_id: tournament id
url: last part of the tournament url (http://challonge.com/XXX)
subdomain: first part of the tournament url, if any (http://XXX.challonge.com/...)
force_update: *optional* set to True to force the data update from Challonge
Returns:
Tournament
Raises:
APIException
ValueError: if neither of the arguments are provided
"""
assert_or_raise((t_id is None) ^ (url is None),
ValueError,
'One of t_id or url must not be None')
found_t = self._find_tournament_by_id(t_id) if t_id is not None else self._find_tournament_by_url(url, subdomain)
if force_update or found_t is None:
param = t_id
if param is None:
if subdomain is not None:
param = '{}-{}'.format(subdomain, url)
else:
param = url
res = await self.connection('GET', 'tournaments/{}'.format(param))
self._refresh_tournament_from_json(res)
found_t = self._find_tournament_by_id(res['tournament']['id'])
return found_t | gets a tournament with its id or url or url+subdomain
Note: from the API, it can't be known if the retrieved tournament was made from this user.
Thus, any tournament is added to the local list of tournaments, but some functions (updates/destroy...) cannot be used for tournaments not owned by this user.
|methcoro|
Args:
t_id: tournament id
url: last part of the tournament url (http://challonge.com/XXX)
subdomain: first part of the tournament url, if any (http://XXX.challonge.com/...)
force_update: *optional* set to True to force the data update from Challonge
Returns:
Tournament
Raises:
APIException
        ValueError: if neither of the arguments are provided | Below is the instruction that describes the task:
### Input:
gets a tournament with its id or url or url+subdomain
Note: from the API, it can't be known if the retrieved tournament was made from this user.
Thus, any tournament is added to the local list of tournaments, but some functions (updates/destroy...) cannot be used for tournaments not owned by this user.
|methcoro|
Args:
t_id: tournament id
url: last part of the tournament url (http://challonge.com/XXX)
subdomain: first part of the tournament url, if any (http://XXX.challonge.com/...)
force_update: *optional* set to True to force the data update from Challonge
Returns:
Tournament
Raises:
APIException
ValueError: if neither of the arguments are provided
### Response:
async def get_tournament(self, t_id: int = None, url: str = None, subdomain: str = None, force_update=False) -> Tournament:
""" gets a tournament with its id or url or url+subdomain
Note: from the API, it can't be known if the retrieved tournament was made from this user.
Thus, any tournament is added to the local list of tournaments, but some functions (updates/destroy...) cannot be used for tournaments not owned by this user.
|methcoro|
Args:
t_id: tournament id
url: last part of the tournament url (http://challonge.com/XXX)
subdomain: first part of the tournament url, if any (http://XXX.challonge.com/...)
force_update: *optional* set to True to force the data update from Challonge
Returns:
Tournament
Raises:
APIException
ValueError: if neither of the arguments are provided
"""
assert_or_raise((t_id is None) ^ (url is None),
ValueError,
'One of t_id or url must not be None')
found_t = self._find_tournament_by_id(t_id) if t_id is not None else self._find_tournament_by_url(url, subdomain)
if force_update or found_t is None:
param = t_id
if param is None:
if subdomain is not None:
param = '{}-{}'.format(subdomain, url)
else:
param = url
res = await self.connection('GET', 'tournaments/{}'.format(param))
self._refresh_tournament_from_json(res)
found_t = self._find_tournament_by_id(res['tournament']['id'])
return found_t |
def from_filename(cls, path_string, origin=MISSING, **kwargs):
""" Read Sass source from a String specifying the path
"""
path = Path(path_string)
    return cls.from_path(path, origin, **kwargs) | Read Sass source from a String specifying the path | Below is the instruction that describes the task:
### Input:
Read Sass source from a String specifying the path
### Response:
def from_filename(cls, path_string, origin=MISSING, **kwargs):
""" Read Sass source from a String specifying the path
"""
path = Path(path_string)
return cls.from_path(path, origin, **kwargs) |
def calls(self):
"""Return a list of custom gate names in this gate body."""
lst = []
for children in self.children:
if children.type == "custom_unitary":
lst.append(children.name)
    return lst | Return a list of custom gate names in this gate body. | Below is the instruction that describes the task:
### Input:
Return a list of custom gate names in this gate body.
### Response:
def calls(self):
"""Return a list of custom gate names in this gate body."""
lst = []
for children in self.children:
if children.type == "custom_unitary":
lst.append(children.name)
return lst |
def _PrintProcessingTime(self, processing_status):
"""Prints the processing time.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
processing_time = '00:00:00'
else:
processing_time = time.time() - processing_status.start_time
time_struct = time.gmtime(processing_time)
processing_time = time.strftime('%H:%M:%S', time_struct)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time)) | Prints the processing time.
Args:
        processing_status (ProcessingStatus): processing status. | Below is the instruction that describes the task:
### Input:
Prints the processing time.
Args:
processing_status (ProcessingStatus): processing status.
### Response:
def _PrintProcessingTime(self, processing_status):
"""Prints the processing time.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
processing_time = '00:00:00'
else:
processing_time = time.time() - processing_status.start_time
time_struct = time.gmtime(processing_time)
processing_time = time.strftime('%H:%M:%S', time_struct)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time)) |
def createGroup(self, networkId, body, verbose=None):
"""
Create a new group in the network specified by the parameter `networkId`. The contents are specified the message body.
:param networkId: SUID of the Network
:param body: New Group name and contents
:param verbose: print more
:returns: 200: successful operation
"""
PARAMS=set_param(['networkId','body'],[networkId,body])
response=api(url=self.___url+'networks/'+str(networkId)+'/groups', PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Create a new group in the network specified by the parameter `networkId`. The contents are specified the message body.
:param networkId: SUID of the Network
:param body: New Group name and contents
:param verbose: print more
    :returns: 200: successful operation | Below is the instruction that describes the task:
### Input:
Create a new group in the network specified by the parameter `networkId`. The contents are specified the message body.
:param networkId: SUID of the Network
:param body: New Group name and contents
:param verbose: print more
:returns: 200: successful operation
### Response:
def createGroup(self, networkId, body, verbose=None):
"""
Create a new group in the network specified by the parameter `networkId`. The contents are specified the message body.
:param networkId: SUID of the Network
:param body: New Group name and contents
:param verbose: print more
:returns: 200: successful operation
"""
PARAMS=set_param(['networkId','body'],[networkId,body])
response=api(url=self.___url+'networks/'+str(networkId)+'/groups', PARAMS=PARAMS, method="POST", verbose=verbose)
return response |
def raise_exception_if(self, exname, condition, edata=None, _keys=None):
r"""
Raise exception conditionally.
:param exname: Exception name
:type exname: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*
:type condition: boolean
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:type edata: dictionary, iterable of dictionaries or None
:raises:
* RuntimeError (Argument \\`condition\\` is not valid)
* RuntimeError (Argument \\`edata\\` is not valid)
* RuntimeError (Argument \\`exname\\` is not valid)
* RuntimeError (Field *[field_name]* not in exception message)
* ValueError (Exception name *[name]* not found')
"""
# _edict is an argument used by the _ExObj class which saves a
# second exception look-up since the _ExObj class can save the
# call dictionary
if not isinstance(condition, bool):
raise RuntimeError("Argument `condition` is not valid")
if not self._validate_edata(edata):
raise RuntimeError("Argument `edata` is not valid")
if _keys is None:
if not isinstance(exname, str):
raise RuntimeError("Argument `exname` is not valid")
# Find exception object
func_id, func_name = self._get_ex_data()
name = "{0}{1}{2}".format(func_id, self._callables_separator, exname)
for key, value in self._ex_dict[func_id].items():
if value["name"] == name:
break
else:
raise ValueError(
"Exception name {exname} not found".format(exname=exname)
)
_keys = (func_id, key, func_name)
eobj = self._ex_dict[_keys[0]][_keys[1]]
if condition:
eobj["raised"][eobj["function"].index(_keys[2])] = True
self._raise_exception({"type": _keys[1][0], "msg": _keys[1][1]}, edata) | r"""
Raise exception conditionally.
:param exname: Exception name
:type exname: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*
:type condition: boolean
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:type edata: dictionary, iterable of dictionaries or None
:raises:
* RuntimeError (Argument \\`condition\\` is not valid)
* RuntimeError (Argument \\`edata\\` is not valid)
* RuntimeError (Argument \\`exname\\` is not valid)
* RuntimeError (Field *[field_name]* not in exception message)
     * ValueError (Exception name *[name]* not found') | Below is the instruction that describes the task:
### Input:
r"""
Raise exception conditionally.
:param exname: Exception name
:type exname: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*
:type condition: boolean
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:type edata: dictionary, iterable of dictionaries or None
:raises:
* RuntimeError (Argument \\`condition\\` is not valid)
* RuntimeError (Argument \\`edata\\` is not valid)
* RuntimeError (Argument \\`exname\\` is not valid)
* RuntimeError (Field *[field_name]* not in exception message)
* ValueError (Exception name *[name]* not found')
### Response:
def raise_exception_if(self, exname, condition, edata=None, _keys=None):
r"""
Raise exception conditionally.
:param exname: Exception name
:type exname: string
:param condition: Flag that indicates whether the exception is
raised *(True)* or not *(False)*
:type condition: boolean
:param edata: Replacement values for fields in the exception message
(see :py:meth:`pexdoc.ExHandle.add_exception` for how
to define fields). Each dictionary entry can only have
these two keys:
* **field** *(string)* -- Field name
* **value** *(any)* -- Field value, to be converted into
a string with the `format
<https://docs.python.org/2/library/stdtypes.html#
str.format>`_ string method
If None no field replacement is done
:type edata: dictionary, iterable of dictionaries or None
:raises:
* RuntimeError (Argument \\`condition\\` is not valid)
* RuntimeError (Argument \\`edata\\` is not valid)
* RuntimeError (Argument \\`exname\\` is not valid)
* RuntimeError (Field *[field_name]* not in exception message)
* ValueError (Exception name *[name]* not found')
"""
# _edict is an argument used by the _ExObj class which saves a
# second exception look-up since the _ExObj class can save the
# call dictionary
if not isinstance(condition, bool):
raise RuntimeError("Argument `condition` is not valid")
if not self._validate_edata(edata):
raise RuntimeError("Argument `edata` is not valid")
if _keys is None:
if not isinstance(exname, str):
raise RuntimeError("Argument `exname` is not valid")
# Find exception object
func_id, func_name = self._get_ex_data()
name = "{0}{1}{2}".format(func_id, self._callables_separator, exname)
for key, value in self._ex_dict[func_id].items():
if value["name"] == name:
break
else:
raise ValueError(
"Exception name {exname} not found".format(exname=exname)
)
_keys = (func_id, key, func_name)
eobj = self._ex_dict[_keys[0]][_keys[1]]
if condition:
eobj["raised"][eobj["function"].index(_keys[2])] = True
self._raise_exception({"type": _keys[1][0], "msg": _keys[1][1]}, edata) |
def get_func_sourcecode(func):
"""
Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
the original module code.
"""
def getsource(func):
lines, lnum = getsourcelines(func)
return ''.join(lines)
def getsourcelines(func):
lines, lnum = findsource(func)
return inspect.getblock(lines[lnum:]), lnum + 1
def findsource(func):
file = getfile(func) # file path
module = inspect.getmodule(func, file)
lines = linecache.getlines(file, module.__dict__)
code = func.__code__
lnum = code.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]):
break
lnum = lnum - 1 # pragma: no cover
return lines, lnum
def getfile(func):
module = inspect.getmodule(func)
return module.__file__
try:
return inspect.getsource(func)
except Exception:
return getsource(func) | Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
    the original module code. | Below is the instruction that describes the task:
### Input:
Try to get sourcecode using standard inspect.getsource().
If the function comes from a module which has been created dynamically
(not from the filesystem), then it tries to read the sourcecode on the
filesystem anyway.
WARNING: can do weird things if the filesystem code slightly differs from
the original module code.
### Response:
def get_func_sourcecode(func):
    """
    Try to get sourcecode using standard inspect.getsource().
    If the function comes from a module which has been created dynamically
    (not from the filesystem), then it tries to read the sourcecode on the
    filesystem anyway.
    WARNING: can do weird things if the filesystem code slightly differs from
    the original module code.
    """
    # The nested helpers below are simplified re-implementations of the
    # corresponding inspect internals; they read the source from disk via
    # linecache instead of trusting the (possibly dynamic) in-memory module.
    def getsource(func):
        # Join the function's block of source lines into one string.
        lines, lnum = getsourcelines(func)
        return ''.join(lines)
    def getsourcelines(func):
        # Return (lines of the function's block, 1-based starting line number).
        lines, lnum = findsource(func)
        return inspect.getblock(lines[lnum:]), lnum + 1
    def findsource(func):
        # Read the whole file, then scan backwards from the code object's
        # first line until a line that looks like a def / async def /
        # lambda / decorator is found.
        file = getfile(func) # file path
        module = inspect.getmodule(func, file)
        lines = linecache.getlines(file, module.__dict__)
        code = func.__code__
        lnum = code.co_firstlineno - 1
        pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]):
                break
            lnum = lnum - 1 # pragma: no cover
        return lines, lnum
    def getfile(func):
        # NOTE(review): assumes the function's module exposes __file__;
        # raises AttributeError for truly file-less modules -- confirm.
        module = inspect.getmodule(func)
        return module.__file__
    try:
        # Fast path: the standard library can usually locate the source.
        return inspect.getsource(func)
    except Exception:
        # Dynamic module: fall back to reading the file from disk.
        return getsource(func)
def get_meta_options(self, model):
"""
Returns a dictionary of Meta options for the
autdit log model.
"""
result = {
'ordering' : ('-action_date',),
'app_label' : model._meta.app_label,
}
from django.db.models.options import DEFAULT_NAMES
if 'default_permissions' in DEFAULT_NAMES:
result.update({'default_permissions': ()})
return result | Returns a dictionary of Meta options for the
autdit log model. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary of Meta options for the
autdit log model.
### Response:
def get_meta_options(self, model):
    """
    Build the Meta options dict for the audit log model.

    The audit entries are ordered newest-first and live in the same app
    as the tracked ``model``. On Django versions that support it,
    ``default_permissions`` is emptied so no add/change/delete
    permissions are auto-created for the log model.
    """
    meta_options = {
        'ordering' : ('-action_date',),
        'app_label' : model._meta.app_label,
    }
    from django.db.models.options import DEFAULT_NAMES
    if 'default_permissions' in DEFAULT_NAMES:
        meta_options['default_permissions'] = ()
    return meta_options
def bind(self, fn: "Callable[[Any], Reader]") -> 'Reader':
r"""Bind a monadic function to the Reader.
Haskell:
Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
Function: h >>= f = \w -> f (h w) w
"""
return Reader(lambda x: fn(self.run(x)).run(x)) | r"""Bind a monadic function to the Reader.
Haskell:
Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
Function: h >>= f = \w -> f (h w) w | Below is the the instruction that describes the task:
### Input:
r"""Bind a monadic function to the Reader.
Haskell:
Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
Function: h >>= f = \w -> f (h w) w
### Response:
def bind(self, fn: "Callable[[Any], Reader]") -> 'Reader':
    r"""Monadic bind for the Reader monad.

    Haskell:
    Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
    Function: h >>= f = \w -> f (h w) w
    """
    def bound(environment):
        # Run this Reader, feed the result to fn, and run the resulting
        # Reader against the same environment.
        intermediate = self.run(environment)
        return fn(intermediate).run(environment)
    return Reader(bound)
def _parse_feature(self, info):
"""Parse a feature command."""
parts = info.split(b'=', 1)
name = parts[0]
if len(parts) > 1:
value = self._path(parts[1])
else:
value = None
self.features[name] = value
return commands.FeatureCommand(name, value, lineno=self.lineno) | Parse a feature command. | Below is the the instruction that describes the task:
### Input:
Parse a feature command.
### Response:
def _parse_feature(self, info):
    """Parse a feature command of the form ``name`` or ``name=value``."""
    name, sep, raw_value = info.partition(b'=')
    # A value is present only when the '=' separator was found.
    value = self._path(raw_value) if sep else None
    self.features[name] = value
    return commands.FeatureCommand(name, value, lineno=self.lineno)
def prepare(self, input_files, bundle):
"""
:type input_files: list[static_bundle.files.StaticFileResult]
:type bundle: static_bundle.bundles.AbstractBundle
:rtype: list
"""
out = []
for input_file in input_files:
if input_file.extension == "less" and os.path.isfile(input_file.abs_path):
output_file = self.get_compile_file(input_file, bundle)
self.compile(input_file, output_file)
out.append(output_file)
else:
out.append(input_file)
return out | :type input_files: list[static_bundle.files.StaticFileResult]
:type bundle: static_bundle.bundles.AbstractBundle
:rtype: list | Below is the the instruction that describes the task:
### Input:
:type input_files: list[static_bundle.files.StaticFileResult]
:type bundle: static_bundle.bundles.AbstractBundle
:rtype: list
### Response:
def prepare(self, input_files, bundle):
    """
    Compile every ``.less`` file that exists on disk and pass all other
    files through untouched.

    :type input_files: list[static_bundle.files.StaticFileResult]
    :type bundle: static_bundle.bundles.AbstractBundle
    :rtype: list
    """
    prepared = []
    for source_file in input_files:
        needs_compile = (source_file.extension == "less"
                         and os.path.isfile(source_file.abs_path))
        if not needs_compile:
            prepared.append(source_file)
            continue
        compiled = self.get_compile_file(source_file, bundle)
        self.compile(source_file, compiled)
        prepared.append(compiled)
    return prepared
def _generate_union_tag_vars_funcs(self, union):
"""Emits the getter methods for retrieving tag-specific state. Setters throw
an error in the event an associated tag state variable is accessed without
the correct tag state."""
for field in union.all_fields:
if not is_void_type(field.data_type):
enum_field_name = fmt_enum_name(field.name, union)
with self.block_func(
func=fmt_camel(field.name),
args=[],
return_type=fmt_type(field.data_type)):
with self.block(
'if (![self is{}])'.format(
fmt_camel_upper(field.name)),
delim=('{', '}')):
error_msg = 'Invalid tag: required {}, but was %@.'.format(
enum_field_name)
throw_exc = (
'[NSException raise:@"IllegalStateException" '
'format:@"{}", [self tagName]];')
self.emit(throw_exc.format(error_msg))
self.emit('return _{};'.format(fmt_var(field.name)))
self.emit() | Emits the getter methods for retrieving tag-specific state. Setters throw
an error in the event an associated tag state variable is accessed without
the correct tag state. | Below is the the instruction that describes the task:
### Input:
Emits the getter methods for retrieving tag-specific state. Setters throw
an error in the event an associated tag state variable is accessed without
the correct tag state.
### Response:
def _generate_union_tag_vars_funcs(self, union):
    """Emits the getter methods for retrieving tag-specific state. Setters throw
    an error in the event an associated tag state variable is accessed without
    the correct tag state."""
    # Void-typed tags carry no associated state, so no getter is emitted.
    for field in union.all_fields:
        if not is_void_type(field.data_type):
            enum_field_name = fmt_enum_name(field.name, union)
            # Emit an Objective-C getter: - (FieldType)fieldName { ... }
            with self.block_func(
                    func=fmt_camel(field.name),
                    args=[],
                    return_type=fmt_type(field.data_type)):
                # Guard: accessing the value while a different tag is set
                # raises an IllegalStateException at runtime.
                with self.block(
                        'if (![self is{}])'.format(
                            fmt_camel_upper(field.name)),
                        delim=('{', '}')):
                    error_msg = 'Invalid tag: required {}, but was %@.'.format(
                        enum_field_name)
                    throw_exc = (
                        '[NSException raise:@"IllegalStateException" '
                        'format:@"{}", [self tagName]];')
                    self.emit(throw_exc.format(error_msg))
                self.emit('return _{};'.format(fmt_var(field.name)))
            self.emit()
def get_cell_umi_read_string(read_id, sep="_"):
''' extract the umi and cell barcode from the read id (input as a
string) using the specified separator '''
try:
return (read_id.split(sep)[-1].encode('utf-8'),
read_id.split(sep)[-2].encode('utf-8'))
except IndexError:
raise ValueError(
"Could not extract UMI or CB from the read ID, please"
"check UMI and CB are encoded in the read name:"
"%s" % read_id) | extract the umi and cell barcode from the read id (input as a
string) using the specified separator | Below is the the instruction that describes the task:
### Input:
extract the umi and cell barcode from the read id (input as a
string) using the specified separator
### Response:
def get_cell_umi_read_string(read_id, sep="_"):
    '''Extract the UMI and cell barcode from a read id string.

    The read id is expected to end with ``...<sep><CB><sep><UMI>``; the
    last field is the UMI and the second-to-last the cell barcode.

    Parameters
    ----------
    read_id : str
        The read identifier to parse.
    sep : str
        Separator between the encoded fields (default ``"_"``).

    Returns
    -------
    tuple of bytes
        ``(umi, cell_barcode)``, both UTF-8 encoded.

    Raises
    ------
    ValueError
        If the read id does not contain enough separated fields.
    '''
    # Split once instead of twice, as in the original implementation.
    fields = read_id.split(sep)
    try:
        return (fields[-1].encode('utf-8'),
                fields[-2].encode('utf-8'))
    except IndexError:
        # Fixed: the original message lacked spaces between the
        # concatenated literals ("pleasecheck", "name:%s").
        raise ValueError(
            "Could not extract UMI or CB from the read ID, please "
            "check UMI and CB are encoded in the read name: "
            "%s" % read_id)
def write_PIA0_B_control(self, cpu_cycles, op_address, address, value):
"""
write to 0xff03 -> PIA 0 B side Control reg.
TODO: Handle IRQ
bit 7 | IRQ 1 (VSYNC) flag
bit 6 | IRQ 2 flag(not used)
bit 5 | Control line 2 (CB2) is an output = 1
bit 4 | Control line 2 (CB2) set by bit 3 = 1
bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
"""
log.critical(
"%04x| write $%02x (%s) to $%04x -> PIA 0 B side Control reg.\t|%s",
op_address, value, byte2bit_string(value),
address, self.cfg.mem_info.get_shortest(op_address)
)
if is_bit_set(value, bit=0):
log.critical(
"%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: enable\t|%s",
op_address, value, byte2bit_string(value),
address, self.cfg.mem_info.get_shortest(op_address)
)
self.cpu.irq_enabled = True
value = set_bit(value, bit=7)
else:
log.critical(
"%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: disable\t|%s",
op_address, value, byte2bit_string(value),
address, self.cfg.mem_info.get_shortest(op_address)
)
self.cpu.irq_enabled = False
if not is_bit_set(value, bit=2):
self.pia_0_B_control.select_pdr()
else:
self.pia_0_B_control.deselect_pdr()
self.pia_0_B_control.set(value) | write to 0xff03 -> PIA 0 B side Control reg.
TODO: Handle IRQ
bit 7 | IRQ 1 (VSYNC) flag
bit 6 | IRQ 2 flag(not used)
bit 5 | Control line 2 (CB2) is an output = 1
bit 4 | Control line 2 (CB2) set by bit 3 = 1
bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ | Below is the the instruction that describes the task:
### Input:
write to 0xff03 -> PIA 0 B side Control reg.
TODO: Handle IRQ
bit 7 | IRQ 1 (VSYNC) flag
bit 6 | IRQ 2 flag(not used)
bit 5 | Control line 2 (CB2) is an output = 1
bit 4 | Control line 2 (CB2) set by bit 3 = 1
bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
### Response:
def write_PIA0_B_control(self, cpu_cycles, op_address, address, value):
    """
    write to 0xff03 -> PIA 0 B side Control reg.
    TODO: Handle IRQ
    bit 7 | IRQ 1 (VSYNC) flag
    bit 6 | IRQ 2 flag(not used)
    bit 5 | Control line 2 (CB2) is an output = 1
    bit 4 | Control line 2 (CB2) set by bit 3 = 1
    bit 3 | select line MSB of analog multiplexor (MUX): 0 = control line 2 LO / 1 = control line 2 HI
    bit 2 | set data direction: 0 = $FF02 is DDR / 1 = $FF02 is normal data lines
    bit 1 | control line 1 (CB1): IRQ polarity 0 = IRQ on HI to LO / 1 = IRQ on LO to HI
    bit 0 | VSYNC IRQ: 0 = disable IRQ / 1 = enable IRQ
    """
    log.critical(
        "%04x| write $%02x (%s) to $%04x -> PIA 0 B side Control reg.\t|%s",
        op_address, value, byte2bit_string(value),
        address, self.cfg.mem_info.get_shortest(op_address)
    )
    # Bit 0 enables/disables the VSYNC interrupt on the attached CPU.
    if is_bit_set(value, bit=0):
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: enable\t|%s",
            op_address, value, byte2bit_string(value),
            address, self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = True
        # NOTE(review): bit 7 (the IRQ 1 flag) is forced high whenever the
        # IRQ is enabled -- confirm against the PIA datasheet.
        value = set_bit(value, bit=7)
    else:
        log.critical(
            "%04x| write $%02x (%s) to $%04x -> VSYNC IRQ: disable\t|%s",
            op_address, value, byte2bit_string(value),
            address, self.cfg.mem_info.get_shortest(op_address)
        )
        self.cpu.irq_enabled = False
    # Bit 2 chooses whether $FF02 acts as the data direction register (DDR)
    # or as the peripheral data register.
    # NOTE(review): per the table above, bit 2 == 0 means "$FF02 is DDR",
    # yet select_pdr() is called on the cleared bit -- verify the helper's
    # naming/semantics.
    if not is_bit_set(value, bit=2):
        self.pia_0_B_control.select_pdr()
    else:
        self.pia_0_B_control.deselect_pdr()
    # Store the (possibly flag-updated) value in the control register.
    self.pia_0_B_control.set(value)
def add_accounts_to_institute(accounts_query, institute):
""" Add accounts to institute. """
query = accounts_query.filter(date_deleted__isnull=True)
for account in query:
add_account_to_institute(account, institute) | Add accounts to institute. | Below is the the instruction that describes the task:
### Input:
Add accounts to institute.
### Response:
def add_accounts_to_institute(accounts_query, institute):
    """ Add accounts to institute. """
    # Only accounts that have not been soft-deleted are added.
    active_accounts = accounts_query.filter(date_deleted__isnull=True)
    for active_account in active_accounts:
        add_account_to_institute(active_account, institute)
def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop an image.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : int
Size of width.
hrg : int
Size of height.
is_random : boolean,
If True, randomly crop, else central crop. Default is False.
row_index: int
index of row.
col_index: int
index of column.
Returns
-------
numpy.array
A processed image.
"""
h, w = x.shape[row_index], x.shape[col_index]
if (h < hrg) or (w < wrg):
raise AssertionError("The size of cropping should smaller than or equal to the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg))
w_offset = int(np.random.uniform(0, w - wrg))
# tl.logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape)
return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]
else: # central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
h_end = h_offset + hrg
w_end = w_offset + wrg
return x[h_offset:h_end, w_offset:w_end] | Randomly or centrally crop an image.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : int
Size of width.
hrg : int
Size of height.
is_random : boolean,
If True, randomly crop, else central crop. Default is False.
row_index: int
index of row.
col_index: int
index of column.
Returns
-------
numpy.array
A processed image. | Below is the the instruction that describes the task:
### Input:
Randomly or centrally crop an image.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : int
Size of width.
hrg : int
Size of height.
is_random : boolean,
If True, randomly crop, else central crop. Default is False.
row_index: int
index of row.
col_index: int
index of column.
Returns
-------
numpy.array
A processed image.
### Response:
def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
    """Randomly or centrally crop an image.

    Parameters
    ----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    wrg : int
        Width of the crop window.
    hrg : int
        Height of the crop window.
    is_random : boolean
        If True, pick a random offset; otherwise crop from the center.
        Default is False.
    row_index : int
        Index of the row axis in ``x.shape``.
    col_index : int
        Index of the column axis in ``x.shape``.

    Returns
    -------
    numpy.array
        The cropped image.
    """
    height, width = x.shape[row_index], x.shape[col_index]
    if hrg > height or wrg > width:
        raise AssertionError("The size of cropping should smaller than or equal to the original image")
    if is_random:
        # Draw the top-left corner uniformly from the valid offsets.
        top = int(np.random.uniform(0, height - hrg))
        left = int(np.random.uniform(0, width - wrg))
    else:
        # Center the crop window.
        top = int(np.floor((height - hrg) / 2.))
        left = int(np.floor((width - wrg) / 2.))
    # Slicing always uses the first two axes, mirroring the original
    # implementation (row_index/col_index affect only the size lookup).
    return x[top:top + hrg, left:left + wrg]
def add(config, username, filename):
"""Add user's SSH public key to their LDAP entry."""
try:
client = Client()
client.prepare_connection()
user_api = UserApi(client)
key_api = API(client)
key_api.add(username, user_api, filename)
except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
ldap_tools.exceptions.InvalidResult,
ldap3.core.exceptions.LDAPAttributeOrValueExistsResult
) as err: # pragma: no cover
print('{}: {}'.format(type(err), err.args[0]))
except Exception as err: # pragma: no cover
raise err from None | Add user's SSH public key to their LDAP entry. | Below is the the instruction that describes the task:
### Input:
Add user's SSH public key to their LDAP entry.
### Response:
def add(config, username, filename):
    """Add user's SSH public key to their LDAP entry."""
    try:
        ldap_client = Client()
        ldap_client.prepare_connection()
        users = UserApi(ldap_client)
        keys = API(ldap_client)
        keys.add(username, users, filename)
    except (ldap3.core.exceptions.LDAPNoSuchAttributeResult,
            ldap_tools.exceptions.InvalidResult,
            ldap3.core.exceptions.LDAPAttributeOrValueExistsResult
            ) as err:  # pragma: no cover
        # Expected LDAP failures are reported instead of propagated.
        print('{}: {}'.format(type(err), err.args[0]))
    except Exception as err:  # pragma: no cover
        # Anything else is re-raised with its context suppressed.
        raise err from None
def extract_lzma(path):
"""Extract an lzma file and return the temporary file name"""
tlfile = pathlib.Path(path)
# open lzma file
with tlfile.open("rb") as td:
data = lzma.decompress(td.read())
# write temporary tar file
fd, tmpname = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
with open(fd, "wb") as fo:
fo.write(data)
return tmpname | Extract an lzma file and return the temporary file name | Below is the the instruction that describes the task:
### Input:
Extract an lzma file and return the temporary file name
### Response:
def extract_lzma(path):
    """Extract an lzma file and return the temporary file name"""
    archive = pathlib.Path(path)
    # Decompress the whole archive into memory.
    with archive.open("rb") as compressed:
        payload = lzma.decompress(compressed.read())
    # Dump the decompressed payload into a fresh temporary tar file.
    handle, tar_name = tempfile.mkstemp(prefix="odt_ex_", suffix=".tar")
    with open(handle, "wb") as tar_file:
        tar_file.write(payload)
    return tar_name
def check_image_format(fformat):
"""Check that *fformat* is valid
"""
cases = {"jpg": "jpeg",
"jpeg": "jpeg",
"tif": "tiff",
"tiff": "tif",
"pgm": "ppm",
"pbm": "ppm",
"ppm": "ppm",
"bmp": "bmp",
"dib": "bmp",
"gif": "gif",
"im": "im",
"pcx": "pcx",
"png": "png",
"xbm": "xbm",
"xpm": "xpm",
'jp2': 'jp2',
}
fformat = fformat.lower()
try:
fformat = cases[fformat]
except KeyError:
raise UnknownImageFormat("Unknown image format '%s'." % fformat)
return fformat | Check that *fformat* is valid | Below is the the instruction that describes the task:
### Input:
Check that *fformat* is valid
### Response:
def check_image_format(fformat):
    """Check that *fformat* is valid
    """
    # Canonical name for each accepted file-format alias.
    # NOTE(review): "tif" maps to "tiff" while "tiff" maps to "tif" -- this
    # asymmetry is preserved from the original; confirm it is intentional.
    aliases = {"jpg": "jpeg",
               "jpeg": "jpeg",
               "tif": "tiff",
               "tiff": "tif",
               "pgm": "ppm",
               "pbm": "ppm",
               "ppm": "ppm",
               "bmp": "bmp",
               "dib": "bmp",
               "gif": "gif",
               "im": "im",
               "pcx": "pcx",
               "png": "png",
               "xbm": "xbm",
               "xpm": "xpm",
               'jp2': 'jp2',
               }
    lowered = fformat.lower()
    if lowered not in aliases:
        raise UnknownImageFormat("Unknown image format '%s'." % lowered)
    return aliases[lowered]
def get_parents(self):
"""
Returns the parents of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
"""
variable_parents = {definition.find('FOR').text: [edge.text for edge in definition.findall('GIVEN')]
for definition in self.network.findall('DEFINITION')}
return variable_parents | Returns the parents of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']} | Below is the the instruction that describes the task:
### Input:
Returns the parents of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
### Response:
def get_parents(self):
    """
    Return a dictionary mapping every variable in the network to the
    list of its parent variables.

    Examples
    --------
    >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
    >>> reader.get_parents()
    {'bowel-problem': [],
     'dog-out': ['family-out', 'bowel-problem'],
     'family-out': [],
     'hear-bark': ['dog-out'],
     'light-on': ['family-out']}
    """
    parents = {}
    for definition in self.network.findall('DEFINITION'):
        child = definition.find('FOR').text
        parents[child] = [given.text for given in definition.findall('GIVEN')]
    return parents
def services(self):
"""
:rtype: twilio.rest.messaging.v1.service.ServiceList
"""
if self._services is None:
self._services = ServiceList(self)
return self._services | :rtype: twilio.rest.messaging.v1.service.ServiceList | Below is the the instruction that describes the task:
### Input:
:rtype: twilio.rest.messaging.v1.service.ServiceList
### Response:
def services(self):
    """
    :rtype: twilio.rest.messaging.v1.service.ServiceList
    """
    # Build the ServiceList lazily on first access and reuse it afterwards.
    cached = self._services
    if cached is None:
        cached = ServiceList(self)
        self._services = cached
    return cached
def setup_regex(self):
"""Sets up the patterns and regex objects for parsing the docstrings."""
#Regex for grabbing out valid XML tags that represent known docstrings that we can work with.
self.keywords = [ "summary", "usage", "errors", "member", "group", "local",
"comments", "parameter" ]
#Regex for extracting the contents of docstrings minus the !! and any leading spaces.
self._RX_DOCS = "^\s*!!(?P<docstring>.+?)$"
self.RE_DOCS = re.compile(self._RX_DOCS, re.M)
#Regex for handling cross references in the documentation
self._RX_REFS = r"@CREF\[(?P<reference>[^\]]+)\]"
self.RE_REFS = re.compile(self._RX_REFS)
#Regex to match first lines of declarations for code elements that can be
#decorated by docstrings.
self._RX_DECOR = (r"((?P<type>character|real|type|logical|integer|complex)?"
r"(?P<kind>\([a-z0-9_]+\))?)?(,?(?P<modifiers>[^\n]+?))?"
r"\s*(?P<codetype>subroutine|function|type|module|interface)\s+(?P<name>[^(]+)")
self.RE_DECOR = re.compile(self._RX_DECOR, re.I)
#Regex for getting the docstrings decorating one or more modules in a code file,
#Since they aren't contained inside any other code element, we can't just use
#the normal docblocks routines.
self._RX_MODDOCS = (r"^(?P<docstring>\s*!!.+?)\n\s*module\s+(?P<name>[A-Za-z0-9_]+)"
".+?end\s+module(\s+(?P=name))?")
self.RE_MODDOCS = re.compile(self._RX_MODDOCS, re.DOTALL | re.I) | Sets up the patterns and regex objects for parsing the docstrings. | Below is the the instruction that describes the task:
### Input:
Sets up the patterns and regex objects for parsing the docstrings.
### Response:
def setup_regex(self):
    """Sets up the patterns and regex objects for parsing the docstrings.

    Stores the compiled patterns on the instance (RE_DOCS, RE_REFS,
    RE_DECOR, RE_MODDOCS) alongside their raw pattern strings (_RX_*).
    All pattern literals are raw strings so sequences such as '\\s' are
    not treated as (invalid) string escapes by newer Python versions.
    """
    #Regex for grabbing out valid XML tags that represent known docstrings that we can work with.
    self.keywords = [ "summary", "usage", "errors", "member", "group", "local",
                      "comments", "parameter" ]
    #Regex for extracting the contents of docstrings minus the !! and any leading spaces.
    self._RX_DOCS = r"^\s*!!(?P<docstring>.+?)$"
    self.RE_DOCS = re.compile(self._RX_DOCS, re.M)
    #Regex for handling cross references in the documentation
    self._RX_REFS = r"@CREF\[(?P<reference>[^\]]+)\]"
    self.RE_REFS = re.compile(self._RX_REFS)
    #Regex to match first lines of declarations for code elements that can be
    #decorated by docstrings.
    self._RX_DECOR = (r"((?P<type>character|real|type|logical|integer|complex)?"
                      r"(?P<kind>\([a-z0-9_]+\))?)?(,?(?P<modifiers>[^\n]+?))?"
                      r"\s*(?P<codetype>subroutine|function|type|module|interface)\s+(?P<name>[^(]+)")
    self.RE_DECOR = re.compile(self._RX_DECOR, re.I)
    #Regex for getting the docstrings decorating one or more modules in a code file,
    #Since they aren't contained inside any other code element, we can't just use
    #the normal docblocks routines.
    self._RX_MODDOCS = (r"^(?P<docstring>\s*!!.+?)\n\s*module\s+(?P<name>[A-Za-z0-9_]+)"
                        r".+?end\s+module(\s+(?P=name))?")
    self.RE_MODDOCS = re.compile(self._RX_MODDOCS, re.DOTALL | re.I)
def smart_open_write(path=None, mode='wb', encoding=None):
"""Open a file for writing or return ``stdout``.
Adapted from StackOverflow user "Wolph"
(http://stackoverflow.com/a/17603000).
"""
if path is not None:
# open a file
fh = io.open(path, mode=mode, encoding=encoding)
else:
# open stdout
fh = io.open(sys.stdout.fileno(), mode=mode, encoding=encoding)
#fh = sys.stdout
try:
yield fh
finally:
# make sure we don't close stdout
if fh.fileno() != sys.stdout.fileno():
fh.close() | Open a file for writing or return ``stdout``.
Adapted from StackOverflow user "Wolph"
(http://stackoverflow.com/a/17603000). | Below is the the instruction that describes the task:
### Input:
Open a file for writing or return ``stdout``.
Adapted from StackOverflow user "Wolph"
(http://stackoverflow.com/a/17603000).
### Response:
def smart_open_write(path=None, mode='wb', encoding=None):
    """Open a file for writing or return ``stdout``.
    Adapted from StackOverflow user "Wolph"
    (http://stackoverflow.com/a/17603000).
    """
    if path is None:
        # No path: wrap the stdout file descriptor instead.
        fh = io.open(sys.stdout.fileno(), mode=mode, encoding=encoding)
    else:
        fh = io.open(path, mode=mode, encoding=encoding)
    try:
        yield fh
    finally:
        # Never close stdout itself; only close real files.
        if fh.fileno() != sys.stdout.fileno():
            fh.close()
def token(self):
"""
Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing.
"""
if self.next_:
t = self.next_
self.next_ = None
return t
while True:
t = self.lexer.token()
if not t:
return t
if t.type == 't_ws' and (
self.pretok or
(self.last and self.last.type not in self.significant_ws)):
continue
self.pretok = False
if t.type == 't_bclose' and self.last and self.last.type not in ['t_bopen', 't_bclose'] and self.last.type != 't_semicolon' \
and not (hasattr(t, 'lexer') and (t.lexer.lexstate == 'escapequotes' or t.lexer.lexstate == 'escapeapostrophe')):
self.next_ = t
tok = lex.LexToken()
tok.type = 't_semicolon'
tok.value = ';'
tok.lineno = t.lineno
tok.lexpos = t.lexpos
self.last = tok
self.lexer.in_property_decl = False
return tok
self.last = t
break
return t | Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing. | Below is the the instruction that describes the task:
### Input:
Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing.
### Response:
def token(self):
    """
    Token function. Contains 2 hacks:
    1. Injects ';' into blocks where the last property
       leaves out the ;
    2. Strips out whitespace from nonsignificant locations
       to ease parsing.
    """
    # Serve a token injected by a previous call (the held-back '}') first.
    if self.next_:
        t = self.next_
        self.next_ = None
        return t
    while True:
        t = self.lexer.token()
        if not t:
            return t
        # Hack 2: drop whitespace tokens unless the previous token's type
        # marks the whitespace as significant.
        if t.type == 't_ws' and (
            self.pretok or
            (self.last and self.last.type not in self.significant_ws)):
            continue
        self.pretok = False
        # Hack 1: a block closes right after a property that lacks its
        # trailing ';' -- emit a synthetic ';' token now and keep the '}'
        # in self.next_ for the following call. Skipped inside escaped
        # quote/apostrophe lexer states.
        if t.type == 't_bclose' and self.last and self.last.type not in ['t_bopen', 't_bclose'] and self.last.type != 't_semicolon' \
            and not (hasattr(t, 'lexer') and (t.lexer.lexstate == 'escapequotes' or t.lexer.lexstate == 'escapeapostrophe')):
            self.next_ = t
            # Fabricate the ';' token at the position of the '}'.
            tok = lex.LexToken()
            tok.type = 't_semicolon'
            tok.value = ';'
            tok.lineno = t.lineno
            tok.lexpos = t.lexpos
            self.last = tok
            self.lexer.in_property_decl = False
            return tok
        self.last = t
        break
    return t
def upload_logs(self, release_singleton=True):
'''
uploads a log to a server using the method and gui specifed
in self.pcfg
singleton mode can be disabled so a new version can be restarted whille uploading oges
on typicallin in the case of uploadnig after a crash or sys exit
set self.cfg log_upload_interface to gui/cli/or background
'''
if release_singleton:
self.release_singleton()
def _upload():
for log in self.get_logs():
new_name = self._uniquename(log)
self._upload(log, new_name)
# Todo: keep log around for a few days
self.delete_log(log)
if self.pcfg['log_server_interface'] == 'gui':
raise NotImplementedError
threading.Thread(target=threadme)
gui_uploader(threadme)
elif self.pcfg['log_server_interface'] == 'cli':
raise NotImplementedError
elif self.pcfg['log_server_interface'] == 'background':
_upload() | uploads a log to a server using the method and gui specifed
in self.pcfg
singleton mode can be disabled so a new version can be restarted whille uploading oges
on typicallin in the case of uploadnig after a crash or sys exit
set self.cfg log_upload_interface to gui/cli/or background | Below is the the instruction that describes the task:
### Input:
uploads a log to a server using the method and gui specifed
in self.pcfg
singleton mode can be disabled so a new version can be restarted whille uploading oges
on typicallin in the case of uploadnig after a crash or sys exit
set self.cfg log_upload_interface to gui/cli/or background
### Response:
def upload_logs(self, release_singleton=True):
    '''
    Upload collected log files to a server using the interface configured
    in ``self.pcfg['log_server_interface']`` (gui / cli / background).

    Singleton mode can be released first so that a new instance can start
    while the upload runs (typical when uploading after a crash or
    sys.exit). Only the 'background' interface is implemented.
    '''
    if release_singleton:
        self.release_singleton()
    # Local helper (note: shadows the name of the self._upload method it
    # calls): upload each log under a unique name, then delete it.
    def _upload():
        for log in self.get_logs():
            new_name = self._uniquename(log)
            self._upload(log, new_name)
            # Todo: keep log around for a few days
            self.delete_log(log)
    if self.pcfg['log_server_interface'] == 'gui':
        raise NotImplementedError
        # NOTE(review): the two lines below are unreachable (they follow
        # the raise) and reference an undefined name 'threadme'.
        threading.Thread(target=threadme)
        gui_uploader(threadme)
    elif self.pcfg['log_server_interface'] == 'cli':
        raise NotImplementedError
    elif self.pcfg['log_server_interface'] == 'background':
        _upload()
def _clean(self, n):
"""
obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs].
"""
# delete the n % oldest entries in the database
import sqlite3
num_delete = int(self.num_entries / 100.0 * n)
logger.debug("removing %i entries from db" % num_delete)
lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
lru_dbs.sort(key=itemgetter(1))
hashs_by_db = {}
age_by_hash = []
for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
hashs_by_db[k] = list(x[0] for x in v)
# debug: distribution
len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
logger.debug("distribution of lru: %s", str(len_by_db))
### end dbg
# collect timestamps from databases
for db in hashs_by_db.keys():
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
rows = conn.execute("select hash, last_read from usage").fetchall()
for r in rows:
age_by_hash.append((r[0], float(r[1]), db))
# sort by age
age_by_hash.sort(key=itemgetter(1))
if len(age_by_hash)>=2:
assert[age_by_hash[-1] > age_by_hash[-2]]
ids = map(itemgetter(0), age_by_hash[:num_delete])
ids = tuple(map(str, ids))
sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
with self._database as c:
c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)
# iterate over all LRU databases and delete those ids, we've just deleted from the main db.
# Do this within the same execution block of the main database, because we do not want the entry to be deleted,
# in case of a subsequent failure.
age_by_hash.sort(key=itemgetter(2))
for db, values in itertools.groupby(age_by_hash, key=itemgetter(2)):
values = tuple(v[0] for v in values)
with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
% SqliteDB._format_tuple_for_sql(values)
curr = conn.execute(stmnt)
assert curr.rowcount == len(values), curr.rowcount | obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs]. | Below is the the instruction that describes the task:
### Input:
obtain n% oldest entries by looking into the usage databases. Then these entries
are deleted first from the traj_info db and afterwards from the associated LRU dbs.
:param n: delete n% entries in traj_info db [and associated LRU (usage) dbs].
### Response:
def _clean(self, n):
    """
    obtain n% oldest entries by looking into the usage databases. Then these entries
    are deleted first from the traj_info db and afterwards from the associated LRU dbs.
    :param n: delete n% entries in traj_info db [and associated LRU (usage) dbs].
    """
    # delete the n % oldest entries in the database
    import sqlite3
    num_delete = int(self.num_entries / 100.0 * n)
    logger.debug("removing %i entries from db" % num_delete)
    # Map every traj_info hash to the LRU database file that tracks its usage.
    lru_dbs = self._database.execute("select hash, lru_db from traj_info").fetchall()
    lru_dbs.sort(key=itemgetter(1))
    hashs_by_db = {}
    age_by_hash = []
    # groupby requires the input sorted by the same key (done above).
    for k, v in itertools.groupby(lru_dbs, key=itemgetter(1)):
        hashs_by_db[k] = list(x[0] for x in v)
    # debug: distribution
    len_by_db = {os.path.basename(db): len(hashs_by_db[db]) for db in hashs_by_db.keys()}
    logger.debug("distribution of lru: %s", str(len_by_db))
    ### end dbg
    # collect timestamps from databases
    for db in hashs_by_db.keys():
        with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
            rows = conn.execute("select hash, last_read from usage").fetchall()
            for r in rows:
                age_by_hash.append((r[0], float(r[1]), db))
    # sort by age
    age_by_hash.sort(key=itemgetter(1))
    if len(age_by_hash)>=2:
        # NOTE(review): 'assert[...]' asserts the truthiness of a one-element
        # list, which is always True -- the intended ordering check
        # 'age_by_hash[-1] > age_by_hash[-2]' is never actually evaluated.
        assert[age_by_hash[-1] > age_by_hash[-2]]
    # The num_delete oldest hashes are removed from the main database.
    ids = map(itemgetter(0), age_by_hash[:num_delete])
    ids = tuple(map(str, ids))
    sql_compatible_ids = SqliteDB._format_tuple_for_sql(ids)
    with self._database as c:
        c.execute("DELETE FROM traj_info WHERE hash in (%s)" % sql_compatible_ids)
    # iterate over all LRU databases and delete those ids, we've just deleted from the main db.
    # Do this within the same execution block of the main database, because we do not want the entry to be deleted,
    # in case of a subsequent failure.
    age_by_hash.sort(key=itemgetter(2))
    for db, values in itertools.groupby(age_by_hash, key=itemgetter(2)):
        values = tuple(v[0] for v in values)
        with sqlite3.connect(db, timeout=self.lru_timeout) as conn:
            stmnt = "DELETE FROM usage WHERE hash IN (%s)" \
                    % SqliteDB._format_tuple_for_sql(values)
            curr = conn.execute(stmnt)
            assert curr.rowcount == len(values), curr.rowcount
def save_xml(self, doc, element):
'''Save this participant into an xml.dom.Element object.'''
new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Participant')
self.target_component.save_xml(doc, new_element)
element.appendChild(new_element) | Save this participant into an xml.dom.Element object. | Below is the the instruction that describes the task:
### Input:
Save this participant into an xml.dom.Element object.
### Response:
def save_xml(self, doc, element):
    '''Save this participant into an xml.dom.Element object.

    A new ``Participant`` element is created in the RTS namespace,
    populated from the target component, and appended to ``element``.
    '''
    participant = doc.createElementNS(RTS_NS, RTS_NS_S + 'Participant')
    self.target_component.save_xml(doc, participant)
    element.appendChild(participant)
def sample(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
""" Sample experience from replay buffer and return a batch """
if self.forward_steps > 1:
transitions = self.replay_buffer.sample_forward_transitions(
batch_size=number_of_steps, batch_info=batch_info, forward_steps=self.forward_steps,
discount_factor=self.discount_factor
)
else:
transitions = self.replay_buffer.sample_transitions(batch_size=number_of_steps, batch_info=batch_info)
if self.ret_rms is not None:
rewards = transitions.transition_tensors['rewards']
new_rewards = torch.clamp(rewards / np.sqrt(self.ret_rms.var + 1e-8), -self.clip_obs, self.clip_obs)
transitions.transition_tensors['rewards'] = new_rewards
return transitions | Sample experience from replay buffer and return a batch | Below is the the instruction that describes the task:
### Input:
Sample experience from replay buffer and return a batch
### Response:
def sample(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
    """Draw a batch of transitions from the replay buffer.

    Uses multi-step (forward) sampling when ``forward_steps > 1`` and,
    when return normalization statistics are available, rescales and
    clips the sampled rewards in place.
    """
    if self.forward_steps > 1:
        batch = self.replay_buffer.sample_forward_transitions(
            batch_size=number_of_steps,
            batch_info=batch_info,
            forward_steps=self.forward_steps,
            discount_factor=self.discount_factor,
        )
    else:
        batch = self.replay_buffer.sample_transitions(
            batch_size=number_of_steps, batch_info=batch_info
        )
    if self.ret_rms is not None:
        raw_rewards = batch.transition_tensors['rewards']
        # normalize by the running return std (epsilon for stability), then clip
        scale = np.sqrt(self.ret_rms.var + 1e-8)
        batch.transition_tensors['rewards'] = torch.clamp(
            raw_rewards / scale, -self.clip_obs, self.clip_obs
        )
    return batch
def numenta_new_local_inhibition(self, X, Y):
r"""
Weight updates for the new local inhibition procedure (experimental and work in progress).
See "Numenta’s local inhibition revisited" (Section 6) in `latex/notes.pdf`.
Note that we directly encode the "activation probability" $a_{ij}$ as $h_{ij}$.
This is just a temporary hack. In Consequence, the hidden-to-hidden connections
are NOT symmetric anymore!
"""
batchSize = len(X)
W = self.connections.visible_to_hidden
H = self.connections.hidden_to_hidden
n, m = W.shape
bias = self.connections.hidden_bias
incr = self.weight_incr
decr = self.weight_decr
boost_bias = self.boost_strength_bias
boost_hidden = self.boost_strength_hidden
alpha = self.average_activity
#---------------------------
# visible-to-hidden updates
#---------------------------
for i in range(batchSize):
y = Y[i]
x = X[i]
x_bar = np.ones(m) - x
# Hebbian-like update
W[ np.where(y == 1)[0] ] += incr*x - decr*x_bar
# Clip the visible-to-hidden connections
# to be between $0$ and $1$
tooSmall = np.where(W < 0.)
tooBig = np.where(W > 1.)
W[ tooSmall ] = 0.
W[ tooBig ] = 1.
#---------------
# (Hidden) Bias
#---------------
for i in range(n):
bias[i] = boost_bias * alpha[i,i]
#--------------------------
# Hidden-to-hidden updates
#--------------------------
for i in range(n):
for j in range(n):
H[i,j] = alpha[i,j] /( np.sum( alpha[i,:] ) - alpha[i,i] )
for i in range(n):
H[i,i] = 0. | r"""
Weight updates for the new local inhibition procedure (experimental and work in progress).
See "Numenta’s local inhibition revisited" (Section 6) in `latex/notes.pdf`.
Note that we directly encode the "activation probability" $a_{ij}$ as $h_{ij}$.
This is just a temporary hack. In Consequence, the hidden-to-hidden connections
are NOT symmetric anymore! | Below is the the instruction that describes the task:
### Input:
r"""
Weight updates for the new local inhibition procedure (experimental and work in progress).
See "Numenta’s local inhibition revisited" (Section 6) in `latex/notes.pdf`.
Note that we directly encode the "activation probability" $a_{ij}$ as $h_{ij}$.
This is just a temporary hack. In Consequence, the hidden-to-hidden connections
are NOT symmetric anymore!
### Response:
def numenta_new_local_inhibition(self, X, Y):
    r"""
    Weight updates for the new local inhibition procedure (experimental and work in progress).
    See "Numenta’s local inhibition revisited" (Section 6) in `latex/notes.pdf`.
    Note that we directly encode the "activation probability" $a_{ij}$ as $h_{ij}$.
    This is just a temporary hack. In Consequence, the hidden-to-hidden connections
    are NOT symmetric anymore!
    """
    batchSize = len(X)
    # Local aliases into the model's connection matrices; all updates below
    # mutate these arrays in place.
    W = self.connections.visible_to_hidden
    H = self.connections.hidden_to_hidden
    n, m = W.shape
    bias = self.connections.hidden_bias
    incr = self.weight_incr
    decr = self.weight_decr
    boost_bias = self.boost_strength_bias
    boost_hidden = self.boost_strength_hidden  # NOTE(review): read but never used below -- confirm intent
    # running estimate of pairwise activation statistics $a_{ij}$
    alpha = self.average_activity
    #---------------------------
    # visible-to-hidden updates
    #---------------------------
    for i in range(batchSize):
        y = Y[i]
        x = X[i]
        # complement of the input pattern (1 where x is 0)
        x_bar = np.ones(m) - x
        # Hebbian-like update
        W[ np.where(y == 1)[0] ] += incr*x - decr*x_bar
    # Clip the visible-to-hidden connections
    # to be between $0$ and $1$
    tooSmall = np.where(W < 0.)
    tooBig = np.where(W > 1.)
    W[ tooSmall ] = 0.
    W[ tooBig ] = 1.
    #---------------
    # (Hidden) Bias
    #---------------
    # diagonal of alpha holds each unit's own activation frequency
    for i in range(n):
        bias[i] = boost_bias * alpha[i,i]
    #--------------------------
    # Hidden-to-hidden updates
    #--------------------------
    # normalize each row of pairwise activity, excluding the diagonal term
    for i in range(n):
        for j in range(n):
            H[i,j] = alpha[i,j] /( np.sum( alpha[i,:] ) - alpha[i,i] )
    # no self-inhibition: zero the diagonal
    for i in range(n):
        H[i,i] = 0.
def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray:
"""
Returns the fractional coordinates given cartesian coordinates.
Args:
cart_coords (3x1 array): Cartesian coords.
Returns:
Fractional coordinates.
"""
return dot(cart_coords, self.inv_matrix) | Returns the fractional coordinates given cartesian coordinates.
Args:
cart_coords (3x1 array): Cartesian coords.
Returns:
Fractional coordinates. | Below is the the instruction that describes the task:
### Input:
Returns the fractional coordinates given cartesian coordinates.
Args:
cart_coords (3x1 array): Cartesian coords.
Returns:
Fractional coordinates.
### Response:
def get_fractional_coords(self, cart_coords: Vector3Like) -> np.ndarray:
    """
    Convert cartesian coordinates to fractional coordinates of this
    lattice by right-multiplying with the inverse lattice matrix.

    Args:
        cart_coords (3x1 array): Cartesian coords.

    Returns:
        Fractional coordinates.
    """
    frac_coords = dot(cart_coords, self.inv_matrix)
    return frac_coords
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line) | Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition. | Below is the the instruction that describes the task:
### Input:
Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
### Response:
def onecmd(self, line):
    """Interpret *line* as though it had been typed at the prompt.

    While a breakpoint command list is being defined, the line is routed
    to the command-definition handler; otherwise it goes through the
    normal ``cmd.Cmd`` dispatch.
    """
    if self.commands_defining:
        return self.handle_command_def(line)
    return cmd.Cmd.onecmd(self, line)
def CreateDefaultPartition(client, ad_group_id):
"""Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
"""
ad_group_criterion_service = client.GetService('AdGroupCriterionService',
version='v201809')
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
# Make sure that caseValue and parentCriterionId are left unspecified.
# This makes this partition as generic as possible to use as a
# fallback when others don't match.
'criterion': {
'xsi_type': 'ProductPartition',
'partitionType': 'UNIT'
},
'biddingStrategyConfiguration': {
'bids': [{
'xsi_type': 'CpcBid',
'bid': {
'microAmount': 500000
}
}]
}
}
}]
ad_group_criterion = ad_group_criterion_service.mutate(operations)['value'][0]
print ('Ad group criterion with ID "%d" in ad group with ID "%d" was added.'
% (ad_group_criterion['criterion']['id'],
ad_group_criterion['adGroupId'])) | Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group. | Below is the the instruction that describes the task:
### Input:
Creates a default partition.
Args:
client: an AdWordsClient instance.
ad_group_id: an integer ID for an ad group.
### Response:
def CreateDefaultPartition(client, ad_group_id):
    """Creates a default partition.

    Args:
        client: an AdWordsClient instance.
        ad_group_id: an integer ID for an ad group.
    """
    service = client.GetService('AdGroupCriterionService', version='v201809')
    # A UNIT partition with neither caseValue nor parentCriterionId acts as
    # the generic catch-all node of the product-partition tree.
    criterion = {
        'xsi_type': 'ProductPartition',
        'partitionType': 'UNIT'
    }
    bidding_config = {
        'bids': [{
            'xsi_type': 'CpcBid',
            'bid': {'microAmount': 500000}
        }]
    }
    operation = {
        'operator': 'ADD',
        'operand': {
            'xsi_type': 'BiddableAdGroupCriterion',
            'adGroupId': ad_group_id,
            'criterion': criterion,
            'biddingStrategyConfiguration': bidding_config
        }
    }
    result = service.mutate([operation])['value'][0]
    print ('Ad group criterion with ID "%d" in ad group with ID "%d" was added.'
           % (result['criterion']['id'], result['adGroupId']))
def prepare_transfer_transaction(*,
inputs,
recipients,
asset,
metadata=None):
"""Prepares a ``"TRANSFER"`` transaction payload, ready to be
fulfilled.
Args:
inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`): One or more
inputs holding the condition(s) that this transaction
intends to fulfill. Each input is expected to be a
:obj:`dict`.
recipients (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
more public keys representing the new recipients(s) of the
asset being transferred.
asset (:obj:`dict`): A single-key dictionary holding the ``id``
of the asset being transferred with this transaction.
metadata (:obj:`dict`): Metadata associated with the
transaction. Defaults to ``None``.
Returns:
dict: The prepared ``"TRANSFER"`` transaction.
.. important::
* ``asset`` MUST be in the form of::
{
'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>'
}
Example:
.. todo:: Replace this section with docs.
In case it may not be clear what an input should look like, say
Alice (public key: ``'3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf'``)
wishes to transfer an asset over to Bob
(public key: ``'EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA'``).
Let the asset creation transaction payload be denoted by
``tx``::
# noqa E501
>>> tx
{'asset': {'data': {'msg': 'Hello BigchainDB!'}},
'id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87',
'inputs': [{'fulfillment': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'fulfills': None,
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'metadata': None,
'operation': 'CREATE',
'outputs': [{'amount': '1',
'condition': {'details': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'uri': 'ni:///sha-256;7ApQLsLLQgj5WOUipJg1txojmge68pctwFxvc3iOl54?fpt=ed25519-sha-256&cost=131072'},
'public_keys': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'version': '2.0'}
Then, the input may be constructed in this way::
output_index
output = tx['transaction']['outputs'][output_index]
input_ = {
'fulfillment': output['condition']['details'],
'input': {
'output_index': output_index,
'transaction_id': tx['id'],
},
'owners_before': output['owners_after'],
}
Displaying the input on the prompt would look like::
>>> input_
{'fulfillment': {
'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'input': {'output_index': 0,
'transaction_id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87'},
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}
To prepare the transfer:
>>> prepare_transfer_transaction(
... inputs=input_,
... recipients='EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA',
... asset=tx['transaction']['asset'],
... )
"""
if not isinstance(inputs, (list, tuple)):
inputs = (inputs, )
if not isinstance(recipients, (list, tuple)):
recipients = [([recipients], 1)]
# NOTE: Needed for the time being. See
# https://github.com/bigchaindb/bigchaindb/issues/797
if isinstance(recipients, tuple):
recipients = [(list(recipients), 1)]
fulfillments = [
Input(_fulfillment_from_details(input_['fulfillment']),
input_['owners_before'],
fulfills=TransactionLink(
txid=input_['fulfills']['transaction_id'],
output=input_['fulfills']['output_index']))
for input_ in inputs
]
transaction = Transaction.transfer(
fulfillments,
recipients,
asset_id=asset['id'],
metadata=metadata,
)
return transaction.to_dict() | Prepares a ``"TRANSFER"`` transaction payload, ready to be
fulfilled.
Args:
inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`): One or more
inputs holding the condition(s) that this transaction
intends to fulfill. Each input is expected to be a
:obj:`dict`.
recipients (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
more public keys representing the new recipients(s) of the
asset being transferred.
asset (:obj:`dict`): A single-key dictionary holding the ``id``
of the asset being transferred with this transaction.
metadata (:obj:`dict`): Metadata associated with the
transaction. Defaults to ``None``.
Returns:
dict: The prepared ``"TRANSFER"`` transaction.
.. important::
* ``asset`` MUST be in the form of::
{
'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>'
}
Example:
.. todo:: Replace this section with docs.
In case it may not be clear what an input should look like, say
Alice (public key: ``'3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf'``)
wishes to transfer an asset over to Bob
(public key: ``'EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA'``).
Let the asset creation transaction payload be denoted by
``tx``::
# noqa E501
>>> tx
{'asset': {'data': {'msg': 'Hello BigchainDB!'}},
'id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87',
'inputs': [{'fulfillment': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'fulfills': None,
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'metadata': None,
'operation': 'CREATE',
'outputs': [{'amount': '1',
'condition': {'details': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'uri': 'ni:///sha-256;7ApQLsLLQgj5WOUipJg1txojmge68pctwFxvc3iOl54?fpt=ed25519-sha-256&cost=131072'},
'public_keys': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'version': '2.0'}
Then, the input may be constructed in this way::
output_index
output = tx['transaction']['outputs'][output_index]
input_ = {
'fulfillment': output['condition']['details'],
'input': {
'output_index': output_index,
'transaction_id': tx['id'],
},
'owners_before': output['owners_after'],
}
Displaying the input on the prompt would look like::
>>> input_
{'fulfillment': {
'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'input': {'output_index': 0,
'transaction_id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87'},
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}
To prepare the transfer:
>>> prepare_transfer_transaction(
... inputs=input_,
... recipients='EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA',
... asset=tx['transaction']['asset'],
... ) | Below is the the instruction that describes the task:
### Input:
Prepares a ``"TRANSFER"`` transaction payload, ready to be
fulfilled.
Args:
inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`): One or more
inputs holding the condition(s) that this transaction
intends to fulfill. Each input is expected to be a
:obj:`dict`.
recipients (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
more public keys representing the new recipients(s) of the
asset being transferred.
asset (:obj:`dict`): A single-key dictionary holding the ``id``
of the asset being transferred with this transaction.
metadata (:obj:`dict`): Metadata associated with the
transaction. Defaults to ``None``.
Returns:
dict: The prepared ``"TRANSFER"`` transaction.
.. important::
* ``asset`` MUST be in the form of::
{
'id': '<Asset ID (i.e. TX ID of its CREATE transaction)>'
}
Example:
.. todo:: Replace this section with docs.
In case it may not be clear what an input should look like, say
Alice (public key: ``'3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf'``)
wishes to transfer an asset over to Bob
(public key: ``'EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA'``).
Let the asset creation transaction payload be denoted by
``tx``::
# noqa E501
>>> tx
{'asset': {'data': {'msg': 'Hello BigchainDB!'}},
'id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87',
'inputs': [{'fulfillment': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'fulfills': None,
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'metadata': None,
'operation': 'CREATE',
'outputs': [{'amount': '1',
'condition': {'details': {'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'uri': 'ni:///sha-256;7ApQLsLLQgj5WOUipJg1txojmge68pctwFxvc3iOl54?fpt=ed25519-sha-256&cost=131072'},
'public_keys': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}],
'version': '2.0'}
Then, the input may be constructed in this way::
output_index
output = tx['transaction']['outputs'][output_index]
input_ = {
'fulfillment': output['condition']['details'],
'input': {
'output_index': output_index,
'transaction_id': tx['id'],
},
'owners_before': output['owners_after'],
}
Displaying the input on the prompt would look like::
>>> input_
{'fulfillment': {
'public_key': '3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf',
'type': 'ed25519-sha-256'},
'input': {'output_index': 0,
'transaction_id': '9650055df2539223586d33d273cb8fd05bd6d485b1fef1caf7c8901a49464c87'},
'owners_before': ['3Cxh1eKZk3Wp9KGBWFS7iVde465UvqUKnEqTg2MW4wNf']}
To prepare the transfer:
>>> prepare_transfer_transaction(
... inputs=input_,
... recipients='EcRawy3Y22eAUSS94vLF8BVJi62wbqbD9iSUSUNU9wAA',
... asset=tx['transaction']['asset'],
... )
### Response:
def prepare_transfer_transaction(*,
                                 inputs,
                                 recipients,
                                 asset,
                                 metadata=None):
    """Prepares a ``"TRANSFER"`` transaction payload, ready to be
    fulfilled.

    Args:
        inputs (:obj:`dict` | :obj:`list` | :obj:`tuple`): One or more
            inputs holding the condition(s) that this transaction
            intends to fulfill. Each input is expected to be a
            :obj:`dict` with ``fulfillment``, ``fulfills`` and
            ``owners_before`` keys, typically derived from an output of
            a previous transaction.
        recipients (:obj:`str` | :obj:`list` | :obj:`tuple`): One or
            more public keys representing the new recipient(s) of the
            asset being transferred.
        asset (:obj:`dict`): A single-key dictionary holding the ``id``
            of the asset being transferred, i.e.
            ``{'id': '<TX ID of the asset's CREATE transaction>'}``.
        metadata (:obj:`dict`): Metadata associated with the
            transaction. Defaults to ``None``.

    Returns:
        dict: The prepared ``"TRANSFER"`` transaction.
    """
    # Normalize scalar arguments into the sequence forms expected below.
    if not isinstance(inputs, (list, tuple)):
        inputs = (inputs,)
    if not isinstance(recipients, (list, tuple)):
        recipients = [([recipients], 1)]
    elif isinstance(recipients, tuple):
        # NOTE: Needed for the time being. See
        # https://github.com/bigchaindb/bigchaindb/issues/797
        recipients = [(list(recipients), 1)]
    fulfillments = []
    for input_ in inputs:
        spent_output = TransactionLink(
            txid=input_['fulfills']['transaction_id'],
            output=input_['fulfills']['output_index'])
        fulfillments.append(
            Input(_fulfillment_from_details(input_['fulfillment']),
                  input_['owners_before'],
                  fulfills=spent_output))
    transaction = Transaction.transfer(
        fulfillments,
        recipients,
        asset_id=asset['id'],
        metadata=metadata,
    )
    return transaction.to_dict()
def convert(self, value, param, ctx):
"""Return file content if file, else, return value as-is
"""
# Protect against corner cases of invalid inputs
if not isinstance(value, str):
return value
if isinstance(value, six.binary_type):
value = value.decode('UTF-8')
# Read from a file under these cases
if value.startswith('@'):
filename = os.path.expanduser(value[1:])
file_obj = super(Variables, self).convert(filename, param, ctx)
if hasattr(file_obj, 'read'):
# Sometimes click.File may return a buffer and not a string
return file_obj.read()
return file_obj
# No file, use given string
return value | Return file content if file, else, return value as-is | Below is the the instruction that describes the task:
### Input:
Return file content if file, else, return value as-is
### Response:
def convert(self, value, param, ctx):
    """Return file content if file, else, return value as-is.

    Values of the form ``@<path>`` are treated as file references: the
    user-expanded path is handed to click's ``File`` handling and the
    file's contents are returned. Anything else passes through unchanged.
    """
    # Non-string inputs (including None) pass straight through.
    if not isinstance(value, str):
        return value
    if isinstance(value, six.binary_type):
        value = value.decode('UTF-8')
    if not value.startswith('@'):
        # No file reference, use the given string as-is.
        return value
    filename = os.path.expanduser(value[1:])
    file_obj = super(Variables, self).convert(filename, param, ctx)
    # Sometimes click.File may return a buffer and not a string.
    return file_obj.read() if hasattr(file_obj, 'read') else file_obj
def arglexsort(arrays):
"""
Returns the indices of the lexicographical sorting
order of the supplied arrays.
"""
dtypes = ','.join(array.dtype.str for array in arrays)
recarray = np.empty(len(arrays[0]), dtype=dtypes)
for i, array in enumerate(arrays):
recarray['f%s' % i] = array
return recarray.argsort() | Returns the indices of the lexicographical sorting
order of the supplied arrays. | Below is the the instruction that describes the task:
### Input:
Returns the indices of the lexicographical sorting
order of the supplied arrays.
### Response:
def arglexsort(arrays):
    """
    Returns the indices of the lexicographical sorting
    order of the supplied arrays (the first array is the primary key).
    """
    # Pack the arrays into one structured array so a single argsort
    # compares rows field by field.
    combined_dtype = ','.join(arr.dtype.str for arr in arrays)
    packed = np.empty(len(arrays[0]), dtype=combined_dtype)
    for field_idx, arr in enumerate(arrays):
        packed['f%d' % field_idx] = arr
    return packed.argsort()
def get_pairtree_prefix(pairtree_store):
"""Returns the prefix given in pairtree_prefix file."""
prefix_path = os.path.join(pairtree_store, 'pairtree_prefix')
with open(prefix_path, 'r') as prefixf:
prefix = prefixf.read().strip()
return prefix | Returns the prefix given in pairtree_prefix file. | Below is the the instruction that describes the task:
### Input:
Returns the prefix given in pairtree_prefix file.
### Response:
def get_pairtree_prefix(pairtree_store):
    """Returns the prefix given in pairtree_prefix file."""
    prefix_file = os.path.join(pairtree_store, 'pairtree_prefix')
    with open(prefix_file, 'r') as handle:
        # strip surrounding whitespace/newline from the stored prefix
        return handle.read().strip()
def get_authors(self):
"""
Returns:
list: Authors represented as :class:`.Person` objects.
"""
authors = self._parse_persons("100", "a")
authors += self._parse_persons("600", "a")
authors += self._parse_persons("700", "a")
authors += self._parse_persons("800", "a")
return authors | Returns:
list: Authors represented as :class:`.Person` objects. | Below is the the instruction that describes the task:
### Input:
Returns:
list: Authors represented as :class:`.Person` objects.
### Response:
def get_authors(self):
    """
    Returns:
        list: Authors represented as :class:`.Person` objects.
    """
    # Authors may appear in any of these MARC fields; preserve field order.
    authors = []
    for field in ("100", "600", "700", "800"):
        authors.extend(self._parse_persons(field, "a"))
    return authors
def get_logging_fields(self, model):
"""
Returns a dictionary mapping of the fields that are used for
keeping the acutal audit log entries.
"""
rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower()
def entry_instance_to_unicode(log_entry):
try:
result = '%s: %s %s at %s'%(model._meta.object_name,
log_entry.object_state,
log_entry.get_action_type_display().lower(),
log_entry.action_date,
)
except AttributeError:
result = '%s %s at %s'%(model._meta.object_name,
log_entry.get_action_type_display().lower(),
log_entry.action_date
)
return result
action_user_field = LastUserField(related_name = rel_name, editable = False)
#check if the manager has been attached to auth user model
if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self')
return {
'action_id' : models.AutoField(primary_key = True),
'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False),
'action_user' : action_user_field,
'action_type' : models.CharField(max_length = 1, editable = False, choices = (
('I', _('Created')),
('U', _('Changed')),
('D', _('Deleted')),
)),
'object_state' : LogEntryObjectDescriptor(model),
'__unicode__' : entry_instance_to_unicode,
} | Returns a dictionary mapping of the fields that are used for
keeping the acutal audit log entries. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary mapping of the fields that are used for
keeping the acutal audit log entries.
### Response:
def get_logging_fields(self, model):
    """
    Returns a dictionary mapping of the fields that are used for
    keeping the actual audit log entries.
    """
    # Reverse-accessor name for the LastUserField; unique per audited model.
    rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower()
    def entry_instance_to_unicode(log_entry):
        # Human-readable rendering of one log entry; falls back to a shorter
        # form when the tracked object state is unavailable.
        try:
            result = '%s: %s %s at %s'%(model._meta.object_name,
                                        log_entry.object_state,
                                        log_entry.get_action_type_display().lower(),
                                        log_entry.action_date,
                                        )
        except AttributeError:
            result = '%s %s at %s'%(model._meta.object_name,
                                    log_entry.get_action_type_display().lower(),
                                    log_entry.action_date
                                    )
        return result
    action_user_field = LastUserField(related_name = rel_name, editable = False)
    #check if the manager has been attached to auth user model
    # (the user model cannot reference itself via the default path, so use to='self')
    if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
        action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self')
    return {
        'action_id' : models.AutoField(primary_key = True),
        'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False),
        'action_user' : action_user_field,
        # single-character code for the kind of change being logged
        'action_type' : models.CharField(max_length = 1, editable = False, choices = (
            ('I', _('Created')),
            ('U', _('Changed')),
            ('D', _('Deleted')),
        )),
        'object_state' : LogEntryObjectDescriptor(model),
        '__unicode__' : entry_instance_to_unicode,
    }
def SchemaValidateCtxt(self, reader, options):
"""Use W3C XSD schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then XML Schema validation is
deactivated. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlTextReaderSchemaValidateCtxt(reader__o, self._o, options)
return ret | Use W3C XSD schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then XML Schema validation is
deactivated. | Below is the the instruction that describes the task:
### Input:
Use W3C XSD schema context to validate the document as it
is processed. Activation is only possible before the first
Read(). If @ctxt is None, then XML Schema validation is
deactivated.
### Response:
def SchemaValidateCtxt(self, reader, options):
    """Use W3C XSD schema context to validate the document as it
    is processed. Activation is only possible before the first
    Read(). If @ctxt is None, then XML Schema validation is
    deactivated. """
    # unwrap the Python reader object to its underlying C-level handle
    reader__o = None if reader is None else reader._o
    return libxml2mod.xmlTextReaderSchemaValidateCtxt(reader__o, self._o, options)
def serialize(obj):
"""Serialize the given object into JSON.
Args:
obj: the object to be serialized.
Returns:
(str): JSON representation of the given object.
"""
LOGGER.debug('serialize(%s)', obj)
if isinstance(obj, datetime.date):
return simplejson.dumps(obj, default=encoders.as_date)
elif hasattr(obj, '__dict__'):
return simplejson.dumps(obj, default=encoders.as_object)
return simplejson.dumps(obj) | Serialize the given object into JSON.
Args:
obj: the object to be serialized.
Returns:
(str): JSON representation of the given object. | Below is the the instruction that describes the task:
### Input:
Serialize the given object into JSON.
Args:
obj: the object to be serialized.
Returns:
(str): JSON representation of the given object.
### Response:
def serialize(obj):
"""Serialize the given object into JSON.
Args:
obj: the object to be serialized.
Returns:
(str): JSON representation of the given object.
"""
LOGGER.debug('serialize(%s)', obj)
if isinstance(obj, datetime.date):
return simplejson.dumps(obj, default=encoders.as_date)
elif hasattr(obj, '__dict__'):
return simplejson.dumps(obj, default=encoders.as_object)
return simplejson.dumps(obj) |
def _intf_network(self):
"""
Retrieve the ac and dc network interface variable indices.
:Example:
``self._ac = {'bus1': (a1, v1)}`` gives
- indices: self.bus1
- system.Bus.a -> self.a1
- system.Bus.v -> self.v1
``self._dc = {'node1': v1}`` gives
- indices: self.node1
- system.Node.v-> self.v1
:return: None
"""
for key, val in self._ac.items():
self.copy_data_ext(
model='Bus', field='a', dest=val[0], idx=self.__dict__[key])
self.copy_data_ext(
model='Bus', field='v', dest=val[1], idx=self.__dict__[key])
for key, val in self._dc.items():
self.copy_data_ext(
model='Node', field='v', dest=val, idx=self.__dict__[key])
# check for interface voltage differences
self._check_Vn() | Retrieve the ac and dc network interface variable indices.
:Example:
``self._ac = {'bus1': (a1, v1)}`` gives
- indices: self.bus1
- system.Bus.a -> self.a1
- system.Bus.v -> self.v1
``self._dc = {'node1': v1}`` gives
- indices: self.node1
- system.Node.v-> self.v1
:return: None | Below is the the instruction that describes the task:
### Input:
Retrieve the ac and dc network interface variable indices.
:Example:
``self._ac = {'bus1': (a1, v1)}`` gives
- indices: self.bus1
- system.Bus.a -> self.a1
- system.Bus.v -> self.v1
``self._dc = {'node1': v1}`` gives
- indices: self.node1
- system.Node.v-> self.v1
:return: None
### Response:
def _intf_network(self):
"""
Retrieve the ac and dc network interface variable indices.
:Example:
``self._ac = {'bus1': (a1, v1)}`` gives
- indices: self.bus1
- system.Bus.a -> self.a1
- system.Bus.v -> self.v1
``self._dc = {'node1': v1}`` gives
- indices: self.node1
- system.Node.v-> self.v1
:return: None
"""
for key, val in self._ac.items():
self.copy_data_ext(
model='Bus', field='a', dest=val[0], idx=self.__dict__[key])
self.copy_data_ext(
model='Bus', field='v', dest=val[1], idx=self.__dict__[key])
for key, val in self._dc.items():
self.copy_data_ext(
model='Node', field='v', dest=val, idx=self.__dict__[key])
# check for interface voltage differences
self._check_Vn() |
def create_endpoint(port=None, service_name=None, host=None, use_defaults=True):
"""Creates a new Endpoint object.
:param port: TCP/UDP port. Defaults to 0.
:type port: int
:param service_name: service name as a str. Defaults to 'unknown'.
:type service_name: str
:param host: ipv4 or ipv6 address of the host. Defaults to the
current host ip.
:type host: str
:param use_defaults: whether to use defaults.
:type use_defaults: bool
:returns: zipkin Endpoint object
"""
if use_defaults:
if port is None:
port = 0
if service_name is None:
service_name = 'unknown'
if host is None:
try:
host = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = '127.0.0.1'
ipv4 = None
ipv6 = None
if host:
# Check ipv4 or ipv6.
try:
socket.inet_pton(socket.AF_INET, host)
ipv4 = host
except socket.error:
# If it's not an ipv4 address, maybe it's ipv6.
try:
socket.inet_pton(socket.AF_INET6, host)
ipv6 = host
except socket.error:
# If it's neither ipv4 or ipv6, leave both ip addresses unset.
pass
return Endpoint(
ipv4=ipv4,
ipv6=ipv6,
port=port,
service_name=service_name,
) | Creates a new Endpoint object.
:param port: TCP/UDP port. Defaults to 0.
:type port: int
:param service_name: service name as a str. Defaults to 'unknown'.
:type service_name: str
:param host: ipv4 or ipv6 address of the host. Defaults to the
current host ip.
:type host: str
:param use_defaults: whether to use defaults.
:type use_defaults: bool
:returns: zipkin Endpoint object | Below is the the instruction that describes the task:
### Input:
Creates a new Endpoint object.
:param port: TCP/UDP port. Defaults to 0.
:type port: int
:param service_name: service name as a str. Defaults to 'unknown'.
:type service_name: str
:param host: ipv4 or ipv6 address of the host. Defaults to the
current host ip.
:type host: str
:param use_defaults: whether to use defaults.
:type use_defaults: bool
:returns: zipkin Endpoint object
### Response:
def create_endpoint(port=None, service_name=None, host=None, use_defaults=True):
"""Creates a new Endpoint object.
:param port: TCP/UDP port. Defaults to 0.
:type port: int
:param service_name: service name as a str. Defaults to 'unknown'.
:type service_name: str
:param host: ipv4 or ipv6 address of the host. Defaults to the
current host ip.
:type host: str
:param use_defaults: whether to use defaults.
:type use_defaults: bool
:returns: zipkin Endpoint object
"""
if use_defaults:
if port is None:
port = 0
if service_name is None:
service_name = 'unknown'
if host is None:
try:
host = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
host = '127.0.0.1'
ipv4 = None
ipv6 = None
if host:
# Check ipv4 or ipv6.
try:
socket.inet_pton(socket.AF_INET, host)
ipv4 = host
except socket.error:
# If it's not an ipv4 address, maybe it's ipv6.
try:
socket.inet_pton(socket.AF_INET6, host)
ipv6 = host
except socket.error:
# If it's neither ipv4 or ipv6, leave both ip addresses unset.
pass
return Endpoint(
ipv4=ipv4,
ipv6=ipv6,
port=port,
service_name=service_name,
) |
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json() | 获取单词的例句 | Below is the the instruction that describes the task:
### Input:
获取单词的例句
### Response:
def examples(self, word_id, type=None,
url='https://api.shanbay.com/bdc/example/'):
"""获取单词的例句"""
params = {
'vocabulary_id': word_id
}
if type is not None:
params['type'] = type
return self._request(url, params=params).json() |
def p_unique_case_statement(self, p):
'unique_case_statement : UNIQUE CASE LPAREN case_comp RPAREN casecontent_statements ENDCASE'
p[0] = UniqueCaseStatement(p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | unique_case_statement : UNIQUE CASE LPAREN case_comp RPAREN casecontent_statements ENDCASE | Below is the the instruction that describes the task:
### Input:
unique_case_statement : UNIQUE CASE LPAREN case_comp RPAREN casecontent_statements ENDCASE
### Response:
def p_unique_case_statement(self, p):
'unique_case_statement : UNIQUE CASE LPAREN case_comp RPAREN casecontent_statements ENDCASE'
p[0] = UniqueCaseStatement(p[3], p[5], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.