code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def read(string):
"""
Read a graph from a XML document and return it. Nodes and edges specified in the input will
be added to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: graph
@return: Graph
"""
dom = parseString(string)
if dom.getElementsByTagName("graph"):
G = graph()
elif dom.getElementsByTagName("digraph"):
G = digraph()
elif dom.getElementsByTagName("hypergraph"):
return read_hypergraph(string)
else:
raise InvalidGraphType
# Read nodes...
for each_node in dom.getElementsByTagName("node"):
G.add_node(each_node.getAttribute('id'))
for each_attr in each_node.getElementsByTagName("attribute"):
G.add_node_attribute(each_node.getAttribute('id'),
(each_attr.getAttribute('attr'),
each_attr.getAttribute('value')))
# Read edges...
for each_edge in dom.getElementsByTagName("edge"):
if (not G.has_edge((each_edge.getAttribute('from'), each_edge.getAttribute('to')))):
G.add_edge((each_edge.getAttribute('from'), each_edge.getAttribute('to')), \
wt = float(each_edge.getAttribute('wt')), label = each_edge.getAttribute('label'))
for each_attr in each_edge.getElementsByTagName("attribute"):
attr_tuple = (each_attr.getAttribute('attr'), each_attr.getAttribute('value'))
if (attr_tuple not in G.edge_attributes((each_edge.getAttribute('from'), \
each_edge.getAttribute('to')))):
G.add_edge_attribute((each_edge.getAttribute('from'), \
each_edge.getAttribute('to')), attr_tuple)
return G | Read a graph from a XML document and return it. Nodes and edges specified in the input will
be added to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: graph
    @return: Graph | Below is the instruction that describes the task:
### Input:
Read a graph from a XML document and return it. Nodes and edges specified in the input will
be added to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: graph
@return: Graph
### Response:
def read(string):
"""
Read a graph from a XML document and return it. Nodes and edges specified in the input will
be added to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: graph
@return: Graph
"""
dom = parseString(string)
if dom.getElementsByTagName("graph"):
G = graph()
elif dom.getElementsByTagName("digraph"):
G = digraph()
elif dom.getElementsByTagName("hypergraph"):
return read_hypergraph(string)
else:
raise InvalidGraphType
# Read nodes...
for each_node in dom.getElementsByTagName("node"):
G.add_node(each_node.getAttribute('id'))
for each_attr in each_node.getElementsByTagName("attribute"):
G.add_node_attribute(each_node.getAttribute('id'),
(each_attr.getAttribute('attr'),
each_attr.getAttribute('value')))
# Read edges...
for each_edge in dom.getElementsByTagName("edge"):
if (not G.has_edge((each_edge.getAttribute('from'), each_edge.getAttribute('to')))):
G.add_edge((each_edge.getAttribute('from'), each_edge.getAttribute('to')), \
wt = float(each_edge.getAttribute('wt')), label = each_edge.getAttribute('label'))
for each_attr in each_edge.getElementsByTagName("attribute"):
attr_tuple = (each_attr.getAttribute('attr'), each_attr.getAttribute('value'))
if (attr_tuple not in G.edge_attributes((each_edge.getAttribute('from'), \
each_edge.getAttribute('to')))):
G.add_edge_attribute((each_edge.getAttribute('from'), \
each_edge.getAttribute('to')), attr_tuple)
return G |
def replace_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs):
"""Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs)
else:
(data) = cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs)
return data | Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
### Response:
def replace_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs):
"""Replace OrderLineItem
Replace all attributes of OrderLineItem
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True)
>>> result = thread.get()
:param async bool
:param str order_line_item_id: ID of orderLineItem to replace (required)
:param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required)
:return: OrderLineItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs)
else:
(data) = cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs)
return data |
def addParts(self, part_id, parent_id, part_relationship=None):
"""
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
"""
if part_relationship is None:
part_relationship = self.globaltt['has_part']
# Fail loudly if parent or child identifiers are None
if parent_id is None:
raise TypeError('Attempt to pass None as parent')
elif part_id is None:
raise TypeError('Attempt to pass None as child')
elif part_relationship is None:
part_relationship = self.globaltt['has_part']
self.graph.addTriple(parent_id, part_relationship, part_id)
return | This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
        :return: | Below is the instruction that describes the task:
### Input:
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
### Response:
def addParts(self, part_id, parent_id, part_relationship=None):
"""
This will add a has_part (or subproperty) relationship between
a parent_id and the supplied part.
By default the relationship will be BFO:has_part,
but any relationship could be given here.
:param part_id:
:param parent_id:
:param part_relationship:
:return:
"""
if part_relationship is None:
part_relationship = self.globaltt['has_part']
# Fail loudly if parent or child identifiers are None
if parent_id is None:
raise TypeError('Attempt to pass None as parent')
elif part_id is None:
raise TypeError('Attempt to pass None as child')
elif part_relationship is None:
part_relationship = self.globaltt['has_part']
self.graph.addTriple(parent_id, part_relationship, part_id)
return |
def unroll_angles(A,sign):
""" Unrolls the angles, A, so they increase continuously """
n = np.array([0,0,0])
P = np.zeros(np.shape(A))
P[0]=A[0]
for i in range(1,len(A)):
n = n+((A[i]-A[i-1]+0.5*sign*np.pi)*sign<0)*np.ones(3)*2.*np.pi
P[i] = A[i]+sign*n
    return P | Unrolls the angles, A, so they increase continuously | Below is the instruction that describes the task:
### Input:
Unrolls the angles, A, so they increase continuously
### Response:
def unroll_angles(A,sign):
""" Unrolls the angles, A, so they increase continuously """
n = np.array([0,0,0])
P = np.zeros(np.shape(A))
P[0]=A[0]
for i in range(1,len(A)):
n = n+((A[i]-A[i-1]+0.5*sign*np.pi)*sign<0)*np.ones(3)*2.*np.pi
P[i] = A[i]+sign*n
return P |
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
        return list(self.readonly_fields) + [field.name for field in obj._meta.fields] | Set all fields readonly. | Below is the instruction that describes the task:
### Input:
Set all fields readonly.
### Response:
def get_readonly_fields(self, request, obj=None):
"""Set all fields readonly."""
return list(self.readonly_fields) + [field.name for field in obj._meta.fields] |
def base62_encode(cls, num):
"""Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr) | Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
    Stolen from: http://stackoverflow.com/a/1119769/1144479 | Below is the instruction that describes the task:
### Input:
Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
### Response:
def base62_encode(cls, num):
"""Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr) |
def set_state(self, color_hex):
"""
:param color_hex: a hex string indicating the color of the porkfolio nose
:return: nothing
From the api...
"the color of the nose is not in the desired_state
but on the object itself."
"""
root_name = self.json_state.get('piggy_bank_id', self.name())
response = self.api_interface.set_device_state(self, {
"nose_color": color_hex
}, root_name)
self._update_state_from_response(response) | :param color_hex: a hex string indicating the color of the porkfolio nose
:return: nothing
From the api...
"the color of the nose is not in the desired_state
    but on the object itself." | Below is the instruction that describes the task:
### Input:
:param color_hex: a hex string indicating the color of the porkfolio nose
:return: nothing
From the api...
"the color of the nose is not in the desired_state
but on the object itself."
### Response:
def set_state(self, color_hex):
"""
:param color_hex: a hex string indicating the color of the porkfolio nose
:return: nothing
From the api...
"the color of the nose is not in the desired_state
but on the object itself."
"""
root_name = self.json_state.get('piggy_bank_id', self.name())
response = self.api_interface.set_device_state(self, {
"nose_color": color_hex
}, root_name)
self._update_state_from_response(response) |
def read_file(self, infile):
"""Read a reST file into a string.
"""
try:
with open(infile, 'rt') as file:
return file.read()
except UnicodeDecodeError as e:
err_exit('Error reading %s: %s' % (infile, e))
except (IOError, OSError) as e:
        err_exit('Error reading %s: %s' % (infile, e.strerror or e)) | Read a reST file into a string. | Below is the instruction that describes the task:
### Input:
Read a reST file into a string.
### Response:
def read_file(self, infile):
"""Read a reST file into a string.
"""
try:
with open(infile, 'rt') as file:
return file.read()
except UnicodeDecodeError as e:
err_exit('Error reading %s: %s' % (infile, e))
except (IOError, OSError) as e:
err_exit('Error reading %s: %s' % (infile, e.strerror or e)) |
def make_module_spec(vocabulary_file, vocab_size, embeddings_dim,
num_oov_buckets, preprocess_text):
"""Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens")
embeddings_var = tf.get_variable(
initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
name=EMBEDDINGS_VAR_NAME,
dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
)
ids = lookup_table.lookup(tokens)
combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
hub.add_signature("default", {"tokens": tokens},
{"default": combined_embedding})
def module_fn_with_preprocessing():
"""Spec function for a full-text embedding module with preprocessing."""
sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences")
# Perform a minimalistic text preprocessing by removing punctuation and
# splitting on spaces.
normalized_sentences = tf.regex_replace(
input=sentences, pattern=r"\pP", rewrite="")
tokens = tf.string_split(normalized_sentences, " ")
embeddings_var = tf.get_variable(
initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
name=EMBEDDINGS_VAR_NAME,
dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
)
sparse_ids = tf.SparseTensor(
indices=tokens.indices,
values=lookup_table.lookup(tokens.values),
dense_shape=tokens.dense_shape)
# In case some of the input sentences are empty before or after
# normalization, we will end up with empty rows. We do however want to
# return embedding for every row, so we have to fill in the empty rows with
# a default.
sparse_ids, _ = tf.sparse_fill_empty_rows(
sparse_ids, lookup_table.lookup(tf.constant("")))
# In case all of the input sentences are empty before or after
# normalization, we will end up with a SparseTensor with shape [?, 0]. After
# filling in the empty rows we must ensure the shape is set properly to
# [?, 1]. At this point, there are no empty rows, so the new shape will be
# [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])].
sparse_ids = tf.sparse_reset_shape(sparse_ids)
combined_embedding = tf.nn.embedding_lookup_sparse(
params=embeddings_var,
sp_ids=sparse_ids,
sp_weights=None,
combiner="sqrtn")
hub.add_signature("default", {"sentences": sentences},
{"default": combined_embedding})
if preprocess_text:
return hub.create_module_spec(module_fn_with_preprocessing)
else:
return hub.create_module_spec(module_fn) | Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
    A module spec object used for constructing a TF-Hub module. | Below is the instruction that describes the task:
### Input:
Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
A module spec object used for constructing a TF-Hub module.
### Response:
def make_module_spec(vocabulary_file, vocab_size, embeddings_dim,
num_oov_buckets, preprocess_text):
"""Makes a module spec to simply perform token to embedding lookups.
Input of this module is a 1-D list of string tokens. For T tokens input and
an M dimensional embedding table, the lookup result is a [T, M] shaped Tensor.
Args:
vocabulary_file: Text file where each line is a key in the vocabulary.
vocab_size: The number of tokens contained in the vocabulary.
embeddings_dim: The embedding dimension.
num_oov_buckets: The number of out-of-vocabulary buckets.
preprocess_text: Whether to preprocess the input tensor by removing
punctuation and splitting on spaces.
Returns:
A module spec object used for constructing a TF-Hub module.
"""
def module_fn():
"""Spec function for a token embedding module."""
tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens")
embeddings_var = tf.get_variable(
initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
name=EMBEDDINGS_VAR_NAME,
dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
)
ids = lookup_table.lookup(tokens)
combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
hub.add_signature("default", {"tokens": tokens},
{"default": combined_embedding})
def module_fn_with_preprocessing():
"""Spec function for a full-text embedding module with preprocessing."""
sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences")
# Perform a minimalistic text preprocessing by removing punctuation and
# splitting on spaces.
normalized_sentences = tf.regex_replace(
input=sentences, pattern=r"\pP", rewrite="")
tokens = tf.string_split(normalized_sentences, " ")
embeddings_var = tf.get_variable(
initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
name=EMBEDDINGS_VAR_NAME,
dtype=tf.float32)
lookup_table = tf.contrib.lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
num_oov_buckets=num_oov_buckets,
)
sparse_ids = tf.SparseTensor(
indices=tokens.indices,
values=lookup_table.lookup(tokens.values),
dense_shape=tokens.dense_shape)
# In case some of the input sentences are empty before or after
# normalization, we will end up with empty rows. We do however want to
# return embedding for every row, so we have to fill in the empty rows with
# a default.
sparse_ids, _ = tf.sparse_fill_empty_rows(
sparse_ids, lookup_table.lookup(tf.constant("")))
# In case all of the input sentences are empty before or after
# normalization, we will end up with a SparseTensor with shape [?, 0]. After
# filling in the empty rows we must ensure the shape is set properly to
# [?, 1]. At this point, there are no empty rows, so the new shape will be
# [sparse_ids.dense_shape[0], max(1, sparse_ids.dense_shape[1])].
sparse_ids = tf.sparse_reset_shape(sparse_ids)
combined_embedding = tf.nn.embedding_lookup_sparse(
params=embeddings_var,
sp_ids=sparse_ids,
sp_weights=None,
combiner="sqrtn")
hub.add_signature("default", {"sentences": sentences},
{"default": combined_embedding})
if preprocess_text:
return hub.create_module_spec(module_fn_with_preprocessing)
else:
return hub.create_module_spec(module_fn) |
def ntp_config(self, ntp_opt):
"""
ntp_opt is the NTP options listed as example below:
NTP.Address=clock.isc.org
NTP.Enable=false
NTP.Port=38
NTP.TimeZone=9
NTP.UpdatePeriod=31
ntp_opt format:
<paramName>=<paramValue>[&<paramName>=<paramValue>...]
"""
ret = self.command(
'configManager.cgi?action=setConfig&{0}'.format(ntp_opt)
)
return ret.content.decode('utf-8') | ntp_opt is the NTP options listed as example below:
NTP.Address=clock.isc.org
NTP.Enable=false
NTP.Port=38
NTP.TimeZone=9
NTP.UpdatePeriod=31
ntp_opt format:
    <paramName>=<paramValue>[&<paramName>=<paramValue>...] | Below is the instruction that describes the task:
### Input:
ntp_opt is the NTP options listed as example below:
NTP.Address=clock.isc.org
NTP.Enable=false
NTP.Port=38
NTP.TimeZone=9
NTP.UpdatePeriod=31
ntp_opt format:
<paramName>=<paramValue>[&<paramName>=<paramValue>...]
### Response:
def ntp_config(self, ntp_opt):
"""
ntp_opt is the NTP options listed as example below:
NTP.Address=clock.isc.org
NTP.Enable=false
NTP.Port=38
NTP.TimeZone=9
NTP.UpdatePeriod=31
ntp_opt format:
<paramName>=<paramValue>[&<paramName>=<paramValue>...]
"""
ret = self.command(
'configManager.cgi?action=setConfig&{0}'.format(ntp_opt)
)
return ret.content.decode('utf-8') |
def last_login_date(self):
""" Date of user's last login """
return sa.Column(
sa.TIMESTAMP(timezone=False),
default=lambda x: datetime.utcnow(),
server_default=sa.func.now(),
    ) | Date of user's last login | Below is the instruction that describes the task:
### Input:
Date of user's last login
### Response:
def last_login_date(self):
""" Date of user's last login """
return sa.Column(
sa.TIMESTAMP(timezone=False),
default=lambda x: datetime.utcnow(),
server_default=sa.func.now(),
) |
def send(self, text):
"""
Send raw text to the distant end.
"""
if text:
self.send_buffer += text.replace('\n', '\r\n')
        self.send_pending = True | Send raw text to the distant end. | Below is the instruction that describes the task:
### Input:
Send raw text to the distant end.
### Response:
def send(self, text):
"""
Send raw text to the distant end.
"""
if text:
self.send_buffer += text.replace('\n', '\r\n')
self.send_pending = True |
def _put_bucket_versioning(self):
"""Adds bucket versioning policy to bucket"""
status = 'Suspended'
if self.s3props['versioning']['enabled']:
status = 'Enabled'
versioning_config = {
'MFADelete': self.s3props['versioning']['mfa_delete'],
'Status': status
}
_response = self.s3client.put_bucket_versioning(Bucket=self.bucket, VersioningConfiguration=versioning_config)
LOG.debug('Response setting up S3 versioning: %s', _response)
    LOG.info('S3 versioning configuration updated') | Adds bucket versioning policy to bucket | Below is the instruction that describes the task:
### Input:
Adds bucket versioning policy to bucket
### Response:
def _put_bucket_versioning(self):
"""Adds bucket versioning policy to bucket"""
status = 'Suspended'
if self.s3props['versioning']['enabled']:
status = 'Enabled'
versioning_config = {
'MFADelete': self.s3props['versioning']['mfa_delete'],
'Status': status
}
_response = self.s3client.put_bucket_versioning(Bucket=self.bucket, VersioningConfiguration=versioning_config)
LOG.debug('Response setting up S3 versioning: %s', _response)
LOG.info('S3 versioning configuration updated') |
def detect(self, color_im, depth_im, cfg, camera_intr,
T_camera_world,
vis_foreground=False, vis_segmentation=False, segmask=None):
"""Detects all relevant objects in an rgbd image pair using foreground masking.
Parameters
----------
color_im : :obj:`ColorImage`
color image for detection
depth_im : :obj:`DepthImage`
depth image for detection (corresponds to color image)
cfg : :obj:`YamlConfig`
parameters of detection function
camera_intr : :obj:`CameraIntrinsics`
intrinsics of the camera
T_camera_world : :obj:`autolab_core.RigidTransform`
registration of the camera to world frame
segmask : :obj:`BinaryImage`
optional segmask of invalid pixels
Returns
-------
:obj:`list` of :obj:`RgbdDetection`
all detections in the image
"""
# read params
min_pt_box = np.array(cfg['min_pt'])
max_pt_box = np.array(cfg['max_pt'])
min_contour_area = cfg['min_contour_area']
max_contour_area = cfg['max_contour_area']
min_box_area = cfg['min_box_area']
max_box_area = cfg['max_box_area']
box_padding_px = cfg['box_padding_px']
crop_height = cfg['image_height']
crop_width = cfg['image_width']
depth_grad_thresh = cfg['depth_grad_thresh']
point_cloud_mask_only = cfg['point_cloud_mask_only']
w = cfg['filter_dim']
half_crop_height = float(crop_height) / 2
half_crop_width = float(crop_width) / 2
half_crop_dims = np.array([half_crop_height, half_crop_width])
fill_depth = np.max(depth_im.data[depth_im.data > 0])
kinect2_denoising = False
if 'kinect2_denoising' in cfg.keys() and cfg['kinect2_denoising']:
kinect2_denoising = True
depth_offset = cfg['kinect2_noise_offset']
max_depth = cfg['kinect2_noise_max_depth']
box = Box(min_pt_box, max_pt_box, 'world')
# project into 3D
point_cloud_cam = camera_intr.deproject(depth_im)
point_cloud_world = T_camera_world * point_cloud_cam
seg_point_cloud_world, _ = point_cloud_world.box_mask(box)
seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
# mask image using background detection
bgmodel = color_im.background_model()
binary_im = depth_im_seg.to_binary()
if segmask is not None:
binary_im = binary_im.mask_binary(segmask.inverse())
# filter the image
y, x = np.ogrid[-w/2+1:w/2+1, -w/2+1:w/2+1]
mask = x*x + y*y <= w/2*w/2
filter_struct = np.zeros([w,w]).astype(np.uint8)
filter_struct[mask] = 1
binary_im_filtered_data = snm.binary_dilation(binary_im.data, structure=filter_struct)
binary_im_filtered = BinaryImage(binary_im_filtered_data.astype(np.uint8),
frame=binary_im.frame,
threshold=0)
# find all contours
contours = binary_im_filtered.find_contours(min_area=min_contour_area, max_area=max_contour_area)
if vis_foreground:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.imshow(segmask.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im_filtered.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
# switch to just return the mean of nonzero_px
if point_cloud_mask_only == 1:
center_px = np.mean(binary_im_filtered.nonzero_pixels(), axis=0)
ci = center_px[0]
cj = center_px[1]
binary_thumbnail = binary_im_filtered.crop(crop_height, crop_width, ci, cj)
color_thumbnail = color_im.crop(crop_height, crop_width, ci, cj)
depth_thumbnail = depth_im.crop(crop_height, crop_width, ci, cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(crop_height, crop_width, ci, cj)
query_box = Box(center_px - half_crop_dims, center_px + half_crop_dims)
return [RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=None,
camera_intr=thumbnail_intr)]
# convert contours to detections
detections = []
for i, contour in enumerate(contours):
orig_box = contour.bounding_box
logging.debug('Orig box %d area: %.3f' %(i, orig_box.area))
if orig_box.area > min_box_area and orig_box.area < max_box_area:
# convert orig bounding box to query bounding box
min_pt = orig_box.center - half_crop_dims
max_pt = orig_box.center + half_crop_dims
query_box = Box(min_pt, max_pt, frame=orig_box.frame)
# segment color to get refined detection
contour_mask = binary_im_filtered.contour_mask(contour)
binary_thumbnail = contour_mask.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
else:
# otherwise take original bounding box
query_box = Box(contour.bounding_box.min_pt - box_padding_px,
contour.bounding_box.max_pt + box_padding_px,
frame = contour.bounding_box.frame)
binary_thumbnail = binary_im_filtered.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# crop to get thumbnails
color_thumbnail = color_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
depth_thumbnail = depth_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# fix depth thumbnail
depth_thumbnail = depth_thumbnail.replace_zeros(fill_depth)
if kinect2_denoising:
depth_data = depth_thumbnail.data
min_depth = np.min(depth_data)
binary_mask_data = binary_thumbnail.data
depth_mask_data = depth_thumbnail.mask_binary(binary_thumbnail).data
depth_mask_data += depth_offset
depth_data[binary_mask_data > 0] = depth_mask_data[binary_mask_data > 0]
depth_thumbnail = DepthImage(depth_data, depth_thumbnail.frame)
# append to detections
detections.append(RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=contour,
camera_intr=thumbnail_intr))
return detections | Detects all relevant objects in an rgbd image pair using foreground masking.
Parameters
----------
color_im : :obj:`ColorImage`
color image for detection
depth_im : :obj:`DepthImage`
depth image for detection (corresponds to color image)
cfg : :obj:`YamlConfig`
parameters of detection function
camera_intr : :obj:`CameraIntrinsics`
intrinsics of the camera
T_camera_world : :obj:`autolab_core.RigidTransform`
registration of the camera to world frame
segmask : :obj:`BinaryImage`
optional segmask of invalid pixels
Returns
-------
:obj:`list` of :obj:`RgbdDetection`
        all detections in the image | Below is the instruction that describes the task:
### Input:
Detects all relevant objects in an rgbd image pair using foreground masking.
Parameters
----------
color_im : :obj:`ColorImage`
color image for detection
depth_im : :obj:`DepthImage`
depth image for detection (corresponds to color image)
cfg : :obj:`YamlConfig`
parameters of detection function
camera_intr : :obj:`CameraIntrinsics`
intrinsics of the camera
T_camera_world : :obj:`autolab_core.RigidTransform`
registration of the camera to world frame
segmask : :obj:`BinaryImage`
optional segmask of invalid pixels
Returns
-------
:obj:`list` of :obj:`RgbdDetection`
all detections in the image
### Response:
def detect(self, color_im, depth_im, cfg, camera_intr,
T_camera_world,
vis_foreground=False, vis_segmentation=False, segmask=None):
"""Detects all relevant objects in an rgbd image pair using foreground masking.
Parameters
----------
color_im : :obj:`ColorImage`
color image for detection
depth_im : :obj:`DepthImage`
depth image for detection (corresponds to color image)
cfg : :obj:`YamlConfig`
parameters of detection function
camera_intr : :obj:`CameraIntrinsics`
intrinsics of the camera
T_camera_world : :obj:`autolab_core.RigidTransform`
registration of the camera to world frame
segmask : :obj:`BinaryImage`
optional segmask of invalid pixels
Returns
-------
:obj:`list` of :obj:`RgbdDetection`
all detections in the image
"""
# read params
min_pt_box = np.array(cfg['min_pt'])
max_pt_box = np.array(cfg['max_pt'])
min_contour_area = cfg['min_contour_area']
max_contour_area = cfg['max_contour_area']
min_box_area = cfg['min_box_area']
max_box_area = cfg['max_box_area']
box_padding_px = cfg['box_padding_px']
crop_height = cfg['image_height']
crop_width = cfg['image_width']
depth_grad_thresh = cfg['depth_grad_thresh']
point_cloud_mask_only = cfg['point_cloud_mask_only']
w = cfg['filter_dim']
half_crop_height = float(crop_height) / 2
half_crop_width = float(crop_width) / 2
half_crop_dims = np.array([half_crop_height, half_crop_width])
fill_depth = np.max(depth_im.data[depth_im.data > 0])
kinect2_denoising = False
if 'kinect2_denoising' in cfg.keys() and cfg['kinect2_denoising']:
kinect2_denoising = True
depth_offset = cfg['kinect2_noise_offset']
max_depth = cfg['kinect2_noise_max_depth']
box = Box(min_pt_box, max_pt_box, 'world')
# project into 3D
point_cloud_cam = camera_intr.deproject(depth_im)
point_cloud_world = T_camera_world * point_cloud_cam
seg_point_cloud_world, _ = point_cloud_world.box_mask(box)
seg_point_cloud_cam = T_camera_world.inverse() * seg_point_cloud_world
depth_im_seg = camera_intr.project_to_image(seg_point_cloud_cam)
# mask image using background detection
bgmodel = color_im.background_model()
binary_im = depth_im_seg.to_binary()
if segmask is not None:
binary_im = binary_im.mask_binary(segmask.inverse())
# filter the image
y, x = np.ogrid[-w/2+1:w/2+1, -w/2+1:w/2+1]
mask = x*x + y*y <= w/2*w/2
filter_struct = np.zeros([w,w]).astype(np.uint8)
filter_struct[mask] = 1
binary_im_filtered_data = snm.binary_dilation(binary_im.data, structure=filter_struct)
binary_im_filtered = BinaryImage(binary_im_filtered_data.astype(np.uint8),
frame=binary_im.frame,
threshold=0)
# find all contours
contours = binary_im_filtered.find_contours(min_area=min_contour_area, max_area=max_contour_area)
if vis_foreground:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.imshow(segmask.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im_filtered.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
# switch to just return the mean of nonzero_px
if point_cloud_mask_only == 1:
center_px = np.mean(binary_im_filtered.nonzero_pixels(), axis=0)
ci = center_px[0]
cj = center_px[1]
binary_thumbnail = binary_im_filtered.crop(crop_height, crop_width, ci, cj)
color_thumbnail = color_im.crop(crop_height, crop_width, ci, cj)
depth_thumbnail = depth_im.crop(crop_height, crop_width, ci, cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(crop_height, crop_width, ci, cj)
query_box = Box(center_px - half_crop_dims, center_px + half_crop_dims)
return [RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=None,
camera_intr=thumbnail_intr)]
# convert contours to detections
detections = []
for i, contour in enumerate(contours):
orig_box = contour.bounding_box
logging.debug('Orig box %d area: %.3f' %(i, orig_box.area))
if orig_box.area > min_box_area and orig_box.area < max_box_area:
# convert orig bounding box to query bounding box
min_pt = orig_box.center - half_crop_dims
max_pt = orig_box.center + half_crop_dims
query_box = Box(min_pt, max_pt, frame=orig_box.frame)
# segment color to get refined detection
contour_mask = binary_im_filtered.contour_mask(contour)
binary_thumbnail = contour_mask.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
else:
# otherwise take original bounding box
query_box = Box(contour.bounding_box.min_pt - box_padding_px,
contour.bounding_box.max_pt + box_padding_px,
frame = contour.bounding_box.frame)
binary_thumbnail = binary_im_filtered.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# crop to get thumbnails
color_thumbnail = color_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
depth_thumbnail = depth_im.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
thumbnail_intr = camera_intr
if camera_intr is not None:
thumbnail_intr = camera_intr.crop(query_box.height, query_box.width, query_box.ci, query_box.cj)
# fix depth thumbnail
depth_thumbnail = depth_thumbnail.replace_zeros(fill_depth)
if kinect2_denoising:
depth_data = depth_thumbnail.data
min_depth = np.min(depth_data)
binary_mask_data = binary_thumbnail.data
depth_mask_data = depth_thumbnail.mask_binary(binary_thumbnail).data
depth_mask_data += depth_offset
depth_data[binary_mask_data > 0] = depth_mask_data[binary_mask_data > 0]
depth_thumbnail = DepthImage(depth_data, depth_thumbnail.frame)
# append to detections
detections.append(RgbdDetection(color_thumbnail,
depth_thumbnail,
query_box,
binary_thumbnail=binary_thumbnail,
contour=contour,
camera_intr=thumbnail_intr))
return detections |
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):
"""Fetches a list of pools hosted by the loadbalancer agent."""
return self.get((self.agent_path + self.LOADBALANCER_POOLS) %
                        lbaas_agent, params=_params) | Fetches a list of pools hosted by the loadbalancer agent. | Below is the instruction that describes the task:
### Input:
Fetches a list of pools hosted by the loadbalancer agent.
### Response:
def list_pools_on_lbaas_agent(self, lbaas_agent, **_params):
"""Fetches a list of pools hosted by the loadbalancer agent."""
return self.get((self.agent_path + self.LOADBALANCER_POOLS) %
lbaas_agent, params=_params) |
def pauli_kraus_map(probabilities):
r"""
Generate the Kraus operators corresponding to a pauli channel.
:params list|floats probabilities: The 4^num_qubits list of probabilities specifying the desired pauli channel.
There should be either 4 or 16 probabilities specified in the order I, X, Y, Z for 1 qubit
or II, IX, IY, IZ, XI, XX, XY, etc for 2 qubits.
For example::
The d-dimensional depolarizing channel \Delta parameterized as
\Delta(\rho) = p \rho + [(1-p)/d] I
is specified by the list of probabilities
[p + (1-p)/d, (1-p)/d, (1-p)/d), ... , (1-p)/d)]
:return: A list of the 4^num_qubits Kraus operators that parametrize the map.
:rtype: list
"""
if len(probabilities) not in [4, 16]:
raise ValueError("Currently we only support one or two qubits, "
"so the provided list of probabilities must have length 4 or 16.")
if not np.allclose(sum(probabilities), 1.0, atol=1e-3):
raise ValueError("Probabilities must sum to one.")
paulis = [np.eye(2), np.array([[0, 1], [1, 0]]), np.array([[0, -1j], [1j, 0]]), np.array([[1, 0], [0, -1]])]
if len(probabilities) == 4:
operators = paulis
else:
operators = np.kron(paulis, paulis)
return [coeff * op for coeff, op in zip(np.sqrt(probabilities), operators)] | r"""
Generate the Kraus operators corresponding to a pauli channel.
:params list|floats probabilities: The 4^num_qubits list of probabilities specifying the desired pauli channel.
There should be either 4 or 16 probabilities specified in the order I, X, Y, Z for 1 qubit
or II, IX, IY, IZ, XI, XX, XY, etc for 2 qubits.
For example::
The d-dimensional depolarizing channel \Delta parameterized as
\Delta(\rho) = p \rho + [(1-p)/d] I
is specified by the list of probabilities
[p + (1-p)/d, (1-p)/d, (1-p)/d), ... , (1-p)/d)]
:return: A list of the 4^num_qubits Kraus operators that parametrize the map.
    :rtype: list | Below is the instruction that describes the task:
### Input:
r"""
Generate the Kraus operators corresponding to a pauli channel.
:params list|floats probabilities: The 4^num_qubits list of probabilities specifying the desired pauli channel.
There should be either 4 or 16 probabilities specified in the order I, X, Y, Z for 1 qubit
or II, IX, IY, IZ, XI, XX, XY, etc for 2 qubits.
For example::
The d-dimensional depolarizing channel \Delta parameterized as
\Delta(\rho) = p \rho + [(1-p)/d] I
is specified by the list of probabilities
[p + (1-p)/d, (1-p)/d, (1-p)/d), ... , (1-p)/d)]
:return: A list of the 4^num_qubits Kraus operators that parametrize the map.
:rtype: list
### Response:
def pauli_kraus_map(probabilities):
r"""
Generate the Kraus operators corresponding to a pauli channel.
:params list|floats probabilities: The 4^num_qubits list of probabilities specifying the desired pauli channel.
There should be either 4 or 16 probabilities specified in the order I, X, Y, Z for 1 qubit
or II, IX, IY, IZ, XI, XX, XY, etc for 2 qubits.
For example::
The d-dimensional depolarizing channel \Delta parameterized as
\Delta(\rho) = p \rho + [(1-p)/d] I
is specified by the list of probabilities
[p + (1-p)/d, (1-p)/d, (1-p)/d), ... , (1-p)/d)]
:return: A list of the 4^num_qubits Kraus operators that parametrize the map.
:rtype: list
"""
if len(probabilities) not in [4, 16]:
raise ValueError("Currently we only support one or two qubits, "
"so the provided list of probabilities must have length 4 or 16.")
if not np.allclose(sum(probabilities), 1.0, atol=1e-3):
raise ValueError("Probabilities must sum to one.")
paulis = [np.eye(2), np.array([[0, 1], [1, 0]]), np.array([[0, -1j], [1j, 0]]), np.array([[1, 0], [0, -1]])]
if len(probabilities) == 4:
operators = paulis
else:
operators = np.kron(paulis, paulis)
return [coeff * op for coeff, op in zip(np.sqrt(probabilities), operators)] |
def valid_workflow(self):
"""
Return true if each handler's output type is the same as
the next handler's input type. Return False if not.
Returns: boolean - True if workflow is valid, False if not
"""
for ix, handler in enumerate(self.handlers[:-1]):
next_input_type = self.handlers[ix + 1].input_type
if (handler.output_type is not None and
next_input_type is not None):
if handler.output_type != next_input_type:
return False
return True | Return true if each handler's output type is the same as
the next handler's input type. Return False if not.
    Returns: boolean - True if workflow is valid, False if not | Below is the instruction that describes the task:
### Input:
Return true if each handler's output type is the same as
the next handler's input type. Return False if not.
Returns: boolean - True if workflow is valid, False if not
### Response:
def valid_workflow(self):
"""
Return true if each handler's output type is the same as
the next handler's input type. Return False if not.
Returns: boolean - True if workflow is valid, False if not
"""
for ix, handler in enumerate(self.handlers[:-1]):
next_input_type = self.handlers[ix + 1].input_type
if (handler.output_type is not None and
next_input_type is not None):
if handler.output_type != next_input_type:
return False
return True |
def goto_block(self, block):
"""
A context manager which temporarily positions the builder at the end
of basic block *bb* (but before any terminator).
"""
old_block = self.basic_block
term = block.terminator
if term is not None:
self.position_before(term)
else:
self.position_at_end(block)
try:
yield
finally:
self.position_at_end(old_block) | A context manager which temporarily positions the builder at the end
    of basic block *bb* (but before any terminator). | Below is the instruction that describes the task:
### Input:
A context manager which temporarily positions the builder at the end
of basic block *bb* (but before any terminator).
### Response:
def goto_block(self, block):
"""
A context manager which temporarily positions the builder at the end
of basic block *bb* (but before any terminator).
"""
old_block = self.basic_block
term = block.terminator
if term is not None:
self.position_before(term)
else:
self.position_at_end(block)
try:
yield
finally:
self.position_at_end(old_block) |
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None,
partition_id=0,
num_partitions=1,
shuffle_buffer_size=1024,
max_records=-1):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
shuffle_files = shuffle_files or shuffle_files is None and is_training
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern))
# Functions used in dataset transforms below. `filenames` can be either a
# `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.
def _load_records_and_preprocess(filenames):
"""Reads files from a string tensor or a dataset of filenames."""
# Load records from file(s) with an 8MiB read buffer.
dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)
# Decode.
dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
# Preprocess if requested.
# Note that preprocessing should happen per-file as order may matter.
if preprocess:
dataset = self.preprocess(dataset, mode, hparams,
interleave=shuffle_files)
return dataset
if len(data_files) < num_partitions:
raise ValueError(
"number of data files (%d) must be at least the number of hosts (%d)"
% (len(data_files), num_partitions))
data_files = [f for (i, f) in enumerate(data_files)
if i % num_partitions == partition_id]
tf.logging.info(
"partition: %d num_data_files: %d" % (partition_id, len(data_files)))
if shuffle_files:
mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
random.shuffle(data_files)
dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
# Create data-set from files by parsing, pre-processing and interleaving.
if shuffle_files:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_load_records_and_preprocess, sloppy=True, cycle_length=8))
else:
dataset = _load_records_and_preprocess(dataset)
dataset = dataset.map(
self.maybe_reverse_and_copy, num_parallel_calls=num_threads)
dataset = dataset.take(max_records)
## Shuffle records only for training examples.
if shuffle_files and is_training:
dataset = dataset.shuffle(shuffle_buffer_size)
if hparams.get("pack_dataset", False):
dataset = generator_utils.pack_dataset(
dataset, hparams.max_length, keys=["inputs", "targets"],
use_custom_ops=hparams.get("use_custom_ops", False))
if output_buffer_size:
dataset = dataset.prefetch(output_buffer_size)
return dataset | Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
      ValueError: if num_partitions is greater than the number of data files. | Below is the instruction that describes the task:
### Input:
Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
### Response:
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None,
partition_id=0,
num_partitions=1,
shuffle_buffer_size=1024,
max_records=-1):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch at end of pipeline.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: DatasetSplit, which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
partition_id: integer - which partition of the dataset to read from
num_partitions: how many partitions in the dataset
shuffle_buffer_size: if shuffle_files is True, this is the buffer size
used to shuffle records.
max_records: int, number of records to truncate to.
Returns:
Dataset containing dict<feature name, Tensor>.
Raises:
ValueError: if num_partitions is greater than the number of data files.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
shuffle_files = shuffle_files or shuffle_files is None and is_training
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = sorted(tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern))
# Functions used in dataset transforms below. `filenames` can be either a
# `tf.string` tensor or `tf.data.Dataset` containing one or more filenames.
def _load_records_and_preprocess(filenames):
"""Reads files from a string tensor or a dataset of filenames."""
# Load records from file(s) with an 8MiB read buffer.
dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024)
# Decode.
dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads)
# Preprocess if requested.
# Note that preprocessing should happen per-file as order may matter.
if preprocess:
dataset = self.preprocess(dataset, mode, hparams,
interleave=shuffle_files)
return dataset
if len(data_files) < num_partitions:
raise ValueError(
"number of data files (%d) must be at least the number of hosts (%d)"
% (len(data_files), num_partitions))
data_files = [f for (i, f) in enumerate(data_files)
if i % num_partitions == partition_id]
tf.logging.info(
"partition: %d num_data_files: %d" % (partition_id, len(data_files)))
if shuffle_files:
mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
random.shuffle(data_files)
dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files))
# Create data-set from files by parsing, pre-processing and interleaving.
if shuffle_files:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_load_records_and_preprocess, sloppy=True, cycle_length=8))
else:
dataset = _load_records_and_preprocess(dataset)
dataset = dataset.map(
self.maybe_reverse_and_copy, num_parallel_calls=num_threads)
dataset = dataset.take(max_records)
## Shuffle records only for training examples.
if shuffle_files and is_training:
dataset = dataset.shuffle(shuffle_buffer_size)
if hparams.get("pack_dataset", False):
dataset = generator_utils.pack_dataset(
dataset, hparams.max_length, keys=["inputs", "targets"],
use_custom_ops=hparams.get("use_custom_ops", False))
if output_buffer_size:
dataset = dataset.prefetch(output_buffer_size)
return dataset |
def inside_polygon(x, y, coordinates):
"""
Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
    those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
-> use int64 when comparing slopes!
"""
contained = False
# the edge from the last to the first point is checked first
i = -1
y1 = coordinates[1][-1]
y_gt_y1 = y > y1
for y2 in coordinates[1]:
y_gt_y2 = y > y2
if y_gt_y1:
if not y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
# only crossings "right" of the point should be counted
x1GEx = x <= x1
x2GEx = x <= x2
# compare the slope of the line [p1-p2] and [p-p2]
# depending on the position of p2 this determines whether the polygon edge is right or left of the point
# to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
# ( dy/dx > a == dy > a * dx )
# int64 accuracy needed here!
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) <= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
else:
if y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
# only crossings "right" of the point should be counted
x1GEx = x <= x1
x2GEx = x <= x2
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) >= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
y1 = y2
y_gt_y1 = y_gt_y2
i += 1
return contained | Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
    those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
    -> use int64 when comparing slopes! | Below is the instruction that describes the task:
### Input:
Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
    those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
-> use int64 when comparing slopes!
### Response:
def inside_polygon(x, y, coordinates):
"""
Implementing the ray casting point in polygon test algorithm
cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
:param x:
:param y:
:param coordinates: a polygon represented by a list containing two lists (x and y coordinates):
[ [x1,x2,x3...], [y1,y2,y3...]]
    those lists are actually numpy arrays which are being read directly from a binary file
:return: true if the point (x,y) lies within the polygon
Some overflow considerations for the critical part of comparing the line segment slopes:
(y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
(y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
Instead of calculating with float I decided using just ints (by multiplying with 10^7). That gives us:
delta_y_max * delta_x_max = 180x10^7 * 360x10^7
delta_y_max * delta_x_max <= 65x10^17
So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
occur in practice (timezone polygons do not span the whole lng lat coordinate space),
32bit accuracy hence is not safe to use here!
Python 2.2 automatically uses the appropriate int data type preventing overflow
(cf. https://www.python.org/dev/peps/pep-0237/),
but here the data types are numpy internal static data types. The data is stored as int32
-> use int64 when comparing slopes!
"""
contained = False
# the edge from the last to the first point is checked first
i = -1
y1 = coordinates[1][-1]
y_gt_y1 = y > y1
for y2 in coordinates[1]:
y_gt_y2 = y > y2
if y_gt_y1:
if not y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
# only crossings "right" of the point should be counted
x1GEx = x <= x1
x2GEx = x <= x2
# compare the slope of the line [p1-p2] and [p-p2]
# depending on the position of p2 this determines whether the polygon edge is right or left of the point
# to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
# ( dy/dx > a == dy > a * dx )
# int64 accuracy needed here!
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) <= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
else:
if y_gt_y2:
x1 = coordinates[0][i]
x2 = coordinates[0][i + 1]
# only crossings "right" of the point should be counted
x1GEx = x <= x1
x2GEx = x <= x2
if (x1GEx and x2GEx) or ((x1GEx or x2GEx)
and (int64(y2) - int64(y)) * (int64(x2) - int64(x1)) >= (
int64(y2) - int64(y1)) * (int64(x2) - int64(x))):
contained = not contained
y1 = y2
y_gt_y1 = y_gt_y2
i += 1
return contained |
def get_server_type():
"""Checks server.ini for server type."""
server_location_file = os.path.expanduser(SERVER_LOCATION_FILE)
if not os.path.exists(server_location_file):
raise Exception(
"%s not found. Please run 'loom server set "
"<servertype>' first." % server_location_file)
config = ConfigParser.SafeConfigParser()
config.read(server_location_file)
server_type = config.get('server', 'type')
    return server_type | Checks server.ini for server type. | Below is the instruction that describes the task:
### Input:
Checks server.ini for server type.
### Response:
def get_server_type():
"""Checks server.ini for server type."""
server_location_file = os.path.expanduser(SERVER_LOCATION_FILE)
if not os.path.exists(server_location_file):
raise Exception(
"%s not found. Please run 'loom server set "
"<servertype>' first." % server_location_file)
config = ConfigParser.SafeConfigParser()
config.read(server_location_file)
server_type = config.get('server', 'type')
return server_type |
def key_paths(self):
"""List of key paths defined by the filter."""
if self._wow64_key_path:
return [self._key_path, self._wow64_key_path]
return [self._key_path] | List of key paths defined by the filter. | Below is the the instruction that describes the task:
### Input:
List of key paths defined by the filter.
### Response:
def key_paths(self):
"""List of key paths defined by the filter."""
if self._wow64_key_path:
return [self._key_path, self._wow64_key_path]
return [self._key_path] |
def auto_down(self, count=1, go_to_start_of_line_if_history_changes=False):
"""
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_next(count=count)
elif self.document.cursor_position_row < self.document.line_count - 1:
self.cursor_down(count=count)
elif not self.selection_state:
self.history_forward(count=count)
# Go to the start of the line?
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position() | If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.) | Below is the the instruction that describes the task:
### Input:
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
### Response:
def auto_down(self, count=1, go_to_start_of_line_if_history_changes=False):
"""
If we're not on the last line (of a multiline input) go a line down,
otherwise go forward in history. (If nothing is selected.)
"""
if self.complete_state:
self.complete_next(count=count)
elif self.document.cursor_position_row < self.document.line_count - 1:
self.cursor_down(count=count)
elif not self.selection_state:
self.history_forward(count=count)
# Go to the start of the line?
if go_to_start_of_line_if_history_changes:
self.cursor_position += self.document.get_start_of_line_position() |
def file_md5(f, size=8192):
"Calculates the MD5 of a file."
md5 = hashlib.md5()
while True:
data = f.read(size)
if not data:
break
md5.update(data)
return md5.hexdigest() | Calculates the MD5 of a file. | Below is the the instruction that describes the task:
### Input:
Calculates the MD5 of a file.
### Response:
def file_md5(f, size=8192):
"Calculates the MD5 of a file."
md5 = hashlib.md5()
while True:
data = f.read(size)
if not data:
break
md5.update(data)
return md5.hexdigest() |
def returned(self):
"""Does the extracted piece contain return statement"""
if self._returned is None:
node = _parse_text(self.extracted)
self._returned = usefunction._returns_last(node)
return self._returned | Does the extracted piece contain return statement | Below is the the instruction that describes the task:
### Input:
Does the extracted piece contain return statement
### Response:
def returned(self):
"""Does the extracted piece contain return statement"""
if self._returned is None:
node = _parse_text(self.extracted)
self._returned = usefunction._returns_last(node)
return self._returned |
def maximum_consecutive_dry_days(pr, thresh='1 mm/day', freq='YS'):
r"""Maximum number of consecutive dry days
Return the maximum number of consecutive days within the period where precipitation
is below a certain threshold.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [mm]
thresh : str
Threshold precipitation on which to base evaluation [mm]. Default : '1 mm/day'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The maximum number of consecutive dry days.
Notes
-----
Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
:math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
Then the maximum number of consecutive dry days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does.
"""
t = utils.convert_units_to(thresh, pr, 'hydro')
group = (pr < t).resample(time=freq)
return group.apply(rl.longest_run, dim='time') | r"""Maximum number of consecutive dry days
Return the maximum number of consecutive days within the period where precipitation
is below a certain threshold.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [mm]
thresh : str
Threshold precipitation on which to base evaluation [mm]. Default : '1 mm/day'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The maximum number of consecutive dry days.
Notes
-----
Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
:math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
Then the maximum number of consecutive dry days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does. | Below is the the instruction that describes the task:
### Input:
r"""Maximum number of consecutive dry days
Return the maximum number of consecutive days within the period where precipitation
is below a certain threshold.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [mm]
thresh : str
Threshold precipitation on which to base evaluation [mm]. Default : '1 mm/day'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The maximum number of consecutive dry days.
Notes
-----
Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
:math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
Then the maximum number of consecutive dry days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does.
### Response:
def maximum_consecutive_dry_days(pr, thresh='1 mm/day', freq='YS'):
r"""Maximum number of consecutive dry days
Return the maximum number of consecutive days within the period where precipitation
is below a certain threshold.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [mm]
thresh : str
Threshold precipitation on which to base evaluation [mm]. Default : '1 mm/day'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The maximum number of consecutive dry days.
Notes
-----
Let :math:`\mathbf{p}=p_0, p_1, \ldots, p_n` be a daily precipitation series and :math:`thresh` the threshold
under which a day is considered dry. Then let :math:`\mathbf{s}` be the sorted vector of indices :math:`i` where
:math:`[p_i < thresh] \neq [p_{i+1} < thresh]`, that is, the days when the temperature crosses the threshold.
Then the maximum number of consecutive dry days is given by
.. math::
\max(\mathbf{d}) \quad \mathrm{where} \quad d_j = (s_j - s_{j-1}) [p_{s_j} > thresh]
where :math:`[P]` is 1 if :math:`P` is true, and 0 if false. Note that this formula does not handle sequences at
the start and end of the series, but the numerical algorithm does.
"""
t = utils.convert_units_to(thresh, pr, 'hydro')
group = (pr < t).resample(time=freq)
return group.apply(rl.longest_run, dim='time') |
def check_complicance(self):
"""Check compliance with Media RSS Specification, Version 1.5.1.
see http://www.rssboard.org/media-rss
Raises AttributeError on error.
"""
# check Media RSS requirement: one of the following elements is
# required: media_group | media_content | media_player | media_peerLink
# | media_location. We do the check only if any media_... element is
# set to allow non media feeds
if(any([ma for ma in vars(self)
if ma.startswith('media_') and getattr(self, ma)])
and not self.media_group
and not self.media_content
and not self.media_player
and not self.media_peerLink
and not self.media_location
):
raise AttributeError(
"Using media elements requires the specification of at least "
"one of the following elements: 'media_group', "
"'media_content', 'media_player', 'media_peerLink' or "
"'media_location'.")
# check Media RSS requirement: if media:player is missing all
# media_content elements need to have url attributes.
if not self.media_player:
if self.media_content:
# check if all media_content elements have a URL set
if isinstance(self.media_content, list):
if not all([False for mc in self.media_content if
'url' not in mc.element_attrs]):
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
else:
if not self.media_content.element_attrs['url']:
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
pass
elif self.media_group:
# check media groups without player if its media_content
# elements have a URL set
raise NotImplementedError(
"MediaRSSItem: media_group check not implemented yet.") | Check compliance with Media RSS Specification, Version 1.5.1.
see http://www.rssboard.org/media-rss
Raises AttributeError on error. | Below is the the instruction that describes the task:
### Input:
Check compliance with Media RSS Specification, Version 1.5.1.
see http://www.rssboard.org/media-rss
Raises AttributeError on error.
### Response:
def check_complicance(self):
"""Check compliance with Media RSS Specification, Version 1.5.1.
see http://www.rssboard.org/media-rss
Raises AttributeError on error.
"""
# check Media RSS requirement: one of the following elements is
# required: media_group | media_content | media_player | media_peerLink
# | media_location. We do the check only if any media_... element is
# set to allow non media feeds
if(any([ma for ma in vars(self)
if ma.startswith('media_') and getattr(self, ma)])
and not self.media_group
and not self.media_content
and not self.media_player
and not self.media_peerLink
and not self.media_location
):
raise AttributeError(
"Using media elements requires the specification of at least "
"one of the following elements: 'media_group', "
"'media_content', 'media_player', 'media_peerLink' or "
"'media_location'.")
# check Media RSS requirement: if media:player is missing all
# media_content elements need to have url attributes.
if not self.media_player:
if self.media_content:
# check if all media_content elements have a URL set
if isinstance(self.media_content, list):
if not all([False for mc in self.media_content if
'url' not in mc.element_attrs]):
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
else:
if not self.media_content.element_attrs['url']:
raise AttributeError(
"MediaRSSItems require a media_player attribute "
"if a media_content has no url set.")
pass
elif self.media_group:
# check media groups without player if its media_content
# elements have a URL set
raise NotImplementedError(
"MediaRSSItem: media_group check not implemented yet.") |
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
sys.exit(1) | Internal handler for the bash completion support. | Below is the the instruction that describes the task:
### Input:
Internal handler for the bash completion support.
### Response:
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
sys.exit(1) |
def update_intent(self,
intent,
language_code,
update_mask=None,
intent_view=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> # TODO: Initialize ``language_code``:
>>> language_code = ''
>>>
>>> response = client.update_intent(intent, language_code)
Args:
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to update.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_intent' not in self._inner_api_calls:
self._inner_api_calls[
'update_intent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_intent,
default_retry=self._method_configs['UpdateIntent'].retry,
default_timeout=self._method_configs['UpdateIntent']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.UpdateIntentRequest(
intent=intent,
language_code=language_code,
update_mask=update_mask,
intent_view=intent_view,
)
return self._inner_api_calls['update_intent'](
request, retry=retry, timeout=timeout, metadata=metadata) | Updates the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> # TODO: Initialize ``language_code``:
>>> language_code = ''
>>>
>>> response = client.update_intent(intent, language_code)
Args:
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to update.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Updates the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> # TODO: Initialize ``language_code``:
>>> language_code = ''
>>>
>>> response = client.update_intent(intent, language_code)
Args:
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to update.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def update_intent(self,
intent,
language_code,
update_mask=None,
intent_view=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates the specified intent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.IntentsClient()
>>>
>>> # TODO: Initialize ``intent``:
>>> intent = {}
>>>
>>> # TODO: Initialize ``language_code``:
>>> language_code = ''
>>>
>>> response = client.update_intent(intent, language_code)
Args:
intent (Union[dict, ~google.cloud.dialogflow_v2.types.Intent]): Required. The intent to update.
Format: ``projects/<Project ID>/agent/intents/<Intent ID>``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.Intent`
language_code (str): Optional. The language of training phrases, parameters and rich messages
defined in ``intent``. If not specified, the agent's default language is
used. [More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
update_mask (Union[dict, ~google.cloud.dialogflow_v2.types.FieldMask]): Optional. The mask to control which fields get updated.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.FieldMask`
intent_view (~google.cloud.dialogflow_v2.types.IntentView): Optional. The resource view to apply to the returned intent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Intent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_intent' not in self._inner_api_calls:
self._inner_api_calls[
'update_intent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_intent,
default_retry=self._method_configs['UpdateIntent'].retry,
default_timeout=self._method_configs['UpdateIntent']
.timeout,
client_info=self._client_info,
)
request = intent_pb2.UpdateIntentRequest(
intent=intent,
language_code=language_code,
update_mask=update_mask,
intent_view=intent_view,
)
return self._inner_api_calls['update_intent'](
request, retry=retry, timeout=timeout, metadata=metadata) |
def complete_vhwa_command(self, command):
"""Signals that the Video HW Acceleration command has completed.
in command of type str
Pointer to VBOXVHWACMD containing the completed command.
"""
if not isinstance(command, basestring):
raise TypeError("command can only be an instance of type basestring")
self._call("completeVHWACommand",
in_p=[command]) | Signals that the Video HW Acceleration command has completed.
in command of type str
Pointer to VBOXVHWACMD containing the completed command. | Below is the the instruction that describes the task:
### Input:
Signals that the Video HW Acceleration command has completed.
in command of type str
Pointer to VBOXVHWACMD containing the completed command.
### Response:
def complete_vhwa_command(self, command):
"""Signals that the Video HW Acceleration command has completed.
in command of type str
Pointer to VBOXVHWACMD containing the completed command.
"""
if not isinstance(command, basestring):
raise TypeError("command can only be an instance of type basestring")
self._call("completeVHWACommand",
in_p=[command]) |
def registration_settings(request):
'''Expose selected settings to templates'''
context = {}
for setting in (
'WAFER_SSO',
'WAFER_HIDE_LOGIN',
'WAFER_REGISTRATION_OPEN',
'WAFER_REGISTRATION_MODE',
'WAFER_TALKS_OPEN',
'WAFER_VIDEO_LICENSE',
):
context[setting] = getattr(settings, setting, None)
return context | Expose selected settings to templates | Below is the the instruction that describes the task:
### Input:
Expose selected settings to templates
### Response:
def registration_settings(request):
'''Expose selected settings to templates'''
context = {}
for setting in (
'WAFER_SSO',
'WAFER_HIDE_LOGIN',
'WAFER_REGISTRATION_OPEN',
'WAFER_REGISTRATION_MODE',
'WAFER_TALKS_OPEN',
'WAFER_VIDEO_LICENSE',
):
context[setting] = getattr(settings, setting, None)
return context |
def wait_for_current_tasks(self):
"""Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
"""
logger.info("Waiting for all remaining tasks to complete")
for task_id in self.tasks:
# .exception() is a less exception throwing way of
# waiting for completion than .result()
fut = self.tasks[task_id]['app_fu']
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
logger.info("All remaining tasks completed") | Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?) | Below is the the instruction that describes the task:
### Input:
Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
### Response:
def wait_for_current_tasks(self):
"""Waits for all tasks in the task list to be completed, by waiting for their
AppFuture to be completed. This method will not necessarily wait for any tasks
added after cleanup has started (such as data stageout?)
"""
logger.info("Waiting for all remaining tasks to complete")
for task_id in self.tasks:
# .exception() is a less exception throwing way of
# waiting for completion than .result()
fut = self.tasks[task_id]['app_fu']
if not fut.done():
logger.debug("Waiting for task {} to complete".format(task_id))
fut.exception()
logger.info("All remaining tasks completed") |
def validate_answer(self, value):
"""
Check that the answer is JSON-serializable and not too long.
"""
# Check that the answer is JSON-serializable
try:
serialized = json.dumps(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Answer value must be JSON-serializable")
# Check the length of the serialized representation
if len(serialized) > Submission.MAXSIZE:
raise serializers.ValidationError("Maximum answer size exceeded.")
return value | Check that the answer is JSON-serializable and not too long. | Below is the the instruction that describes the task:
### Input:
Check that the answer is JSON-serializable and not too long.
### Response:
def validate_answer(self, value):
"""
Check that the answer is JSON-serializable and not too long.
"""
# Check that the answer is JSON-serializable
try:
serialized = json.dumps(value)
except (ValueError, TypeError):
raise serializers.ValidationError("Answer value must be JSON-serializable")
# Check the length of the serialized representation
if len(serialized) > Submission.MAXSIZE:
raise serializers.ValidationError("Maximum answer size exceeded.")
return value |
def succ_item(self, key, default=_sentinel):
"""Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist. optimized for pypy.
"""
# removed graingets version, because it was little slower on CPython and much slower on pypy
# this version runs about 4x faster with pypy than the Cython version
# Note: Code sharing of succ_item() and ceiling_item() is possible, but has always a speed penalty.
node = self._root
succ_node = None
while node is not None:
cmp = self._cmp(self._cmp_data, key, node.key)
if cmp == 0:
break
elif cmp < 0:
if (succ_node is None) or self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
succ_node = node
node = node.left
else:
node = node.right
if node is None: # stay at dead end
if default is _sentinel:
raise KeyError(str(key))
return default
# found node of key
if node.right is not None:
# find smallest node of right subtree
node = node.right
while node.left is not None:
node = node.left
if succ_node is None:
succ_node = node
elif self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
succ_node = node
elif succ_node is None: # given key is biggest in tree
if default is _sentinel:
raise KeyError(str(key))
return default
return succ_node.key, succ_node.value | Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist. optimized for pypy. | Below is the the instruction that describes the task:
### Input:
Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist. optimized for pypy.
### Response:
def succ_item(self, key, default=_sentinel):
"""Get successor (k,v) pair of key, raises KeyError if key is max key
or key does not exist. optimized for pypy.
"""
# removed graingets version, because it was little slower on CPython and much slower on pypy
# this version runs about 4x faster with pypy than the Cython version
# Note: Code sharing of succ_item() and ceiling_item() is possible, but has always a speed penalty.
node = self._root
succ_node = None
while node is not None:
cmp = self._cmp(self._cmp_data, key, node.key)
if cmp == 0:
break
elif cmp < 0:
if (succ_node is None) or self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
succ_node = node
node = node.left
else:
node = node.right
if node is None: # stay at dead end
if default is _sentinel:
raise KeyError(str(key))
return default
# found node of key
if node.right is not None:
# find smallest node of right subtree
node = node.right
while node.left is not None:
node = node.left
if succ_node is None:
succ_node = node
elif self._cmp(self._cmp_data, node.key, succ_node.key) < 0:
succ_node = node
elif succ_node is None: # given key is biggest in tree
if default is _sentinel:
raise KeyError(str(key))
return default
return succ_node.key, succ_node.value |
def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
"""Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for.
"""
if start_date not in sessions:
raise KeyError("Start date %s is not found in calendar." %
(start_date.strftime("%Y-%m-%d"),))
if end_date not in sessions:
raise KeyError("End date %s is not found in calendar." %
(end_date.strftime("%Y-%m-%d"),))
if end_date < start_date:
raise ValueError("End date %s cannot precede start date %s." %
(end_date.strftime("%Y-%m-%d"),
start_date.strftime("%Y-%m-%d")))
if chunksize is None:
return [(start_date, end_date)]
start_ix, end_ix = sessions.slice_locs(start_date, end_date)
return (
(r[0], r[-1]) for r in partition_all(
chunksize, sessions[start_ix:end_ix]
)
) | Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for. | Below is the the instruction that describes the task:
### Input:
Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for.
### Response:
def compute_date_range_chunks(sessions, start_date, end_date, chunksize):
"""Compute the start and end dates to run a pipeline for.
Parameters
----------
sessions : DatetimeIndex
The available dates.
start_date : pd.Timestamp
The first date in the pipeline.
end_date : pd.Timestamp
The last date in the pipeline.
chunksize : int or None
The size of the chunks to run. Setting this to None returns one chunk.
Returns
-------
ranges : iterable[(np.datetime64, np.datetime64)]
A sequence of start and end dates to run the pipeline for.
"""
if start_date not in sessions:
raise KeyError("Start date %s is not found in calendar." %
(start_date.strftime("%Y-%m-%d"),))
if end_date not in sessions:
raise KeyError("End date %s is not found in calendar." %
(end_date.strftime("%Y-%m-%d"),))
if end_date < start_date:
raise ValueError("End date %s cannot precede start date %s." %
(end_date.strftime("%Y-%m-%d"),
start_date.strftime("%Y-%m-%d")))
if chunksize is None:
return [(start_date, end_date)]
start_ix, end_ix = sessions.slice_locs(start_date, end_date)
return (
(r[0], r[-1]) for r in partition_all(
chunksize, sessions[start_ix:end_ix]
)
) |
def check_usage (docstring, argv=None, usageifnoargs=False):
"""Check if the program has been run with a --help argument; if so,
print usage information and exit.
:arg str docstring: the program help text
:arg argv: the program arguments; taken as :data:`sys.argv` if
given as :const:`None` (the default). (Note that this implies
``argv[0]`` should be the program name and not the first option.)
:arg bool usageifnoargs: if :const:`True`, usage information will be
printed and the program will exit if no command-line arguments are
passed. If "long", print long usasge. Default is :const:`False`.
This function is intended for small programs launched from the command
line. The intention is for the program help information to be written in
its docstring, and then for the preamble to contain something like::
\"\"\"myprogram - this is all the usage help you get\"\"\"
import sys
... # other setup
check_usage (__doc__)
... # go on with business
If it is determined that usage information should be shown,
:func:`show_usage` is called and the program exits.
See also :func:`wrong_usage`.
"""
if argv is None:
from sys import argv
if len (argv) == 1 and usageifnoargs:
show_usage (docstring, (usageifnoargs != 'long'), None, 0)
if len (argv) == 2 and argv[1] in ('-h', '--help'):
show_usage (docstring, False, None, 0) | Check if the program has been run with a --help argument; if so,
print usage information and exit.
:arg str docstring: the program help text
:arg argv: the program arguments; taken as :data:`sys.argv` if
given as :const:`None` (the default). (Note that this implies
``argv[0]`` should be the program name and not the first option.)
:arg bool usageifnoargs: if :const:`True`, usage information will be
printed and the program will exit if no command-line arguments are
passed. If "long", print long usasge. Default is :const:`False`.
This function is intended for small programs launched from the command
line. The intention is for the program help information to be written in
its docstring, and then for the preamble to contain something like::
\"\"\"myprogram - this is all the usage help you get\"\"\"
import sys
... # other setup
check_usage (__doc__)
... # go on with business
If it is determined that usage information should be shown,
:func:`show_usage` is called and the program exits.
See also :func:`wrong_usage`. | Below is the the instruction that describes the task:
### Input:
Check if the program has been run with a --help argument; if so,
print usage information and exit.
:arg str docstring: the program help text
:arg argv: the program arguments; taken as :data:`sys.argv` if
given as :const:`None` (the default). (Note that this implies
``argv[0]`` should be the program name and not the first option.)
:arg bool usageifnoargs: if :const:`True`, usage information will be
printed and the program will exit if no command-line arguments are
passed. If "long", print long usasge. Default is :const:`False`.
This function is intended for small programs launched from the command
line. The intention is for the program help information to be written in
its docstring, and then for the preamble to contain something like::
\"\"\"myprogram - this is all the usage help you get\"\"\"
import sys
... # other setup
check_usage (__doc__)
... # go on with business
If it is determined that usage information should be shown,
:func:`show_usage` is called and the program exits.
See also :func:`wrong_usage`.
### Response:
def check_usage (docstring, argv=None, usageifnoargs=False):
"""Check if the program has been run with a --help argument; if so,
print usage information and exit.
:arg str docstring: the program help text
:arg argv: the program arguments; taken as :data:`sys.argv` if
given as :const:`None` (the default). (Note that this implies
``argv[0]`` should be the program name and not the first option.)
:arg bool usageifnoargs: if :const:`True`, usage information will be
printed and the program will exit if no command-line arguments are
passed. If "long", print long usasge. Default is :const:`False`.
This function is intended for small programs launched from the command
line. The intention is for the program help information to be written in
its docstring, and then for the preamble to contain something like::
\"\"\"myprogram - this is all the usage help you get\"\"\"
import sys
... # other setup
check_usage (__doc__)
... # go on with business
If it is determined that usage information should be shown,
:func:`show_usage` is called and the program exits.
See also :func:`wrong_usage`.
"""
if argv is None:
from sys import argv
if len (argv) == 1 and usageifnoargs:
show_usage (docstring, (usageifnoargs != 'long'), None, 0)
if len (argv) == 2 and argv[1] in ('-h', '--help'):
show_usage (docstring, False, None, 0) |
def CopyToDateTimeStringISO8601(self):
"""Copies the date time value to an ISO 8601 date and time string.
Returns:
str: date and time value formatted as an ISO 8601 date and time string or
None if the timestamp cannot be copied to a date and time string.
"""
date_time_string = self.CopyToDateTimeString()
if date_time_string:
date_time_string = date_time_string.replace(' ', 'T')
date_time_string = '{0:s}Z'.format(date_time_string)
return date_time_string | Copies the date time value to an ISO 8601 date and time string.
Returns:
str: date and time value formatted as an ISO 8601 date and time string or
None if the timestamp cannot be copied to a date and time string. | Below is the the instruction that describes the task:
### Input:
Copies the date time value to an ISO 8601 date and time string.
Returns:
str: date and time value formatted as an ISO 8601 date and time string or
None if the timestamp cannot be copied to a date and time string.
### Response:
def CopyToDateTimeStringISO8601(self):
"""Copies the date time value to an ISO 8601 date and time string.
Returns:
str: date and time value formatted as an ISO 8601 date and time string or
None if the timestamp cannot be copied to a date and time string.
"""
date_time_string = self.CopyToDateTimeString()
if date_time_string:
date_time_string = date_time_string.replace(' ', 'T')
date_time_string = '{0:s}Z'.format(date_time_string)
return date_time_string |
def add_entry_listener(self, key=None, predicate=None, added_func=None, removed_func=None, updated_func=None,
evicted_func=None, clear_all_func=None):
"""
Adds a continuous entry listener for this map. Listener will get notified for map events filtered with given
parameters.
:param key: (object), key for filtering the events (optional).
:param predicate: (Predicate), predicate for filtering the events (optional).
:param added_func: Function to be called when an entry is added to map (optional).
:param removed_func: Function to be called when an entry is removed from map (optional).
:param updated_func: Function to be called when an entry is updated (optional).
:param evicted_func: Function to be called when an entry is evicted from map (optional).
:param clear_all_func: Function to be called when entries are cleared from map (optional).
:return: (str), a registration id which is used as a key to remove the listener.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
"""
if key and predicate:
key_data = self._to_data(key)
predicate_data = self._to_data(predicate)
request = replicated_map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,
predicate_data,
False)
elif key and not predicate:
key_data = self._to_data(key)
request = replicated_map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, False)
elif not key and predicate:
predicate = self._to_data(predicate)
request = replicated_map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate,
False)
else:
request = replicated_map_add_entry_listener_codec.encode_request(self.name, False)
def handle_event_entry(**_kwargs):
event = EntryEvent(self._to_object, **_kwargs)
if event.event_type == EntryEventType.added and added_func:
added_func(event)
elif event.event_type == EntryEventType.removed and removed_func:
removed_func(event)
elif event.event_type == EntryEventType.updated and updated_func:
updated_func(event)
elif event.event_type == EntryEventType.evicted and evicted_func:
evicted_func(event)
elif event.event_type == EntryEventType.clear_all and clear_all_func:
clear_all_func(event)
return self._start_listening(request,
lambda m: replicated_map_add_entry_listener_codec.handle(m,
handle_event_entry),
lambda r: replicated_map_add_entry_listener_codec.decode_response(r)[
'response']) | Adds a continuous entry listener for this map. Listener will get notified for map events filtered with given
parameters.
:param key: (object), key for filtering the events (optional).
:param predicate: (Predicate), predicate for filtering the events (optional).
:param added_func: Function to be called when an entry is added to map (optional).
:param removed_func: Function to be called when an entry is removed from map (optional).
:param updated_func: Function to be called when an entry is updated (optional).
:param evicted_func: Function to be called when an entry is evicted from map (optional).
:param clear_all_func: Function to be called when entries are cleared from map (optional).
:return: (str), a registration id which is used as a key to remove the listener.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates. | Below is the the instruction that describes the task:
### Input:
Adds a continuous entry listener for this map. Listener will get notified for map events filtered with given
parameters.
:param key: (object), key for filtering the events (optional).
:param predicate: (Predicate), predicate for filtering the events (optional).
:param added_func: Function to be called when an entry is added to map (optional).
:param removed_func: Function to be called when an entry is removed from map (optional).
:param updated_func: Function to be called when an entry is updated (optional).
:param evicted_func: Function to be called when an entry is evicted from map (optional).
:param clear_all_func: Function to be called when entries are cleared from map (optional).
:return: (str), a registration id which is used as a key to remove the listener.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
### Response:
def add_entry_listener(self, key=None, predicate=None, added_func=None, removed_func=None, updated_func=None,
evicted_func=None, clear_all_func=None):
"""
Adds a continuous entry listener for this map. Listener will get notified for map events filtered with given
parameters.
:param key: (object), key for filtering the events (optional).
:param predicate: (Predicate), predicate for filtering the events (optional).
:param added_func: Function to be called when an entry is added to map (optional).
:param removed_func: Function to be called when an entry is removed from map (optional).
:param updated_func: Function to be called when an entry is updated (optional).
:param evicted_func: Function to be called when an entry is evicted from map (optional).
:param clear_all_func: Function to be called when entries are cleared from map (optional).
:return: (str), a registration id which is used as a key to remove the listener.
.. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for more info about predicates.
"""
if key and predicate:
key_data = self._to_data(key)
predicate_data = self._to_data(predicate)
request = replicated_map_add_entry_listener_to_key_with_predicate_codec.encode_request(self.name, key_data,
predicate_data,
False)
elif key and not predicate:
key_data = self._to_data(key)
request = replicated_map_add_entry_listener_to_key_codec.encode_request(self.name, key_data, False)
elif not key and predicate:
predicate = self._to_data(predicate)
request = replicated_map_add_entry_listener_with_predicate_codec.encode_request(self.name, predicate,
False)
else:
request = replicated_map_add_entry_listener_codec.encode_request(self.name, False)
def handle_event_entry(**_kwargs):
event = EntryEvent(self._to_object, **_kwargs)
if event.event_type == EntryEventType.added and added_func:
added_func(event)
elif event.event_type == EntryEventType.removed and removed_func:
removed_func(event)
elif event.event_type == EntryEventType.updated and updated_func:
updated_func(event)
elif event.event_type == EntryEventType.evicted and evicted_func:
evicted_func(event)
elif event.event_type == EntryEventType.clear_all and clear_all_func:
clear_all_func(event)
return self._start_listening(request,
lambda m: replicated_map_add_entry_listener_codec.handle(m,
handle_event_entry),
lambda r: replicated_map_add_entry_listener_codec.decode_response(r)[
'response']) |
def shutdown(self):
"""
Stop broker instance.
Closes all connected session, stop listening on network socket and free resources.
"""
try:
self._sessions = dict()
self._subscriptions = dict()
self._retained_messages = dict()
self.transitions.shutdown()
except (MachineError, ValueError) as exc:
# Backwards compat: MachineError is raised by transitions < 0.5.0.
self.logger.debug("Invalid method call at this moment: %s" % exc)
raise BrokerException("Broker instance can't be stopped: %s" % exc)
# Fire broker_shutdown event to plugins
yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)
# Stop broadcast loop
if self._broadcast_task:
self._broadcast_task.cancel()
if self._broadcast_queue.qsize() > 0:
self.logger.warning("%d messages not broadcasted" % self._broadcast_queue.qsize())
for listener_name in self._servers:
server = self._servers[listener_name]
yield from server.close_instance()
self.logger.debug("Broker closing")
self.logger.info("Broker closed")
yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
self.transitions.stopping_success() | Stop broker instance.
Closes all connected session, stop listening on network socket and free resources. | Below is the the instruction that describes the task:
### Input:
Stop broker instance.
Closes all connected session, stop listening on network socket and free resources.
### Response:
def shutdown(self):
"""
Stop broker instance.
Closes all connected session, stop listening on network socket and free resources.
"""
try:
self._sessions = dict()
self._subscriptions = dict()
self._retained_messages = dict()
self.transitions.shutdown()
except (MachineError, ValueError) as exc:
# Backwards compat: MachineError is raised by transitions < 0.5.0.
self.logger.debug("Invalid method call at this moment: %s" % exc)
raise BrokerException("Broker instance can't be stopped: %s" % exc)
# Fire broker_shutdown event to plugins
yield from self.plugins_manager.fire_event(EVENT_BROKER_PRE_SHUTDOWN)
# Stop broadcast loop
if self._broadcast_task:
self._broadcast_task.cancel()
if self._broadcast_queue.qsize() > 0:
self.logger.warning("%d messages not broadcasted" % self._broadcast_queue.qsize())
for listener_name in self._servers:
server = self._servers[listener_name]
yield from server.close_instance()
self.logger.debug("Broker closing")
self.logger.info("Broker closed")
yield from self.plugins_manager.fire_event(EVENT_BROKER_POST_SHUTDOWN)
self.transitions.stopping_success() |
def public_copy(self):
"""Yield the corresponding public node for this node."""
d = dict(chain_code=self._chain_code, depth=self._depth,
parent_fingerprint=self._parent_fingerprint,
child_index=self._child_index, public_pair=self.public_pair())
return self.__class__(**d) | Yield the corresponding public node for this node. | Below is the the instruction that describes the task:
### Input:
Yield the corresponding public node for this node.
### Response:
def public_copy(self):
"""Yield the corresponding public node for this node."""
d = dict(chain_code=self._chain_code, depth=self._depth,
parent_fingerprint=self._parent_fingerprint,
child_index=self._child_index, public_pair=self.public_pair())
return self.__class__(**d) |
def divide(self, other, out=None):
"""Return ``out = self / other``.
If ``out`` is provided, the result is written to it.
See Also
--------
LinearSpace.divide
"""
return self.space.divide(self, other, out=out) | Return ``out = self / other``.
If ``out`` is provided, the result is written to it.
See Also
--------
LinearSpace.divide | Below is the the instruction that describes the task:
### Input:
Return ``out = self / other``.
If ``out`` is provided, the result is written to it.
See Also
--------
LinearSpace.divide
### Response:
def divide(self, other, out=None):
"""Return ``out = self / other``.
If ``out`` is provided, the result is written to it.
See Also
--------
LinearSpace.divide
"""
return self.space.divide(self, other, out=out) |
def dec2hms(x):
"""
Convert decimal degrees into a sexagessimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
"""
if not np.isfinite(x):
return 'XX:XX:XX.XX'
# wrap negative RA's
if x < 0:
x += 360
x /= 15.0
h = int(x)
x = (x - h) * 60
m = int(x)
s = (x - m) * 60
return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s) | Convert decimal degrees into a sexagessimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite. | Below is the the instruction that describes the task:
### Input:
Convert decimal degrees into a sexagessimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
### Response:
def dec2hms(x):
"""
Convert decimal degrees into a sexagessimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
"""
if not np.isfinite(x):
return 'XX:XX:XX.XX'
# wrap negative RA's
if x < 0:
x += 360
x /= 15.0
h = int(x)
x = (x - h) * 60
m = int(x)
s = (x - m) * 60
return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s) |
def compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d):
"""All angles in radians."""
zeta_a = satz_a
zeta_b = satz_b
phi_a = compute_phi(zeta_a)
phi_b = compute_phi(zeta_b)
theta_a = compute_theta(zeta_a, phi_a)
theta_b = compute_theta(zeta_b, phi_b)
phi = (phi_a + phi_b) / 2
zeta = compute_zeta(phi)
theta = compute_theta(zeta, phi)
c_expansion = 4 * (((theta_a + theta_b) / 2 - theta) / (theta_a - theta_b))
sin_beta_2 = scan_width / (2 * H)
d = ((R + H) / R * np.cos(phi) - np.cos(zeta)) * sin_beta_2
e = np.cos(zeta) - np.sqrt(np.cos(zeta) ** 2 - d ** 2)
c_alignment = 4 * e * np.sin(zeta) / (theta_a - theta_b)
return c_expansion, c_alignment | All angles in radians. | Below is the the instruction that describes the task:
### Input:
All angles in radians.
### Response:
def compute_expansion_alignment(satz_a, satz_b, satz_c, satz_d):
"""All angles in radians."""
zeta_a = satz_a
zeta_b = satz_b
phi_a = compute_phi(zeta_a)
phi_b = compute_phi(zeta_b)
theta_a = compute_theta(zeta_a, phi_a)
theta_b = compute_theta(zeta_b, phi_b)
phi = (phi_a + phi_b) / 2
zeta = compute_zeta(phi)
theta = compute_theta(zeta, phi)
c_expansion = 4 * (((theta_a + theta_b) / 2 - theta) / (theta_a - theta_b))
sin_beta_2 = scan_width / (2 * H)
d = ((R + H) / R * np.cos(phi) - np.cos(zeta)) * sin_beta_2
e = np.cos(zeta) - np.sqrt(np.cos(zeta) ** 2 - d ** 2)
c_alignment = 4 * e * np.sin(zeta) / (theta_a - theta_b)
return c_expansion, c_alignment |
def get_assets_by_record_type(self, asset_record_type=None):
"""Gets an ``AssetList`` containing the given asset record ``Type``.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return AssetList(self._provider_session.get_assets_by_record_type(asset_record_type),
self._config_map) | Gets an ``AssetList`` containing the given asset record ``Type``.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets an ``AssetList`` containing the given asset record ``Type``.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assets_by_record_type(self, asset_record_type=None):
"""Gets an ``AssetList`` containing the given asset record ``Type``.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: asset_record_type (osid.type.Type): an asset record type
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``asset_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return AssetList(self._provider_session.get_assets_by_record_type(asset_record_type),
self._config_map) |
def _init_stub(self, stub_init, **stub_kwargs):
"""Initializes all other stubs for consistency's sake"""
getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs) | Initializes all other stubs for consistency's sake | Below is the the instruction that describes the task:
### Input:
Initializes all other stubs for consistency's sake
### Response:
def _init_stub(self, stub_init, **stub_kwargs):
"""Initializes all other stubs for consistency's sake"""
getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs) |
def _indexed_leafs(parent):
""" Return a leaf nodeid -> node dictionary with
"parent" and "leaf" (average child "leaf" value) added to all nodes.
"""
if not parent.get('children'):
return {parent['nodeid']: parent}
indexed = {}
for child in parent['children']:
child['parent'] = parent
if 'leaf' in child:
indexed[child['nodeid']] = child
else:
indexed.update(_indexed_leafs(child))
parent['leaf'] = _parent_value(parent['children'])
return indexed | Return a leaf nodeid -> node dictionary with
"parent" and "leaf" (average child "leaf" value) added to all nodes. | Below is the the instruction that describes the task:
### Input:
Return a leaf nodeid -> node dictionary with
"parent" and "leaf" (average child "leaf" value) added to all nodes.
### Response:
def _indexed_leafs(parent):
""" Return a leaf nodeid -> node dictionary with
"parent" and "leaf" (average child "leaf" value) added to all nodes.
"""
if not parent.get('children'):
return {parent['nodeid']: parent}
indexed = {}
for child in parent['children']:
child['parent'] = parent
if 'leaf' in child:
indexed[child['nodeid']] = child
else:
indexed.update(_indexed_leafs(child))
parent['leaf'] = _parent_value(parent['children'])
return indexed |
def _psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
"""
Z - MxQ
mu - NxQ
S - NxQ
"""
variance, lengthscale = kern.variance, kern.lengthscale
N,M,Q = self.get_dimensions(Z, variational_posterior)
self._initGPUCache(N,M,Q)
self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance)
psi1_gpu = self.gpuCache['psi1_gpu']
psi2_gpu = self.gpuCache['psi2_gpu']
psi2n_gpu = self.gpuCache['psi2n_gpu']
l_gpu = self.gpuCache['l_gpu']
Z_gpu = self.gpuCache['Z_gpu']
mu_gpu = self.gpuCache['mu_gpu']
S_gpu = self.gpuCache['S_gpu']
log_denom1_gpu = self.gpuCache['log_denom1_gpu']
log_denom2_gpu = self.gpuCache['log_denom2_gpu']
psi0 = np.empty((N,))
psi0[:] = variance
self.g_psi1computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi1_gpu.gpudata, log_denom1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
self.g_psi2computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi2_gpu.gpudata, psi2n_gpu.gpudata, log_denom2_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
# t = self.g_psi1computations(psi1_gpu, log_denom1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi1computations '+str(t)
# t = self.g_psi2computations(psi2_gpu, psi2n_gpu, log_denom2_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi2computations '+str(t)
if self.GPU_direct:
return psi0, psi1_gpu, psi2_gpu
else:
if return_psi2_n:
return psi0, psi1_gpu.get(), psi2n_gpu.get()
else:
return psi0, psi1_gpu.get(), psi2_gpu.get() | Z - MxQ
mu - NxQ
S - NxQ | Below is the the instruction that describes the task:
### Input:
Z - MxQ
mu - NxQ
S - NxQ
### Response:
def _psicomputations(self, kern, Z, variational_posterior, return_psi2_n=False):
"""
Z - MxQ
mu - NxQ
S - NxQ
"""
variance, lengthscale = kern.variance, kern.lengthscale
N,M,Q = self.get_dimensions(Z, variational_posterior)
self._initGPUCache(N,M,Q)
self.sync_params(lengthscale, Z, variational_posterior.mean, variational_posterior.variance)
psi1_gpu = self.gpuCache['psi1_gpu']
psi2_gpu = self.gpuCache['psi2_gpu']
psi2n_gpu = self.gpuCache['psi2n_gpu']
l_gpu = self.gpuCache['l_gpu']
Z_gpu = self.gpuCache['Z_gpu']
mu_gpu = self.gpuCache['mu_gpu']
S_gpu = self.gpuCache['S_gpu']
log_denom1_gpu = self.gpuCache['log_denom1_gpu']
log_denom2_gpu = self.gpuCache['log_denom2_gpu']
psi0 = np.empty((N,))
psi0[:] = variance
self.g_psi1computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi1_gpu.gpudata, log_denom1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
self.g_psi2computations.prepared_call((self.blocknum,1),(self.threadnum,1,1),psi2_gpu.gpudata, psi2n_gpu.gpudata, log_denom2_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata, np.int32(N), np.int32(M), np.int32(Q))
# t = self.g_psi1computations(psi1_gpu, log_denom1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi1computations '+str(t)
# t = self.g_psi2computations(psi2_gpu, psi2n_gpu, log_denom2_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
# print 'g_psi2computations '+str(t)
if self.GPU_direct:
return psi0, psi1_gpu, psi2_gpu
else:
if return_psi2_n:
return psi0, psi1_gpu.get(), psi2n_gpu.get()
else:
return psi0, psi1_gpu.get(), psi2_gpu.get() |
def export(self, query, **params):
"""Runs a search and immediately starts streaming preview events.
This method returns a streaming handle to this job's events as an XML
document from the server. To parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.export("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
assert rr.is_preview == False
Running an export search is more efficient as it streams the results
directly to you, rather than having to write them out to disk and make
them available later. As soon as results are ready, you will receive
them.
The ``export`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`preview`), plus at most two
more if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises `ValueError`: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional). For a list of valid
parameters, see `GET search/jobs/export
<http://docs/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_
in the REST API documentation.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server.
"""
if "exec_mode" in params:
raise TypeError("Cannot specify an exec_mode to export.")
params['segmentation'] = params.get('segmentation', 'none')
return self.post(path_segment="export",
search=query,
**params).body | Runs a search and immediately starts streaming preview events.
This method returns a streaming handle to this job's events as an XML
document from the server. To parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.export("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
assert rr.is_preview == False
Running an export search is more efficient as it streams the results
directly to you, rather than having to write them out to disk and make
them available later. As soon as results are ready, you will receive
them.
The ``export`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`preview`), plus at most two
more if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises `ValueError`: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional). For a list of valid
parameters, see `GET search/jobs/export
<http://docs/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_
in the REST API documentation.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server. | Below is the the instruction that describes the task:
### Input:
Runs a search and immediately starts streaming preview events.
This method returns a streaming handle to this job's events as an XML
document from the server. To parse this stream into usable Python objects,
pass the handle to :class:`splunklib.results.ResultsReader`::
import splunklib.client as client
import splunklib.results as results
service = client.connect(...)
rr = results.ResultsReader(service.jobs.export("search * | head 5"))
for result in rr:
if isinstance(result, results.Message):
# Diagnostic messages may be returned in the results
print '%s: %s' % (result.type, result.message)
elif isinstance(result, dict):
# Normal events are returned as dicts
print result
assert rr.is_preview == False
Running an export search is more efficient as it streams the results
directly to you, rather than having to write them out to disk and make
them available later. As soon as results are ready, you will receive
them.
The ``export`` method makes a single roundtrip to the server (as opposed
to two for :meth:`create` followed by :meth:`preview`), plus at most two
more if the ``autologin`` field of :func:`connect` is set to ``True``.
:raises `ValueError`: Raised for invalid queries.
:param query: The search query.
:type query: ``string``
:param params: Additional arguments (optional). For a list of valid
parameters, see `GET search/jobs/export
<http://docs/Documentation/Splunk/latest/RESTAPI/RESTsearch#search.2Fjobs.2Fexport>`_
in the REST API documentation.
:type params: ``dict``
:return: The ``InputStream`` IO handle to raw XML returned from the server.
### Response:
def export(self, query, **params):
    """Run an export search and stream its events straight from the server.

    The return value is the raw XML body of the export endpoint.  To parse
    this stream into usable Python objects, pass the handle to
    :class:`splunklib.results.ResultsReader`.

    Export searches are efficient because results are streamed to the
    caller as soon as they are ready rather than being written to disk
    first.  The call makes a single roundtrip to the server (as opposed to
    two for :meth:`create` followed by :meth:`preview`), plus at most two
    more if the ``autologin`` field of :func:`connect` is set to ``True``.

    :param query: The search query.
    :type query: ``string``
    :param params: Additional arguments (optional). For a list of valid
        parameters, see `GET search/jobs/export` in the REST API
        documentation.
    :type params: ``dict``
    :raises TypeError: If ``exec_mode`` is passed; it is not valid for
        export searches.
    :return: The ``InputStream`` IO handle to raw XML returned from the server.
    """
    if "exec_mode" in params:
        raise TypeError("Cannot specify an exec_mode to export.")
    # Default to no segmentation markup unless the caller asked otherwise.
    params.setdefault('segmentation', 'none')
    return self.post(path_segment="export", search=query, **params).body
def notify(self, value: Any) -> asyncio.Future:
""" Calling .emit(value) on all subscribers. A synchronouse subscriber
will just return None, a asynchronous one may returns a future. Futures
will be collected. If no future was returned None will be returned by
this method. If one futrue was returned that future will be returned.
When multiple futures were returned a gathered future will be returned.
:param value: value to be emitted to subscribers
:returns: a future if at least one subscriber has returned a future,
elsewise None
"""
results = (s.emit(value, who=self) for s in self._subscriptions)
futures = tuple(r for r in results if r is not None)
if not futures:
return None
if len(futures) == 1:
return futures[0] # return the received single future
return asyncio.gather(*futures) | Calling .emit(value) on all subscribers. A synchronouse subscriber
will just return None, a asynchronous one may returns a future. Futures
will be collected. If no future was returned None will be returned by
this method. If one futrue was returned that future will be returned.
When multiple futures were returned a gathered future will be returned.
:param value: value to be emitted to subscribers
:returns: a future if at least one subscriber has returned a future,
elsewise None | Below is the the instruction that describes the task:
### Input:
Calling .emit(value) on all subscribers. A synchronous subscriber
will just return None; an asynchronous one may return a future. Futures
will be collected. If no future was returned, None will be returned by
this method. If one future was returned, that future will be returned.
When multiple futures were returned, a gathered future will be returned.
:param value: value to be emitted to subscribers
:returns: a future if at least one subscriber has returned a future,
otherwise None
### Response:
def notify(self, value: Any) -> asyncio.Future:
    """Emit *value* to every subscriber and collect any returned futures.

    Synchronous subscribers return ``None`` from ``.emit``; asynchronous
    ones may return a future.  Depending on how many futures come back,
    this method returns ``None`` (none returned), the single future
    itself (exactly one), or an ``asyncio.gather`` of all of them.

    :param value: value to be emitted to subscribers
    :returns: a future if at least one subscriber returned one, else None
    """
    pending = []
    for subscription in self._subscriptions:
        outcome = subscription.emit(value, who=self)
        if outcome is not None:
            pending.append(outcome)
    if not pending:
        return None
    if len(pending) == 1:
        # Hand back the lone future untouched; no need to wrap it.
        return pending[0]
    return asyncio.gather(*pending)
def add_configuration(self, configuration, collect_another_source, done, result, src):
"""
Used to add a file to the configuration, result here is the yaml.load
of the src.
If the configuration we're reading in has ``harpoon.extra_files``
then this is treated as a list of strings of other files to collect.
We also take extra files to collector from result["images"]["__images_from__"]
"""
# Make sure to maintain the original config_root
if "config_root" in configuration:
# if we already have a config root then we only keep new config root if it's not the home location
# i.e. if it is the home configuration, we don't delete the new config_root
if configuration["config_root"] != os.path.dirname(self.home_dir_configuration_location()):
if "config_root" in result:
del result["config_root"]
config_root = configuration.get("config_root")
if config_root and src.startswith(config_root):
src = "{{config_root}}/{0}".format(src[len(config_root) + 1:])
if "images" in result and "__images_from__" in result["images"]:
images_from_path = result["images"]["__images_from__"]
if isinstance(images_from_path, six.string_types):
images_from_path = [images_from_path]
for ifp in images_from_path:
if not ifp.startswith("/"):
ifp = os.path.join(os.path.dirname(src), ifp)
if not os.path.exists(ifp) or not os.path.isdir(ifp):
raise self.BadConfigurationErrorKls(
"Specified folder for other configuration files points to a folder that doesn't exist"
, path="images.__images_from__"
, value=ifp
)
for root, dirs, files in os.walk(ifp):
for fle in files:
location = os.path.join(root, fle)
if fle.endswith(".yml") or fle.endswith(".yaml"):
collect_another_source(location
, prefix = ["images", os.path.splitext(os.path.basename(fle))[0]]
)
del result["images"]["__images_from__"]
configuration.update(result, source=src)
if "harpoon" in result:
if "extra_files" in result["harpoon"]:
spec = sb.listof(sb.formatted(sb.string_spec(), formatter=MergedOptionStringFormatter))
config_root = {"config_root": result.get("config_root", configuration.get("config_root"))}
meta = Meta(MergedOptions.using(result, config_root), []).at("harpoon").at("extra_files")
for extra in spec.normalise(meta, result["harpoon"]["extra_files"]):
if os.path.abspath(extra) not in done:
if not os.path.exists(extra):
raise BadConfiguration("Specified extra file doesn't exist", extra=extra, source=src)
collect_another_source(extra) | Used to add a file to the configuration, result here is the yaml.load
of the src.
If the configuration we're reading in has ``harpoon.extra_files``
then this is treated as a list of strings of other files to collect.
We also take extra files to collector from result["images"]["__images_from__"] | Below is the the instruction that describes the task:
### Input:
Used to add a file to the configuration, result here is the yaml.load
of the src.
If the configuration we're reading in has ``harpoon.extra_files``
then this is treated as a list of strings of other files to collect.
We also take extra files to collector from result["images"]["__images_from__"]
### Response:
def add_configuration(self, configuration, collect_another_source, done, result, src):
    """
    Add one parsed configuration file into the merged configuration.

    ``result`` is the ``yaml.load`` of ``src``.

    If the configuration being read in has ``harpoon.extra_files`` then it
    is treated as a list of strings naming other files to collect.
    Extra files to collect are also taken from
    ``result["images"]["__images_from__"]``, a path (or list of paths) to
    folders whose ``.yml``/``.yaml`` files become image definitions.

    :param configuration: the MergedOptions being built up
    :param collect_another_source: callback used to queue further files
    :param done: collection of absolute paths already collected
    :param result: parsed contents of ``src``
    :param src: path the contents came from
    """
    # Make sure to maintain the original config_root
    if "config_root" in configuration:
        # if we already have a config root then we only keep new config root if it's not the home location
        # i.e. if it is the home configuration, we don't delete the new config_root
        if configuration["config_root"] != os.path.dirname(self.home_dir_configuration_location()):
            if "config_root" in result:
                del result["config_root"]
    # Record src relative to config_root so later formatting can expand it.
    config_root = configuration.get("config_root")
    if config_root and src.startswith(config_root):
        src = "{{config_root}}/{0}".format(src[len(config_root) + 1:])
    if "images" in result and "__images_from__" in result["images"]:
        images_from_path = result["images"]["__images_from__"]
        # A single string is normalised to a one-element list of folders.
        if isinstance(images_from_path, six.string_types):
            images_from_path = [images_from_path]
        for ifp in images_from_path:
            # Relative folders are resolved against the file that named them.
            if not ifp.startswith("/"):
                ifp = os.path.join(os.path.dirname(src), ifp)
            if not os.path.exists(ifp) or not os.path.isdir(ifp):
                raise self.BadConfigurationErrorKls(
                    "Specified folder for other configuration files points to a folder that doesn't exist"
                    , path="images.__images_from__"
                    , value=ifp
                    )
            # Every yaml file in the folder becomes an image named after the file.
            for root, dirs, files in os.walk(ifp):
                for fle in files:
                    location = os.path.join(root, fle)
                    if fle.endswith(".yml") or fle.endswith(".yaml"):
                        collect_another_source(location
                            , prefix = ["images", os.path.splitext(os.path.basename(fle))[0]]
                            )
        # The marker key is consumed; it must not leak into the merged config.
        del result["images"]["__images_from__"]
    configuration.update(result, source=src)
    if "harpoon" in result:
        if "extra_files" in result["harpoon"]:
            # Format each entry (so {config_root} etc. expand) before collecting.
            spec = sb.listof(sb.formatted(sb.string_spec(), formatter=MergedOptionStringFormatter))
            config_root = {"config_root": result.get("config_root", configuration.get("config_root"))}
            meta = Meta(MergedOptions.using(result, config_root), []).at("harpoon").at("extra_files")
            for extra in spec.normalise(meta, result["harpoon"]["extra_files"]):
                if os.path.abspath(extra) not in done:
                    if not os.path.exists(extra):
                        raise BadConfiguration("Specified extra file doesn't exist", extra=extra, source=src)
                    collect_another_source(extra)
def draw(self, gdefs, theme):
"""
Draw out each guide definition
Parameters
----------
gdefs : list of guide_legend|guide_colorbar
guide definitions
theme : theme
Plot theme
Returns
-------
out : list of matplotlib.offsetbox.Offsetbox
A drawing of each legend
"""
for g in gdefs:
g.theme = theme
g._set_defaults()
return [g.draw() for g in gdefs] | Draw out each guide definition
Parameters
----------
gdefs : list of guide_legend|guide_colorbar
guide definitions
theme : theme
Plot theme
Returns
-------
out : list of matplotlib.offsetbox.Offsetbox
A drawing of each legend | Below is the the instruction that describes the task:
### Input:
Draw out each guide definition
Parameters
----------
gdefs : list of guide_legend|guide_colorbar
guide definitions
theme : theme
Plot theme
Returns
-------
out : list of matplotlib.offsetbox.Offsetbox
A drawing of each legend
### Response:
def draw(self, gdefs, theme):
    """
    Render every guide definition into a legend drawing.

    Parameters
    ----------
    gdefs : list of guide_legend|guide_colorbar
        Guide definitions.
    theme : theme
        Plot theme.

    Returns
    -------
    out : list of matplotlib.offsetbox.Offsetbox
        A drawing of each legend.
    """
    # Configure all guides first, then draw them all, preserving the
    # original two-pass order (configuration never interleaves with drawing).
    for guide in gdefs:
        guide.theme = theme
        guide._set_defaults()
    return [guide.draw() for guide in gdefs]
def fix_imports(script):
"""
Replace "from PyQt5 import" by "from pyqode.qt import".
:param script: script path
"""
with open(script, 'r') as f_script:
lines = f_script.read().splitlines()
new_lines = []
for l in lines:
if l.startswith("import "):
l = "from . " + l
if "from PyQt5 import" in l:
l = l.replace("from PyQt5 import", "from pyqode.qt import")
new_lines.append(l)
with open(script, 'w') as f_script:
f_script.write("\n".join(new_lines)) | Replace "from PyQt5 import" by "from pyqode.qt import".
:param script: script path | Below is the the instruction that describes the task:
### Input:
Replace "from PyQt5 import" by "from pyqode.qt import".
:param script: script path
### Response:
def fix_imports(script):
    """
    Rewrite a script's imports in place for the pyqode.qt shim.

    Lines beginning with ``import `` are turned into relative imports and
    ``from PyQt5 import`` becomes ``from pyqode.qt import``.

    :param script: script path
    """
    with open(script, 'r') as handle:
        original_lines = handle.read().splitlines()
    fixed_lines = []
    for line in original_lines:
        if line.startswith("import "):
            line = "from . " + line
        # No-op for lines that do not mention PyQt5.
        line = line.replace("from PyQt5 import", "from pyqode.qt import")
        fixed_lines.append(line)
    # Note: splitlines()/join drops any trailing newline, as before.
    with open(script, 'w') as handle:
        handle.write("\n".join(fixed_lines))
def nx_graph_from_dotfile(filename: str) -> nx.DiGraph:
""" Get a networkx graph from a DOT file, and reverse the edges. """
return nx.DiGraph(read_dot(filename).reverse()) | Get a networkx graph from a DOT file, and reverse the edges. | Below is the the instruction that describes the task:
### Input:
Get a networkx graph from a DOT file, and reverse the edges.
### Response:
def nx_graph_from_dotfile(filename: str) -> nx.DiGraph:
    """Load a DOT file as a networkx DiGraph with all edge directions reversed.

    :param filename: path to the DOT file to read
    :return: the reversed directed graph
    """
    return nx.DiGraph(read_dot(filename).reverse())
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
'''
if 'figsize' in kwargs: return plt.subplots(**kwargs)
width, height = mpl.rcParams['figure.figsize']
if scale is not None:
width *= scale
height *= scale
if scale_x is not None:
width *= scale_x
if scale_y is not None:
height *= scale_y
nrows = kwargs.pop('nrows', 1)
ncols = kwargs.pop('ncols', 1)
width = ncols * width
height = nrows * height
return plt.subplots(nrows=nrows, ncols=ncols, figsize=(width,height), **kwargs) | r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions). | Below is the the instruction that describes the task:
### Input:
r'''
Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the correct multiple of the default.
:additional options:
**scale, scale_x, scale_y** (``<float>``)
Scale the figure-size (along one of the dimensions).
### Response:
def subplots(scale_x=None, scale_y=None, scale=None, **kwargs):
    r'''
    Run ``matplotlib.pyplot.subplots`` with ``figsize`` set to the rc
    default multiplied by the grid shape and the requested scales.

    :additional options:
        **scale, scale_x, scale_y** (``<float>``)
            Scale the figure-size (along one of the dimensions).
    '''
    # An explicit figsize wins; defer entirely to matplotlib.
    if 'figsize' in kwargs:
        return plt.subplots(**kwargs)
    width, height = mpl.rcParams['figure.figsize']
    if scale is not None:
        width *= scale
        height *= scale
    if scale_x is not None:
        width *= scale_x
    if scale_y is not None:
        height *= scale_y
    nrows = kwargs.pop('nrows', 1)
    ncols = kwargs.pop('ncols', 1)
    # One default-sized panel per grid cell.
    return plt.subplots(nrows=nrows, ncols=ncols,
                        figsize=(ncols * width, nrows * height), **kwargs)
def eglGetDisplay(display=EGL_DEFAULT_DISPLAY):
""" Connect to the EGL display server.
"""
res = _lib.eglGetDisplay(display)
if not res or res == EGL_NO_DISPLAY:
raise RuntimeError('Could not create display')
return res | Connect to the EGL display server. | Below is the the instruction that describes the task:
### Input:
Connect to the EGL display server.
### Response:
def eglGetDisplay(display=EGL_DEFAULT_DISPLAY):
    """ Connect to the EGL display server.

    :param display: native display identifier; defaults to
        ``EGL_DEFAULT_DISPLAY``
    :raises RuntimeError: if no display connection could be obtained
    :return: the EGL display handle
    """
    res = _lib.eglGetDisplay(display)
    # EGL signals failure via EGL_NO_DISPLAY (or a NULL handle), not an exception.
    if not res or res == EGL_NO_DISPLAY:
        raise RuntimeError('Could not create display')
    return res
def print_usage(self):
"""
Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
"""
print 'Total Usage: %f compute seconds' % self.box_usage
cost = self.box_usage * 0.14
print 'Approximate Cost: $%f' % cost | Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate. | Below is the the instruction that describes the task:
### Input:
Print the BoxUsage and approximate costs of all requests made on
this specific SDBConnection instance.
.. tip:: This can be out of date, and should only be treated as a
rough estimate. Also note that this estimate only applies to the
requests made on this specific connection instance. It is by
no means an account-wide estimate.
### Response:
def print_usage(self):
    """
    Print the BoxUsage and approximate costs of all requests made on
    this specific SDBConnection instance.

    .. tip:: This can be out of date, and should only be treated as a
        rough estimate. Also note that this estimate only applies to the
        requests made on this specific connection instance. It is by
        no means an account-wide estimate.
    """
    print 'Total Usage: %f compute seconds' % self.box_usage
    # Hard-coded SimpleDB rate: $0.14 per machine-hour of box usage;
    # may be stale relative to current AWS pricing.
    cost = self.box_usage * 0.14
    print 'Approximate Cost: $%f' % cost
def get_port_vendor_info(port=None):
""" Return vendor informations of a usb2serial device.
It may depends on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' """
port_info_dict = dict((x[0], x[2]) for x in serial.tools.list_ports.comports())
return port_info_dict[port] if port is not None else port_info_dict | Return vendor informations of a usb2serial device.
It may depends on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' | Below is the the instruction that describes the task:
### Input:
Return vendor informations of a usb2serial device.
It may depends on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE'
### Response:
def get_port_vendor_info(port=None):
    """ Return vendor information of a usb2serial device.

    The exact string is operating-system dependent.

    :param string port: port of the usb2serial device; when None, a dict
        mapping every detected port to its vendor info is returned
    :Example:
        Result with a USB2Dynamixel on Linux:
        In [1]: import pypot.dynamixel
        In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
        Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' """
    # comports() yields (device, description, hardware-id) triples.
    info_by_port = {entry[0]: entry[2]
                    for entry in serial.tools.list_ports.comports()}
    if port is None:
        return info_by_port
    return info_by_port[port]
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop,
fs = 1.0, N_bump=5):
"""
Design an FIR bandstop filter using remez with order
determination. The filter order is determined based on
f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
"""
n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2,
d_pass, d_stop, fsamp=fs)
# Bump up the order by N_bump to bring down the final d_pass & d_stop
# Initially make sure the number of taps is even so N_bump needs to be odd
if np.mod(n,2) != 0:
n += 1
N_taps = n
N_taps += N_bump
b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2,
maxiter = 25, grid_density = 16)
print('N_bump must be odd to maintain odd filter length')
print('Remez filter taps = %d.' % N_taps)
return b | Design an FIR bandstop filter using remez with order
determination. The filter order is determined based on
f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018 | Below is the the instruction that describes the task:
### Input:
Design an FIR bandstop filter using remez with order
determination. The filter order is determined based on
f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
desired passband ripple d_pass dB and stopband attenuation
d_stop dB all relative to a sampling rate of fs Hz.
Mark Wickert October 2016, updated October 2018
### Response:
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop,
                  fs = 1.0, N_bump=5):
    """
    Design an FIR bandstop filter using remez with order
    determination. The filter order is determined based on
    f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the
    desired passband ripple d_pass dB and stopband attenuation
    d_stop dB all relative to a sampling rate of fs Hz.

    :param f_pass1: lower passband edge in Hz
    :param f_stop1: lower stopband edge in Hz
    :param f_stop2: upper stopband edge in Hz
    :param f_pass2: upper passband edge in Hz
    :param d_pass: passband ripple in dB
    :param d_stop: stopband attenuation in dB
    :param fs: sampling rate in Hz
    :param N_bump: extra taps added to the estimated order; should be odd
        so the final tap count stays odd
    :return: FIR filter coefficients

    Mark Wickert October 2016, updated October 2018
    """
    n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2,
                                    d_pass, d_stop, fsamp=fs)
    # Bump up the order by N_bump to bring down the final d_pass & d_stop
    # Initially make sure the number of taps is even so N_bump needs to be odd
    if np.mod(n,2) != 0:
        n += 1
    N_taps = n
    N_taps += N_bump
    # aa[0::2] picks one desired amplitude per band for remez.
    b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2,
                     maxiter = 25, grid_density = 16)
    # Informational reminder printed on every call (not an error).
    print('N_bump must be odd to maintain odd filter length')
    print('Remez filter taps = %d.' % N_taps)
    return b
def encode_packet(packet: dict) -> str:
"""Construct packet string from packet dictionary.
>>> encode_packet({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'10;newkaku;000001;01;on;'
"""
if packet['protocol'] == 'rfdebug':
return '10;RFDEBUG=' + packet['command'] + ';'
elif packet['protocol'] == 'rfudebug':
return '10;RFDEBUG=' + packet['command'] + ';'
else:
return SWITCH_COMMAND_TEMPLATE.format(
node=PacketHeader.master.value,
**packet
) | Construct packet string from packet dictionary.
>>> encode_packet({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'10;newkaku;000001;01;on;' | Below is the the instruction that describes the task:
### Input:
Construct packet string from packet dictionary.
>>> encode_packet({
... 'protocol': 'newkaku',
... 'id': '000001',
... 'switch': '01',
... 'command': 'on',
... })
'10;newkaku;000001;01;on;'
### Response:
def encode_packet(packet: dict) -> str:
    """Serialize a packet dictionary into its wire-format string.

    >>> encode_packet({
    ...     'protocol': 'newkaku',
    ...     'id': '000001',
    ...     'switch': '01',
    ...     'command': 'on',
    ... })
    '10;newkaku;000001;01;on;'
    """
    protocol = packet['protocol']
    # Both debug protocols use the same raw RFDEBUG command form.
    if protocol in ('rfdebug', 'rfudebug'):
        return '10;RFDEBUG=' + packet['command'] + ';'
    return SWITCH_COMMAND_TEMPLATE.format(
        node=PacketHeader.master.value,
        **packet
    )
def add_shellwidget(self, shellwidget):
"""
Register shell with variable explorer.
This function opens a new NamespaceBrowser for browsing the variables
in the shell.
"""
shellwidget_id = id(shellwidget)
if shellwidget_id not in self.shellwidgets:
self.options_button.setVisible(True)
nsb = NamespaceBrowser(self, options_button=self.options_button)
nsb.set_shellwidget(shellwidget)
nsb.setup(**self.get_settings())
nsb.sig_option_changed.connect(self.change_option)
nsb.sig_free_memory.connect(self.free_memory)
self.add_widget(nsb)
self.shellwidgets[shellwidget_id] = nsb
self.set_shellwidget_from_id(shellwidget_id)
return nsb | Register shell with variable explorer.
This function opens a new NamespaceBrowser for browsing the variables
in the shell. | Below is the the instruction that describes the task:
### Input:
Register shell with variable explorer.
This function opens a new NamespaceBrowser for browsing the variables
in the shell.
### Response:
def add_shellwidget(self, shellwidget):
    """
    Register shell with variable explorer.

    This function opens a new NamespaceBrowser for browsing the variables
    in the shell.

    :param shellwidget: shell widget to register
    :return: the NamespaceBrowser created for the shell widget
    """
    # Widgets are keyed by object identity, so each shell registers once.
    shellwidget_id = id(shellwidget)
    if shellwidget_id not in self.shellwidgets:
        self.options_button.setVisible(True)
        nsb = NamespaceBrowser(self, options_button=self.options_button)
        nsb.set_shellwidget(shellwidget)
        nsb.setup(**self.get_settings())
        nsb.sig_option_changed.connect(self.change_option)
        nsb.sig_free_memory.connect(self.free_memory)
        self.add_widget(nsb)
        self.shellwidgets[shellwidget_id] = nsb
        self.set_shellwidget_from_id(shellwidget_id)
        return nsb
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
"""A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
"""
try:
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.connect(':'.join([addr, str(port)]))
f(socket)
finally:
# Works around a Python 3.x bug.
context.destroy() | A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1'). | Below is the the instruction that describes the task:
### Input:
A shim that sets up a socket and starts the producer callable.
Parameters
----------
f : callable
Callable that takes a single argument, a handle
for a ZeroMQ PUSH socket. Must be picklable.
port : int
The port on which the socket should connect.
addr : str, optional
Address to which the socket should connect. Defaults
to localhost ('tcp://127.0.0.1').
### Response:
def _producer_wrapper(f, port, addr='tcp://127.0.0.1'):
    """A shim that sets up a socket and starts the producer callable.

    Parameters
    ----------
    f : callable
        Callable that takes a single argument, a handle
        for a ZeroMQ PUSH socket. Must be picklable.
    port : int
        The port on which the socket should connect.
    addr : str, optional
        Address to which the socket should connect. Defaults
        to localhost ('tcp://127.0.0.1').
    """
    # Create the context before entering the try block: if Context() itself
    # fails there is nothing to clean up, whereas the previous placement
    # made the finally clause raise NameError on the unbound `context`.
    context = zmq.Context()
    try:
        socket = context.socket(zmq.PUSH)
        socket.connect(':'.join([addr, str(port)]))
        f(socket)
    finally:
        # Works around a Python 3.x bug; destroy() also closes any sockets
        # created from this context.
        context.destroy()
def create_address(kwargs=None, call=None):
'''
Create a static address in a region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP
'''
if call != 'function':
raise SaltCloudSystemExit(
'The create_address function must be called with -f or --function.'
)
if not kwargs or 'name' not in kwargs:
log.error(
'A name must be specified when creating an address.'
)
return False
if 'region' not in kwargs:
log.error(
'A region must be specified for the address.'
)
return False
name = kwargs['name']
ex_region = kwargs['region']
ex_address = kwargs.get("address", None)
kwargs['region'] = _expand_region(kwargs['region'])
conn = get_conn()
__utils__['cloud.fire_event'](
'event',
'create address',
'salt/cloud/address/creating',
args=salt.utils.data.simple_types_filter(kwargs),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
addy = conn.ex_create_address(name, ex_region, ex_address)
__utils__['cloud.fire_event'](
'event',
'created address',
'salt/cloud/address/created',
args=salt.utils.data.simple_types_filter(kwargs),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.info('Created GCE Address %s', name)
return _expand_address(addy) | Create a static address in a region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP | Below is the the instruction that describes the task:
### Input:
Create a static address in a region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP
### Response:
def create_address(kwargs=None, call=None):
    '''
    Create a static address in a region.

    :param kwargs: must contain ``name`` and ``region``; ``address`` is
        optional (when omitted, GCE assigns one)
    :param call: salt-cloud invocation style; must be ``'function'``

    CLI Example:
    .. code-block:: bash
        salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP
    '''
    # This is a salt-cloud "function" (not an "action"): enforce -f/--function.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_address function must be called with -f or --function.'
        )
    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating an address.'
        )
        return False
    if 'region' not in kwargs:
        log.error(
            'A region must be specified for the address.'
        )
        return False
    name = kwargs['name']
    ex_region = kwargs['region']
    ex_address = kwargs.get("address", None)
    kwargs['region'] = _expand_region(kwargs['region'])
    conn = get_conn()
    # Fire a "creating" event before and a "created" event after the API call.
    __utils__['cloud.fire_event'](
        'event',
        'create address',
        'salt/cloud/address/creating',
        args=salt.utils.data.simple_types_filter(kwargs),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    addy = conn.ex_create_address(name, ex_region, ex_address)
    __utils__['cloud.fire_event'](
        'event',
        'created address',
        'salt/cloud/address/created',
        args=salt.utils.data.simple_types_filter(kwargs),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Created GCE Address %s', name)
    return _expand_address(addy)
def _prepare_request_data(self, eopatch, bbox, time_interval):
""" Collects all parameters used for DataRequest, each one is taken either from initialization parameters or
from EOPatch
"""
service_type = ServiceType(self._get_parameter('service_type', eopatch))
if time_interval is None:
time_interval = self._get_parameter('time_interval', eopatch)
if service_type is ServiceType.WMS:
size_x_name, size_y_name = 'width', 'height'
else:
size_x_name, size_y_name = 'resx', 'resy'
return {
'layer': self.layer,
'bbox': bbox if bbox is not None else self._get_parameter('bbox', eopatch),
'time': time_interval,
'time_difference': self._get_parameter('time_difference', eopatch),
'maxcc': self._get_parameter('maxcc', eopatch),
'image_format': self.image_format,
'custom_url_params': self.custom_url_params,
'data_source': self.data_source,
'instance_id': self.instance_id,
size_x_name: self._get_parameter('size_x', eopatch),
size_y_name: self._get_parameter('size_y', eopatch)
}, service_type | Collects all parameters used for DataRequest, each one is taken either from initialization parameters or
from EOPatch | Below is the the instruction that describes the task:
### Input:
Collects all parameters used for DataRequest, each one is taken either from initialization parameters or
from EOPatch
### Response:
def _prepare_request_data(self, eopatch, bbox, time_interval):
""" Collects all parameters used for DataRequest, each one is taken either from initialization parameters or
from EOPatch
"""
service_type = ServiceType(self._get_parameter('service_type', eopatch))
if time_interval is None:
time_interval = self._get_parameter('time_interval', eopatch)
if service_type is ServiceType.WMS:
size_x_name, size_y_name = 'width', 'height'
else:
size_x_name, size_y_name = 'resx', 'resy'
return {
'layer': self.layer,
'bbox': bbox if bbox is not None else self._get_parameter('bbox', eopatch),
'time': time_interval,
'time_difference': self._get_parameter('time_difference', eopatch),
'maxcc': self._get_parameter('maxcc', eopatch),
'image_format': self.image_format,
'custom_url_params': self.custom_url_params,
'data_source': self.data_source,
'instance_id': self.instance_id,
size_x_name: self._get_parameter('size_x', eopatch),
size_y_name: self._get_parameter('size_y', eopatch)
}, service_type |
def init_db_conn(
connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
rpool = redis.ConnectionPool(
host=HOST, port=PORT, db=DB, password=PASSWORD)
r = redis.Redis(connection_pool=rpool)
redis_pool.connections[connection_name] = RedisClient(r) | Initialize a redis connection by each connection string
defined in the configuration file | Below is the the instruction that describes the task:
### Input:
Initialize a redis connection by each connection string
defined in the configuration file
### Response:
def init_db_conn(
connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
"""
Initialize a redis connection by each connection string
defined in the configuration file
"""
rpool = redis.ConnectionPool(
host=HOST, port=PORT, db=DB, password=PASSWORD)
r = redis.Redis(connection_pool=rpool)
redis_pool.connections[connection_name] = RedisClient(r) |
def readconfig(self, configpath=None):
"""
:param configpath: Optional bugzillarc path to read, instead of
the default list.
This function is called automatically from Bugzilla connect(), which
is called at __init__ if a URL is passed. Calling it manually is
just for passing in a non-standard configpath.
The locations for the bugzillarc file are preferred in this order:
~/.config/python-bugzilla/bugzillarc
~/.bugzillarc
/etc/bugzillarc
It has content like:
[bugzilla.yoursite.com]
user = username
password = password
Or
[bugzilla.yoursite.com]
api_key = key
The file can have multiple sections for different bugzilla instances.
A 'url' field in the [DEFAULT] section can be used to set a default
URL for the bugzilla command line tool.
Be sure to set appropriate permissions on bugzillarc if you choose to
store your password in it!
"""
cfg = _open_bugzillarc(configpath or self.configpath)
if not cfg:
return
section = ""
log.debug("bugzillarc: Searching for config section matching %s",
self.url)
def _parse_hostname(_u):
# If http://example.com is passed, netloc=example.com path=""
# If just example.com is passed, netloc="" path=example.com
parsedbits = urlparse(self.url)
return parsedbits.netloc or parsedbits.path
urlhost = _parse_hostname(self.url)
for sectionhost in sorted(cfg.sections()):
# If the section is just a hostname, make it match
# If the section has a / in it, do a substring match
if "/" not in sectionhost:
if sectionhost == urlhost:
section = sectionhost
elif sectionhost in self.url:
section = sectionhost
if section:
log.debug("bugzillarc: Found matching section: %s", section)
break
if not section:
log.debug("bugzillarc: No section found")
return
for key, val in cfg.items(section):
if key == "api_key":
log.debug("bugzillarc: setting api_key")
self.api_key = val
elif key == "user":
log.debug("bugzillarc: setting user=%s", val)
self.user = val
elif key == "password":
log.debug("bugzillarc: setting password")
self.password = val
elif key == "cert":
log.debug("bugzillarc: setting cert")
self.cert = val
else:
log.debug("bugzillarc: unknown key=%s", key) | :param configpath: Optional bugzillarc path to read, instead of
the default list.
This function is called automatically from Bugzilla connect(), which
is called at __init__ if a URL is passed. Calling it manually is
just for passing in a non-standard configpath.
The locations for the bugzillarc file are preferred in this order:
~/.config/python-bugzilla/bugzillarc
~/.bugzillarc
/etc/bugzillarc
It has content like:
[bugzilla.yoursite.com]
user = username
password = password
Or
[bugzilla.yoursite.com]
api_key = key
The file can have multiple sections for different bugzilla instances.
A 'url' field in the [DEFAULT] section can be used to set a default
URL for the bugzilla command line tool.
Be sure to set appropriate permissions on bugzillarc if you choose to
store your password in it! | Below is the the instruction that describes the task:
### Input:
:param configpath: Optional bugzillarc path to read, instead of
the default list.
This function is called automatically from Bugzilla connect(), which
is called at __init__ if a URL is passed. Calling it manually is
just for passing in a non-standard configpath.
The locations for the bugzillarc file are preferred in this order:
~/.config/python-bugzilla/bugzillarc
~/.bugzillarc
/etc/bugzillarc
It has content like:
[bugzilla.yoursite.com]
user = username
password = password
Or
[bugzilla.yoursite.com]
api_key = key
The file can have multiple sections for different bugzilla instances.
A 'url' field in the [DEFAULT] section can be used to set a default
URL for the bugzilla command line tool.
Be sure to set appropriate permissions on bugzillarc if you choose to
store your password in it!
### Response:
def readconfig(self, configpath=None):
"""
:param configpath: Optional bugzillarc path to read, instead of
the default list.
This function is called automatically from Bugzilla connect(), which
is called at __init__ if a URL is passed. Calling it manually is
just for passing in a non-standard configpath.
The locations for the bugzillarc file are preferred in this order:
~/.config/python-bugzilla/bugzillarc
~/.bugzillarc
/etc/bugzillarc
It has content like:
[bugzilla.yoursite.com]
user = username
password = password
Or
[bugzilla.yoursite.com]
api_key = key
The file can have multiple sections for different bugzilla instances.
A 'url' field in the [DEFAULT] section can be used to set a default
URL for the bugzilla command line tool.
Be sure to set appropriate permissions on bugzillarc if you choose to
store your password in it!
"""
cfg = _open_bugzillarc(configpath or self.configpath)
if not cfg:
return
section = ""
log.debug("bugzillarc: Searching for config section matching %s",
self.url)
def _parse_hostname(_u):
# If http://example.com is passed, netloc=example.com path=""
# If just example.com is passed, netloc="" path=example.com
parsedbits = urlparse(self.url)
return parsedbits.netloc or parsedbits.path
urlhost = _parse_hostname(self.url)
for sectionhost in sorted(cfg.sections()):
# If the section is just a hostname, make it match
# If the section has a / in it, do a substring match
if "/" not in sectionhost:
if sectionhost == urlhost:
section = sectionhost
elif sectionhost in self.url:
section = sectionhost
if section:
log.debug("bugzillarc: Found matching section: %s", section)
break
if not section:
log.debug("bugzillarc: No section found")
return
for key, val in cfg.items(section):
if key == "api_key":
log.debug("bugzillarc: setting api_key")
self.api_key = val
elif key == "user":
log.debug("bugzillarc: setting user=%s", val)
self.user = val
elif key == "password":
log.debug("bugzillarc: setting password")
self.password = val
elif key == "cert":
log.debug("bugzillarc: setting cert")
self.cert = val
else:
log.debug("bugzillarc: unknown key=%s", key) |
def write_input(self, atoms=None, properties=['energy'], system_changes=all_changes):
'''Write input file(s).'''
with work_dir(self.directory):
self.calc.write_input(self, atoms, properties, system_changes)
self.write_pbs_in(properties)
subprocess.call(self.copy_out_cmd % {
'ldir': self.directory,
'rdir': self.parameters['rdir'],
'user': self.parameters['user'],
'host': self.parameters['host']
}, shell=True) | Write input file(s). | Below is the the instruction that describes the task:
### Input:
Write input file(s).
### Response:
def write_input(self, atoms=None, properties=['energy'], system_changes=all_changes):
'''Write input file(s).'''
with work_dir(self.directory):
self.calc.write_input(self, atoms, properties, system_changes)
self.write_pbs_in(properties)
subprocess.call(self.copy_out_cmd % {
'ldir': self.directory,
'rdir': self.parameters['rdir'],
'user': self.parameters['user'],
'host': self.parameters['host']
}, shell=True) |
def with_slots(cls):
"""
Decorator for a class with _slots_. It automatically defines
the methods __eq__, __ne__, assert_equal.
"""
def _compare(self, other):
for slot in self.__class__._slots_:
attr = operator.attrgetter(slot)
source = attr(self)
target = attr(other)
if isinstance(source, numpy.ndarray):
eq = numpy.array_equal(source, target)
elif hasattr(source, '_slots_'):
source.assert_equal(target)
eq = True
else:
eq = source == target
yield slot, source, target, eq
def __eq__(self, other):
return all(eq for slot, source, target, eq in _compare(self, other))
def __ne__(self, other):
return not self.__eq__(other)
def assert_equal(self, other, ignore=()):
for slot, source, target, eq in _compare(self, other):
if not eq and slot not in ignore:
raise AssertionError('slot %s: %s is different from %s' %
(slot, source, target))
cls._slots_ # raise an AttributeError for missing slots
cls.__eq__ = __eq__
cls.__ne__ = __ne__
cls.assert_equal = assert_equal
return cls | Decorator for a class with _slots_. It automatically defines
the methods __eq__, __ne__, assert_equal. | Below is the the instruction that describes the task:
### Input:
Decorator for a class with _slots_. It automatically defines
the methods __eq__, __ne__, assert_equal.
### Response:
def with_slots(cls):
"""
Decorator for a class with _slots_. It automatically defines
the methods __eq__, __ne__, assert_equal.
"""
def _compare(self, other):
for slot in self.__class__._slots_:
attr = operator.attrgetter(slot)
source = attr(self)
target = attr(other)
if isinstance(source, numpy.ndarray):
eq = numpy.array_equal(source, target)
elif hasattr(source, '_slots_'):
source.assert_equal(target)
eq = True
else:
eq = source == target
yield slot, source, target, eq
def __eq__(self, other):
return all(eq for slot, source, target, eq in _compare(self, other))
def __ne__(self, other):
return not self.__eq__(other)
def assert_equal(self, other, ignore=()):
for slot, source, target, eq in _compare(self, other):
if not eq and slot not in ignore:
raise AssertionError('slot %s: %s is different from %s' %
(slot, source, target))
cls._slots_ # raise an AttributeError for missing slots
cls.__eq__ = __eq__
cls.__ne__ = __ne__
cls.assert_equal = assert_equal
return cls |
def fermion_avg(efermi, norm_hopping, func):
"""calcules for every slave it's average over the desired observable"""
if func == 'ekin':
func = bethe_ekin_zeroT
elif func == 'ocupation':
func = bethe_filling_zeroT
return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)]) | calcules for every slave it's average over the desired observable | Below is the the instruction that describes the task:
### Input:
calcules for every slave it's average over the desired observable
### Response:
def fermion_avg(efermi, norm_hopping, func):
"""calcules for every slave it's average over the desired observable"""
if func == 'ekin':
func = bethe_ekin_zeroT
elif func == 'ocupation':
func = bethe_filling_zeroT
return np.asarray([func(ef, tz) for ef, tz in zip(efermi, norm_hopping)]) |
def calculate_rate(country_code, postal_code, city):
"""
Calculates the VAT rate that should be collected based on address
information provided
:param country_code:
The two-character country code
:param postal_code:
The postal code for the user
:param city:
The city name for the user
:raises:
ValueError - If country code is not two characers, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes.
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None])
"""
if not country_code or not isinstance(country_code, str_cls):
raise ValueError('Invalidly formatted country code')
country_code = country_code.strip()
if len(country_code) != 2:
raise ValueError('Invalidly formatted country code')
country_code = country_code.upper()
if country_code not in COUNTRIES_WITHOUT_POSTAL_CODES:
if not postal_code or not isinstance(postal_code, str_cls):
raise ValueError('Postal code is not a string')
if not city or not isinstance(city, str_cls):
raise ValueError('City is not a string')
if isinstance(postal_code, str_cls):
postal_code = re.sub('\\s+', '', postal_code)
postal_code = postal_code.upper()
# Remove the common european practice of adding the country code
# to the beginning of a postal code, followed by a dash
if len(postal_code) > 3 and postal_code[0:3] == country_code + '-':
postal_code = postal_code[3:]
postal_code = postal_code.replace('-', '')
city = city.lower().strip()
if country_code not in rates.BY_COUNTRY and country_code not in POSTAL_CODE_EXCEPTIONS:
return (Decimal('0.0'), country_code, None)
country_default = rates.BY_COUNTRY.get(country_code, {'rate': Decimal('0.0')})['rate']
if country_code not in POSTAL_CODE_EXCEPTIONS:
return (country_default, country_code, None)
exceptions = POSTAL_CODE_EXCEPTIONS[country_code]
for matcher in exceptions:
# Postal code-only match
if isinstance(matcher, str_cls):
postal_regex = matcher
city_regex = None
else:
postal_regex, city_regex = matcher
if not re.match(postal_regex, postal_code):
continue
if city_regex and not re.search(city_regex, city):
continue
mapped_country = exceptions[matcher]['country_code']
# There is at least one entry where we map to a different country,
# but are not mapping to an exception
if 'name' not in exceptions[matcher]:
country_code = mapped_country
country_default = rates.BY_COUNTRY[country_code]['rate']
break
mapped_name = exceptions[matcher]['name']
rate = rates.BY_COUNTRY[mapped_country]['exceptions'][mapped_name]
return (rate, mapped_country, mapped_name)
return (country_default, country_code, None) | Calculates the VAT rate that should be collected based on address
information provided
:param country_code:
The two-character country code
:param postal_code:
The postal code for the user
:param city:
The city name for the user
:raises:
ValueError - If country code is not two characers, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes.
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None]) | Below is the the instruction that describes the task:
### Input:
Calculates the VAT rate that should be collected based on address
information provided
:param country_code:
The two-character country code
:param postal_code:
The postal code for the user
:param city:
The city name for the user
:raises:
ValueError - If country code is not two characers, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes.
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None])
### Response:
def calculate_rate(country_code, postal_code, city):
"""
Calculates the VAT rate that should be collected based on address
information provided
:param country_code:
The two-character country code
:param postal_code:
The postal code for the user
:param city:
The city name for the user
:raises:
ValueError - If country code is not two characers, or postal_code or city are not strings. postal_code may be None or blank string for countries without postal codes.
:return:
A tuple of (Decimal percentage rate, country code, exception name [or None])
"""
if not country_code or not isinstance(country_code, str_cls):
raise ValueError('Invalidly formatted country code')
country_code = country_code.strip()
if len(country_code) != 2:
raise ValueError('Invalidly formatted country code')
country_code = country_code.upper()
if country_code not in COUNTRIES_WITHOUT_POSTAL_CODES:
if not postal_code or not isinstance(postal_code, str_cls):
raise ValueError('Postal code is not a string')
if not city or not isinstance(city, str_cls):
raise ValueError('City is not a string')
if isinstance(postal_code, str_cls):
postal_code = re.sub('\\s+', '', postal_code)
postal_code = postal_code.upper()
# Remove the common european practice of adding the country code
# to the beginning of a postal code, followed by a dash
if len(postal_code) > 3 and postal_code[0:3] == country_code + '-':
postal_code = postal_code[3:]
postal_code = postal_code.replace('-', '')
city = city.lower().strip()
if country_code not in rates.BY_COUNTRY and country_code not in POSTAL_CODE_EXCEPTIONS:
return (Decimal('0.0'), country_code, None)
country_default = rates.BY_COUNTRY.get(country_code, {'rate': Decimal('0.0')})['rate']
if country_code not in POSTAL_CODE_EXCEPTIONS:
return (country_default, country_code, None)
exceptions = POSTAL_CODE_EXCEPTIONS[country_code]
for matcher in exceptions:
# Postal code-only match
if isinstance(matcher, str_cls):
postal_regex = matcher
city_regex = None
else:
postal_regex, city_regex = matcher
if not re.match(postal_regex, postal_code):
continue
if city_regex and not re.search(city_regex, city):
continue
mapped_country = exceptions[matcher]['country_code']
# There is at least one entry where we map to a different country,
# but are not mapping to an exception
if 'name' not in exceptions[matcher]:
country_code = mapped_country
country_default = rates.BY_COUNTRY[country_code]['rate']
break
mapped_name = exceptions[matcher]['name']
rate = rates.BY_COUNTRY[mapped_country]['exceptions'][mapped_name]
return (rate, mapped_country, mapped_name)
return (country_default, country_code, None) |
def get_sizeof_descriptor_table(version="Denali"):
"""
Get sizeof DescriptorTable
"""
if version == "Denali":
return sizeof(DescriptorTableDenali)
elif version == "Spec20":
return sizeof(DescriptorTableSpec20)
elif version == "Spec12":
return 0
else:
raise RuntimeError("Error version!") | Get sizeof DescriptorTable | Below is the the instruction that describes the task:
### Input:
Get sizeof DescriptorTable
### Response:
def get_sizeof_descriptor_table(version="Denali"):
"""
Get sizeof DescriptorTable
"""
if version == "Denali":
return sizeof(DescriptorTableDenali)
elif version == "Spec20":
return sizeof(DescriptorTableSpec20)
elif version == "Spec12":
return 0
else:
raise RuntimeError("Error version!") |
def HasTable(self, table_name):
"""Determines if a specific table exists.
Args:
table_name (str): name of the table.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
"""
if not self._connection:
raise IOError('Not opened.')
if not table_name:
return False
if self._table_names is None:
self._table_names = []
self._cursor.execute(self._HAS_TABLE_QUERY)
for row in self._cursor.fetchall():
if not row[0]:
continue
row_table_name = row[0]
if isinstance(row_table_name, bytes):
row_table_name = row_table_name.decode('utf-8')
self._table_names.append(row_table_name.lower())
table_name = table_name.lower()
return table_name in self._table_names | Determines if a specific table exists.
Args:
table_name (str): name of the table.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened. | Below is the the instruction that describes the task:
### Input:
Determines if a specific table exists.
Args:
table_name (str): name of the table.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
### Response:
def HasTable(self, table_name):
"""Determines if a specific table exists.
Args:
table_name (str): name of the table.
Returns:
bool: True if the column exists.
Raises:
IOError: if the database file is not opened.
OSError: if the database file is not opened.
"""
if not self._connection:
raise IOError('Not opened.')
if not table_name:
return False
if self._table_names is None:
self._table_names = []
self._cursor.execute(self._HAS_TABLE_QUERY)
for row in self._cursor.fetchall():
if not row[0]:
continue
row_table_name = row[0]
if isinstance(row_table_name, bytes):
row_table_name = row_table_name.decode('utf-8')
self._table_names.append(row_table_name.lower())
table_name = table_name.lower()
return table_name in self._table_names |
def SetLineColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._linecolor = Color(color)
if isinstance(self, ROOT.TAttLine):
ROOT.TAttLine.SetLineColor(self, self._linecolor('root')) | *color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`. | Below is the the instruction that describes the task:
### Input:
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
### Response:
def SetLineColor(self, color):
"""
*color* may be any color understood by ROOT or matplotlib.
For full documentation of accepted *color* arguments, see
:class:`rootpy.plotting.style.Color`.
"""
self._linecolor = Color(color)
if isinstance(self, ROOT.TAttLine):
ROOT.TAttLine.SetLineColor(self, self._linecolor('root')) |
def _getCampaignDict():
"""Returns a dictionary specifying the details of all campaigns."""
global _campaign_dict_cache
if _campaign_dict_cache is None:
# All pointing parameters and dates are stored in a JSON file
fn = os.path.join(PACKAGEDIR, "data", "k2-campaign-parameters.json")
_campaign_dict_cache = json.load(open(fn))
return _campaign_dict_cache | Returns a dictionary specifying the details of all campaigns. | Below is the the instruction that describes the task:
### Input:
Returns a dictionary specifying the details of all campaigns.
### Response:
def _getCampaignDict():
"""Returns a dictionary specifying the details of all campaigns."""
global _campaign_dict_cache
if _campaign_dict_cache is None:
# All pointing parameters and dates are stored in a JSON file
fn = os.path.join(PACKAGEDIR, "data", "k2-campaign-parameters.json")
_campaign_dict_cache = json.load(open(fn))
return _campaign_dict_cache |
def part(self, channel, reason=''):
"""
Part a channel.
Required arguments:
* channel - Channel to part.
Optional arguments:
* reason='' - Reason for parting.
"""
with self.lock:
self.is_in_channel(channel)
self.send('PART %s :%s' % (channel, reason))
msg = self._recv(expected_replies=('PART',))
if msg[0] == 'PART':
del self.channels[msg[1]]
if not self.hide_called_events:
self.stepback() | Part a channel.
Required arguments:
* channel - Channel to part.
Optional arguments:
* reason='' - Reason for parting. | Below is the the instruction that describes the task:
### Input:
Part a channel.
Required arguments:
* channel - Channel to part.
Optional arguments:
* reason='' - Reason for parting.
### Response:
def part(self, channel, reason=''):
"""
Part a channel.
Required arguments:
* channel - Channel to part.
Optional arguments:
* reason='' - Reason for parting.
"""
with self.lock:
self.is_in_channel(channel)
self.send('PART %s :%s' % (channel, reason))
msg = self._recv(expected_replies=('PART',))
if msg[0] == 'PART':
del self.channels[msg[1]]
if not self.hide_called_events:
self.stepback() |
def from_array(self, coeffs, normalization='4pi', csphase=1, lmax=None,
copy=True):
"""
Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
"""
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
lmaxin = coeffs.shape[1] - 1
if lmax is None:
lmax = lmaxin
else:
if lmax > lmaxin:
lmax = lmaxin
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs[:, 0:lmax+1, 0:lmax+1],
normalization=normalization.lower(),
csphase=csphase, copy=copy) | Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array. | Below is the the instruction that describes the task:
### Input:
Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
### Response:
def from_array(self, coeffs, normalization='4pi', csphase=1, lmax=None,
copy=True):
"""
Initialize the class with spherical harmonic coefficients from an input
array.
Usage
-----
x = SHCoeffs.from_array(array, [normalization, csphase, lmax, copy])
Returns
-------
x : SHCoeffs class instance.
Parameters
----------
array : ndarray, shape (2, lmaxin+1, lmaxin+1).
The input spherical harmonic coefficients.
normalization : str, optional, default = '4pi'
'4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized,
orthonormalized, Schmidt semi-normalized, or unnormalized
coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax : int, optional, default = None
The maximum spherical harmonic degree to include in the returned
class instance. This must be less than or equal to lmaxin.
copy : bool, optional, default = True
If True, make a copy of array when initializing the class instance.
If False, initialize the class instance with a reference to array.
"""
if _np.iscomplexobj(coeffs):
kind = 'complex'
else:
kind = 'real'
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
lmaxin = coeffs.shape[1] - 1
if lmax is None:
lmax = lmaxin
else:
if lmax > lmaxin:
lmax = lmaxin
if normalization.lower() == 'unnorm' and lmax > 85:
_warnings.warn("Calculations using unnormalized coefficients " +
"are stable only for degrees less than or equal " +
"to 85. lmax for the coefficients will be set to " +
"85. Input value was {:d}.".format(lmax),
category=RuntimeWarning)
lmax = 85
for cls in self.__subclasses__():
if cls.istype(kind):
return cls(coeffs[:, 0:lmax+1, 0:lmax+1],
normalization=normalization.lower(),
csphase=csphase, copy=copy) |
def _create_index_file(
root_dir, location, image_files, dirs, force_no_processing=False):
"""
Create an index file in the given location, supplying known lists of
present image files and subdirectories.
@param {String} root_dir - The root directory of the entire crawl. Used to
ascertain whether the given location is the top level.
@param {String} location - The current directory of the crawl. The index
file will be created here.
@param {[String]} image_files - A list of image file names in the location.
These will be displayed in the index file's gallery.
@param {[String]} dirs - The subdirectories of the location directory.
These will be displayed as links further down the file structure.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {String} The full path (location plus filename) of the newly
created index file. Intended for usage cleaning up created files.
"""
# Put together HTML as a list of the lines we'll want to include
# Issue #2 exists to do this better than HTML in-code
header_text = \
'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]'
html = [
'<!DOCTYPE html>',
'<html>',
' <head>',
' <title>imageMe</title>'
' <style>',
' html, body {margin: 0;padding: 0;}',
' .header {text-align: right;}',
' .content {',
' padding: 3em;',
' padding-left: 4em;',
' padding-right: 4em;',
' }',
' .image {max-width: 100%; border-radius: 0.3em;}',
' td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}',
' </style>',
' </head>',
' <body>',
' <div class="content">',
' <h2 class="header">' + header_text + '</h2>'
]
# Populate the present subdirectories - this includes '..' unless we're at
# the top level
directories = []
if root_dir != location:
directories = ['..']
directories += dirs
if len(directories) > 0:
html.append('<hr>')
# For each subdirectory, include a link to its index file
for directory in directories:
link = directory + '/' + INDEX_FILE_NAME
html += [
' <h3 class="header">',
' <a href="' + link + '">' + directory + '</a>',
' </h3>'
]
# Populate the image gallery table
# Counter to cycle down through table rows
table_row_count = 1
html += ['<hr>', '<table>']
# For each image file, potentially create a new <tr> and create a new <td>
for image_file in image_files:
if table_row_count == 1:
html.append('<tr>')
img_src = _get_thumbnail_src_from_file(
location, image_file, force_no_processing
)
link_target = _get_image_link_target_from_file(
location, image_file, force_no_processing
)
html += [
' <td>',
' <a href="' + link_target + '">',
' <img class="image" src="' + img_src + '">',
' </a>',
' </td>'
]
if table_row_count == IMAGES_PER_ROW:
table_row_count = 0
html.append('</tr>')
table_row_count += 1
html += ['</tr>', '</table>']
html += [
' </div>',
' </body>',
'</html>'
]
# Actually create the file, now we've put together the HTML content
index_file_path = _get_index_file_path(location)
print('Creating index file %s' % index_file_path)
index_file = open(index_file_path, 'w')
index_file.write('\n'.join(html))
index_file.close()
# Return the path for cleaning up later
return index_file_path | Create an index file in the given location, supplying known lists of
present image files and subdirectories.
@param {String} root_dir - The root directory of the entire crawl. Used to
ascertain whether the given location is the top level.
@param {String} location - The current directory of the crawl. The index
file will be created here.
@param {[String]} image_files - A list of image file names in the location.
These will be displayed in the index file's gallery.
@param {[String]} dirs - The subdirectories of the location directory.
These will be displayed as links further down the file structure.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {String} The full path (location plus filename) of the newly
created index file. Intended for usage cleaning up created files. | Below is the the instruction that describes the task:
### Input:
Create an index file in the given location, supplying known lists of
present image files and subdirectories.
@param {String} root_dir - The root directory of the entire crawl. Used to
ascertain whether the given location is the top level.
@param {String} location - The current directory of the crawl. The index
file will be created here.
@param {[String]} image_files - A list of image file names in the location.
These will be displayed in the index file's gallery.
@param {[String]} dirs - The subdirectories of the location directory.
These will be displayed as links further down the file structure.
@param {Boolean=False} force_no_processing - If True, do not attempt to
actually process thumbnails, PIL images or anything. Simply index
<img> tags with original file src attributes.
@return {String} The full path (location plus filename) of the newly
created index file. Intended for usage cleaning up created files.
### Response:
def _create_index_file(
        root_dir, location, image_files, dirs, force_no_processing=False):
    """
    Create an index file in the given location, supplying known lists of
    present image files and subdirectories.
    @param {String} root_dir - The root directory of the entire crawl. Used to
        ascertain whether the given location is the top level.
    @param {String} location - The current directory of the crawl. The index
        file will be created here.
    @param {[String]} image_files - A list of image file names in the location.
        These will be displayed in the index file's gallery.
    @param {[String]} dirs - The subdirectories of the location directory.
        These will be displayed as links further down the file structure.
    @param {Boolean=False} force_no_processing - If True, do not attempt to
        actually process thumbnails, PIL images or anything. Simply index
        <img> tags with original file src attributes.
    @return {String} The full path (location plus filename) of the newly
        created index file. Intended for usage cleaning up created files.
    """
    # Put together HTML as a list of the lines we'll want to include
    # Issue #2 exists to do this better than HTML in-code
    header_text = \
        'imageMe: ' + location + ' [' + str(len(image_files)) + ' image(s)]'
    html = [
        '<!DOCTYPE html>',
        '<html>',
        ' <head>',
        # BUG FIX: a comma was missing after the <title> element, which made
        # Python concatenate it with the following '<style>' literal into a
        # single list entry.
        ' <title>imageMe</title>',
        ' <style>',
        ' html, body {margin: 0;padding: 0;}',
        ' .header {text-align: right;}',
        ' .content {',
        ' padding: 3em;',
        ' padding-left: 4em;',
        ' padding-right: 4em;',
        ' }',
        ' .image {max-width: 100%; border-radius: 0.3em;}',
        ' td {width: ' + str(100.0 / IMAGES_PER_ROW) + '%;}',
        ' </style>',
        ' </head>',
        ' <body>',
        ' <div class="content">',
        ' <h2 class="header">' + header_text + '</h2>'
    ]
    # Populate the present subdirectories - this includes '..' unless we're at
    # the top level
    directories = []
    if root_dir != location:
        directories = ['..']
    directories += dirs
    if len(directories) > 0:
        html.append('<hr>')
    # For each subdirectory, include a link to its index file
    for directory in directories:
        link = directory + '/' + INDEX_FILE_NAME
        html += [
            ' <h3 class="header">',
            ' <a href="' + link + '">' + directory + '</a>',
            ' </h3>'
        ]
    # Populate the image gallery table
    # Counter to cycle down through table rows
    table_row_count = 1
    html += ['<hr>', '<table>']
    # For each image file, potentially create a new <tr> and create a new <td>
    for image_file in image_files:
        if table_row_count == 1:
            html.append('<tr>')
        img_src = _get_thumbnail_src_from_file(
            location, image_file, force_no_processing
        )
        link_target = _get_image_link_target_from_file(
            location, image_file, force_no_processing
        )
        html += [
            ' <td>',
            ' <a href="' + link_target + '">',
            ' <img class="image" src="' + img_src + '">',
            ' </a>',
            ' </td>'
        ]
        if table_row_count == IMAGES_PER_ROW:
            table_row_count = 0
            html.append('</tr>')
        table_row_count += 1
    # BUG FIX: only close the final row if one is actually open. The old code
    # always appended '</tr>', which produced a stray closing tag when there
    # were no images or when the image count was a multiple of IMAGES_PER_ROW.
    if table_row_count != 1:
        html.append('</tr>')
    html.append('</table>')
    html += [
        ' </div>',
        ' </body>',
        '</html>'
    ]
    # Actually create the file, now we've put together the HTML content.
    # Use a context manager so the handle is closed even if the write fails.
    index_file_path = _get_index_file_path(location)
    print('Creating index file %s' % index_file_path)
    with open(index_file_path, 'w') as index_file:
        index_file.write('\n'.join(html))
    # Return the path for cleaning up later
    return index_file_path
def reset(project, user):
"""Reset system, delete all output files and prepare for a new run"""
d = Project.path(project, user) + "output"
if os.path.isdir(d):
shutil.rmtree(d)
os.makedirs(d)
else:
raise flask.abort(404)
if os.path.exists(Project.path(project, user) + ".done"):
os.unlink(Project.path(project, user) + ".done")
if os.path.exists(Project.path(project, user) + ".status"):
os.unlink(Project.path(project, user) + ".status") | Reset system, delete all output files and prepare for a new run | Below is the the instruction that describes the task:
### Input:
Reset system, delete all output files and prepare for a new run
### Response:
def reset(project, user):
    """Reset system, delete all output files and prepare for a new run.

    Removes the project's ``output`` directory (recreating it empty) and the
    ``.done`` / ``.status`` run-state marker files.

    :param project: project identifier, passed through to ``Project.path``
    :param user: user identifier, passed through to ``Project.path``
    :raises HTTPException: 404 via ``flask.abort`` if the project has no
        output directory (i.e. it does not exist or was never run)
    """
    # Compute the project base path once instead of four separate calls.
    base = Project.path(project, user)
    output_dir = base + "output"
    if not os.path.isdir(output_dir):
        # flask.abort raises the HTTPException itself; wrapping it in a
        # `raise` statement (as the old code did) was redundant.
        flask.abort(404)
    # Wipe and recreate an empty output directory.
    shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    # Clear run-state markers, if present.
    for marker in (".done", ".status"):
        marker_path = base + marker
        if os.path.exists(marker_path):
            os.unlink(marker_path)
def define_parameters(**parameters):
"""Get a list of parameters to pass to AWS boto call."""
params = []
for key, value in parameters.items():
param = dict(ParameterKey=key, ParameterValue=value)
params.append(param)
return params | Get a list of parameters to pass to AWS boto call. | Below is the the instruction that describes the task:
### Input:
Get a list of parameters to pass to AWS boto call.
### Response:
def define_parameters(**parameters):
    """Build the ``Parameters`` list expected by an AWS boto call.

    Each keyword argument becomes one entry of the form
    ``{'ParameterKey': <name>, 'ParameterValue': <value>}``.
    """
    return [
        {'ParameterKey': key, 'ParameterValue': value}
        for key, value in parameters.items()
    ]
def connect(self, pattern, presenter, **kwargs):
""" Shortcut for self.route_map().connect() method. It is possible to pass presenter class instead of
its name - in that case such class will be saved in presenter collection and it will be available in
route matching.
:param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
:param presenter: presenter name or presenter class
:param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
:return: None
"""
if isinstance(presenter, type) and issubclass(presenter, WWebPresenter) is True:
self.presenter_collection().add(presenter)
presenter = presenter.__presenter_name__()
self.__route_map.connect(pattern, presenter, **kwargs) | Shortcut for self.route_map().connect() method. It is possible to pass presenter class instead of
its name - in that case such class will be saved in presenter collection and it will be available in
route matching.
:param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
:param presenter: presenter name or presenter class
:param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
:return: None | Below is the the instruction that describes the task:
### Input:
Shortcut for self.route_map().connect() method. It is possible to pass presenter class instead of
its name - in that case such class will be saved in presenter collection and it will be available in
route matching.
:param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
:param presenter: presenter name or presenter class
:param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
:return: None
### Response:
def connect(self, pattern, presenter, **kwargs):
    """ Shortcut for self.route_map().connect(). A presenter class may be
    passed instead of a presenter name; in that case the class is added to
    the presenter collection and its declared name is used for route
    matching.

    :param pattern: same as pattern in :meth:`.WWebRouteMap.connect` method
    :param presenter: presenter name or presenter class
    :param kwargs: same as kwargs in :meth:`.WWebRouteMap.connect` method
    :return: None
    """
    presenter_is_class = \
        isinstance(presenter, type) and issubclass(presenter, WWebPresenter)
    if presenter_is_class:
        # Register the class so route matching can find it, then route by name.
        self.presenter_collection().add(presenter)
        presenter = presenter.__presenter_name__()
    self.__route_map.connect(pattern, presenter, **kwargs)
def delete_namespaced_daemon_set(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_daemon_set # noqa: E501
delete a DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_daemon_set_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | delete_namespaced_daemon_set # noqa: E501
delete a DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
delete_namespaced_daemon_set # noqa: E501
delete a DaemonSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_daemon_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DaemonSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_namespaced_daemon_set(self, name, namespace, **kwargs):  # noqa: E501
    """Delete a DaemonSet.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread back instead of the deserialized result.

    :param async_req bool
    :param str name: name of the DaemonSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Must be a non-negative integer; zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use PropagationPolicy. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Acceptable values are: 'Orphan', 'Background', 'Foreground'.
    :param V1DeleteOptions body:
    :return: V1Status
        If the method is called asynchronously,
        returns the request thread.
    """
    # Always ask the lower layer for just the deserialized body.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: hand the request thread back to the caller.
        return self.delete_namespaced_daemon_set_with_http_info(
            name, namespace, **kwargs)  # noqa: E501
    # Synchronous call: block for and return the deserialized V1Status.
    return self.delete_namespaced_daemon_set_with_http_info(
        name, namespace, **kwargs)  # noqa: E501
def search(self, q=''):
"""GET /v1/search"""
if q:
q = '?q=' + q
return self._http_call('/v1/search' + q, get) | GET /v1/search | Below is the the instruction that describes the task:
### Input:
GET /v1/search
### Response:
def search(self, q=''):
    """GET /v1/search

    Issues the search request, appending ``?q=<query>`` when a query
    string is supplied.
    """
    query_suffix = '?q=' + q if q else ''
    return self._http_call('/v1/search' + query_suffix, get)
def sequences_to_string(self):
"""
Convert state indices to a string of characters
"""
return {k: ''.join(self.states[v]) for (k, v) in self.sequences.items()} | Convert state indices to a string of characters | Below is the the instruction that describes the task:
### Input:
Convert state indices to a string of characters
### Response:
def sequences_to_string(self):
    """Convert each sequence of state indices into a string of characters.

    Returns a dict mapping sequence name -> joined character string, where
    each index list is resolved through ``self.states``.
    """
    rendered = {}
    for name, state_indices in self.sequences.items():
        rendered[name] = ''.join(self.states[state_indices])
    return rendered
def render(template, **context):
'''
Render a template with uData frontend specifics
* Theme
'''
theme = current_app.config['THEME']
return render_theme_template(get_theme(theme), template, **context) | Render a template with uData frontend specifics
* Theme | Below is the instruction that describes the task:
### Input:
Render a template with uData frontend specifics
* Theme
### Response:
def render(template, **context):
    '''
    Render a template with uData frontend specifics
    * Theme
    '''
    active_theme = get_theme(current_app.config['THEME'])
    return render_theme_template(active_theme, template, **context)
def main():
"""Main script handler.
Returns:
int: 0 for success, >1 error code
"""
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')
try:
cli()
return 0
except LocationsError as error:
print(error)
return 2
except RuntimeError as error:
print(error)
return 255
except OSError as error:
return error.errno | Main script handler.
Returns:
int: 0 for success, >1 error code | Below is the the instruction that describes the task:
### Input:
Main script handler.
Returns:
int: 0 for success, >1 error code
### Response:
def main():
    """Main script handler.

    Returns:
        int: 0 for success, >1 error code
    """
    logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s')
    try:
        cli()
    except LocationsError as error:
        # Configuration/location problems map to exit code 2.
        print(error)
        return 2
    except RuntimeError as error:
        # Generic runtime failures map to exit code 255.
        print(error)
        return 255
    except OSError as error:
        # Propagate the OS errno directly as the exit code.
        return error.errno
    return 0
def sine_wave(frequency):
"""Emit a sine wave at the given frequency."""
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
return tf.sin(2 * math.pi * frequency * ts) | Emit a sine wave at the given frequency. | Below is the the instruction that describes the task:
### Input:
Emit a sine wave at the given frequency.
### Response:
def sine_wave(frequency):
    """Emit a sine wave at the given frequency."""
    # Sample index tensor shaped [1, num_samples, 1].
    sample_indices = tf.reshape(
        tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
    # Convert sample indices to time in seconds.
    times = sample_indices / FLAGS.sample_rate
    return tf.sin(2 * math.pi * frequency * times)
def blueprint(self, blueprint, **options):
'''Register a blueprint on the application.
:param blueprint: Blueprint object
:param options: option dictionary with blueprint defaults
:return: Nothing
'''
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint with the name "%s" is already registered. ' \
'Blueprint names must be unique.' % \
(blueprint.name,)
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
blueprint.register(self, options) | Register a blueprint on the application.
:param blueprint: Blueprint object
:param options: option dictionary with blueprint defaults
:return: Nothing | Below is the the instruction that describes the task:
### Input:
Register a blueprint on the application.
:param blueprint: Blueprint object
:param options: option dictionary with blueprint defaults
:return: Nothing
### Response:
def blueprint(self, blueprint, **options):
    '''Register a blueprint on the application.

    A blueprint may be registered more than once, but only with the very
    same object; registering a *different* blueprint under an existing
    name is an error.

    :param blueprint: Blueprint object
    :param options: option dictionary with blueprint defaults
    :return: Nothing
    '''
    existing = self.blueprints.get(blueprint.name)
    if existing is not None:
        assert existing is blueprint, \
            'A blueprint with the name "%s" is already registered. ' \
            'Blueprint names must be unique.' % \
            (blueprint.name,)
    else:
        self.blueprints[blueprint.name] = blueprint
        self._blueprint_order.append(blueprint)
    blueprint.register(self, options)
def ReadLink(path):
'''
Read the target of the symbolic link at `path`.
:param unicode path:
Path to a symbolic link
:returns unicode:
Target of a symbolic link
'''
_AssertIsLocal(path)
if sys.platform != 'win32':
return os.readlink(path) # @UndefinedVariable
if not IsLink(path):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(path)
import jaraco.windows.filesystem
result = jaraco.windows.filesystem.readlink(path)
if '\\??\\' in result:
result = result.split('\\??\\')[1]
return result | Read the target of the symbolic link at `path`.
:param unicode path:
Path to a symbolic link
:returns unicode:
Target of a symbolic link | Below is the the instruction that describes the task:
### Input:
Read the target of the symbolic link at `path`.
:param unicode path:
Path to a symbolic link
:returns unicode:
Target of a symbolic link
### Response:
def ReadLink(path):
    '''
    Read the target of the symbolic link at `path`.

    :param unicode path:
        Path to a symbolic link

    :returns unicode:
        Target of a symbolic link
    '''
    _AssertIsLocal(path)
    # On POSIX platforms the stdlib call is all we need.
    if sys.platform != 'win32':
        return os.readlink(path)  # @UndefinedVariable
    # Windows: validate first, then delegate to jaraco.windows.
    if not IsLink(path):
        from ._exceptions import FileNotFoundError
        raise FileNotFoundError(path)
    import jaraco.windows.filesystem
    target = jaraco.windows.filesystem.readlink(path)
    # Strip the NT object-namespace prefix readlink may include.
    nt_prefix = '\\??\\'
    if nt_prefix in target:
        target = target.split(nt_prefix)[1]
    return target
def get_c_sources(folder, include_headers=False):
"""Find all C/C++ source files in the `folder` directory."""
allowed_extensions = [".c", ".C", ".cc", ".cpp", ".cxx", ".c++"]
if include_headers:
allowed_extensions += [".h", ".hpp"]
sources = []
for root, _, files in os.walk(folder):
for name in files:
ext = os.path.splitext(name)[1]
if name == "types.cc":
# Make sure `types.cc` is compiled first, as it has multiple
# useful static assertions.
sources.insert(0, os.path.join(root, name))
elif ext in allowed_extensions:
sources.append(os.path.join(root, name))
return sources | Find all C/C++ source files in the `folder` directory. | Below is the the instruction that describes the task:
### Input:
Find all C/C++ source files in the `folder` directory.
### Response:
def get_c_sources(folder, include_headers=False):
    """Find all C/C++ source files in the `folder` directory.

    Walks `folder` recursively, collecting files whose extension marks them
    as C/C++ sources (and, optionally, headers). `types.cc` is always placed
    first in the returned list.
    """
    extensions = {".c", ".C", ".cc", ".cpp", ".cxx", ".c++"}
    if include_headers:
        extensions |= {".h", ".hpp"}
    sources = []
    for root, _, files in os.walk(folder):
        for name in files:
            if name == "types.cc":
                # Make sure `types.cc` is compiled first, as it has multiple
                # useful static assertions.
                sources.insert(0, os.path.join(root, name))
            elif os.path.splitext(name)[1] in extensions:
                sources.append(os.path.join(root, name))
    return sources
def _netbsd_gpu_data():
'''
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
'''
known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
gpus = []
try:
pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
line,
re.IGNORECASE
)
if vendor_match:
gpus.append({'vendor': vendor_match.group(1), 'model': vendor_match.group(2)})
except OSError:
pass
grains = {}
grains['num_gpus'] = len(gpus)
grains['gpus'] = gpus
return grains | num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string | Below is the the instruction that describes the task:
### Input:
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
### Response:
def _netbsd_gpu_data():
    '''
    num_gpus: int
    gpus:
      - vendor: nvidia|amd|ati|...
        model: string
    '''
    known_vendors = ['nvidia', 'amd', 'ati', 'intel', 'cirrus logic', 'vmware', 'matrox', 'aspeed']
    gpus = []
    try:
        # Parse `pcictl pci0 list` output for VGA devices of known vendors.
        pcictl_out = __salt__['cmd.run']('pcictl pci0 list')
        for line in pcictl_out.splitlines():
            for vendor in known_vendors:
                match = re.match(
                    r'[0-9:]+ ({0}) (.+) \(VGA .+\)'.format(vendor),
                    line,
                    re.IGNORECASE
                )
                if match:
                    gpus.append({'vendor': match.group(1), 'model': match.group(2)})
    except OSError:
        # pcictl unavailable -- report zero GPUs rather than fail.
        pass
    return {'num_gpus': len(gpus), 'gpus': gpus}
def resample(self, *args, **kwargs):
"""Run precompute and compute methods.
.. note::
This sets the default of 'mask_area' to False since it is
not needed in EWA resampling currently.
"""
kwargs.setdefault('mask_area', False)
return super(EWAResampler, self).resample(*args, **kwargs) | Run precompute and compute methods.
.. note::
This sets the default of 'mask_area' to False since it is
not needed in EWA resampling currently. | Below is the the instruction that describes the task:
### Input:
Run precompute and compute methods.
.. note::
This sets the default of 'mask_area' to False since it is
not needed in EWA resampling currently.
### Response:
def resample(self, *args, **kwargs):
    """Run precompute and compute methods.

    .. note::
        This sets the default of 'mask_area' to False since it is
        not needed in EWA resampling currently.
    """
    if 'mask_area' not in kwargs:
        kwargs['mask_area'] = False
    return super(EWAResampler, self).resample(*args, **kwargs)
def rosen_nesterov(self, x, rho=100):
"""needs exponential number of steps in a non-increasing f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
"""
f = 0.25 * (x[0] - 1)**2
f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
return f | needs exponential number of steps in a non-increasing f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function" | Below is the the instruction that describes the task:
### Input:
needs exponential number of steps in a non-increasing f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
### Response:
def rosen_nesterov(self, x, rho=100):
    """Nesterov's smooth Chebyshev-Rosenbrock function.

    Needs an exponential number of steps in a non-increasing f-sequence
    starting from x_0 = (-1, 1, ..., 1). See Jarre (2011) "On Nesterov's
    Smooth Chebyshev-Rosenbrock Function".
    """
    quadratic_part = 0.25 * (x[0] - 1) ** 2
    coupling_part = rho * sum((x[1:] - 2 * x[:-1] ** 2 + 1) ** 2)
    return quadratic_part + coupling_part
def get_conn(self):
"""
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
"""
connections = self.get_connections(self.webhdfs_conn_id)
for connection in connections:
try:
self.log.debug('Trying namenode %s', connection.host)
client = self._get_client(connection)
client.status('/')
self.log.debug('Using namenode %s for hook', connection.host)
return client
except HdfsError as hdfs_error:
self.log.debug('Read operation on namenode %s failed with error: %s',
connection.host, hdfs_error)
hosts = [connection.host for connection in connections]
error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format(
hosts='\n'.join(hosts))
raise AirflowWebHDFSHookException(error_message) | Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient | Below is the the instruction that describes the task:
### Input:
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
:rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
### Response:
def get_conn(self):
    """
    Establishes a connection depending on the security mode set via config or environment variable.
    :return: a hdfscli InsecureClient or KerberosClient object.
    :rtype: hdfs.InsecureClient or hdfs.ext.kerberos.KerberosClient
    """
    conns = self.get_connections(self.webhdfs_conn_id)
    # Try each configured namenode in turn; the first one that answers a
    # cheap status probe on the filesystem root wins.
    for conn in conns:
        try:
            self.log.debug('Trying namenode %s', conn.host)
            candidate = self._get_client(conn)
            candidate.status('/')  # verifies the namenode is reachable
            self.log.debug('Using namenode %s for hook', conn.host)
            return candidate
        except HdfsError as hdfs_error:
            self.log.debug('Read operation on namenode %s failed with error: %s',
                           conn.host, hdfs_error)
    # Every namenode failed the probe: report all of them at once.
    failed_hosts = '\n'.join(c.host for c in conns)
    raise AirflowWebHDFSHookException(
        'Read operations failed on the namenodes below:\n{hosts}'.format(
            hosts=failed_hosts))
def difference(self, *args):
    """
    Take the difference between one array and a number of other arrays.
    Only the elements present in just the first array will remain.

    :param args: any number of iterables whose elements are removed
        from the wrapped collection.
    :return: a wrapped collection of elements unique to the first array.
    """
    # set.difference accepts multiple iterables directly, so one call
    # replaces the original enumerate loop (whose loop variable `v` was
    # never used and which re-subtracted one set per iteration).
    setobj = set(self.obj).difference(*args)
    return self._wrap(self._clean._toOriginal(setobj))
Only the elements present in just the first array will remain. | Below is the the instruction that describes the task:
### Input:
Take the difference between one array and a number of other arrays.
Only the elements present in just the first array will remain.
### Response:
def difference(self, *args):
    """
    Take the difference between one array and a number of other arrays.
    Only the elements present in just the first array will remain.

    :param args: any number of iterables whose elements are removed
        from the wrapped collection.
    :return: a wrapped collection of elements unique to the first array.
    """
    # set.difference accepts multiple iterables directly, so one call
    # replaces the original enumerate loop (whose loop variable `v` was
    # never used and which re-subtracted one set per iteration).
    setobj = set(self.obj).difference(*args)
    return self._wrap(self._clean._toOriginal(setobj))
def find_path(self, in_, out):
    '''
    Given an input and output TypeString, produce a graph traversal,
    keeping in mind special options like Conversion Profiles, Preferred
    Paths, and Direct Conversions.

    :param in_: source TypeString; must not carry arguments.
    :param out: target TypeString (anything whose str() names a format).
    :return: list of (converter, from_TypeString, to_TypeString) triples,
        one per conversion step, in traversal order.
    :raises ValueError: if ``in_`` has arguments attached.
    '''
    if in_.arguments:
        raise ValueError('Cannot originate path in argumented TypeString')
    # Determine conversion profile. This is either simply the output, OR,
    # if a custom profile has been specified for this output, that custom
    # path or type is used.
    profile = self.conversion_profiles.get(str(out), str(out))
    if isinstance(profile, str):
        # Normalize a single-type profile to a 1-tuple so the rest of the
        # method can treat every profile as a sequence of steps.
        profile = (profile, )
    # Map each step's bare format name to its (possibly argumented)
    # TypeString, so arguments survive the string-based graph search.
    types_by_format = {_format(s): TypeString(s) for s in profile}
    # Normalize input and output types to string
    in_str = str(in_)
    out_str = _format(profile[0])
    # First check for direct conversions, returning immediately if found
    direct_converter = self.direct_converters.get((in_str, out_str))
    if direct_converter:
        out_ts = types_by_format.get(out_str, TypeString(out_str))
        return [(direct_converter, TypeString(in_str), out_ts)]
    # No direct conversions was found, so find path through graph.
    # If profile was plural, add in extra steps.
    path = self.dgraph.shortest_path(in_str, out_str)
    path += profile[1:]
    # Loop through each edge traversal, adding converters and type
    # string pairs as we go along. This is to ensure conversion
    # profiles that have arguments mid-profile get included.
    results = []
    for left, right in pair_looper(path):
        converter = self.converters.get((_format(left), _format(right)))
        right_typestring = types_by_format.get(right, TypeString(right))
        results.append((converter, TypeString(left), right_typestring))
    return results
keeping in mind special options like Conversion Profiles, Preferred
Paths, and Direct Conversions. | Below is the the instruction that describes the task:
### Input:
Given an input and output TypeString, produce a graph traversal,
keeping in mind special options like Conversion Profiles, Preferred
Paths, and Direct Conversions.
### Response:
def find_path(self, in_, out):
    '''
    Given an input and output TypeString, produce a graph traversal,
    keeping in mind special options like Conversion Profiles, Preferred
    Paths, and Direct Conversions.

    :param in_: source TypeString; must not carry arguments.
    :param out: target TypeString (anything whose str() names a format).
    :return: list of (converter, from_TypeString, to_TypeString) triples,
        one per conversion step, in traversal order.
    :raises ValueError: if ``in_`` has arguments attached.
    '''
    if in_.arguments:
        raise ValueError('Cannot originate path in argumented TypeString')
    # Determine conversion profile. This is either simply the output, OR,
    # if a custom profile has been specified for this output, that custom
    # path or type is used.
    profile = self.conversion_profiles.get(str(out), str(out))
    if isinstance(profile, str):
        # Normalize a single-type profile to a 1-tuple so the rest of the
        # method can treat every profile as a sequence of steps.
        profile = (profile, )
    # Map each step's bare format name to its (possibly argumented)
    # TypeString, so arguments survive the string-based graph search.
    types_by_format = {_format(s): TypeString(s) for s in profile}
    # Normalize input and output types to string
    in_str = str(in_)
    out_str = _format(profile[0])
    # First check for direct conversions, returning immediately if found
    direct_converter = self.direct_converters.get((in_str, out_str))
    if direct_converter:
        out_ts = types_by_format.get(out_str, TypeString(out_str))
        return [(direct_converter, TypeString(in_str), out_ts)]
    # No direct conversions was found, so find path through graph.
    # If profile was plural, add in extra steps.
    path = self.dgraph.shortest_path(in_str, out_str)
    path += profile[1:]
    # Loop through each edge traversal, adding converters and type
    # string pairs as we go along. This is to ensure conversion
    # profiles that have arguments mid-profile get included.
    results = []
    for left, right in pair_looper(path):
        converter = self.converters.get((_format(left), _format(right)))
        right_typestring = types_by_format.get(right, TypeString(right))
        results.append((converter, TypeString(left), right_typestring))
    return results
def behavior_script(url, template_parameters=None, behaviors_dir=None):
    '''
    Return the javascript behavior string populated with
    template_parameters, or None when no behavior matches the url.
    '''
    import re, logging, json
    for entry in behaviors(behaviors_dir=behaviors_dir):
        if not re.match(entry['url_regex'], url):
            continue
        # Start from the behavior's declared defaults, then let the
        # caller-supplied parameters override them.
        merged = dict()
        if 'default_parameters' in entry:
            merged.update(entry['default_parameters'])
        if template_parameters:
            merged.update(template_parameters)
        template = jinja2_environment(behaviors_dir).get_template(
                entry['behavior_js_template'])
        script = template.render(merged)
        logging.info(
                'using template=%r populated with parameters=%r for %r',
                entry['behavior_js_template'], json.dumps(merged), url)
        return script
    return None
### Input:
Returns the javascript behavior string populated with template_parameters.
### Response:
def behavior_script(url, template_parameters=None, behaviors_dir=None):
    '''
    Return the javascript behavior string populated with
    template_parameters, or None when no behavior matches the url.
    '''
    import re, logging, json
    for entry in behaviors(behaviors_dir=behaviors_dir):
        if not re.match(entry['url_regex'], url):
            continue
        # Start from the behavior's declared defaults, then let the
        # caller-supplied parameters override them.
        merged = dict()
        if 'default_parameters' in entry:
            merged.update(entry['default_parameters'])
        if template_parameters:
            merged.update(template_parameters)
        template = jinja2_environment(behaviors_dir).get_template(
                entry['behavior_js_template'])
        script = template.render(merged)
        logging.info(
                'using template=%r populated with parameters=%r for %r',
                entry['behavior_js_template'], json.dumps(merged), url)
        return script
    return None
def normalize_param(key, value):
    """Convert a set of key, value parameters into a dictionary suitable for
    passing into requests. This will convert lists into the Rails/Ruby-style
    bracketed syntax required by SoundCloud. Heavily lifted from HTTParty.

    >>> normalize_param('oauth_token', 'foo')
    {'oauth_token': 'foo'}
    >>> normalize_param('playlist[tracks]', [1234, 4567]) == {
    ...     u'playlist[tracks][]': [1234, 4567]}
    True
    """
    params = {}
    stack = []
    if isinstance(value, list):
        # Recurse into each element under the bracketed "key[]" name.
        normalized = [normalize_param(u"{0[key]}[]".format(dict(key=key)), e)
                      for e in value]
        keys = [item for sublist in tuple(h.keys() for h in normalized)
                for item in sublist]
        lists = {}
        if len(keys) != len(set(keys)):
            # The same sub-key appeared in several elements: collapse the
            # duplicates into one key mapping to the list of values.
            duplicates = [x for x, y in collections.Counter(keys).items() if y > 1]
            for dup in duplicates:
                lists[dup] = [h[dup] for h in normalized]
                for h in normalized:
                    del h[dup]
        params.update(dict((k, v) for d in normalized for (k, v) in d.items()))
        params.update(lists)
    elif isinstance(value, dict):
        stack.append([key, value])
    else:
        params.update({key: value})
    # Flatten nested dicts iteratively. Note the stack grows while being
    # iterated; Python list iteration picks up the appended entries.
    # (six.iteritems was replaced with .items(), which works on both
    # Python 2.7 and 3 and drops the six dependency; the locals that
    # shadowed the builtin `hash` and the parameter `key` were renamed.)
    for (parent, mapping) in stack:
        for (child_key, child_value) in mapping.items():
            if isinstance(child_value, dict):
                stack.append([u"{0[parent]}[{0[key]}]".format(
                    dict(parent=parent, key=child_key)), child_value])
            else:
                params.update(normalize_param(u"{0[parent]}[{0[key]}]".format(
                    dict(parent=parent, key=child_key)), child_value))
    return params
passing into requests. This will convert lists into the syntax required
by SoundCloud. Heavily lifted from HTTParty.
>>> normalize_param('playlist', {
... 'title': 'foo',
... 'sharing': 'private',
... 'tracks': [
... {id: 1234}, {id: 4567}
... ]}) == {
... u'playlist[tracks][][<built-in function id>]': [1234, 4567],
... u'playlist[sharing]': 'private',
... u'playlist[title]': 'foo'} # doctest:+ELLIPSIS
True
>>> normalize_param('oauth_token', 'foo')
{'oauth_token': 'foo'}
>>> normalize_param('playlist[tracks]', [1234, 4567]) == {
... u'playlist[tracks][]': [1234, 4567]}
True | Below is the the instruction that describes the task:
### Input:
Convert a set of key, value parameters into a dictionary suitable for
passing into requests. This will convert lists into the syntax required
by SoundCloud. Heavily lifted from HTTParty.
>>> normalize_param('playlist', {
... 'title': 'foo',
... 'sharing': 'private',
... 'tracks': [
... {id: 1234}, {id: 4567}
... ]}) == {
... u'playlist[tracks][][<built-in function id>]': [1234, 4567],
... u'playlist[sharing]': 'private',
... u'playlist[title]': 'foo'} # doctest:+ELLIPSIS
True
>>> normalize_param('oauth_token', 'foo')
{'oauth_token': 'foo'}
>>> normalize_param('playlist[tracks]', [1234, 4567]) == {
... u'playlist[tracks][]': [1234, 4567]}
True
### Response:
def normalize_param(key, value):
    """Convert a set of key, value parameters into a dictionary suitable for
    passing into requests. This will convert lists into the Rails/Ruby-style
    bracketed syntax required by SoundCloud. Heavily lifted from HTTParty.

    >>> normalize_param('oauth_token', 'foo')
    {'oauth_token': 'foo'}
    >>> normalize_param('playlist[tracks]', [1234, 4567]) == {
    ...     u'playlist[tracks][]': [1234, 4567]}
    True
    """
    params = {}
    stack = []
    if isinstance(value, list):
        # Recurse into each element under the bracketed "key[]" name.
        normalized = [normalize_param(u"{0[key]}[]".format(dict(key=key)), e)
                      for e in value]
        keys = [item for sublist in tuple(h.keys() for h in normalized)
                for item in sublist]
        lists = {}
        if len(keys) != len(set(keys)):
            # The same sub-key appeared in several elements: collapse the
            # duplicates into one key mapping to the list of values.
            duplicates = [x for x, y in collections.Counter(keys).items() if y > 1]
            for dup in duplicates:
                lists[dup] = [h[dup] for h in normalized]
                for h in normalized:
                    del h[dup]
        params.update(dict((k, v) for d in normalized for (k, v) in d.items()))
        params.update(lists)
    elif isinstance(value, dict):
        stack.append([key, value])
    else:
        params.update({key: value})
    # Flatten nested dicts iteratively. Note the stack grows while being
    # iterated; Python list iteration picks up the appended entries.
    # (six.iteritems was replaced with .items(), which works on both
    # Python 2.7 and 3 and drops the six dependency; the locals that
    # shadowed the builtin `hash` and the parameter `key` were renamed.)
    for (parent, mapping) in stack:
        for (child_key, child_value) in mapping.items():
            if isinstance(child_value, dict):
                stack.append([u"{0[parent]}[{0[key]}]".format(
                    dict(parent=parent, key=child_key)), child_value])
            else:
                params.update(normalize_param(u"{0[parent]}[{0[key]}]".format(
                    dict(parent=parent, key=child_key)), child_value))
    return params
def get_stage(self, stage, **kwargs):
    '''
    :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
    :type stage: int or string
    :returns: Hash of stage descriptor in workflow
    '''
    stage_id = self._get_stage_id(stage)
    # Scan the workflow's stage descriptors for the resolved ID
    # (renamed loop variable so it no longer shadows the parameter).
    for descriptor in self.stages:
        if descriptor['id'] == stage_id:
            return descriptor
    raise DXError('The stage ID ' + stage_id + ' could not be found')
:type stage: int or string
:returns: Hash of stage descriptor in workflow | Below is the the instruction that describes the task:
### Input:
:param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
:type stage: int or string
:returns: Hash of stage descriptor in workflow
### Response:
def get_stage(self, stage, **kwargs):
    '''
    :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
    :type stage: int or string
    :returns: Hash of stage descriptor in workflow
    '''
    stage_id = self._get_stage_id(stage)
    # Scan the workflow's stage descriptors for the resolved ID
    # (renamed loop variable so it no longer shadows the parameter).
    for descriptor in self.stages:
        if descriptor['id'] == stage_id:
            return descriptor
    raise DXError('The stage ID ' + stage_id + ' could not be found')
def validate(self, command, args):
    """
    Validates Parameters args for command specified by its name using defined schema.
    If validation schema is not defined than the methods returns no errors.
    It returns validation error if the command is not found.

    :param command: the name of the command for which the 'args' must be validated.
    :param args: the parameters (arguments) to validate.
    :return: an array of ValidationResults. If no command is found by the given
             name, then the returned array of ValidationResults will contain a
             single entry, whose type will be ValidationResultType.Error.
    """
    cref = self.find_command(command)
    if cref is None:  # `is None` instead of the original `== None`
        # Unknown command: report a single error result rather than
        # raising, so callers always receive a list of ValidationResults.
        return [
            ValidationResult(
                None, ValidationResultType.Error,
                "CMD_NOT_FOUND",
                "Requested command does not exist"
            )
        ]
    return cref.validate(args)
If validation schema is not defined than the methods returns no errors.
It returns validation error if the command is not found.
:param command: the name of the command for which the 'args' must be validated.
:param args: the parameters (arguments) to validate.
:return: an array of ValidationResults. If no command is found by the given
name, then the returned array of ValidationResults will contain a
single entry, whose type will be ValidationResultType.Error. | Below is the the instruction that describes the task:
### Input:
Validates Parameters args for command specified by its name using defined schema.
If validation schema is not defined than the methods returns no errors.
It returns validation error if the command is not found.
:param command: the name of the command for which the 'args' must be validated.
:param args: the parameters (arguments) to validate.
:return: an array of ValidationResults. If no command is found by the given
name, then the returned array of ValidationResults will contain a
single entry, whose type will be ValidationResultType.Error.
### Response:
def validate(self, command, args):
    """
    Validates Parameters args for command specified by its name using defined schema.
    If validation schema is not defined than the methods returns no errors.
    It returns validation error if the command is not found.

    :param command: the name of the command for which the 'args' must be validated.
    :param args: the parameters (arguments) to validate.
    :return: an array of ValidationResults. If no command is found by the given
             name, then the returned array of ValidationResults will contain a
             single entry, whose type will be ValidationResultType.Error.
    """
    cref = self.find_command(command)
    if cref is None:  # `is None` instead of the original `== None`
        # Unknown command: report a single error result rather than
        # raising, so callers always receive a list of ValidationResults.
        return [
            ValidationResult(
                None, ValidationResultType.Error,
                "CMD_NOT_FOUND",
                "Requested command does not exist"
            )
        ]
    return cref.validate(args)
def poll_job_in_queue(self, location, jenkins_server):
    """
    This method poll the jenkins queue until the job is executed.
    When we trigger a job through an API call,
    the job is first put in the queue without having a build number assigned.
    Thus we have to wait the job exit the queue to know its build number.
    To do so, we have to add /api/json (or /api/xml) to the location
    returned by the build_job call and poll this file.
    When a 'executable' block appears in the json, it means the job execution started
    and the field 'number' then contains the build number.

    :param location: Location to poll, returned in the header of the build_job call
    :param jenkins_server: The jenkins server to poll
    :return: The build_number corresponding to the triggered job
    :raises AirflowException: if the job is still queued after
        ``max_try_before_job_appears`` polling attempts.
    """
    try_count = 0
    location = location + '/api/json'
    # TODO Use get_queue_info instead
    # once it will be available in python-jenkins (v > 0.4.15)
    self.log.info('Polling jenkins queue at the url %s', location)
    while try_count < self.max_try_before_job_appears:
        location_answer = jenkins_request_with_headers(jenkins_server,
                                                       Request(location))
        if location_answer is not None:
            json_response = json.loads(location_answer['body'])
            if 'executable' in json_response:
                build_number = json_response['executable']['number']
                self.log.info('Job executed on Jenkins side with the build number %s',
                              build_number)
                return build_number
        try_count += 1
        time.sleep(self.sleep_time)
    # Bug fix: exceptions do not lazily interpolate printf-style args the
    # way logging calls do, so the message must be formatted explicitly
    # (the original raised with an unformatted ("...%d times", n) tuple).
    raise AirflowException("The job hasn't been executed"
                           " after polling the queue %d times"
                           % self.max_try_before_job_appears)
When we trigger a job through an API call,
the job is first put in the queue without having a build number assigned.
Thus we have to wait the job exit the queue to know its build number.
To do so, we have to add /api/json (or /api/xml) to the location
returned by the build_job call and poll this file.
When a 'executable' block appears in the json, it means the job execution started
and the field 'number' then contains the build number.
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job | Below is the the instruction that describes the task:
### Input:
This method poll the jenkins queue until the job is executed.
When we trigger a job through an API call,
the job is first put in the queue without having a build number assigned.
Thus we have to wait the job exit the queue to know its build number.
To do so, we have to add /api/json (or /api/xml) to the location
returned by the build_job call and poll this file.
When a 'executable' block appears in the json, it means the job execution started
and the field 'number' then contains the build number.
:param location: Location to poll, returned in the header of the build_job call
:param jenkins_server: The jenkins server to poll
:return: The build_number corresponding to the triggered job
### Response:
def poll_job_in_queue(self, location, jenkins_server):
    """
    This method poll the jenkins queue until the job is executed.
    When we trigger a job through an API call,
    the job is first put in the queue without having a build number assigned.
    Thus we have to wait the job exit the queue to know its build number.
    To do so, we have to add /api/json (or /api/xml) to the location
    returned by the build_job call and poll this file.
    When a 'executable' block appears in the json, it means the job execution started
    and the field 'number' then contains the build number.

    :param location: Location to poll, returned in the header of the build_job call
    :param jenkins_server: The jenkins server to poll
    :return: The build_number corresponding to the triggered job
    :raises AirflowException: if the job is still queued after
        ``max_try_before_job_appears`` polling attempts.
    """
    try_count = 0
    location = location + '/api/json'
    # TODO Use get_queue_info instead
    # once it will be available in python-jenkins (v > 0.4.15)
    self.log.info('Polling jenkins queue at the url %s', location)
    while try_count < self.max_try_before_job_appears:
        location_answer = jenkins_request_with_headers(jenkins_server,
                                                       Request(location))
        if location_answer is not None:
            json_response = json.loads(location_answer['body'])
            if 'executable' in json_response:
                build_number = json_response['executable']['number']
                self.log.info('Job executed on Jenkins side with the build number %s',
                              build_number)
                return build_number
        try_count += 1
        time.sleep(self.sleep_time)
    # Bug fix: exceptions do not lazily interpolate printf-style args the
    # way logging calls do, so the message must be formatted explicitly
    # (the original raised with an unformatted ("...%d times", n) tuple).
    raise AirflowException("The job hasn't been executed"
                           " after polling the queue %d times"
                           % self.max_try_before_job_appears)
def get_print_info(n_step, fill=None):
    """
    Returns the max. number of digits in range(n_step) and the corresponding
    printf-style format string (or (0, None) when n_step <= 1).

    Examples:
    >>> get_print_info(11)
    (2, '%2d')
    >>> get_print_info(100)
    (2, '%2d')
    >>> get_print_info(101, fill='0')
    (3, '%03d')
    """
    if n_step <= 1:
        # Nothing to enumerate: no width, no format string.
        return 0, None
    # Largest index printed is n_step - 1; count its decimal digits.
    n_digit = int(nm.log10(n_step - 1) + 1)
    if fill is None:
        fmt = '%%%dd' % n_digit
    else:
        fmt = '%%%s%dd' % (fill, n_digit)
    return n_digit, fmt
format string.
Examples:
>>> get_print_info(11)
(2, '%2d')
>>> get_print_info(8)
(1, '%1d')
>>> get_print_info(100)
(2, '%2d')
>>> get_print_info(101)
(3, '%3d')
>>> get_print_info(101, fill='0')
(3, '%03d') | Below is the the instruction that describes the task:
### Input:
Returns the max. number of digits in range(n_step) and the corresponding
format string.
Examples:
>>> get_print_info(11)
(2, '%2d')
>>> get_print_info(8)
(1, '%1d')
>>> get_print_info(100)
(2, '%2d')
>>> get_print_info(101)
(3, '%3d')
>>> get_print_info(101, fill='0')
(3, '%03d')
### Response:
def get_print_info(n_step, fill=None):
    """
    Returns the max. number of digits in range(n_step) and the corresponding
    printf-style format string (or (0, None) when n_step <= 1).

    Examples:
    >>> get_print_info(11)
    (2, '%2d')
    >>> get_print_info(100)
    (2, '%2d')
    >>> get_print_info(101, fill='0')
    (3, '%03d')
    """
    if n_step <= 1:
        # Nothing to enumerate: no width, no format string.
        return 0, None
    # Largest index printed is n_step - 1; count its decimal digits.
    n_digit = int(nm.log10(n_step - 1) + 1)
    if fill is None:
        fmt = '%%%dd' % n_digit
    else:
        fmt = '%%%s%dd' % (fill, n_digit)
    return n_digit, fmt
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.