code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def terminate(self, wait=False):
"""Terminate the process."""
if self.proc is not None:
self.proc.stdout.close()
try:
self.proc.terminate()
except ProcessLookupError:
pass
if wait:
self.proc.wait() | Terminate the process. | Below is the instruction that describes the task:
### Input:
Terminate the process.
### Response:
def terminate(self, wait=False):
    """Terminate the process.

    :param wait: when True, block until the child has exited.
    """
    if self.proc is not None:
        # Close our end of stdout first so any reader unblocks.
        self.proc.stdout.close()
        try:
            self.proc.terminate()
        except ProcessLookupError:
            # Process already exited; nothing to terminate.
            pass
        if wait:
            # Reap the child so it does not linger as a zombie.
            self.proc.wait() |
def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name) | Compute any pipelines attached with eager=True. | Below is the instruction that describes the task:
### Input:
Compute any pipelines attached with eager=True.
### Response:
def compute_eager_pipelines(self):
    """
    Compute any pipelines attached with eager=True.
    """
    for name, pipe in self._pipelines.items():
        if pipe.eager:
            # Requesting the output is what triggers the computation.
            self.pipeline_output(name) |
def dispatch(self, event, *args, **kwargs):
"""Emits an event if there are any subscribers.
Args
event (String): The event to be emitted
args: Arguments linked with the event
kwargs: Named arguments linked with the event
"""
# No event, no subscribers.
if event not in self._subscribers:
return
for subscriber in self._subscribers[event]:
subscriber.callback(*args, **kwargs) | Emits an event if there are any subscribers.
Args
event (String): The event to be emitted
args: Arguments linked with the event
kwargs: Named arguments linked with the event | Below is the instruction that describes the task:
### Input:
Emits an event if there are any subscribers.
Args
event (String): The event to be emitted
args: Arguments linked with the event
kwargs: Named arguments linked with the event
### Response:
def dispatch(self, event, *args, **kwargs):
    """Emits an event if there are any subscribers.

    Args
        event (String): The event to be emitted
        args: Arguments linked with the event
        kwargs: Named arguments linked with the event
    """
    # No event, no subscribers.
    if event not in self._subscribers:
        return
    # Invoke every registered callback with the event's payload.
    for subscriber in self._subscribers[event]:
        subscriber.callback(*args, **kwargs) |
def list_available_devices():
"""
List all available devices for the respective backend
returns: devices: a list of dictionaries with the keys 'identifier' and 'instance': \
[ {'identifier': 'usb://0x04f9:0x2015/C5Z315686', 'instance': pyusb.core.Device()}, ]
The 'identifier' is of the format idVendor:idProduct_iSerialNumber.
"""
class find_class(object):
def __init__(self, class_):
self._class = class_
def __call__(self, device):
# first, let's check the device
if device.bDeviceClass == self._class:
return True
# ok, transverse all devices to find an interface that matches our class
for cfg in device:
# find_descriptor: what's it?
intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
if intf is not None:
return True
return False
# only Brother printers
printers = usb.core.find(find_all=1, custom_match=find_class(7), idVendor=0x04f9)
def identifier(dev):
try:
serial = usb.util.get_string(dev, 256, dev.iSerialNumber)
return 'usb://0x{:04x}:0x{:04x}_{}'.format(dev.idVendor, dev.idProduct, serial)
except:
return 'usb://0x{:04x}:0x{:04x}'.format(dev.idVendor, dev.idProduct)
return [{'identifier': identifier(printer), 'instance': printer} for printer in printers] | List all available devices for the respective backend
returns: devices: a list of dictionaries with the keys 'identifier' and 'instance': \
[ {'identifier': 'usb://0x04f9:0x2015/C5Z315686', 'instance': pyusb.core.Device()}, ]
The 'identifier' is of the format idVendor:idProduct_iSerialNumber. | Below is the instruction that describes the task:
### Input:
List all available devices for the respective backend
returns: devices: a list of dictionaries with the keys 'identifier' and 'instance': \
[ {'identifier': 'usb://0x04f9:0x2015/C5Z315686', 'instance': pyusb.core.Device()}, ]
The 'identifier' is of the format idVendor:idProduct_iSerialNumber.
### Response:
def list_available_devices():
    """
    List all available devices for the respective backend

    returns: devices: a list of dictionaries with the keys 'identifier' and 'instance': \
        [ {'identifier': 'usb://0x04f9:0x2015/C5Z315686', 'instance': pyusb.core.Device()}, ]
        The 'identifier' is of the format idVendor:idProduct_iSerialNumber.
    """
    class find_class(object):
        # pyusb custom_match predicate: accept a device whose device class,
        # or any of its interfaces' classes, equals the requested USB class.
        def __init__(self, class_):
            self._class = class_
        def __call__(self, device):
            # first, let's check the device
            if device.bDeviceClass == self._class:
                return True
            # ok, traverse all configurations to find an interface that matches our class
            for cfg in device:
                intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
                if intf is not None:
                    return True
            return False
    # only Brother printers (vendor 0x04f9); class 7 is the USB printer class
    printers = usb.core.find(find_all=1, custom_match=find_class(7), idVendor=0x04f9)
    def identifier(dev):
        try:
            serial = usb.util.get_string(dev, 256, dev.iSerialNumber)
            return 'usb://0x{:04x}:0x{:04x}_{}'.format(dev.idVendor, dev.idProduct, serial)
        except:
            # Serial could not be read (e.g. permission denied): fall back
            # to a vendor:product-only identifier.
            return 'usb://0x{:04x}:0x{:04x}'.format(dev.idVendor, dev.idProduct)
    return [{'identifier': identifier(printer), 'instance': printer} for printer in printers] |
def Tag(self, key, value):
""" Add a user-defined tag. """
if not self._tags:
self._tags = {}
self._tags[key] = value | Add a user-defined tag. | Below is the instruction that describes the task:
### Input:
Add a user-defined tag.
### Response:
def Tag(self, key, value):
    """ Add a user-defined tag. """
    # Lazily create the tag mapping on first use.
    if not self._tags:
        self._tags = {}
    self._tags[key] = value |
def get_language_pack(locale):
"""Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
"""
pack = ALL_LANGUAGE_PACKS.get(locale)
if not pack:
filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)
try:
with open(filename) as f:
pack = json.load(f)
ALL_LANGUAGE_PACKS[locale] = pack
except Exception:
# Assuming english, client side falls back on english
pass
return pack | Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords" | Below is the the instruction that describes the task:
### Input:
Get/cache a language pack
Returns the language pack from cache if it exists, caches otherwise
>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
### Response:
def get_language_pack(locale):
    """Get/cache a language pack

    Returns the language pack from cache if it exists, caches otherwise

    >>> get_language_pack('fr')['Dashboards']
    "Tableaux de bords"
    """
    pack = ALL_LANGUAGE_PACKS.get(locale)
    if not pack:
        filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)
        try:
            with open(filename) as f:
                pack = json.load(f)
                ALL_LANGUAGE_PACKS[locale] = pack
        except Exception:
            # Assuming english, client side falls back on english
            pass
    return pack |
def authenticateUser(self, login, user, properties):
"""
Parameters:
- login
- user
- properties
"""
self.send_authenticateUser(login, user, properties)
return self.recv_authenticateUser() | Parameters:
- login
- user
- properties | Below is the instruction that describes the task:
### Input:
Parameters:
- login
- user
- properties
### Response:
def authenticateUser(self, login, user, properties):
    """
    Parameters:
     - login
     - user
     - properties
    """
    # Generated RPC stub pattern: send the request, then read the
    # corresponding response (presumably Thrift -- confirm with codegen).
    self.send_authenticateUser(login, user, properties)
    return self.recv_authenticateUser() |
def write(self, message):
"""Write outgoing message."""
data = encode(message, compressed=self.compressed)
length = len(data)
data = self.__pack(length) + data
with self.__write_lock:
while data:
try:
n = os.write(self.out_d, data)
except OSError as why:
if why.errno in (errno.EPIPE, errno.EINVAL):
raise EOFError()
raise
if not n:
raise EOFError()
data = data[n:]
return length + self.packet | Write outgoing message. | Below is the instruction that describes the task:
### Input:
Write outgoing message.
### Response:
def write(self, message):
    """Write outgoing message."""
    data = encode(message, compressed=self.compressed)
    length = len(data)
    # Prefix the payload with its packed length header.
    data = self.__pack(length) + data
    with self.__write_lock:
        # os.write may perform a short write; loop until drained.
        while data:
            try:
                n = os.write(self.out_d, data)
            except OSError as why:
                if why.errno in (errno.EPIPE, errno.EINVAL):
                    # Peer closed the descriptor: surface as end-of-file.
                    raise EOFError()
                raise
            if not n:
                raise EOFError()
            data = data[n:]
    # NOTE(review): total adds self.packet (header size?) -- confirm
    # against the corresponding reader.
    return length + self.packet |
def _process_state_embryo(self, job_record):
""" method that takes care of processing job records in STATE_EMBRYO state"""
start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
end_timeperiod, job_record) | method that takes care of processing job records in STATE_EMBRYO state | Below is the instruction that describes the task:
### Input:
method that takes care of processing job records in STATE_EMBRYO state
### Response:
def _process_state_embryo(self, job_record):
    """ method that takes care of processing job records in STATE_EMBRYO state"""
    # Derive the processing window for this job, then hand it over to the
    # in-progress pipeline.
    start_timeperiod = self.compute_start_timeperiod(job_record.process_name, job_record.timeperiod)
    end_timeperiod = self.compute_end_timeperiod(job_record.process_name, job_record.timeperiod)
    self._compute_and_transfer_to_progress(job_record.process_name, start_timeperiod,
                                           end_timeperiod, job_record) |
def on_trial_result(self, trial_runner, trial, result):
"""Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`.
"""
if trial in self._stopped_trials:
assert not self._hard_stop
return TrialScheduler.CONTINUE # fall back to FIFO
time = result[self._time_attr]
self._results[trial].append(result)
median_result = self._get_median_result(time)
best_result = self._best_result(trial)
if self._verbose:
logger.info("Trial {} best res={} vs median res={} at t={}".format(
trial, best_result, median_result, time))
if best_result < median_result and time > self._grace_period:
if self._verbose:
logger.info("MedianStoppingRule: "
"early stopping {}".format(trial))
self._stopped_trials.add(trial)
if self._hard_stop:
return TrialScheduler.STOP
else:
return TrialScheduler.PAUSE
else:
return TrialScheduler.CONTINUE | Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`. | Below is the instruction that describes the task:
### Input:
Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`.
### Response:
def on_trial_result(self, trial_runner, trial, result):
    """Callback for early stopping.

    This stopping rule stops a running trial if the trial's best objective
    value by step `t` is strictly worse than the median of the running
    averages of all completed trials' objectives reported up to step `t`.
    """
    if trial in self._stopped_trials:
        assert not self._hard_stop
        return TrialScheduler.CONTINUE  # fall back to FIFO
    time = result[self._time_attr]
    self._results[trial].append(result)
    median_result = self._get_median_result(time)
    best_result = self._best_result(trial)
    if self._verbose:
        logger.info("Trial {} best res={} vs median res={} at t={}".format(
            trial, best_result, median_result, time))
    # Stop only when strictly worse than the median AND past the grace period.
    if best_result < median_result and time > self._grace_period:
        if self._verbose:
            logger.info("MedianStoppingRule: "
                        "early stopping {}".format(trial))
        self._stopped_trials.add(trial)
        if self._hard_stop:
            return TrialScheduler.STOP
        else:
            # Soft stop: pause so the trial could be resumed later.
            return TrialScheduler.PAUSE
    else:
        return TrialScheduler.CONTINUE |
def get_broadcast_shape(*tensors):
"""Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
"""
# Try static.
s_shape = tensors[0].shape
for t in tensors[1:]:
s_shape = tf.broadcast_static_shape(s_shape, t.shape)
if tensorshape_util.is_fully_defined(s_shape):
return tensorshape_util.as_list(s_shape)
# Fallback on dynamic.
d_shape = tf.shape(input=tensors[0])
for t in tensors[1:]:
d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))
return d_shape | Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`. | Below is the instruction that describes the task:
### Input:
Get broadcast shape as a Python list of integers (preferred) or `Tensor`.
Args:
*tensors: One or more `Tensor` objects (already converted!).
Returns:
broadcast shape: Python list (if shapes determined statically), otherwise
an `int32` `Tensor`.
### Response:
def get_broadcast_shape(*tensors):
    """Get broadcast shape as a Python list of integers (preferred) or `Tensor`.

    Args:
      *tensors: One or more `Tensor` objects (already converted!).

    Returns:
      broadcast shape: Python list (if shapes determined statically), otherwise
        an `int32` `Tensor`.
    """
    # Try static.
    s_shape = tensors[0].shape
    for t in tensors[1:]:
        s_shape = tf.broadcast_static_shape(s_shape, t.shape)
    if tensorshape_util.is_fully_defined(s_shape):
        return tensorshape_util.as_list(s_shape)
    # Fallback on dynamic.
    d_shape = tf.shape(input=tensors[0])
    for t in tensors[1:]:
        d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))
    return d_shape |
def __get_hosts(self, pattern):
"""
finds hosts that positively match a particular pattern. Does not
take into account negative matches.
"""
(name, enumeration_details) = self._enumeration_info(pattern)
hpat = self._hosts_in_unenumerated_pattern(name)
hpat = sorted(hpat, key=lambda x: x.name)
return set(self._apply_ranges(pattern, hpat)) | finds hosts that positively match a particular pattern. Does not
take into account negative matches. | Below is the instruction that describes the task:
### Input:
finds hosts that positively match a particular pattern. Does not
take into account negative matches.
### Response:
def __get_hosts(self, pattern):
    """
    finds hosts that positively match a particular pattern. Does not
    take into account negative matches.
    """
    (name, enumeration_details) = self._enumeration_info(pattern)
    hpat = self._hosts_in_unenumerated_pattern(name)
    # Sort by host name so that range slicing below is deterministic.
    hpat = sorted(hpat, key=lambda x: x.name)
    return set(self._apply_ranges(pattern, hpat)) |
def from_out_edges(cls, vertices, edge_mapper):
"""
Create a DirectedGraph from a collection of vertices and
a mapping giving the vertices that each vertex is connected to.
"""
vertices = set(vertices)
edges = set()
heads = {}
tails = {}
# Number the edges arbitrarily.
edge_identifier = itertools.count()
for tail in vertices:
for head in edge_mapper[tail]:
edge = next(edge_identifier)
edges.add(edge)
heads[edge] = head
tails[edge] = tail
return cls._raw(
vertices=vertices,
edges=edges,
heads=heads,
tails=tails,
) | Create a DirectedGraph from a collection of vertices and
a mapping giving the vertices that each vertex is connected to. | Below is the instruction that describes the task:
### Input:
Create a DirectedGraph from a collection of vertices and
a mapping giving the vertices that each vertex is connected to.
### Response:
def from_out_edges(cls, vertices, edge_mapper):
    """
    Create a DirectedGraph from a collection of vertices and
    a mapping giving the vertices that each vertex is connected to.
    """
    vertices = set(vertices)
    edges = set()
    heads = {}
    tails = {}
    # Number the edges arbitrarily.
    edge_identifier = itertools.count()
    for tail in vertices:
        for head in edge_mapper[tail]:
            edge = next(edge_identifier)
            edges.add(edge)
            heads[edge] = head
            tails[edge] = tail
    return cls._raw(
        vertices=vertices,
        edges=edges,
        heads=heads,
        tails=tails,
    ) |
def from_bid(cls, value):
"""Create an instance of :class:`Decimal128` from Binary Integer
Decimal string.
:Parameters:
- `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating
point in Binary Integer Decimal (BID) format).
"""
if not isinstance(value, bytes):
raise TypeError("value must be an instance of bytes")
if len(value) != 16:
raise ValueError("value must be exactly 16 bytes")
return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) | Create an instance of :class:`Decimal128` from Binary Integer
Decimal string.
:Parameters:
- `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating
point in Binary Integer Decimal (BID) format). | Below is the instruction that describes the task:
### Input:
Create an instance of :class:`Decimal128` from Binary Integer
Decimal string.
:Parameters:
- `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating
point in Binary Integer Decimal (BID) format).
### Response:
def from_bid(cls, value):
    """Create an instance of :class:`Decimal128` from Binary Integer
    Decimal string.

    :Parameters:
      - `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating
        point in Binary Integer Decimal (BID) format).
    """
    if not isinstance(value, bytes):
        raise TypeError("value must be an instance of bytes")
    if len(value) != 16:
        raise ValueError("value must be exactly 16 bytes")
    # Unpack the 16 bytes as two 64-bit halves, in the order the
    # Decimal128 constructor expects.
    return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) |
def update_value_map(layer, exposure_key=None):
"""Assign inasafe values according to definitions for a vector layer.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
"""
output_layer_name = assign_inasafe_values_steps['output_layer_name']
output_layer_name = output_layer_name % layer.keywords['layer_purpose']
keywords = layer.keywords
inasafe_fields = keywords['inasafe_fields']
classification = None
if keywords['layer_purpose'] == layer_purpose_hazard['key']:
if not inasafe_fields.get(hazard_value_field['key']):
raise InvalidKeywordsForProcessingAlgorithm
old_field = hazard_value_field
new_field = hazard_class_field
classification = active_classification(layer.keywords, exposure_key)
elif keywords['layer_purpose'] == layer_purpose_exposure['key']:
if not inasafe_fields.get(exposure_type_field['key']):
raise InvalidKeywordsForProcessingAlgorithm
old_field = exposure_type_field
new_field = exposure_class_field
else:
raise InvalidKeywordsForProcessingAlgorithm
# It's a hazard layer
if exposure_key:
if not active_thresholds_value_maps(keywords, exposure_key):
raise InvalidKeywordsForProcessingAlgorithm
value_map = active_thresholds_value_maps(keywords, exposure_key)
# It's exposure layer
else:
if not keywords.get('value_map'):
raise InvalidKeywordsForProcessingAlgorithm
value_map = keywords.get('value_map')
unclassified_column = inasafe_fields[old_field['key']]
unclassified_index = layer.fields().lookupField(unclassified_column)
reversed_value_map = {}
for inasafe_class, values in list(value_map.items()):
for val in values:
reversed_value_map[val] = inasafe_class
classified_field = QgsField()
classified_field.setType(new_field['type'])
classified_field.setName(new_field['field_name'])
classified_field.setLength(new_field['length'])
classified_field.setPrecision(new_field['precision'])
layer.startEditing()
layer.addAttribute(classified_field)
classified_field_index = layer.fields(). \
lookupField(classified_field.name())
for feature in layer.getFeatures():
attributes = feature.attributes()
source_value = attributes[unclassified_index]
classified_value = reversed_value_map.get(source_value)
if not classified_value:
classified_value = ''
layer.changeAttributeValue(
feature.id(), classified_field_index, classified_value)
layer.commitChanges()
remove_fields(layer, [unclassified_column])
# We transfer keywords to the output.
# We add new class field
inasafe_fields[new_field['key']] = new_field['field_name']
# and we remove hazard value field
inasafe_fields.pop(old_field['key'])
layer.keywords = keywords
layer.keywords['inasafe_fields'] = inasafe_fields
if exposure_key:
value_map_key = 'value_maps'
else:
value_map_key = 'value_map'
if value_map_key in list(layer.keywords.keys()):
layer.keywords.pop(value_map_key)
layer.keywords['title'] = output_layer_name
if classification:
layer.keywords['classification'] = classification
check_layer(layer)
return layer | Assign inasafe values according to definitions for a vector layer.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0 | Below is the instruction that describes the task:
### Input:
Assign inasafe values according to definitions for a vector layer.
:param layer: The vector layer.
:type layer: QgsVectorLayer
:param exposure_key: The exposure key.
:type exposure_key: str
:return: The classified vector layer.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
### Response:
def update_value_map(layer, exposure_key=None):
    """Assign inasafe values according to definitions for a vector layer.

    :param layer: The vector layer.
    :type layer: QgsVectorLayer

    :param exposure_key: The exposure key.
    :type exposure_key: str

    :return: The classified vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    output_layer_name = assign_inasafe_values_steps['output_layer_name']
    output_layer_name = output_layer_name % layer.keywords['layer_purpose']
    keywords = layer.keywords
    inasafe_fields = keywords['inasafe_fields']
    classification = None
    # Pick source/target fields according to the layer purpose.
    if keywords['layer_purpose'] == layer_purpose_hazard['key']:
        if not inasafe_fields.get(hazard_value_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = hazard_value_field
        new_field = hazard_class_field
        classification = active_classification(layer.keywords, exposure_key)
    elif keywords['layer_purpose'] == layer_purpose_exposure['key']:
        if not inasafe_fields.get(exposure_type_field['key']):
            raise InvalidKeywordsForProcessingAlgorithm
        old_field = exposure_type_field
        new_field = exposure_class_field
    else:
        raise InvalidKeywordsForProcessingAlgorithm
    # It's a hazard layer
    if exposure_key:
        if not active_thresholds_value_maps(keywords, exposure_key):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = active_thresholds_value_maps(keywords, exposure_key)
    # It's exposure layer
    else:
        if not keywords.get('value_map'):
            raise InvalidKeywordsForProcessingAlgorithm
        value_map = keywords.get('value_map')
    unclassified_column = inasafe_fields[old_field['key']]
    unclassified_index = layer.fields().lookupField(unclassified_column)
    # Invert {class: [values]} into {value: class} for O(1) lookup per feature.
    reversed_value_map = {}
    for inasafe_class, values in list(value_map.items()):
        for val in values:
            reversed_value_map[val] = inasafe_class
    classified_field = QgsField()
    classified_field.setType(new_field['type'])
    classified_field.setName(new_field['field_name'])
    classified_field.setLength(new_field['length'])
    classified_field.setPrecision(new_field['precision'])
    layer.startEditing()
    layer.addAttribute(classified_field)
    classified_field_index = layer.fields(). \
        lookupField(classified_field.name())
    # Classify every feature; unmapped source values become ''.
    for feature in layer.getFeatures():
        attributes = feature.attributes()
        source_value = attributes[unclassified_index]
        classified_value = reversed_value_map.get(source_value)
        if not classified_value:
            classified_value = ''
        layer.changeAttributeValue(
            feature.id(), classified_field_index, classified_value)
    layer.commitChanges()
    remove_fields(layer, [unclassified_column])
    # We transfer keywords to the output.
    # We add new class field
    inasafe_fields[new_field['key']] = new_field['field_name']
    # and we remove hazard value field
    inasafe_fields.pop(old_field['key'])
    layer.keywords = keywords
    layer.keywords['inasafe_fields'] = inasafe_fields
    # The raw value map(s) are consumed; drop them from the output keywords.
    if exposure_key:
        value_map_key = 'value_maps'
    else:
        value_map_key = 'value_map'
    if value_map_key in list(layer.keywords.keys()):
        layer.keywords.pop(value_map_key)
    layer.keywords['title'] = output_layer_name
    if classification:
        layer.keywords['classification'] = classification
    check_layer(layer)
    return layer |
def marking_thru(self):
"""
Return the back-pointer to markings that
may have been attached via Django's content type mechanism.
"""
self_django_type = ContentType.objects.get_for_model(self)
return Marking2X.objects.filter(content_type__pk=self_django_type.id,
object_id=self.id) | Return the back-pointer to markings that
may have been attached via Django's content type mechanism. | Below is the instruction that describes the task:
### Input:
Return the back-pointer to markings that
may have been attached via Django's content type mechanism.
### Response:
def marking_thru(self):
    """
    Return the back-pointer to markings that
    may have been attached via Django's content type mechanism.
    """
    # Resolve this model's ContentType, then filter the generic relation.
    self_django_type = ContentType.objects.get_for_model(self)
    return Marking2X.objects.filter(content_type__pk=self_django_type.id,
                                    object_id=self.id) |
def byte_href_anchors(self, chars=False):
'''
simple, regex-based extractor of anchor tags, so we can
compute BYTE offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
'''
input_buffer = self.clean_html
if chars:
input_buffer = input_buffer.decode('utf8')
idx = 0
## split doc up into pieces that end on an anchor tag
parts = input_buffer.split('</a>')
assert len('</a>'.join(parts) ) == len(input_buffer)
for part in parts:
## try to get an A tag out:
m = anchors_re.match(part)
if not m:
idx += len(part) + 4
continue
before = m.group('before')
ahref = m.group('ahref')
## increment the index to get line number for the anchor
idx += len(before) + len(ahref)
first = idx
length = len(m.group('anchor'))
## update the index for the next loop
# include anchor plus the </a>
idx += length + 4
if chars:
yield m.group('href').encode('utf8'), first, length, m.group('anchor').encode('utf8')
else:
yield m.group('href'), first, length, m.group('anchor')
assert idx - 4 == len(input_buffer) | simple, regex-based extractor of anchor tags, so we can
compute BYTE offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text) | Below is the instruction that describes the task:
### Input:
simple, regex-based extractor of anchor tags, so we can
compute BYTE offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
### Response:
def byte_href_anchors(self, chars=False):
    '''
    simple, regex-based extractor of anchor tags, so we can
    compute BYTE offsets for anchor texts and associate them with
    their href.

    Generates tuple(href_string, first_byte, byte_length, anchor_text)

    :param chars: when True, offsets/lengths are in unicode characters
        rather than raw bytes, and yielded strings are utf8-encoded.
    '''
    input_buffer = self.clean_html
    if chars:
        input_buffer = input_buffer.decode('utf8')
    idx = 0
    ## split doc up into pieces that end on an anchor tag
    parts = input_buffer.split('</a>')
    assert len('</a>'.join(parts) ) == len(input_buffer)
    for part in parts:
        ## try to get an A tag out:
        m = anchors_re.match(part)
        if not m:
            # No anchor here; skip the piece plus the 4-byte '</a>' separator.
            idx += len(part) + 4
            continue
        before = m.group('before')
        ahref = m.group('ahref')
        ## increment the index to get line number for the anchor
        idx += len(before) + len(ahref)
        first = idx
        length = len(m.group('anchor'))
        ## update the index for the next loop
        # include anchor plus the </a>
        idx += length + 4
        if chars:
            yield m.group('href').encode('utf8'), first, length, m.group('anchor').encode('utf8')
        else:
            yield m.group('href'), first, length, m.group('anchor')
    # The last piece has no trailing '</a>', hence the -4 correction.
    assert idx - 4 == len(input_buffer) |
def __dict_to_pod_spec(spec):
'''
Converts a dictionary into kubernetes V1PodSpec instance.
'''
spec_obj = kubernetes.client.V1PodSpec()
for key, value in iteritems(spec):
if hasattr(spec_obj, key):
setattr(spec_obj, key, value)
return spec_obj | Converts a dictionary into kubernetes V1PodSpec instance. | Below is the the instruction that describes the task:
### Input:
Converts a dictionary into kubernetes V1PodSpec instance.
### Response:
def __dict_to_pod_spec(spec):
    '''
    Converts a dictionary into kubernetes V1PodSpec instance.
    '''
    spec_obj = kubernetes.client.V1PodSpec()
    # Copy only keys that correspond to real V1PodSpec attributes;
    # unknown keys are silently ignored.
    for key, value in iteritems(spec):
        if hasattr(spec_obj, key):
            setattr(spec_obj, key, value)
    return spec_obj |
def get_columns_list(self):
"""
modified: removing the '_cls' column added by Mongoengine to support
mongodb document inheritance
cf. http://docs.mongoengine.org/apireference.html#documents:
"A Document subclass may be itself subclassed,
to create a specialised version of the document that will be
stored in the same collection.
To facilitate this behaviour a _cls field is added to documents
(hidden though the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of _cls set
allow_inheritance to False in the meta dictionary."
"""
columns = list(self.obj._fields.keys())
if "_cls" in columns:
columns.remove("_cls")
return columns | modified: removing the '_cls' column added by Mongoengine to support
mongodb document inheritance
cf. http://docs.mongoengine.org/apireference.html#documents:
"A Document subclass may be itself subclassed,
to create a specialised version of the document that will be
stored in the same collection.
To facilitate this behaviour a _cls field is added to documents
(hidden though the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of _cls set
allow_inheritance to False in the meta dictionary." | Below is the instruction that describes the task:
### Input:
modified: removing the '_cls' column added by Mongoengine to support
mongodb document inheritance
cf. http://docs.mongoengine.org/apireference.html#documents:
"A Document subclass may be itself subclassed,
to create a specialised version of the document that will be
stored in the same collection.
To facilitate this behaviour a _cls field is added to documents
(hidden though the MongoEngine interface).
To disable this behaviour and remove the dependence on the presence of _cls set
allow_inheritance to False in the meta dictionary."
### Response:
def get_columns_list(self):
    """
    modified: removing the '_cls' column added by Mongoengine to support
    mongodb document inheritance
    cf. http://docs.mongoengine.org/apireference.html#documents:
    "A Document subclass may be itself subclassed,
    to create a specialised version of the document that will be
    stored in the same collection.
    To facilitate this behaviour a _cls field is added to documents
    (hidden though the MongoEngine interface).
    To disable this behaviour and remove the dependence on the presence of _cls set
    allow_inheritance to False in the meta dictionary."
    """
    columns = list(self.obj._fields.keys())
    # Hide MongoEngine's internal inheritance marker from callers.
    if "_cls" in columns:
        columns.remove("_cls")
    return columns |
def merge(self, other):
    """Merge two sets of coordinates to create a new Dataset

    The method implements the logic used for joining coordinates in the
    result of a binary operation performed on xarray objects:

    - If two index coordinates conflict (are not equal), an exception is
      raised. You must align your data before passing it to this method.
    - If an index coordinate and a non-index coordinate conflict, the non-
      index coordinate is dropped.
    - If two non-index coordinates conflict, both are dropped.

    Parameters
    ----------
    other : DatasetCoordinates or DataArrayCoordinates
        The coordinates from another dataset or data array.

    Returns
    -------
    merged : Dataset
        A new Dataset with merged coordinates.
    """
    # Imported here to avoid a circular import at module load time.
    from .dataset import Dataset
    if other is None:
        return self.to_dataset()
    else:
        # Accept either a Coordinates object (has .variables) or a plain mapping.
        other_vars = getattr(other, 'variables', other)
        coords = expand_and_merge_variables([self.variables, other_vars])
        return Dataset._from_vars_and_coord_names(coords, set(coords)) | Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates. | Below is the the instruction that describes the task:
### Input:
Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
### Response:
def merge(self, other):
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
else:
other_vars = getattr(other, 'variables', other)
coords = expand_and_merge_variables([self.variables, other_vars])
return Dataset._from_vars_and_coord_names(coords, set(coords)) |
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urlparse.urljoin("file:", urllib2.pathname2url(path))
return url | Convert a path to a file: URL. The path will be made absolute and have
quoted path parts. | Below is the the instruction that describes the task:
### Input:
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
### Response:
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urlparse.urljoin("file:", urllib2.pathname2url(path))
return url |
def get_network(self, org, segid):
"""Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network.
"""
network_info = {
'organizationName': org,
'partitionName': self._part_name,
'segmentId': segid,
}
res = self._get_network(network_info)
if res and res.status_code in self._resp_ok:
return res.json() | Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network. | Below is the the instruction that describes the task:
### Input:
Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network.
### Response:
def get_network(self, org, segid):
"""Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network.
"""
network_info = {
'organizationName': org,
'partitionName': self._part_name,
'segmentId': segid,
}
res = self._get_network(network_info)
if res and res.status_code in self._resp_ok:
return res.json() |
def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper | Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result. | Below is the the instruction that describes the task:
### Input:
Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
### Response:
def _make_sync_method(name):
"""Helper to synthesize a synchronous method from an async method name.
Used by the @add_sync_methods class decorator below.
Args:
name: The name of the synchronous method.
Returns:
A method (with first argument 'self') that retrieves and calls
self.<name>, passing its own arguments, expects it to return a
Future, and then waits for and returns that Future's result.
"""
def sync_wrapper(self, *args, **kwds):
method = getattr(self, name)
future = method(*args, **kwds)
return future.get_result()
return sync_wrapper |
def obtain_token(self):
"""
Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL.
"""
token_end_points = ('token/obtain',
'obtain-token',
'obtain_token')
for end_point in token_end_points:
try:
return self.auth[end_point]._(page_size=None)['token']
except BeanBagException as e:
if e.response.status_code != 404:
raise
raise Exception('Could not obtain token from any known URL.') | Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL. | Below is the the instruction that describes the task:
### Input:
Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL.
### Response:
def obtain_token(self):
"""
Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL.
"""
token_end_points = ('token/obtain',
'obtain-token',
'obtain_token')
for end_point in token_end_points:
try:
return self.auth[end_point]._(page_size=None)['token']
except BeanBagException as e:
if e.response.status_code != 404:
raise
raise Exception('Could not obtain token from any known URL.') |
def parse_raml(self):
"""
Parse RAML file
"""
if utils.is_url(self.ramlfile):
raml = utils.download_file(self.ramlfile)
else:
with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
raml = raml_f.read()
loader = ramlfications.loads(raml)
config = ramlfications.setup_config(self.ramlconfig)
self.raml = ramlfications.parse_raml(loader, config) | Parse RAML file | Below is the the instruction that describes the task:
### Input:
Parse RAML file
### Response:
def parse_raml(self):
"""
Parse RAML file
"""
if utils.is_url(self.ramlfile):
raml = utils.download_file(self.ramlfile)
else:
with codecs.open(self.ramlfile, "rb", encoding="utf-8") as raml_f:
raml = raml_f.read()
loader = ramlfications.loads(raml)
config = ramlfications.setup_config(self.ramlconfig)
self.raml = ramlfications.parse_raml(loader, config) |
def from_dict(page, content_type=None):
"""
Create a |Page| object from a dictionary. This method is intended for internal use, to construct a
|Page| object from the body of a response json from a paginated endpoint.
:param page: The dictionary.
:param content_type: The class that the contents should be deserialized into.
:return: The resulting |Page| object.
"""
result = Page(items=page.get('items'),
page_number=page.get('pageNumber'),
page_size=page.get('pageSize'),
total_elements=page.get('totalElements'),
has_next=page.get('hasNext'))
if content_type is not None:
if not issubclass(content_type, ModelBase):
raise ValueError("'content_type' must be a subclass of ModelBase.")
result.items = [content_type.from_dict(item) for item in result.items]
return result | Create a |Page| object from a dictionary. This method is intended for internal use, to construct a
|Page| object from the body of a response json from a paginated endpoint.
:param page: The dictionary.
:param content_type: The class that the contents should be deserialized into.
:return: The resulting |Page| object. | Below is the the instruction that describes the task:
### Input:
Create a |Page| object from a dictionary. This method is intended for internal use, to construct a
|Page| object from the body of a response json from a paginated endpoint.
:param page: The dictionary.
:param content_type: The class that the contents should be deserialized into.
:return: The resulting |Page| object.
### Response:
def from_dict(page, content_type=None):
"""
Create a |Page| object from a dictionary. This method is intended for internal use, to construct a
|Page| object from the body of a response json from a paginated endpoint.
:param page: The dictionary.
:param content_type: The class that the contents should be deserialized into.
:return: The resulting |Page| object.
"""
result = Page(items=page.get('items'),
page_number=page.get('pageNumber'),
page_size=page.get('pageSize'),
total_elements=page.get('totalElements'),
has_next=page.get('hasNext'))
if content_type is not None:
if not issubclass(content_type, ModelBase):
raise ValueError("'content_type' must be a subclass of ModelBase.")
result.items = [content_type.from_dict(item) for item in result.items]
return result |
def getStatus(self):
"""Returns the charger's charge status, as a string"""
command = '$GS'
status = self.sendCommand(command)
return states[int(status[1])] | Returns the charger's charge status, as a string | Below is the the instruction that describes the task:
### Input:
Returns the charger's charge status, as a string
### Response:
def getStatus(self):
"""Returns the charger's charge status, as a string"""
command = '$GS'
status = self.sendCommand(command)
return states[int(status[1])] |
def is_open(self, refresh=False):
"""Get curtains state.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh()
return self.get_level(refresh) > 0 | Get curtains state.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions. | Below is the the instruction that describes the task:
### Input:
Get curtains state.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
### Response:
def is_open(self, refresh=False):
"""Get curtains state.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh()
return self.get_level(refresh) > 0 |
def name_of_associated_transcript(effect):
"""
Name of transcript associated with effect,
if there is one (otherwise return "").
"""
return apply_to_transcript_if_exists(
effect=effect,
fn=lambda t: t.name,
default="") | Name of transcript associated with effect,
if there is one (otherwise return ""). | Below is the the instruction that describes the task:
### Input:
Name of transcript associated with effect,
if there is one (otherwise return "").
### Response:
def name_of_associated_transcript(effect):
"""
Name of transcript associated with effect,
if there is one (otherwise return "").
"""
return apply_to_transcript_if_exists(
effect=effect,
fn=lambda t: t.name,
default="") |
def fileopenbox(msg=None
, title=None
, default="*"
, filetypes=None
):
"""
A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt"
"""
if sys.platform == 'darwin':
_bring_to_front()
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(default,filetypes)
#------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
#------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
f = tk_FileDialog.askopenfilename(parent=localRoot
, title=getFileDialogTitle(msg,title)
, initialdir=initialdir
, initialfile=initialfile
, filetypes=filetypes
)
localRoot.destroy()
if not f: return None
return os.path.normpath(f) | A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt" | Below is the the instruction that describes the task:
### Input:
A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt"
### Response:
def fileopenbox(msg=None
, title=None
, default="*"
, filetypes=None
):
"""
A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt"
"""
if sys.platform == 'darwin':
_bring_to_front()
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(default,filetypes)
#------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
#------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
f = tk_FileDialog.askopenfilename(parent=localRoot
, title=getFileDialogTitle(msg,title)
, initialdir=initialdir
, initialfile=initialfile
, filetypes=filetypes
)
localRoot.destroy()
if not f: return None
return os.path.normpath(f) |
def group_symbol(self):
"""Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str
"""
s_mapping = {symdata.index: symdata.symbol for symdata in self._symboldata_list}
return s_mapping[self.group_num] | Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str | Below is the the instruction that describes the task:
### Input:
Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str
### Response:
def group_symbol(self):
"""Current group symbol.
:getter: Returns current group symbol
:setter: Sets current group symbol
:type: str
"""
s_mapping = {symdata.index: symdata.symbol for symdata in self._symboldata_list}
return s_mapping[self.group_num] |
def params(self):
"""
:return: A dictionary of SSOS query parameters.
:rtype: dict
"""
params = dict(format=RESPONSE_FORMAT,
verbose=self.verbose,
epoch1=str(self.search_start_date),
epoch2=str(self.search_end_date),
search=self.orbit_method,
eunits=self.error_units,
eellipse=self.error_ellipse,
extres=self.resolve_extension,
xyres=self.resolve_position,
telinst=self.telescope_instrument)
if self.orbit_method == 'bynameHorizons':
params['object'] = NEW_LINE.join((str(target_name) for target_name in self.observations))
else:
params['obs'] = NEW_LINE.join((str(observation) for observation in self.observations))
return params | :return: A dictionary of SSOS query parameters.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
:return: A dictionary of SSOS query parameters.
:rtype: dict
### Response:
def params(self):
"""
:return: A dictionary of SSOS query parameters.
:rtype: dict
"""
params = dict(format=RESPONSE_FORMAT,
verbose=self.verbose,
epoch1=str(self.search_start_date),
epoch2=str(self.search_end_date),
search=self.orbit_method,
eunits=self.error_units,
eellipse=self.error_ellipse,
extres=self.resolve_extension,
xyres=self.resolve_position,
telinst=self.telescope_instrument)
if self.orbit_method == 'bynameHorizons':
params['object'] = NEW_LINE.join((str(target_name) for target_name in self.observations))
else:
params['obs'] = NEW_LINE.join((str(observation) for observation in self.observations))
return params |
def nexson_frag_write_newick(out,
edges,
nodes,
otu_group,
label_key,
leaf_labels,
root_id,
needs_quotes_pattern=NEWICK_NEEDING_QUOTING,
ingroup_id=None,
bracket_ingroup=False,
with_edge_lengths=True):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
unlabeled_counter = 0
curr_node_id = root_id
assert curr_node_id
curr_edge = None
curr_sib_list = []
curr_stack = []
going_tipward = True
while True:
if going_tipward:
outgoing_edges = edges.get(curr_node_id)
if outgoing_edges is None:
curr_node = nodes[curr_node_id]
assert curr_node_id is not None
assert curr_node_id is not None
unlabeled_counter = _write_newick_leaf_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
leaf_labels,
unlabeled_counter,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
going_tipward = False
else:
te = [(i, e) for i, e in outgoing_edges.items()]
te.sort() # produce a consistent rotation... Necessary?
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[pre-ingroup-marker]')
out.write('(')
next_p = te.pop(0)
curr_stack.append((curr_edge, curr_node_id, curr_sib_list))
curr_edge, curr_sib_list = next_p[1], te
curr_node_id = curr_edge['@target']
if not going_tipward:
next_up_edge_id = None
while True:
if curr_sib_list:
out.write(',')
next_up_edge_id, next_up_edge = curr_sib_list.pop(0)
break
if curr_stack:
curr_edge, curr_node_id, curr_sib_list = curr_stack.pop(-1)
curr_node = nodes[curr_node_id]
out.write(')')
_write_newick_internal_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[post-ingroup-marker]')
else:
break
if next_up_edge_id is None:
break
curr_edge = next_up_edge
curr_node_id = curr_edge['@target']
going_tipward = True
out.write(';') | `label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list | Below is the the instruction that describes the task:
### Input:
`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
### Response:
def nexson_frag_write_newick(out,
edges,
nodes,
otu_group,
label_key,
leaf_labels,
root_id,
needs_quotes_pattern=NEWICK_NEEDING_QUOTING,
ingroup_id=None,
bracket_ingroup=False,
with_edge_lengths=True):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
unlabeled_counter = 0
curr_node_id = root_id
assert curr_node_id
curr_edge = None
curr_sib_list = []
curr_stack = []
going_tipward = True
while True:
if going_tipward:
outgoing_edges = edges.get(curr_node_id)
if outgoing_edges is None:
curr_node = nodes[curr_node_id]
assert curr_node_id is not None
assert curr_node_id is not None
unlabeled_counter = _write_newick_leaf_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
leaf_labels,
unlabeled_counter,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
going_tipward = False
else:
te = [(i, e) for i, e in outgoing_edges.items()]
te.sort() # produce a consistent rotation... Necessary?
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[pre-ingroup-marker]')
out.write('(')
next_p = te.pop(0)
curr_stack.append((curr_edge, curr_node_id, curr_sib_list))
curr_edge, curr_sib_list = next_p[1], te
curr_node_id = curr_edge['@target']
if not going_tipward:
next_up_edge_id = None
while True:
if curr_sib_list:
out.write(',')
next_up_edge_id, next_up_edge = curr_sib_list.pop(0)
break
if curr_stack:
curr_edge, curr_node_id, curr_sib_list = curr_stack.pop(-1)
curr_node = nodes[curr_node_id]
out.write(')')
_write_newick_internal_label(out,
curr_node_id,
curr_node,
otu_group,
label_key,
needs_quotes_pattern)
if with_edge_lengths:
_write_newick_edge_len(out, curr_edge)
if bracket_ingroup and (ingroup_id == curr_node_id):
out.write('[post-ingroup-marker]')
else:
break
if next_up_edge_id is None:
break
curr_edge = next_up_edge
curr_node_id = curr_edge['@target']
going_tipward = True
out.write(';') |
def get_unspent_outputs(self):
"""Get the utxoset.
Returns:
generator of unspent_outputs.
"""
cursor = backend.query.get_unspent_outputs(self.connection)
return (record for record in cursor) | Get the utxoset.
Returns:
generator of unspent_outputs. | Below is the the instruction that describes the task:
### Input:
Get the utxoset.
Returns:
generator of unspent_outputs.
### Response:
def get_unspent_outputs(self):
"""Get the utxoset.
Returns:
generator of unspent_outputs.
"""
cursor = backend.query.get_unspent_outputs(self.connection)
return (record for record in cursor) |
def value(self):
r"""Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}`
"""
from numpy_sugar.linalg import ddot, sum2diag
if self._cache["value"] is not None:
return self._cache["value"]
scale = exp(self.logscale)
delta = 1 / (1 + exp(-self.logitdelta))
v0 = scale * (1 - delta)
v1 = scale * delta
mu = self.eta / self.tau
n = len(mu)
if self._QS is None:
K = zeros((n, n))
else:
Q0 = self._QS[0][0]
S0 = self._QS[1]
K = dot(ddot(Q0, S0), Q0.T)
A = sum2diag(sum2diag(v0 * K, v1), 1 / self.tau)
m = mu - self.mean()
v = -n * log(2 * pi)
v -= slogdet(A)[1]
v -= dot(m, solve(A, m))
self._cache["value"] = v / 2
return self._cache["value"] | r"""Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}` | Below is the the instruction that describes the task:
### Input:
r"""Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}`
### Response:
def value(self):
r"""Log of the marginal likelihood.
Formally,
.. math::
- \frac{n}{2}\log{2\pi} - \frac{1}{2} \log{\left|
v_0 \mathrm K + v_1 \mathrm I + \tilde{\Sigma} \right|}
- \frac{1}{2}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)^{\intercal}
\left( v_0 \mathrm K + v_1 \mathrm I +
\tilde{\Sigma} \right)^{-1}
\left(\tilde{\boldsymbol\mu} -
\mathrm X\boldsymbol\beta\right)
Returns
-------
float
:math:`\log{p(\tilde{\boldsymbol\mu})}`
"""
from numpy_sugar.linalg import ddot, sum2diag
if self._cache["value"] is not None:
return self._cache["value"]
scale = exp(self.logscale)
delta = 1 / (1 + exp(-self.logitdelta))
v0 = scale * (1 - delta)
v1 = scale * delta
mu = self.eta / self.tau
n = len(mu)
if self._QS is None:
K = zeros((n, n))
else:
Q0 = self._QS[0][0]
S0 = self._QS[1]
K = dot(ddot(Q0, S0), Q0.T)
A = sum2diag(sum2diag(v0 * K, v1), 1 / self.tau)
m = mu - self.mean()
v = -n * log(2 * pi)
v -= slogdet(A)[1]
v -= dot(m, solve(A, m))
self._cache["value"] = v / 2
return self._cache["value"] |
def _get_data(self):
"""
Extracts the session data from cookie.
"""
cookie = self.adapter.cookies.get(self.name)
return self._deserialize(cookie) if cookie else {} | Extracts the session data from cookie. | Below is the the instruction that describes the task:
### Input:
Extracts the session data from cookie.
### Response:
def _get_data(self):
"""
Extracts the session data from cookie.
"""
cookie = self.adapter.cookies.get(self.name)
return self._deserialize(cookie) if cookie else {} |
def ANY_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for name in target_mentions:
for chain_ment in chain_mentions:
if name in chain_ment:
return True
return False | For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool: | Below is the the instruction that describes the task:
### Input:
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
### Response:
def ANY_mentions(target_mentions, chain_mentions):
'''
For each name string in the target_mentions list, searches through
all chain_mentions looking for any cleansed Token.token that
contains the name. Returns True if any of the target_mention
strings appeared as substrings of any cleansed Token.token.
Otherwise, returns False.
:type target_mentions: list of basestring
:type chain_mentions: list of basestring
:returns bool:
'''
for name in target_mentions:
for chain_ment in chain_mentions:
if name in chain_ment:
return True
return False |
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
Insert the language tabs.
"""
if self._has_translatable_model():
lang_code = self.get_form_language(request, obj)
lang = get_language_title(lang_code)
available_languages = self.get_available_languages(obj)
language_tabs = self.get_language_tabs(request, obj, available_languages)
context['language_tabs'] = language_tabs
if language_tabs:
context['title'] = '%s (%s)' % (context['title'], lang)
if not language_tabs.current_is_translated:
add = True # lets prepopulated_fields_js work.
# Patch form_url to contain the "language" GET parameter.
# Otherwise AdminModel.render_change_form will clean the URL
# and remove the "language" when coming from a filtered object
# list causing the wrong translation to be changed.
params = request.GET.dict()
params['language'] = lang_code
form_url = add_preserved_filters({
'preserved_filters': urlencode(params),
'opts': self.model._meta
}, form_url)
# django-fluent-pages uses the same technique
if 'default_change_form_template' not in context:
context['default_change_form_template'] = self.default_change_form_template
#context['base_template'] = self.get_change_form_base_template()
return super(TranslatableAdmin, self).render_change_form(request, context, add, change, form_url, obj) | Insert the language tabs. | Below is the the instruction that describes the task:
### Input:
Insert the language tabs.
### Response:
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
Insert the language tabs.
"""
if self._has_translatable_model():
lang_code = self.get_form_language(request, obj)
lang = get_language_title(lang_code)
available_languages = self.get_available_languages(obj)
language_tabs = self.get_language_tabs(request, obj, available_languages)
context['language_tabs'] = language_tabs
if language_tabs:
context['title'] = '%s (%s)' % (context['title'], lang)
if not language_tabs.current_is_translated:
add = True # lets prepopulated_fields_js work.
# Patch form_url to contain the "language" GET parameter.
# Otherwise AdminModel.render_change_form will clean the URL
# and remove the "language" when coming from a filtered object
# list causing the wrong translation to be changed.
params = request.GET.dict()
params['language'] = lang_code
form_url = add_preserved_filters({
'preserved_filters': urlencode(params),
'opts': self.model._meta
}, form_url)
# django-fluent-pages uses the same technique
if 'default_change_form_template' not in context:
context['default_change_form_template'] = self.default_change_form_template
#context['base_template'] = self.get_change_form_base_template()
return super(TranslatableAdmin, self).render_change_form(request, context, add, change, form_url, obj) |
def get_license(id):
"""
Get a specific License by either ID or fullname
"""
response = utils.checked_api_call(
pnc_api.licenses, 'get_specific', id= id)
if response:
return utils.format_json(response.content) | Get a specific License by either ID or fullname | Below is the the instruction that describes the task:
### Input:
Get a specific License by either ID or fullname
### Response:
def get_license(id):
"""
Get a specific License by either ID or fullname
"""
response = utils.checked_api_call(
pnc_api.licenses, 'get_specific', id= id)
if response:
return utils.format_json(response.content) |
def tree2rec(tree,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1): # pragma: no cover
"""View the result of :func:`tree2array` as a record array.
.. warning:: ``tree2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
tree2array(treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array
"""
warnings.warn("tree2rec is deprecated and will be removed in 5.0.0. "
"Instead use tree2array(...).view(np.recarray)",
DeprecationWarning)
return tree2array(tree,
branches=branches,
selection=selection,
object_selection=object_selection,
start=start,
stop=stop,
step=step,
include_weight=include_weight,
weight_name=weight_name,
cache_size=cache_size).view(np.recarray) | View the result of :func:`tree2array` as a record array.
.. warning:: ``tree2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
tree2array(treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array | Below is the the instruction that describes the task:
### Input:
View the result of :func:`tree2array` as a record array.
.. warning:: ``tree2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
tree2array(treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array
### Response:
def tree2rec(tree,
branches=None,
selection=None,
object_selection=None,
start=None,
stop=None,
step=None,
include_weight=False,
weight_name='weight',
cache_size=-1): # pragma: no cover
"""View the result of :func:`tree2array` as a record array.
.. warning:: ``tree2rec`` is deprecated and will be removed in
release 5.0.0. Instead use ``tree2array(...).view(np.recarray)``.
Notes
-----
* This is equivalent to::
tree2array(treename, branches).view(np.recarray)
* Refer to the :ref:`type conversion table <conversion_table>`.
See Also
--------
tree2array
"""
warnings.warn("tree2rec is deprecated and will be removed in 5.0.0. "
"Instead use tree2array(...).view(np.recarray)",
DeprecationWarning)
return tree2array(tree,
branches=branches,
selection=selection,
object_selection=object_selection,
start=start,
stop=stop,
step=step,
include_weight=include_weight,
weight_name=weight_name,
cache_size=cache_size).view(np.recarray) |
def _get_mainchain(df, invert):
"""Return only main chain atom entries from a DataFrame"""
if invert:
mc = df[(df['atom_name'] != 'C') &
(df['atom_name'] != 'O') &
(df['atom_name'] != 'N') &
(df['atom_name'] != 'CA')]
else:
mc = df[(df['atom_name'] == 'C') |
(df['atom_name'] == 'O') |
(df['atom_name'] == 'N') |
(df['atom_name'] == 'CA')]
return mc | Return only main chain atom entries from a DataFrame | Below is the the instruction that describes the task:
### Input:
Return only main chain atom entries from a DataFrame
### Response:
def _get_mainchain(df, invert):
"""Return only main chain atom entries from a DataFrame"""
if invert:
mc = df[(df['atom_name'] != 'C') &
(df['atom_name'] != 'O') &
(df['atom_name'] != 'N') &
(df['atom_name'] != 'CA')]
else:
mc = df[(df['atom_name'] == 'C') |
(df['atom_name'] == 'O') |
(df['atom_name'] == 'N') |
(df['atom_name'] == 'CA')]
return mc |
def check_valid(money):
"""Determine if an instance of `Money` is valid.
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Raises:
ValueError: if the money instance is invalid
"""
if not isinstance(money, sc_messages.Money):
raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
currency = money.currencyCode
if not currency or len(currency) != 3:
raise ValueError(_MSG_3_LETTERS_LONG)
units = money.units
nanos = money.nanos
if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):
raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
if abs(nanos) > MAX_NANOS:
raise ValueError(_MSG_NANOS_OOB) | Determine if an instance of `Money` is valid.
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Raises:
ValueError: if the money instance is invalid | Below is the the instruction that describes the task:
### Input:
Determine if an instance of `Money` is valid.
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Raises:
ValueError: if the money instance is invalid
### Response:
def check_valid(money):
"""Determine if an instance of `Money` is valid.
Args:
money (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): the
instance to test
Raises:
ValueError: if the money instance is invalid
"""
if not isinstance(money, sc_messages.Money):
raise ValueError(u'Inputs should be of type %s' % (sc_messages.Money,))
currency = money.currencyCode
if not currency or len(currency) != 3:
raise ValueError(_MSG_3_LETTERS_LONG)
units = money.units
nanos = money.nanos
if ((units > 0) and (nanos < 0)) or ((units < 0) and (nanos > 0)):
raise ValueError(_MSG_UNITS_NANOS_MISMATCH)
if abs(nanos) > MAX_NANOS:
raise ValueError(_MSG_NANOS_OOB) |
def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, scale:float=1.35) -> Iterator[List[Tensor]]:
"Computes the outputs for several augmented inputs for TTA"
dl = learn.dl(ds_type)
ds = dl.dataset
old = ds.tfms
augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in
(crop_pad, flip_lr, dihedral, zoom)]
try:
pbar = master_bar(range(8))
for i in pbar:
row = 1 if i&1 else 0
col = 1 if i&2 else 0
flip = i&4
d = {'row_pct':row, 'col_pct':col, 'is_random':False}
tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)]
if flip: tfm.append(flip_lr(p=1.))
ds.tfms = tfm
yield get_preds(learn.model, dl, pbar=pbar, activ=_loss_func2activ(learn.loss_func))[0]
finally: ds.tfms = old | Computes the outputs for several augmented inputs for TTA | Below is the the instruction that describes the task:
### Input:
Computes the outputs for several augmented inputs for TTA
### Response:
def _tta_only(learn:Learner, ds_type:DatasetType=DatasetType.Valid, scale:float=1.35) -> Iterator[List[Tensor]]:
"Computes the outputs for several augmented inputs for TTA"
dl = learn.dl(ds_type)
ds = dl.dataset
old = ds.tfms
augm_tfm = [o for o in learn.data.train_ds.tfms if o.tfm not in
(crop_pad, flip_lr, dihedral, zoom)]
try:
pbar = master_bar(range(8))
for i in pbar:
row = 1 if i&1 else 0
col = 1 if i&2 else 0
flip = i&4
d = {'row_pct':row, 'col_pct':col, 'is_random':False}
tfm = [*augm_tfm, zoom(scale=scale, **d), crop_pad(**d)]
if flip: tfm.append(flip_lr(p=1.))
ds.tfms = tfm
yield get_preds(learn.model, dl, pbar=pbar, activ=_loss_func2activ(learn.loss_func))[0]
finally: ds.tfms = old |
def communicates(self,fromstate, tostate, maxlength=999999):
"""See if a node communicates (directly or indirectly) with another. Returns the probability of the *shortest* path (probably, but not necessarily the highest probability)"""
if (not (fromstate in self.nodes)) or (not (tostate in self.nodes)):
return 0
assert (fromstate != tostate)
def _test(node,length,prob):
if length > maxlength:
return 0
if node == tostate:
prob *= self.edges_out[node][tostate]
return True
for child in self.edges_out[node].keys():
if not child in visited:
visited.add(child)
if child == tostate:
return prob * self.edges_out[node][tostate]
else:
r = _test(child, length+1, prob * self.edges_out[node][tostate])
if r:
return r
return 0
visited = set(fromstate)
return _test(fromstate,1,1) | See if a node communicates (directly or indirectly) with another. Returns the probability of the *shortest* path (probably, but not necessarily the highest probability) | Below is the the instruction that describes the task:
### Input:
See if a node communicates (directly or indirectly) with another. Returns the probability of the *shortest* path (probably, but not necessarily the highest probability)
### Response:
def communicates(self,fromstate, tostate, maxlength=999999):
"""See if a node communicates (directly or indirectly) with another. Returns the probability of the *shortest* path (probably, but not necessarily the highest probability)"""
if (not (fromstate in self.nodes)) or (not (tostate in self.nodes)):
return 0
assert (fromstate != tostate)
def _test(node,length,prob):
if length > maxlength:
return 0
if node == tostate:
prob *= self.edges_out[node][tostate]
return True
for child in self.edges_out[node].keys():
if not child in visited:
visited.add(child)
if child == tostate:
return prob * self.edges_out[node][tostate]
else:
r = _test(child, length+1, prob * self.edges_out[node][tostate])
if r:
return r
return 0
visited = set(fromstate)
return _test(fromstate,1,1) |
def get_build_tool_version(self):
"""
Gets the build tool version to be used by zipalign from build.gradle file.
Returns:
A string containing the build tool version, default is 23.0.2.
"""
with open('%s/%s/build.gradle' % (self.path, self.src_folder)) as f:
for line in f.readlines():
if 'buildToolsVersion' in line:
matches = re.findall(r'buildToolsVersion \"(.+?)\"', line)
if len(matches) == 1:
return matches[0]
return config.build_tool_version | Gets the build tool version to be used by zipalign from build.gradle file.
Returns:
A string containing the build tool version, default is 23.0.2. | Below is the the instruction that describes the task:
### Input:
Gets the build tool version to be used by zipalign from build.gradle file.
Returns:
A string containing the build tool version, default is 23.0.2.
### Response:
def get_build_tool_version(self):
"""
Gets the build tool version to be used by zipalign from build.gradle file.
Returns:
A string containing the build tool version, default is 23.0.2.
"""
with open('%s/%s/build.gradle' % (self.path, self.src_folder)) as f:
for line in f.readlines():
if 'buildToolsVersion' in line:
matches = re.findall(r'buildToolsVersion \"(.+?)\"', line)
if len(matches) == 1:
return matches[0]
return config.build_tool_version |
def add_action(self, action, add_to_toolbar=True, add_to_legend=False):
"""Add a toolbar icon to the InaSAFE toolbar.
:param action: The action that should be added to the toolbar.
:type action: QAction
:param add_to_toolbar: Flag indicating whether the action should also
be added to the InaSAFE toolbar. Defaults to True.
:type add_to_toolbar: bool
:param add_to_legend: Flag indicating whether the action should also
be added to the layer legend menu. Default to False.
:type add_to_legend: bool
"""
# store in the class list of actions for easy plugin unloading
self.actions.append(action)
self.iface.addPluginToMenu(self.tr('InaSAFE'), action)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_legend:
# The id is the action name without spaces, tabs ...
self.iface.addCustomActionForLayerType(
action,
self.tr('InaSAFE'),
QgsMapLayer.VectorLayer,
True)
self.iface.addCustomActionForLayerType(
action,
self.tr('InaSAFE'),
QgsMapLayer.RasterLayer,
True) | Add a toolbar icon to the InaSAFE toolbar.
:param action: The action that should be added to the toolbar.
:type action: QAction
:param add_to_toolbar: Flag indicating whether the action should also
be added to the InaSAFE toolbar. Defaults to True.
:type add_to_toolbar: bool
:param add_to_legend: Flag indicating whether the action should also
be added to the layer legend menu. Default to False.
:type add_to_legend: bool | Below is the the instruction that describes the task:
### Input:
Add a toolbar icon to the InaSAFE toolbar.
:param action: The action that should be added to the toolbar.
:type action: QAction
:param add_to_toolbar: Flag indicating whether the action should also
be added to the InaSAFE toolbar. Defaults to True.
:type add_to_toolbar: bool
:param add_to_legend: Flag indicating whether the action should also
be added to the layer legend menu. Default to False.
:type add_to_legend: bool
### Response:
def add_action(self, action, add_to_toolbar=True, add_to_legend=False):
"""Add a toolbar icon to the InaSAFE toolbar.
:param action: The action that should be added to the toolbar.
:type action: QAction
:param add_to_toolbar: Flag indicating whether the action should also
be added to the InaSAFE toolbar. Defaults to True.
:type add_to_toolbar: bool
:param add_to_legend: Flag indicating whether the action should also
be added to the layer legend menu. Default to False.
:type add_to_legend: bool
"""
# store in the class list of actions for easy plugin unloading
self.actions.append(action)
self.iface.addPluginToMenu(self.tr('InaSAFE'), action)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_legend:
# The id is the action name without spaces, tabs ...
self.iface.addCustomActionForLayerType(
action,
self.tr('InaSAFE'),
QgsMapLayer.VectorLayer,
True)
self.iface.addCustomActionForLayerType(
action,
self.tr('InaSAFE'),
QgsMapLayer.RasterLayer,
True) |
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
"""Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
"""
try:
return int(cell)
except ValueError:
pass
try:
return float(cell)
except ValueError:
pass
# TODO Check for dates?
return cell | Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value. | Below is the the instruction that describes the task:
### Input:
Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
### Response:
def parse_cell(self, cell, coords, cell_mode=CellMode.cooked):
"""Tries to convert the value first to an int, then a float and if neither is
successful it returns the string value.
"""
try:
return int(cell)
except ValueError:
pass
try:
return float(cell)
except ValueError:
pass
# TODO Check for dates?
return cell |
def get_status(self):
"""
Get the current state of this topology.
The state values are from the topology.proto
RUNNING = 1, PAUSED = 2, KILLED = 3
if the state is None "Unknown" is returned.
"""
status = None
if self.physical_plan and self.physical_plan.topology:
status = self.physical_plan.topology.state
if status == 1:
return "Running"
elif status == 2:
return "Paused"
elif status == 3:
return "Killed"
else:
return "Unknown" | Get the current state of this topology.
The state values are from the topology.proto
RUNNING = 1, PAUSED = 2, KILLED = 3
if the state is None "Unknown" is returned. | Below is the the instruction that describes the task:
### Input:
Get the current state of this topology.
The state values are from the topology.proto
RUNNING = 1, PAUSED = 2, KILLED = 3
if the state is None "Unknown" is returned.
### Response:
def get_status(self):
"""
Get the current state of this topology.
The state values are from the topology.proto
RUNNING = 1, PAUSED = 2, KILLED = 3
if the state is None "Unknown" is returned.
"""
status = None
if self.physical_plan and self.physical_plan.topology:
status = self.physical_plan.topology.state
if status == 1:
return "Running"
elif status == 2:
return "Paused"
elif status == 3:
return "Killed"
else:
return "Unknown" |
def revs(self, branch='master', limit=None, skip=None, num_datapoints=None):
"""
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame
"""
if limit is not None:
limit = math.floor(float(limit) / len(self.repos))
if num_datapoints is not None:
num_datapoints = math.floor(float(num_datapoints) / len(self.repos))
df = pd.DataFrame(columns=['repository', 'rev'])
if _has_joblib:
ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(
delayed(_revs_func)
(x, branch, limit, skip, num_datapoints) for x in self.repos
)
for d in ds:
df = df.append(d)
else:
for repo in self.repos:
try:
revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
revs['repository'] = repo.repo_name
df = df.append(revs)
except GitCommandError:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
df.reset_index()
return df | Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame | Below is the the instruction that describes the task:
### Input:
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame
### Response:
def revs(self, branch='master', limit=None, skip=None, num_datapoints=None):
"""
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns:
* date
* repository
* rev
:param branch: (optional, default 'master') the branch to work in
:param limit: (optional, default None), the maximum number of revisions to return, None for no limit
:param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping.
:param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used
:return: DataFrame
"""
if limit is not None:
limit = math.floor(float(limit) / len(self.repos))
if num_datapoints is not None:
num_datapoints = math.floor(float(num_datapoints) / len(self.repos))
df = pd.DataFrame(columns=['repository', 'rev'])
if _has_joblib:
ds = Parallel(n_jobs=-1, backend='threading', verbose=0)(
delayed(_revs_func)
(x, branch, limit, skip, num_datapoints) for x in self.repos
)
for d in ds:
df = df.append(d)
else:
for repo in self.repos:
try:
revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints)
revs['repository'] = repo.repo_name
df = df.append(revs)
except GitCommandError:
print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
df.reset_index()
return df |
def on_discovery_update(self, name, new_config):
"""
Once a Discovery is updated we update each associated Service to reset
its up/down status so that the next iteration of the `check_loop`
loop does the proper reporting again.
"""
for service in self.configurables[Service].values():
if service.discovery == name:
service.reset_status() | Once a Discovery is updated we update each associated Service to reset
its up/down status so that the next iteration of the `check_loop`
loop does the proper reporting again. | Below is the the instruction that describes the task:
### Input:
Once a Discovery is updated we update each associated Service to reset
its up/down status so that the next iteration of the `check_loop`
loop does the proper reporting again.
### Response:
def on_discovery_update(self, name, new_config):
"""
Once a Discovery is updated we update each associated Service to reset
its up/down status so that the next iteration of the `check_loop`
loop does the proper reporting again.
"""
for service in self.configurables[Service].values():
if service.discovery == name:
service.reset_status() |
def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words: around how many words the sentence should contain
:param variable_nb_words: set to false if you want exactly ``nb``
words returned, otherwise the result may include a number of words
of ``nb`` +/-40% (with a minimum of 1)
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: str
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = self.randomize_nb_elements(nb_words, min=1)
words = self.words(nb=nb_words, ext_word_list=ext_word_list)
words[0] = words[0].title()
return self.word_connector.join(words) + self.sentence_punctuation | Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words: around how many words the sentence should contain
:param variable_nb_words: set to false if you want exactly ``nb``
words returned, otherwise the result may include a number of words
of ``nb`` +/-40% (with a minimum of 1)
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words: around how many words the sentence should contain
:param variable_nb_words: set to false if you want exactly ``nb``
words returned, otherwise the result may include a number of words
of ``nb`` +/-40% (with a minimum of 1)
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: str
### Response:
def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):
"""
Generate a random sentence
:example 'Lorem ipsum dolor sit amet.'
:param nb_words: around how many words the sentence should contain
:param variable_nb_words: set to false if you want exactly ``nb``
words returned, otherwise the result may include a number of words
of ``nb`` +/-40% (with a minimum of 1)
:param ext_word_list: a list of words you would like to have instead of
'Lorem ipsum'.
:rtype: str
"""
if nb_words <= 0:
return ''
if variable_nb_words:
nb_words = self.randomize_nb_elements(nb_words, min=1)
words = self.words(nb=nb_words, ext_word_list=ext_word_list)
words[0] = words[0].title()
return self.word_connector.join(words) + self.sentence_punctuation |
def flush(self):
"""Flushes the collected information"""
self.__flushLevel(0)
if self.__lastImport is not None:
self.imports.append(self.__lastImport) | Flushes the collected information | Below is the the instruction that describes the task:
### Input:
Flushes the collected information
### Response:
def flush(self):
"""Flushes the collected information"""
self.__flushLevel(0)
if self.__lastImport is not None:
self.imports.append(self.__lastImport) |
def get_table_names(connection):
"""
Return a list of the table names in the database.
"""
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type == 'table'")
return [name for (name,) in cursor] | Return a list of the table names in the database. | Below is the the instruction that describes the task:
### Input:
Return a list of the table names in the database.
### Response:
def get_table_names(connection):
"""
Return a list of the table names in the database.
"""
cursor = connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type == 'table'")
return [name for (name,) in cursor] |
def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):
"""Scatter of group using representation of data Y.
"""
mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values
color = adata.uns[key + '_colors'][imask]
if not isinstance(color[0], str):
from matplotlib.colors import rgb2hex
color = rgb2hex(adata.uns[key + '_colors'][imask])
if not is_color_like(color):
raise ValueError('"{}" is not a valid matplotlib color.'.format(color))
data = [Y[mask, 0], Y[mask, 1]]
if projection == '3d': data.append(Y[mask, 2])
ax.scatter(*data,
marker='.',
alpha=alpha,
c=color,
edgecolors='none',
s=size,
label=adata.obs[key].cat.categories[imask],
rasterized=settings._vector_friendly)
return mask | Scatter of group using representation of data Y. | Below is the the instruction that describes the task:
### Input:
Scatter of group using representation of data Y.
### Response:
def scatter_group(ax, key, imask, adata, Y, projection='2d', size=3, alpha=None):
"""Scatter of group using representation of data Y.
"""
mask = adata.obs[key].cat.categories[imask] == adata.obs[key].values
color = adata.uns[key + '_colors'][imask]
if not isinstance(color[0], str):
from matplotlib.colors import rgb2hex
color = rgb2hex(adata.uns[key + '_colors'][imask])
if not is_color_like(color):
raise ValueError('"{}" is not a valid matplotlib color.'.format(color))
data = [Y[mask, 0], Y[mask, 1]]
if projection == '3d': data.append(Y[mask, 2])
ax.scatter(*data,
marker='.',
alpha=alpha,
c=color,
edgecolors='none',
s=size,
label=adata.obs[key].cat.categories[imask],
rasterized=settings._vector_friendly)
return mask |
def flatten(in_list):
"""given a list of values in_list, flatten returns the list obtained by
flattening the top-level elements of in_list."""
out_list = []
for val in in_list:
if isinstance(val, list):
out_list.extend(val)
else:
out_list.append(val)
return out_list | given a list of values in_list, flatten returns the list obtained by
flattening the top-level elements of in_list. | Below is the instruction that describes the task:
### Input:
given a list of values in_list, flatten returns the list obtained by
flattening the top-level elements of in_list.
### Response:
def flatten(in_list):
"""given a list of values in_list, flatten returns the list obtained by
flattening the top-level elements of in_list."""
out_list = []
for val in in_list:
if isinstance(val, list):
out_list.extend(val)
else:
out_list.append(val)
return out_list |
def remove_node(self, node):
"""Removes a node and its attributes from the hypergraph. Removes
every hyperedge that contains this node in either the head or the tail.
:param node: reference to the node being added.
:raises: ValueError -- No such node exists.
Examples:
::
>>> H = DirectedHypergraph()
>>> H.add_node("A", label="positive")
>>> H.remove_node("A")
"""
if not self.has_node(node):
raise ValueError("No such node exists.")
# Remove every hyperedge which is in the forward star of the node
forward_star = self.get_forward_star(node)
for hyperedge_id in forward_star:
self.remove_hyperedge(hyperedge_id)
# Remove every hyperedge which is in the backward star of the node
# but that is not also in the forward start of the node (to handle
# overlapping hyperedges)
backward_star = self.get_backward_star(node)
for hyperedge_id in backward_star - forward_star:
self.remove_hyperedge(hyperedge_id)
# Remove node's forward and backward star
del self._forward_star[node]
del self._backward_star[node]
# Remove node's attributes dictionary
del self._node_attributes[node] | Removes a node and its attributes from the hypergraph. Removes
every hyperedge that contains this node in either the head or the tail.
:param node: reference to the node being added.
:raises: ValueError -- No such node exists.
Examples:
::
>>> H = DirectedHypergraph()
>>> H.add_node("A", label="positive")
    >>> H.remove_node("A") | Below is the instruction that describes the task:
### Input:
Removes a node and its attributes from the hypergraph. Removes
every hyperedge that contains this node in either the head or the tail.
:param node: reference to the node being added.
:raises: ValueError -- No such node exists.
Examples:
::
>>> H = DirectedHypergraph()
>>> H.add_node("A", label="positive")
>>> H.remove_node("A")
### Response:
def remove_node(self, node):
"""Removes a node and its attributes from the hypergraph. Removes
every hyperedge that contains this node in either the head or the tail.
:param node: reference to the node being added.
:raises: ValueError -- No such node exists.
Examples:
::
>>> H = DirectedHypergraph()
>>> H.add_node("A", label="positive")
>>> H.remove_node("A")
"""
if not self.has_node(node):
raise ValueError("No such node exists.")
# Remove every hyperedge which is in the forward star of the node
forward_star = self.get_forward_star(node)
for hyperedge_id in forward_star:
self.remove_hyperedge(hyperedge_id)
# Remove every hyperedge which is in the backward star of the node
# but that is not also in the forward start of the node (to handle
# overlapping hyperedges)
backward_star = self.get_backward_star(node)
for hyperedge_id in backward_star - forward_star:
self.remove_hyperedge(hyperedge_id)
# Remove node's forward and backward star
del self._forward_star[node]
del self._backward_star[node]
# Remove node's attributes dictionary
del self._node_attributes[node] |
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self | Skew element along the x-axis by the given angle.
Parameters
----------
x : float
    x-axis skew angle in degrees | Below is the instruction that describes the task:
### Input:
Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
### Response:
def skew_x(self, x):
"""Skew element along the x-axis by the given angle.
Parameters
----------
x : float
x-axis skew angle in degrees
"""
self.root.set("transform", "%s skewX(%f)" %
(self.root.get("transform") or '', x))
return self |
def update(self, *args, **kwargs):
"""Update the last section record"""
self.augment_args(args, kwargs)
kwargs['log_action'] = kwargs.get('log_action', 'update')
if not self.rec:
return self.add(**kwargs)
else:
for k, v in kwargs.items():
# Don't update object; use whatever was set in the original record
if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
setattr(self.rec, k, v)
self._session.merge(self.rec)
if self._logger:
self._logger.info(self.rec.log_str)
self._session.commit()
self._ai_rec_id = None
        return self.rec.id | Update the last section record | Below is the instruction that describes the task:
### Input:
Update the last section record
### Response:
def update(self, *args, **kwargs):
"""Update the last section record"""
self.augment_args(args, kwargs)
kwargs['log_action'] = kwargs.get('log_action', 'update')
if not self.rec:
return self.add(**kwargs)
else:
for k, v in kwargs.items():
# Don't update object; use whatever was set in the original record
if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
setattr(self.rec, k, v)
self._session.merge(self.rec)
if self._logger:
self._logger.info(self.rec.log_str)
self._session.commit()
self._ai_rec_id = None
return self.rec.id |
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
    return _aux.max(0) | operator_si operator. | Below is the instruction that describes the task:
### Input:
operator_si operator.
### Response:
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0) |
def _download_cl(cls, filename):
"""Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
Selects the correct endpoint for non us-east support:
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
In eu-central-1 gof3r does not support new AWS signatures,
so we fall back to the standard AWS commandline interface:
https://github.com/rlmcpherson/s3gof3r/issues/45
"""
file_info = cls.parse_remote(filename)
region = cls.get_region(filename)
if region in REGIONS_NEWPERMS["s3"]:
return cls._cl_aws_cli(file_info, region)
else:
return cls._cl_gof3r(file_info, region) | Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
Selects the correct endpoint for non us-east support:
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
In eu-central-1 gof3r does not support new AWS signatures,
so we fall back to the standard AWS commandline interface:
https://github.com/rlmcpherson/s3gof3r/issues/45 | Below is the instruction that describes the task:
### Input:
Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
Selects the correct endpoint for non us-east support:
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
In eu-central-1 gof3r does not support new AWS signatures,
so we fall back to the standard AWS commandline interface:
https://github.com/rlmcpherson/s3gof3r/issues/45
### Response:
def _download_cl(cls, filename):
"""Provide potentially streaming download from S3 using gof3r
or the AWS CLI.
Selects the correct endpoint for non us-east support:
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
In eu-central-1 gof3r does not support new AWS signatures,
so we fall back to the standard AWS commandline interface:
https://github.com/rlmcpherson/s3gof3r/issues/45
"""
file_info = cls.parse_remote(filename)
region = cls.get_region(filename)
if region in REGIONS_NEWPERMS["s3"]:
return cls._cl_aws_cli(file_info, region)
else:
return cls._cl_gof3r(file_info, region) |
def split_shard(self, project_name, logstore_name, shardId, split_hash):
""" split a readwrite shard into two shards
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shardId: int
:param shardId: the shard id
:type split_hash: string
:param split_hash: the internal hash between the shard begin and end hash
:return: ListShardResponse
:raise: LogException
"""
headers = {}
params = {"action": "split", "key": split_hash}
resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
(resp, header) = self._send("POST", project_name, None, resource, params, headers)
return ListShardResponse(resp, header) | split a readwrite shard into two shards
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shardId: int
:param shardId: the shard id
:type split_hash: string
:param split_hash: the internal hash between the shard begin and end hash
:return: ListShardResponse
:raise: LogException | Below is the instruction that describes the task:
### Input:
split a readwrite shard into two shards
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shardId: int
:param shardId: the shard id
:type split_hash: string
:param split_hash: the internal hash between the shard begin and end hash
:return: ListShardResponse
:raise: LogException
### Response:
def split_shard(self, project_name, logstore_name, shardId, split_hash):
""" split a readwrite shard into two shards
Unsuccessful opertaion will cause an LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shardId: int
:param shardId: the shard id
:type split_hash: string
:param split_hash: the internal hash between the shard begin and end hash
:return: ListShardResponse
:raise: LogException
"""
headers = {}
params = {"action": "split", "key": split_hash}
resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
(resp, header) = self._send("POST", project_name, None, resource, params, headers)
return ListShardResponse(resp, header) |
def load_module(name, original_module):
"""
Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module.
"""
module = ModuleType(name)
if PY3:
import importlib.util
spec = importlib.util.find_spec(original_module.__name__)
source = spec.loader.get_code(original_module.__name__)
else:
if getattr(sys, "frozen", False):
raise NotImplementedError("Can't load modules on Python 2 with PyInstaller")
path = original_module.__file__
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
with open(path) as f:
source = f.read()
exec_(source, module.__dict__, module.__dict__)
return module | Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module. | Below is the instruction that describes the task:
### Input:
Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module.
### Response:
def load_module(name, original_module):
"""
Load a copy of a module, distinct from what you'd get if you imported
it directly.
@param str name: The name of the new module.
@param original_module: The original module we're recreating.
@return: A new, distinct module.
"""
module = ModuleType(name)
if PY3:
import importlib.util
spec = importlib.util.find_spec(original_module.__name__)
source = spec.loader.get_code(original_module.__name__)
else:
if getattr(sys, "frozen", False):
raise NotImplementedError("Can't load modules on Python 2 with PyInstaller")
path = original_module.__file__
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
with open(path) as f:
source = f.read()
exec_(source, module.__dict__, module.__dict__)
return module |
def is_cursor_before(self, position, char_offset=0):
"""Return True if cursor is before *position*"""
position = self.get_position(position) + char_offset
cursor = self.textCursor()
cursor.movePosition(QTextCursor.End)
if position < cursor.position():
cursor.setPosition(position)
    return self.textCursor() < cursor | Return True if cursor is before *position* | Below is the instruction that describes the task:
### Input:
Return True if cursor is before *position*
### Response:
def is_cursor_before(self, position, char_offset=0):
"""Return True if cursor is before *position*"""
position = self.get_position(position) + char_offset
cursor = self.textCursor()
cursor.movePosition(QTextCursor.End)
if position < cursor.position():
cursor.setPosition(position)
return self.textCursor() < cursor |
def patch_module(module, name, replacement, original=UNSPECIFIED, aliases=True, location=None, **_bogus_options):
"""
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
"""
rollback = Rollback()
seen = False
original = getattr(module, name) if original is UNSPECIFIED else original
location = module.__name__ if hasattr(module, '__name__') else type(module).__module__
target = module.__name__ if hasattr(module, '__name__') else type(module).__name__
try:
replacement.__module__ = location
except (TypeError, AttributeError):
pass
for alias in dir(module):
logdebug("alias:%s (%s)", alias, name)
if hasattr(module, alias):
obj = getattr(module, alias)
logdebug("- %s:%s (%s)", obj, original, obj is original)
if obj is original:
if aliases or alias == name:
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
if alias == name:
seen = True
elif alias == name:
if ismethod(obj):
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
seen = True
else:
raise AssertionError("%s.%s = %s is not %s." % (module, alias, obj, original))
if not seen:
warnings.warn('Setting %s.%s to %s. There was no previous definition, probably patching the wrong module.' % (
target, name, replacement
))
logdebug("= saving %s on %s.%s ...", replacement, target, name)
setattr(module, name, replacement)
rollback.merge(lambda: setattr(module, name, original))
return rollback | Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object. | Below is the instruction that describes the task:
### Input:
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
### Response:
def patch_module(module, name, replacement, original=UNSPECIFIED, aliases=True, location=None, **_bogus_options):
"""
Low-level attribute patcher.
:param module module: Object to patch.
:param str name: Attribute to patch
:param replacement: The replacement value.
:param original: The original value (in case the object beeing patched uses descriptors or is plain weird).
:param bool aliases: If ``True`` patch all the attributes that have the same original value.
:returns: An :obj:`aspectlib.Rollback` object.
"""
rollback = Rollback()
seen = False
original = getattr(module, name) if original is UNSPECIFIED else original
location = module.__name__ if hasattr(module, '__name__') else type(module).__module__
target = module.__name__ if hasattr(module, '__name__') else type(module).__name__
try:
replacement.__module__ = location
except (TypeError, AttributeError):
pass
for alias in dir(module):
logdebug("alias:%s (%s)", alias, name)
if hasattr(module, alias):
obj = getattr(module, alias)
logdebug("- %s:%s (%s)", obj, original, obj is original)
if obj is original:
if aliases or alias == name:
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
if alias == name:
seen = True
elif alias == name:
if ismethod(obj):
logdebug("= saving %s on %s.%s ...", replacement, target, alias)
setattr(module, alias, replacement)
rollback.merge(lambda alias=alias: setattr(module, alias, original))
seen = True
else:
raise AssertionError("%s.%s = %s is not %s." % (module, alias, obj, original))
if not seen:
warnings.warn('Setting %s.%s to %s. There was no previous definition, probably patching the wrong module.' % (
target, name, replacement
))
logdebug("= saving %s on %s.%s ...", replacement, target, name)
setattr(module, name, replacement)
rollback.merge(lambda: setattr(module, name, original))
return rollback |
def _bessel_ive(v, z, cache=None):
"""Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0."""
# TODO(b/67497980): Switch to a more numerically faithful implementation.
z = tf.convert_to_tensor(value=z)
wrap = lambda result: tf.debugging.check_numerics(result, 'besseli{}'.format(v
))
if float(v) >= 2:
raise ValueError(
'Evaluating bessel_i by recurrence becomes imprecise for large v')
cache = cache or {}
safe_z = tf.where(z > 0, z, tf.ones_like(z))
if v in cache:
return wrap(cache[v])
if v == 0:
cache[v] = tf.math.bessel_i0e(z)
elif v == 1:
cache[v] = tf.math.bessel_i1e(z)
elif v == 0.5:
# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2
sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * sinhe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
elif v == -0.5:
# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2
coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * coshe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
if v <= 1:
return wrap(cache[v])
# Recurrence relation:
cache[v] = (_bessel_ive(v - 2, z, cache) -
(2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z)
  return wrap(cache[v]) | Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0. | Below is the instruction that describes the task:
### Input:
Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0.
### Response:
def _bessel_ive(v, z, cache=None):
"""Computes I_v(z)*exp(-abs(z)) using a recurrence relation, where z > 0."""
# TODO(b/67497980): Switch to a more numerically faithful implementation.
z = tf.convert_to_tensor(value=z)
wrap = lambda result: tf.debugging.check_numerics(result, 'besseli{}'.format(v
))
if float(v) >= 2:
raise ValueError(
'Evaluating bessel_i by recurrence becomes imprecise for large v')
cache = cache or {}
safe_z = tf.where(z > 0, z, tf.ones_like(z))
if v in cache:
return wrap(cache[v])
if v == 0:
cache[v] = tf.math.bessel_i0e(z)
elif v == 1:
cache[v] = tf.math.bessel_i1e(z)
elif v == 0.5:
# sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2
sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * sinhe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
elif v == -0.5:
# cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2
coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2
cache[v] = (
np.sqrt(2 / np.pi) * coshe(z) *
tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
if v <= 1:
return wrap(cache[v])
# Recurrence relation:
cache[v] = (_bessel_ive(v - 2, z, cache) -
(2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z)
return wrap(cache[v]) |
def create_api_deployment(restApiId, stageName, stageDescription='', description='', cacheClusterEnabled=False,
cacheClusterSize='0.5', variables=None,
region=None, key=None, keyid=None, profile=None):
'''
Creates a new API deployment.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_deployent restApiId stagename stageDescription='' \\
description='' cacheClusterEnabled=True|False cacheClusterSize=0.5 variables='{"name": "value"}'
'''
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployment = conn.create_deployment(restApiId=restApiId, stageName=stageName,
stageDescription=stageDescription, description=description,
cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize,
variables=variables)
return {'created': True, 'deployment': _convert_datetime_str(deployment)}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | Creates a new API deployment.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_deployent restApiId stagename stageDescription='' \\
    description='' cacheClusterEnabled=True|False cacheClusterSize=0.5 variables='{"name": "value"}' | Below is the instruction that describes the task:
### Input:
Creates a new API deployment.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_deployent restApiId stagename stageDescription='' \\
description='' cacheClusterEnabled=True|False cacheClusterSize=0.5 variables='{"name": "value"}'
### Response:
def create_api_deployment(restApiId, stageName, stageDescription='', description='', cacheClusterEnabled=False,
cacheClusterSize='0.5', variables=None,
region=None, key=None, keyid=None, profile=None):
'''
Creates a new API deployment.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_deployent restApiId stagename stageDescription='' \\
description='' cacheClusterEnabled=True|False cacheClusterSize=0.5 variables='{"name": "value"}'
'''
try:
variables = dict() if variables is None else variables
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
deployment = conn.create_deployment(restApiId=restApiId, stageName=stageName,
stageDescription=stageDescription, description=description,
cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize,
variables=variables)
return {'created': True, 'deployment': _convert_datetime_str(deployment)}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} |
def custom_image(self, user):
"""Returns the path to the custom image set for this game, or None if
no image is set"""
for ext in self.valid_custom_image_extensions():
image_location = self._custom_image_path(user, ext)
if os.path.isfile(image_location):
return image_location
return None | Returns the path to the custom image set for this game, or None if
no image is set | Below is the instruction that describes the task:
### Input:
Returns the path to the custom image set for this game, or None if
no image is set
### Response:
def custom_image(self, user):
"""Returns the path to the custom image set for this game, or None if
no image is set"""
for ext in self.valid_custom_image_extensions():
image_location = self._custom_image_path(user, ext)
if os.path.isfile(image_location):
return image_location
return None |
def submodules(self):
"""
Property to return all sub-modules of the node, recursively.
Returns:
list of Module: the sub-modules.
"""
submodules = []
submodules.extend(self.modules)
for p in self.packages:
submodules.extend(p.submodules)
return submodules | Property to return all sub-modules of the node, recursively.
Returns:
    list of Module: the sub-modules. | Below is the instruction that describes the task:
### Input:
Property to return all sub-modules of the node, recursively.
Returns:
list of Module: the sub-modules.
### Response:
def submodules(self):
"""
Property to return all sub-modules of the node, recursively.
Returns:
list of Module: the sub-modules.
"""
submodules = []
submodules.extend(self.modules)
for p in self.packages:
submodules.extend(p.submodules)
return submodules |
def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):
"""Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict
"""
result = defaultdict(set)
for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):
result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])
return dict(result) | Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict | Below is the instruction that describes the task:
### Input:
Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict
### Response:
def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):
"""Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict
"""
result = defaultdict(set)
for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):
result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])
return dict(result) |
def to_dict(self, data=True):
"""
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for coverting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----------
data : bool, optional
Whether to include the actual data in the dictionary. When set to
False, returns just the schema.
See also
--------
Dataset.from_dict
"""
d = {'coords': {}, 'attrs': decode_numpy_dict_values(self.attrs),
'dims': dict(self.dims), 'data_vars': {}}
for k in self.coords:
d['coords'].update({k: self[k].variable.to_dict(data=data)})
for k in self.data_vars:
d['data_vars'].update({k: self[k].variable.to_dict(data=data)})
return d | Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for coverting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----------
data : bool, optional
Whether to include the actual data in the dictionary. When set to
False, returns just the schema.
See also
--------
Dataset.from_dict | Below is the instruction that describes the task:
### Input:
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for coverting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----------
data : bool, optional
Whether to include the actual data in the dictionary. When set to
False, returns just the schema.
See also
--------
Dataset.from_dict
### Response:
def to_dict(self, data=True):
"""
Convert this dataset to a dictionary following xarray naming
conventions.
Converts all variables and attributes to native Python objects
Useful for coverting to json. To avoid datetime incompatibility
use decode_times=False kwarg in xarrray.open_dataset.
Parameters
----------
data : bool, optional
Whether to include the actual data in the dictionary. When set to
False, returns just the schema.
See also
--------
Dataset.from_dict
"""
d = {'coords': {}, 'attrs': decode_numpy_dict_values(self.attrs),
'dims': dict(self.dims), 'data_vars': {}}
for k in self.coords:
d['coords'].update({k: self[k].variable.to_dict(data=data)})
for k in self.data_vars:
d['data_vars'].update({k: self[k].variable.to_dict(data=data)})
return d |
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
"""Attempts to find the yaml configuration file, then read it.
Args:
relative_path: Optional relative path override.
Returns:
A Config object if the open and read were successful, None if the file
does not exist (which is not considered an error).
Raises:
Error (some subclass): As thrown by the called Read() function.
"""
# Note: This logic follows the convention established by source-context.json
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return Read(f)
except IOError:
return None | Attempts to find the yaml configuration file, then read it.
Args:
relative_path: Optional relative path override.
Returns:
A Config object if the open and read were successful, None if the file
does not exist (which is not considered an error).
Raises:
    Error (some subclass): As thrown by the called Read() function. | Below is the instruction that describes the task:
### Input:
Attempts to find the yaml configuration file, then read it.
Args:
relative_path: Optional relative path override.
Returns:
A Config object if the open and read were successful, None if the file
does not exist (which is not considered an error).
Raises:
Error (some subclass): As thrown by the called Read() function.
### Response:
def OpenAndRead(relative_path='debugger-blacklist.yaml'):
"""Attempts to find the yaml configuration file, then read it.
Args:
relative_path: Optional relative path override.
Returns:
A Config object if the open and read were successful, None if the file
does not exist (which is not considered an error).
Raises:
Error (some subclass): As thrown by the called Read() function.
"""
# Note: This logic follows the convention established by source-context.json
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return Read(f)
except IOError:
return None |
def or_having(self, column, operator=None, value=None):
"""
Add a "having" clause to the query
:param column: The column
:type column: str
:param operator: The having clause operator
:type operator: str
:param value: The having clause value
:type value: mixed
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
return self.having(column, operator, value, 'or') | Add a "having" clause to the query
:param column: The column
:type column: str
:param operator: The having clause operator
:type operator: str
:param value: The having clause value
:type value: mixed
:return: The current QueryBuilder instance
:rtype: QueryBuilder | Below is the instruction that describes the task:
### Input:
Add a "having" clause to the query
:param column: The column
:type column: str
:param operator: The having clause operator
:type operator: str
:param value: The having clause value
:type value: mixed
:return: The current QueryBuilder instance
:rtype: QueryBuilder
### Response:
def or_having(self, column, operator=None, value=None):
    """Add a "having" clause joined with OR to the query.

    :param column: The column
    :type column: str

    :param operator: The having clause operator
    :type operator: str

    :param value: The having clause value
    :type value: mixed

    :return: The current QueryBuilder instance
    :rtype: QueryBuilder
    """
    # Delegate to having() with the boolean connector fixed to "or".
    boolean = 'or'

    return self.having(column, operator, value, boolean)
def update(customer, **data):
"""
Update a customer from its id.
:param customer: The customer id or object
:type customer: string|Customer
:param data: The data you want to update
:return: The customer resource
:rtype resources.Customer
"""
if isinstance(customer, resources.Customer):
customer = customer.id
http_client = HttpClient()
response, _ = http_client.patch(routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer), data)
return resources.Customer(**response) | Update a customer from its id.
:param customer: The customer id or object
:type customer: string|Customer
:param data: The data you want to update
:return: The customer resource
:rtype resources.Customer | Below is the instruction that describes the task:
### Input:
Update a customer from its id.
:param customer: The customer id or object
:type customer: string|Customer
:param data: The data you want to update
:return: The customer resource
:rtype resources.Customer
### Response:
def update(customer, **data):
    """
    Update a customer from its id.

    :param customer: The customer id or object
    :type customer: string|Customer

    :param data: The data you want to update

    :return: The customer resource
    :rtype resources.Customer
    """
    # Accept either a full resource object or a raw id string.
    customer_id = customer.id if isinstance(customer, resources.Customer) else customer

    endpoint = routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer_id)
    response, _ = HttpClient().patch(endpoint, data)

    return resources.Customer(**response)
def WaitUntilDone(self, timeout=None):
"""Wait until the flow completes.
Args:
timeout: timeout in seconds. None means default timeout (1 hour). 0 means
no timeout (wait forever).
Returns:
Fresh flow object.
Raises:
PollTimeoutError: if timeout is reached.
FlowFailedError: if the flow is not successful.
"""
f = utils.Poll(
generator=self.Get,
condition=lambda f: f.data.state != f.data.RUNNING,
timeout=timeout)
if f.data.state != f.data.TERMINATED:
raise errors.FlowFailedError(
"Flow %s (%s) failed: %s" %
(self.flow_id, self.client_id, f.data.context.current_state))
return f | Wait until the flow completes.
Args:
timeout: timeout in seconds. None means default timeout (1 hour). 0 means
no timeout (wait forever).
Returns:
Fresh flow object.
Raises:
PollTimeoutError: if timeout is reached.
FlowFailedError: if the flow is not successful. | Below is the the instruction that describes the task:
### Input:
Wait until the flow completes.
Args:
timeout: timeout in seconds. None means default timeout (1 hour). 0 means
no timeout (wait forever).
Returns:
Fresh flow object.
Raises:
PollTimeoutError: if timeout is reached.
FlowFailedError: if the flow is not successful.
### Response:
def WaitUntilDone(self, timeout=None):
  """Block until the flow leaves the RUNNING state, then return it.

  Args:
    timeout: timeout in seconds. None means default timeout (1 hour). 0 means
      no timeout (wait forever).

  Returns:
    Fresh flow object.

  Raises:
    PollTimeoutError: if timeout is reached.
    FlowFailedError: if the flow is not successful.
  """
  def _HasStopped(flow_obj):
    # The flow is done (for better or worse) once it is no longer RUNNING.
    return flow_obj.data.state != flow_obj.data.RUNNING

  flow_obj = utils.Poll(
      generator=self.Get, condition=_HasStopped, timeout=timeout)

  if flow_obj.data.state == flow_obj.data.TERMINATED:
    return flow_obj

  # Any terminal state other than TERMINATED counts as a failure.
  raise errors.FlowFailedError(
      "Flow %s (%s) failed: %s" %
      (self.flow_id, self.client_id, flow_obj.data.context.current_state))
def platform_data_dir():
"""
Returns path for user-specific data files
Returns:
PathLike : path to the data dir used by the current operating system
"""
if LINUX: # nocover
dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')
elif DARWIN: # nocover
dpath_ = '~/Library/Application Support'
elif WIN32: # nocover
dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
else: # nocover
raise '~/AppData/Local'
dpath = normpath(expanduser(dpath_))
return dpath | Returns path for user-specific data files
Returns:
    PathLike : path to the data dir used by the current operating system | Below is the instruction that describes the task:
### Input:
Returns path for user-specific data files
Returns:
PathLike : path to the data dir used by the current operating system
### Response:
def platform_data_dir():
    """
    Returns path for user-specific data files

    Returns:
        PathLike : path to the data dir used by the current operating system

    Raises:
        NotImplementedError: if the current operating system is unknown
            (i.e. none of the LINUX / DARWIN / WIN32 flags is set)
    """
    if LINUX:  # nocover
        # XDG base-directory spec; fall back to its documented default.
        dpath_ = os.environ.get('XDG_DATA_HOME', '~/.local/share')
    elif DARWIN:  # nocover
        dpath_ = '~/Library/Application Support'
    elif WIN32:  # nocover
        dpath_ = os.environ.get('APPDATA', '~/AppData/Roaming')
    else:  # nocover
        # BUG FIX: the original `raise '~/AppData/Local'` raised a bare
        # string, which is itself a TypeError on Python 3 (exceptions must
        # derive from BaseException). Raise a real, descriptive exception.
        raise NotImplementedError('unknown operating system')
    dpath = normpath(expanduser(dpath_))
    return dpath
def encrypt(cls, private_key, password, kdf=None, iterations=None):
'''
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
.. code-block:: python
>>> import getpass
>>> encrypted = Account.encrypt(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
getpass.getpass()
)
{
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {
'cipher': 'aes-128-ctr',
'cipherparams': {
'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
},
'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5', # noqa: E501
'kdf': 'scrypt',
'kdfparams': {
'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '13c4a48123affaa29189e9097726c698'
},
'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
},
'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
'version': 3
}
>>> with open('my-keyfile', 'w') as f:
f.write(json.dumps(encrypted))
'''
if isinstance(private_key, keys.PrivateKey):
key_bytes = private_key.to_bytes()
else:
key_bytes = HexBytes(private_key)
if kdf is None:
kdf = cls.default_kdf
password_bytes = text_if_str(to_bytes, password)
assert len(key_bytes) == 32
return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations) | Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
.. code-block:: python
>>> import getpass
>>> encrypted = Account.encrypt(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
getpass.getpass()
)
{
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {
'cipher': 'aes-128-ctr',
'cipherparams': {
'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
},
'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5', # noqa: E501
'kdf': 'scrypt',
'kdfparams': {
'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '13c4a48123affaa29189e9097726c698'
},
'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
},
'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
'version': 3
}
>>> with open('my-keyfile', 'w') as f:
f.write(json.dumps(encrypted)) | Below is the the instruction that describes the task:
### Input:
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
.. code-block:: python
>>> import getpass
>>> encrypted = Account.encrypt(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
getpass.getpass()
)
{
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {
'cipher': 'aes-128-ctr',
'cipherparams': {
'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
},
'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5', # noqa: E501
'kdf': 'scrypt',
'kdfparams': {
'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '13c4a48123affaa29189e9097726c698'
},
'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
},
'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
'version': 3
}
>>> with open('my-keyfile', 'w') as f:
f.write(json.dumps(encrypted))
### Response:
def encrypt(cls, private_key, password, kdf=None, iterations=None):
    '''
    Creates a dictionary with an encrypted version of your private key.
    To import this keyfile into Ethereum clients like geth and parity:
    encode this dictionary with :func:`json.dumps` and save it to disk where your
    client keeps key files.

    :param private_key: The raw private key
    :type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
    :param str password: The password which you will need to unlock the account in your client
    :param str kdf: The key derivation function to use when encrypting your private key
    :param int iterations: The work factor for the key derivation function
    :returns: The data to use in your encrypted file
    :rtype: dict
    :raises ValueError: if the decoded private key is not exactly 32 bytes long

    .. code-block:: python
        >>> import getpass
        >>> encrypted = Account.encrypt(
            0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
            getpass.getpass()
        )
        {
            'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
            'crypto': {
                'cipher': 'aes-128-ctr',
                'cipherparams': {
                    'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
                },
                'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5',  # noqa: E501
                'kdf': 'scrypt',
                'kdfparams': {
                    'dklen': 32,
                    'n': 262144,
                    'p': 8,
                    'r': 1,
                    'salt': '13c4a48123affaa29189e9097726c698'
                },
                'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
            },
            'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
            'version': 3
        }
        >>> with open('my-keyfile', 'w') as f:
            f.write(json.dumps(encrypted))
    '''
    # Normalize the key to raw bytes, whatever form the caller supplied.
    if isinstance(private_key, keys.PrivateKey):
        key_bytes = private_key.to_bytes()
    else:
        key_bytes = HexBytes(private_key)

    if kdf is None:
        kdf = cls.default_kdf

    password_bytes = text_if_str(to_bytes, password)

    # BUG FIX: the original used `assert len(key_bytes) == 32`, which is
    # silently stripped when Python runs with -O, allowing a malformed key
    # to be encrypted. Validate explicitly instead.
    if len(key_bytes) != 32:
        raise ValueError(
            'The private key must be exactly 32 bytes long, got %d bytes.'
            % len(key_bytes))

    return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations)
def apply_spectral_norm(x):
"""Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
"""
weights_shape = shape_list(x)
other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
# Reshape into a 2-D matrix with outer size num_filters.
weights_2d = tf.reshape(x, (other, num_filters))
# v = Wu / ||W u||
with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
u = tf.get_variable(
"u", [num_filters, 1],
initializer=tf.truncated_normal_initializer(),
trainable=False)
v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
# u_new = vW / ||v W||
u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
# s = v*W*u
spectral_norm = tf.squeeze(
tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
# set u equal to u_new in the next iteration.
assign_op = tf.assign(u, tf.transpose(u_new))
return tf.divide(x, spectral_norm), assign_op | Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u". | Below is the the instruction that describes the task:
### Input:
Normalizes x using the spectral norm.
The implementation follows Algorithm 1 of
https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
reshaped such that the number of channels (last-dimension) is the same.
Args:
x: Tensor with the last dimension equal to the number of filters.
Returns:
x: Tensor with the same shape as x normalized by the spectral norm.
assign_op: Op to be run after every step to update the vector "u".
### Response:
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.
  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.
  Args:
    x: Tensor with the last dimension equal to the number of filters.
  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  # Collapse all leading dimensions so the weight becomes a 2-D matrix
  # of shape (prod(other dims), num_filters).
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]
  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))
  # v = Wu / ||W u||
  # Persistent power-iteration vector "u"; AUTO_REUSE shares one variable
  # across calls in the same scope. NOTE(review): assumes the enclosing
  # variable scope is unique per weight tensor — confirm at call sites.
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))
  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))
  # s = v*W*u
  # One power-iteration step's estimate of the largest singular value.
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))
  # set u equal to u_new in the next iteration.
  # Caller is responsible for running assign_op each training step so the
  # power iteration converges over time.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
def _add_default_tz_bindings(self, context, switch, network_id):
"""Configure any additional default transport zone bindings."""
default_tz = CONF.NVP.default_tz
# If there is no default tz specified it's pointless to try
# and add any additional default tz bindings.
if not default_tz:
LOG.warn("additional_default_tz_types specified, "
"but no default_tz. Skipping "
"_add_default_tz_bindings().")
return
# This should never be called without a neutron network uuid,
# we require it to bind some segment allocations.
if not network_id:
LOG.warn("neutron network_id not specified, skipping "
"_add_default_tz_bindings()")
return
for net_type in CONF.NVP.additional_default_tz_types:
if net_type in TZ_BINDINGS:
binding = TZ_BINDINGS[net_type]
binding.add(context, switch, default_tz, network_id)
else:
LOG.warn("Unknown default tz type %s" % (net_type)) | Configure any additional default transport zone bindings. | Below is the the instruction that describes the task:
### Input:
Configure any additional default transport zone bindings.
### Response:
def _add_default_tz_bindings(self, context, switch, network_id):
    """Configure any additional default transport zone bindings."""
    default_tz = CONF.NVP.default_tz

    # Without a default transport zone there is nothing to bind against.
    if not default_tz:
        LOG.warn("additional_default_tz_types specified, "
                 "but no default_tz. Skipping "
                 "_add_default_tz_bindings().")
        return

    # Segment allocations are keyed on the neutron network uuid, so bail
    # out early when it is missing.
    if not network_id:
        LOG.warn("neutron network_id not specified, skipping "
                 "_add_default_tz_bindings()")
        return

    for net_type in CONF.NVP.additional_default_tz_types:
        binding = TZ_BINDINGS.get(net_type)
        if binding is None:
            LOG.warn("Unknown default tz type %s" % (net_type))
        else:
            binding.add(context, switch, default_tz, network_id)
def clone_placeholder(self, placeholder):
"""
Add a new placeholder shape based on *placeholder*.
"""
sp = placeholder.element
ph_type, orient, sz, idx = (
sp.ph_type, sp.ph_orient, sp.ph_sz, sp.ph_idx
)
id_ = self._next_shape_id
name = self._next_ph_name(ph_type, id_, orient)
self._spTree.add_placeholder(id_, name, ph_type, orient, sz, idx) | Add a new placeholder shape based on *placeholder*. | Below is the the instruction that describes the task:
### Input:
Add a new placeholder shape based on *placeholder*.
### Response:
def clone_placeholder(self, placeholder):
    """
    Add a new placeholder shape based on *placeholder*.
    """
    source_sp = placeholder.element
    ph_type, orient = source_sp.ph_type, source_sp.ph_orient
    sz, idx = source_sp.ph_sz, source_sp.ph_idx
    # Allocate a fresh shape id and derive the conventional name for it.
    shape_id = self._next_shape_id
    shape_name = self._next_ph_name(ph_type, shape_id, orient)
    self._spTree.add_placeholder(
        shape_id, shape_name, ph_type, orient, sz, idx)
def extract_email(text):
"""Extract email from text.
"""
result = list()
for tp in re.findall(_regex_extract_email, text.lower()):
for email in tp:
if re.match(_regex_validate_email, email):
result.append(email)
return result | Extract email from text. | Below is the the instruction that describes the task:
### Input:
Extract email from text.
### Response:
def extract_email(text):
    """Extract all e-mail addresses found in ``text``.
    """
    # findall with grouped patterns yields tuples; flatten them and keep
    # only candidates that pass the validation pattern.
    lowered = text.lower()
    return [
        candidate
        for group in re.findall(_regex_extract_email, lowered)
        for candidate in group
        if re.match(_regex_validate_email, candidate)
    ]
def validate_rpc_sha(repo_dir, commit):
"""Validate/update a SHA given for the rpc-openstack repo."""
# Is the commit valid? Just in case the commit is a
# PR ref, we try both the ref given and the ref prepended
# with the remote 'origin'.
try:
osa_differ.validate_commits(repo_dir, [commit])
except exceptions.InvalidCommitException:
log.debug("The reference {c} cannot be found. Prepending "
"origin remote and retrying.".format(c=commit))
commit = 'origin/' + commit
osa_differ.validate_commits(repo_dir, [commit])
return commit | Validate/update a SHA given for the rpc-openstack repo. | Below is the the instruction that describes the task:
### Input:
Validate/update a SHA given for the rpc-openstack repo.
### Response:
def validate_rpc_sha(repo_dir, commit):
    """Validate/update a SHA given for the rpc-openstack repo."""
    # The commit may be a PR ref that only exists on the remote, so try
    # the ref as given first and, on failure, retry with the 'origin/'
    # remote prefix prepended.
    try:
        osa_differ.validate_commits(repo_dir, [commit])
        return commit
    except exceptions.InvalidCommitException:
        log.debug("The reference {c} cannot be found. Prepending "
                  "origin remote and retrying.".format(c=commit))
    commit = 'origin/' + commit
    osa_differ.validate_commits(repo_dir, [commit])
    return commit
def parse_stats_file(self, file_name):
""" Read and parse given file_name, return config as a dictionary """
stats = {}
try:
with open(file_name, "r") as fhandle:
fbuffer = []
save_buffer = False
for line in fhandle:
line = line.rstrip("\n")
line = self._trim(line)
if line == "" or line.startswith("#"):
continue
elif line.endswith("{"):
save_buffer = True
fbuffer.append(line)
continue
elif line.endswith("}"):
tmp_dict = self._parse_config_buffer(fbuffer)
fbuffer = None
fbuffer = list()
if len(tmp_dict) < 1:
continue
if tmp_dict["_type"] == "info":
stats["info"] = tmp_dict
elif tmp_dict["_type"] == "programstatus":
stats["programstatus"] = tmp_dict
else:
entity_type = tmp_dict["_type"]
if entity_type not in stats.keys():
stats[entity_type] = []
stats[entity_type].append(tmp_dict)
continue
elif save_buffer is True:
fbuffer.append(line)
except Exception as exception:
self.log.info("Caught exception: %s", exception)
return stats | Read and parse given file_name, return config as a dictionary | Below is the the instruction that describes the task:
### Input:
Read and parse given file_name, return config as a dictionary
### Response:
def parse_stats_file(self, file_name):
    """Read and parse given file_name, return config as a dictionary.

    The file is expected to contain ``name { ... }`` style blocks; each
    completed block is parsed with ``self._parse_config_buffer``.  The
    ``info`` and ``programstatus`` block types are stored as single
    entries, every other block type is accumulated in a list keyed by
    its ``_type``.

    :param file_name: path of the stats file to parse
    :return: parsed stats as a dictionary (empty or partial on error)
    """
    stats = {}
    try:
        with open(file_name, "r") as fhandle:
            fbuffer = []
            save_buffer = False
            for line in fhandle:
                line = self._trim(line.rstrip("\n"))
                if line == "" or line.startswith("#"):
                    continue
                if line.endswith("{"):
                    # Start of a block: capture lines from here on.
                    save_buffer = True
                    fbuffer.append(line)
                elif line.endswith("}"):
                    # End of a block: parse what was collected.
                    # (Was `fbuffer = None` then `fbuffer = list()` in the
                    # original — the None assignment was dead code.)
                    tmp_dict = self._parse_config_buffer(fbuffer)
                    fbuffer = []
                    if not tmp_dict:
                        continue
                    entity_type = tmp_dict["_type"]
                    if entity_type in ("info", "programstatus"):
                        stats[entity_type] = tmp_dict
                    else:
                        stats.setdefault(entity_type, []).append(tmp_dict)
                elif save_buffer:
                    fbuffer.append(line)
    except Exception as exception:
        # Deliberately best-effort: any I/O or parse error is logged and
        # whatever was collected so far is returned instead of raised.
        self.log.info("Caught exception: %s", exception)
    return stats
def shakemap_contour(shakemap_layer_path, output_file_path='', active_band=1):
"""Creating contour from a shakemap layer.
:param shakemap_layer_path: The shake map raster layer path.
:type shakemap_layer_path: basestring
:param output_file_path: The path where the contour will be saved.
:type output_file_path: basestring
:param active_band: The band which the data located, default to 1.
:type active_band: int
:returns: The contour of the shake map layer path.
:rtype: basestring
"""
# Set output path
if not output_file_path:
output_file_path = unique_filename(suffix='.shp', dir=temp_dir())
output_directory = os.path.dirname(output_file_path)
output_file_name = os.path.basename(output_file_path)
output_base_name = os.path.splitext(output_file_name)[0]
# Based largely on
# http://svn.osgeo.org/gdal/trunk/autotest/alg/contour.py
driver = ogr.GetDriverByName('ESRI Shapefile')
ogr_dataset = driver.CreateDataSource(output_file_path)
if ogr_dataset is None:
# Probably the file existed and could not be overriden
raise ContourCreationError(
'Could not create datasource for:\n%s. Check that the file '
'does not already exist and that you do not have file system '
'permissions issues' % output_file_path)
layer = ogr_dataset.CreateLayer('contour')
for contour_field in contour_fields:
field_definition = create_ogr_field_from_definition(contour_field)
layer.CreateField(field_definition)
shakemap_data = gdal.Open(shakemap_layer_path, GA_ReadOnly)
# see http://gdal.org/java/org/gdal/gdal/gdal.html for these options
contour_interval = 0.5
contour_base = 0
fixed_level_list = []
use_no_data_flag = 0
no_data_value = -9999
id_field = 0 # first field defined above
elevation_field = 1 # second (MMI) field defined above
try:
gdal.ContourGenerate(
shakemap_data.GetRasterBand(active_band),
contour_interval,
contour_base,
fixed_level_list,
use_no_data_flag,
no_data_value,
layer,
id_field,
elevation_field)
except Exception as e:
LOGGER.exception('Contour creation failed')
raise ContourCreationError(str(e))
finally:
ogr_dataset.Release()
# Copy over the standard .prj file since ContourGenerate does not
# create a projection definition
projection_path = os.path.join(
output_directory, output_base_name + '.prj')
source_projection_path = resources_path(
'converter_data', 'mmi-contours.prj')
shutil.copyfile(source_projection_path, projection_path)
# Lastly copy over the standard qml (QGIS Style file)
qml_path = os.path.join(
output_directory, output_base_name + '.qml')
source_qml_path = resources_path('converter_data', 'mmi-contours.qml')
shutil.copyfile(source_qml_path, qml_path)
# Create metadata file
create_contour_metadata(output_file_path)
# Now update the additional columns - X,Y, ROMAN and RGB
try:
set_contour_properties(output_file_path)
except InvalidLayerError:
raise
del shakemap_data
return output_file_path | Creating contour from a shakemap layer.
:param shakemap_layer_path: The shake map raster layer path.
:type shakemap_layer_path: basestring
:param output_file_path: The path where the contour will be saved.
:type output_file_path: basestring
:param active_band: The band which the data located, default to 1.
:type active_band: int
:returns: The contour of the shake map layer path.
:rtype: basestring | Below is the the instruction that describes the task:
### Input:
Creating contour from a shakemap layer.
:param shakemap_layer_path: The shake map raster layer path.
:type shakemap_layer_path: basestring
:param output_file_path: The path where the contour will be saved.
:type output_file_path: basestring
:param active_band: The band which the data located, default to 1.
:type active_band: int
:returns: The contour of the shake map layer path.
:rtype: basestring
### Response:
def shakemap_contour(shakemap_layer_path, output_file_path='', active_band=1):
    """Creating contour from a shakemap layer.
    :param shakemap_layer_path: The shake map raster layer path.
    :type shakemap_layer_path: basestring
    :param output_file_path: The path where the contour will be saved.
    :type output_file_path: basestring
    :param active_band: The band which the data located, default to 1.
    :type active_band: int
    :returns: The contour of the shake map layer path.
    :rtype: basestring
    """
    # Set output path
    if not output_file_path:
        output_file_path = unique_filename(suffix='.shp', dir=temp_dir())
    output_directory = os.path.dirname(output_file_path)
    output_file_name = os.path.basename(output_file_path)
    output_base_name = os.path.splitext(output_file_name)[0]
    # Based largely on
    # http://svn.osgeo.org/gdal/trunk/autotest/alg/contour.py
    driver = ogr.GetDriverByName('ESRI Shapefile')
    ogr_dataset = driver.CreateDataSource(output_file_path)
    if ogr_dataset is None:
        # Probably the file existed and could not be overriden
        raise ContourCreationError(
            'Could not create datasource for:\n%s. Check that the file '
            'does not already exist and that you do not have file system '
            'permissions issues' % output_file_path)
    layer = ogr_dataset.CreateLayer('contour')
    # Create the attribute schema for the contour layer up front;
    # ContourGenerate below writes into fields 0 (id) and 1 (elevation).
    for contour_field in contour_fields:
        field_definition = create_ogr_field_from_definition(contour_field)
        layer.CreateField(field_definition)
    shakemap_data = gdal.Open(shakemap_layer_path, GA_ReadOnly)
    # see http://gdal.org/java/org/gdal/gdal/gdal.html for these options
    # NOTE(review): interval 0.5 / base 0 appear tuned for MMI values —
    # confirm before reusing for other quantities.
    contour_interval = 0.5
    contour_base = 0
    fixed_level_list = []
    use_no_data_flag = 0
    no_data_value = -9999
    id_field = 0 # first field defined above
    elevation_field = 1 # second (MMI) field defined above
    try:
        gdal.ContourGenerate(
            shakemap_data.GetRasterBand(active_band),
            contour_interval,
            contour_base,
            fixed_level_list,
            use_no_data_flag,
            no_data_value,
            layer,
            id_field,
            elevation_field)
    except Exception as e:
        LOGGER.exception('Contour creation failed')
        raise ContourCreationError(str(e))
    finally:
        # Release flushes and closes the OGR datasource whether or not
        # contour generation succeeded.
        ogr_dataset.Release()
    # Copy over the standard .prj file since ContourGenerate does not
    # create a projection definition
    projection_path = os.path.join(
        output_directory, output_base_name + '.prj')
    source_projection_path = resources_path(
        'converter_data', 'mmi-contours.prj')
    shutil.copyfile(source_projection_path, projection_path)
    # Lastly copy over the standard qml (QGIS Style file)
    qml_path = os.path.join(
        output_directory, output_base_name + '.qml')
    source_qml_path = resources_path('converter_data', 'mmi-contours.qml')
    shutil.copyfile(source_qml_path, qml_path)
    # Create metadata file
    create_contour_metadata(output_file_path)
    # Now update the additional columns - X,Y, ROMAN and RGB
    try:
        set_contour_properties(output_file_path)
    except InvalidLayerError:
        # NOTE(review): this re-raise is a no-op wrapper — presumably kept
        # as a hook/documentation point; confirm before removing.
        raise
    # Dropping the reference closes the GDAL dataset.
    del shakemap_data
    return output_file_path
def callback(status, message, job, result, exception, stacktrace):
"""Example callback function.
:param status: Job status. Possible values are "invalid" (job could not be
deserialized or was malformed), "failure" (job raised an exception),
"timeout" (job timed out), or "success" (job finished successfully and
returned a result).
:type status: str
:param message: Kafka message.
:type message: kq.Message
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param result: Job result, or None if an exception was raised.
:type result: object | None
:param exception: Exception raised by job, or None if there was none.
:type exception: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None
"""
assert status in ['invalid', 'success', 'timeout', 'failure']
assert isinstance(message, Message)
if status == 'invalid':
assert job is None
assert result is None
assert exception is None
assert stacktrace is None
if status == 'success':
assert isinstance(job, Job)
assert exception is None
assert stacktrace is None
elif status == 'timeout':
assert isinstance(job, Job)
assert result is None
assert exception is None
assert stacktrace is None
elif status == 'failure':
assert isinstance(job, Job)
assert result is None
assert exception is not None
assert stacktrace is not None | Example callback function.
:param status: Job status. Possible values are "invalid" (job could not be
deserialized or was malformed), "failure" (job raised an exception),
"timeout" (job timed out), or "success" (job finished successfully and
returned a result).
:type status: str
:param message: Kafka message.
:type message: kq.Message
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param result: Job result, or None if an exception was raised.
:type result: object | None
:param exception: Exception raised by job, or None if there was none.
:type exception: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None | Below is the the instruction that describes the task:
### Input:
Example callback function.
:param status: Job status. Possible values are "invalid" (job could not be
deserialized or was malformed), "failure" (job raised an exception),
"timeout" (job timed out), or "success" (job finished successfully and
returned a result).
:type status: str
:param message: Kafka message.
:type message: kq.Message
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param result: Job result, or None if an exception was raised.
:type result: object | None
:param exception: Exception raised by job, or None if there was none.
:type exception: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None
### Response:
def callback(status, message, job, result, exception, stacktrace):
"""Example callback function.
:param status: Job status. Possible values are "invalid" (job could not be
deserialized or was malformed), "failure" (job raised an exception),
"timeout" (job timed out), or "success" (job finished successfully and
returned a result).
:type status: str
:param message: Kafka message.
:type message: kq.Message
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param result: Job result, or None if an exception was raised.
:type result: object | None
:param exception: Exception raised by job, or None if there was none.
:type exception: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None
"""
assert status in ['invalid', 'success', 'timeout', 'failure']
assert isinstance(message, Message)
if status == 'invalid':
assert job is None
assert result is None
assert exception is None
assert stacktrace is None
if status == 'success':
assert isinstance(job, Job)
assert exception is None
assert stacktrace is None
elif status == 'timeout':
assert isinstance(job, Job)
assert result is None
assert exception is None
assert stacktrace is None
elif status == 'failure':
assert isinstance(job, Job)
assert result is None
assert exception is not None
assert stacktrace is not None |
def cmd_link(self, args):
'''handle link commands'''
if len(args) < 1:
self.show_link()
elif args[0] == "list":
self.cmd_link_list()
elif args[0] == "add":
if len(args) != 2:
print("Usage: link add LINK")
return
self.cmd_link_add(args[1:])
elif args[0] == "ports":
self.cmd_link_ports()
elif args[0] == "remove":
if len(args) != 2:
print("Usage: link remove LINK")
return
self.cmd_link_remove(args[1:])
else:
print("usage: link <list|add|remove>") | handle link commands | Below is the the instruction that describes the task:
### Input:
handle link commands
### Response:
def cmd_link(self, args):
'''handle link commands'''
if len(args) < 1:
self.show_link()
elif args[0] == "list":
self.cmd_link_list()
elif args[0] == "add":
if len(args) != 2:
print("Usage: link add LINK")
return
self.cmd_link_add(args[1:])
elif args[0] == "ports":
self.cmd_link_ports()
elif args[0] == "remove":
if len(args) != 2:
print("Usage: link remove LINK")
return
self.cmd_link_remove(args[1:])
else:
print("usage: link <list|add|remove>") |
def date_time(name=None):
"""
Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T).
This field requires first a Date, and then a Time, without any space in
between.
:param name: name for the field
:return: grammar for a Date and Time field
"""
if name is None:
name = 'Date and Time Field'
date = basic.date('Date')
time = basic.time('Time')
date = date.setResultsName('date')
time = time.setResultsName('time')
field = pp.Group(date + time)
field.setParseAction(lambda d: _combine_date_time(d[0]))
field.setName(name)
return field.setResultsName('date_time') | Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T).
This field requires first a Date, and then a Time, without any space in
between.
:param name: name for the field
:return: grammar for a Date and Time field | Below is the the instruction that describes the task:
### Input:
Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T).
This field requires first a Date, and then a Time, without any space in
between.
:param name: name for the field
:return: grammar for a Date and Time field
### Response:
def date_time(name=None):
"""
Creates the grammar for a date and time field, which is a combination of
the Date (D) and Time or Duration field (T).
This field requires first a Date, and then a Time, without any space in
between.
:param name: name for the field
:return: grammar for a Date and Time field
"""
if name is None:
name = 'Date and Time Field'
date = basic.date('Date')
time = basic.time('Time')
date = date.setResultsName('date')
time = time.setResultsName('time')
field = pp.Group(date + time)
field.setParseAction(lambda d: _combine_date_time(d[0]))
field.setName(name)
return field.setResultsName('date_time') |
def add(self, name, value, unit=None):
"""Add symbolic link to Dynamic Number list.
name -- name of the symbolic link
value -- value of the link (if not a string, conversion is done with str())
unit -- if value is a numerical value, a unit can be added to invoke the \unit{}{} LaTeX command
"""
# check if unit provided
if unit is not None:
add_unit = True
unit = str(unit)
else:
add_unit = False
# convert value to string
value = str(value)
# write to file
f = open(self.file_dir, 'a')
if add_unit:
f.write("\\pgfkeys{dynamicnumber/%s/%s = \unit{%s}{%s}}\n" % (self.name, name, value, unit))
else:
f.write("\\pgfkeys{dynamicnumber/%s/%s = %s}\n" % (self.name, name, value))
f.close() | Add symbolic link to Dynamic Number list.
name -- name of the symbolic link
value -- value of the link (if not a string, conversion is done with str())
unit -- if value is a numerical value, a unit can be added to invoke the \unit{}{} LaTeX command | Below is the the instruction that describes the task:
### Input:
Add symbolic link to Dynamic Number list.
name -- name of the symbolic link
value -- value of the link (if not a string, conversion is done with str())
unit -- if value is a numerical value, a unit can be added to invoke the \unit{}{} LaTeX command
### Response:
def add(self, name, value, unit=None):
"""Add symbolic link to Dynamic Number list.
name -- name of the symbolic link
value -- value of the link (if not a string, conversion is done with str())
unit -- if value is a numerical value, a unit can be added to invoke the \unit{}{} LaTeX command
"""
# check if unit provided
if unit is not None:
add_unit = True
unit = str(unit)
else:
add_unit = False
# convert value to string
value = str(value)
# write to file
f = open(self.file_dir, 'a')
if add_unit:
f.write("\\pgfkeys{dynamicnumber/%s/%s = \unit{%s}{%s}}\n" % (self.name, name, value, unit))
else:
f.write("\\pgfkeys{dynamicnumber/%s/%s = %s}\n" % (self.name, name, value))
f.close() |
def _connect_to_rabbitmq(self):
"""Connect to RabbitMQ and assign a local attribute"""
global pending_rabbitmq_connection, rabbitmq_connection
if not rabbitmq_connection:
LOGGER.info('Creating a new RabbitMQ connection')
pending_rabbitmq_connection = self._new_rabbitmq_connection() | Connect to RabbitMQ and assign a local attribute | Below is the the instruction that describes the task:
### Input:
Connect to RabbitMQ and assign a local attribute
### Response:
def _connect_to_rabbitmq(self):
"""Connect to RabbitMQ and assign a local attribute"""
global pending_rabbitmq_connection, rabbitmq_connection
if not rabbitmq_connection:
LOGGER.info('Creating a new RabbitMQ connection')
pending_rabbitmq_connection = self._new_rabbitmq_connection() |
def __get_data_citation(self, l):
"""
If originalDataURL / investigators not in root data, check for a dataCitation pub entry.
:return:
"""
# loop once for each publication entry
for pub in l:
try:
# at the moment, these are the only keys of interest inside of dataCitation. Check each.
for key in ["url", "investigators"]:
if pub["type"] == "dataCitation" and key in pub:
noaa_key = self.__get_noaa_key(key)
self.data_citation[noaa_key] = pub[key]
except KeyError:
# no "type" key in pub
logger_lpd_noaa.info("lpd_noaa: get_data_citation: KeyError: pub is missing 'type'")
return | If originalDataURL / investigators not in root data, check for a dataCitation pub entry.
:return: | Below is the the instruction that describes the task:
### Input:
If originalDataURL / investigators not in root data, check for a dataCitation pub entry.
:return:
### Response:
def __get_data_citation(self, l):
"""
If originalDataURL / investigators not in root data, check for a dataCitation pub entry.
:return:
"""
# loop once for each publication entry
for pub in l:
try:
# at the moment, these are the only keys of interest inside of dataCitation. Check each.
for key in ["url", "investigators"]:
if pub["type"] == "dataCitation" and key in pub:
noaa_key = self.__get_noaa_key(key)
self.data_citation[noaa_key] = pub[key]
except KeyError:
# no "type" key in pub
logger_lpd_noaa.info("lpd_noaa: get_data_citation: KeyError: pub is missing 'type'")
return |
def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine)) | Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default. | Below is the the instruction that describes the task:
### Input:
Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
### Response:
def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
Parameters
----------
breadth_first : bool
Should vertices be placed in breadth first order rather than the
iteration order of vertices_resources. True by default.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine)) |
def _resolve_fallback(self, gates, n_qubits):
"""Resolve fallbacks and flatten gates."""
flattened = []
for g in gates:
if self._has_action(g):
flattened.append(g)
else:
flattened += self._resolve_fallback(g.fallback(n_qubits), n_qubits)
return flattened | Resolve fallbacks and flatten gates. | Below is the the instruction that describes the task:
### Input:
Resolve fallbacks and flatten gates.
### Response:
def _resolve_fallback(self, gates, n_qubits):
"""Resolve fallbacks and flatten gates."""
flattened = []
for g in gates:
if self._has_action(g):
flattened.append(g)
else:
flattened += self._resolve_fallback(g.fallback(n_qubits), n_qubits)
return flattened |
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
'''Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
return action | Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`. | Below is the the instruction that describes the task:
### Input:
Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
### Response:
def handle_intermediate_response(self, item_session: ItemSession) -> Actions:
'''Callback for successful intermediate responses.
Returns:
A value from :class:`.hook.Actions`.
'''
self._waiter.reset()
action = self.handle_response(item_session)
return action |
def _eq(field, value, document):
"""
Returns True if the value of a document field is equal to a given value
"""
try:
return document.get(field, None) == value
except TypeError: # pragma: no cover Python < 3.0
return False | Returns True if the value of a document field is equal to a given value | Below is the the instruction that describes the task:
### Input:
Returns True if the value of a document field is equal to a given value
### Response:
def _eq(field, value, document):
"""
Returns True if the value of a document field is equal to a given value
"""
try:
return document.get(field, None) == value
except TypeError: # pragma: no cover Python < 3.0
return False |
def createSite(self, username, password, fullname,
email, description, securityQuestionIdx,
secuirtyQuestionAns, contentDir
):
"""
The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation. Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as the
initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve
a forgotten password
securityQuestionAns - The answer to the secret question
contentDir - The path to the location of the site's content
"""
params = {
"username" : username,
"password" : password,
"fullname" : fullname,
"email" : email,
"description" : description,
"secuirtyQuestionAns" : secuirtyQuestionAns,
"securityQuestionIdx" : securityQuestionIdx,
"contentDir" : contentDir
}
url = self._url + "/createNewSite"
return self._get(url=url,
param_dict=params) | The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation. Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as the
initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve
a forgotten password
securityQuestionAns - The answer to the secret question
contentDir - The path to the location of the site's content | Below is the the instruction that describes the task:
### Input:
The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation. Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as the
initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve
a forgotten password
securityQuestionAns - The answer to the secret question
contentDir - The path to the location of the site's content
### Response:
def createSite(self, username, password, fullname,
email, description, securityQuestionIdx,
secuirtyQuestionAns, contentDir
):
"""
The create site operation initializes and configures Portal for
ArcGIS for use. It must be the first operation invoked after
installation. Creating a new site involves:
Creating the initial administrator account
Creating a new database administrator account (which is same as the
initial administrator account)
Creating token shared keys
Registering directories
This operation is time consuming, as the database is initialized
and populated with default templates and content. If the database
directory is not empty, this operation attempts to migrate the
database to the current version while keeping its data intact. At
the end of this operation, the web server that hosts the API is
restarted.
Inputs:
username - The initial administrator account name
password - The password for the initial administrator account
fullname - The full name for the initial administrator account
email - The account email address
description - An optional description for the account
securityQuestionIdx - The index of the secret question to retrieve
a forgotten password
securityQuestionAns - The answer to the secret question
contentDir - The path to the location of the site's content
"""
params = {
"username" : username,
"password" : password,
"fullname" : fullname,
"email" : email,
"description" : description,
"secuirtyQuestionAns" : secuirtyQuestionAns,
"securityQuestionIdx" : securityQuestionIdx,
"contentDir" : contentDir
}
url = self._url + "/createNewSite"
return self._get(url=url,
param_dict=params) |
def fastaParserSpectraClusterPy(header):
"""Custom parser for fasta headers adapted from
https://github.com/spectra-cluster/spectra-cluster-py
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header
"""
isUniprot = lambda h: h[0:3] in ['sp|', 'tr|', 'up|']
if isUniprot(header):
start = 3
end = header.find('|', start)
else:
start = 0
breakPositions = [header.find(' '), header.find('|')]
breakPositions = [i if i > 0 else len(header) for i in breakPositions]
end = min(breakPositions)
return {'id': header[start:end]} | Custom parser for fasta headers adapted from
https://github.com/spectra-cluster/spectra-cluster-py
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header | Below is the the instruction that describes the task:
### Input:
Custom parser for fasta headers adapted from
https://github.com/spectra-cluster/spectra-cluster-py
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header
### Response:
def fastaParserSpectraClusterPy(header):
"""Custom parser for fasta headers adapted from
https://github.com/spectra-cluster/spectra-cluster-py
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header
"""
isUniprot = lambda h: h[0:3] in ['sp|', 'tr|', 'up|']
if isUniprot(header):
start = 3
end = header.find('|', start)
else:
start = 0
breakPositions = [header.find(' '), header.find('|')]
breakPositions = [i if i > 0 else len(header) for i in breakPositions]
end = min(breakPositions)
return {'id': header[start:end]} |
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs) | Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. | Below is the the instruction that describes the task:
### Input:
Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie.
### Response:
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
parameters as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs) |
def _matrix_add_column(matrix, column, default=0):
"""Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
"""
height_difference = len(column) - len(matrix)
# The width of the matrix is the length of its longest row.
width = max(len(row) for row in matrix) if matrix else 0
# For now our offset is 0. We may need to shift our column down later.
offset = 0
# If we need extra rows, add them to the top of the matrix.
if height_difference > 0:
for _ in range(height_difference):
matrix.insert(0, [default] * width)
# If the column is shorter, we'll need to shift it down.
if height_difference < 0:
offset = -height_difference
#column = ([default] * offset) + column
for index, value in enumerate(column):
# The row index is the index in the column plus our offset.
row_index = index + offset
row = matrix[row_index]
# If this row is short, pad it with default values.
width_difference = width - len(row)
row.extend([default] * width_difference)
row.append(value) | Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary. | Below is the the instruction that describes the task:
### Input:
Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
### Response:
def _matrix_add_column(matrix, column, default=0):
"""Given a matrix as a list of lists, add a column to the right, filling in
with a default value if necessary.
"""
height_difference = len(column) - len(matrix)
# The width of the matrix is the length of its longest row.
width = max(len(row) for row in matrix) if matrix else 0
# For now our offset is 0. We may need to shift our column down later.
offset = 0
# If we need extra rows, add them to the top of the matrix.
if height_difference > 0:
for _ in range(height_difference):
matrix.insert(0, [default] * width)
# If the column is shorter, we'll need to shift it down.
if height_difference < 0:
offset = -height_difference
#column = ([default] * offset) + column
for index, value in enumerate(column):
# The row index is the index in the column plus our offset.
row_index = index + offset
row = matrix[row_index]
# If this row is short, pad it with default values.
width_difference = width - len(row)
row.extend([default] * width_difference)
row.append(value) |
def removeUserFromGroup(self, user_id, thread_id=None):
"""
Removes users from a group.
:param user_id: User ID to remove
:param thread_id: Group ID to remove people from. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {"uid": user_id, "tid": thread_id}
j = self._post(self.req_url.REMOVE_USER, data, fix_request=True, as_json=True) | Removes users from a group.
:param user_id: User ID to remove
:param thread_id: Group ID to remove people from. See :ref:`intro_threads`
:raises: FBchatException if request failed | Below is the the instruction that describes the task:
### Input:
Removes users from a group.
:param user_id: User ID to remove
:param thread_id: Group ID to remove people from. See :ref:`intro_threads`
:raises: FBchatException if request failed
### Response:
def removeUserFromGroup(self, user_id, thread_id=None):
"""
Removes users from a group.
:param user_id: User ID to remove
:param thread_id: Group ID to remove people from. See :ref:`intro_threads`
:raises: FBchatException if request failed
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {"uid": user_id, "tid": thread_id}
j = self._post(self.req_url.REMOVE_USER, data, fix_request=True, as_json=True) |
def compute( self ):
# compute the intersection graph into @xsections dictionary
wordlist = self.wordlist
""" build a dictionary of words, and their intersections """
xsections = {}
for i in range(len(wordlist)):
word_i = wordlist[i]
for j in range(len(wordlist)):
word_j = wordlist[j]
if i == j:
# force self-intersection to be 0
if not xsections.get(word_i,None):
xsections[word_i] = ['']
else:
xsections[word_i].extend([''])
continue
# optimize for, i > j, info is calculated already
if i > j:
xsec_counts = xsections[word_j][i]
else:
xsec_counts = tamil.utf8.word_intersection( word_i, word_j )
if not xsections.get(word_i,None):
xsections[word_i] = [xsec_counts]
else:
xsections[word_i].extend( [ xsec_counts ] )
self.xsections = xsections | build a dictionary of words, and their intersections | Below is the the instruction that describes the task:
### Input:
build a dictionary of words, and their intersections
### Response:
def compute( self ):
# compute the intersection graph into @xsections dictionary
wordlist = self.wordlist
""" build a dictionary of words, and their intersections """
xsections = {}
for i in range(len(wordlist)):
word_i = wordlist[i]
for j in range(len(wordlist)):
word_j = wordlist[j]
if i == j:
# force self-intersection to be 0
if not xsections.get(word_i,None):
xsections[word_i] = ['']
else:
xsections[word_i].extend([''])
continue
# optimize for, i > j, info is calculated already
if i > j:
xsec_counts = xsections[word_j][i]
else:
xsec_counts = tamil.utf8.word_intersection( word_i, word_j )
if not xsections.get(word_i,None):
xsections[word_i] = [xsec_counts]
else:
xsections[word_i].extend( [ xsec_counts ] )
self.xsections = xsections |
def get_birthday(code):
"""``get_birthday(code) -> string``
Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY.
Unfortunately it's not possible to guess the four digit birth year, given
that the Italian fiscal code uses only the last two digits (1983 -> 83).
Therefore, this function returns a string and not a datetime object.
eg: birthday('RCCMNL83S18D969H') -> 18-11-83
"""
assert isvalid(code)
day = int(code[9:11])
day = day < 32 and day or day - 40
month = MONTHSCODE.index(code[8]) + 1
year = int(code[6:8])
return "%02d-%02d-%02d" % (day, month, year) | ``get_birthday(code) -> string``
Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY.
Unfortunately it's not possible to guess the four digit birth year, given
that the Italian fiscal code uses only the last two digits (1983 -> 83).
Therefore, this function returns a string and not a datetime object.
eg: birthday('RCCMNL83S18D969H') -> 18-11-83 | Below is the the instruction that describes the task:
### Input:
``get_birthday(code) -> string``
Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY.
Unfortunately it's not possible to guess the four digit birth year, given
that the Italian fiscal code uses only the last two digits (1983 -> 83).
Therefore, this function returns a string and not a datetime object.
eg: birthday('RCCMNL83S18D969H') -> 18-11-83
### Response:
def get_birthday(code):
"""``get_birthday(code) -> string``
Birthday of the person whose fiscal code is 'code', in the format DD-MM-YY.
Unfortunately it's not possible to guess the four digit birth year, given
that the Italian fiscal code uses only the last two digits (1983 -> 83).
Therefore, this function returns a string and not a datetime object.
eg: birthday('RCCMNL83S18D969H') -> 18-11-83
"""
assert isvalid(code)
day = int(code[9:11])
day = day < 32 and day or day - 40
month = MONTHSCODE.index(code[8]) + 1
year = int(code[6:8])
return "%02d-%02d-%02d" % (day, month, year) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.