code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def match_any_char(self, chars, offset=0):
"""Match and return the current SourceString char if its in chars."""
if not self.has_space(offset=offset):
return ''
current = self.string[self.pos + offset]
return current if current in chars else '' | Match and return the current SourceString char if its in chars. | Below is the the instruction that describes the task:
### Input:
Match and return the current SourceString char if its in chars.
### Response:
def match_any_char(self, chars, offset=0):
"""Match and return the current SourceString char if its in chars."""
if not self.has_space(offset=offset):
return ''
current = self.string[self.pos + offset]
return current if current in chars else '' |
def get_state_batch(keys, namespace=None, consistent=True):
"""Get a batch of items from the state store."""
ukeys = set(keys)
if namespace:
ns_keys = ["{}:{}".format(namespace, key) for key in ukeys]
uvalues = {k: v for k, v
in zip(ukeys, get_item_batch(ns_keys, consistent=consistent))}
return list(zip(keys, (uvalues[k] for k in keys))) | Get a batch of items from the state store. | Below is the the instruction that describes the task:
### Input:
Get a batch of items from the state store.
### Response:
def get_state_batch(keys, namespace=None, consistent=True):
"""Get a batch of items from the state store."""
ukeys = set(keys)
if namespace:
ns_keys = ["{}:{}".format(namespace, key) for key in ukeys]
uvalues = {k: v for k, v
in zip(ukeys, get_item_batch(ns_keys, consistent=consistent))}
return list(zip(keys, (uvalues[k] for k in keys))) |
def check(self, check_all=False):
"""Check whether some modules need to be reloaded."""
if not self.enabled and not check_all:
return
if check_all or self.check_all:
modules = sys.modules.keys()
else:
modules = self.modules.keys()
for modname in modules:
m = sys.modules.get(modname, None)
if modname in self.skip_modules:
continue
if not hasattr(m, '__file__'):
continue
if m.__name__ == '__main__':
# we cannot reload(__main__)
continue
filename = m.__file__
path, ext = os.path.splitext(filename)
if ext.lower() == '.py':
ext = PY_COMPILED_EXT
pyc_filename = pyfile.cache_from_source(filename)
py_filename = filename
else:
pyc_filename = filename
try:
py_filename = pyfile.source_from_cache(filename)
except ValueError:
continue
try:
pymtime = os.stat(py_filename).st_mtime
if pymtime <= os.stat(pyc_filename).st_mtime:
continue
if self.failed.get(py_filename, None) == pymtime:
continue
except OSError:
continue
try:
superreload(m, reload, self.old_objects)
if py_filename in self.failed:
del self.failed[py_filename]
except:
print >> sys.stderr, "[autoreload of %s failed: %s]" % (
modname, traceback.format_exc(1))
self.failed[py_filename] = pymtime | Check whether some modules need to be reloaded. | Below is the the instruction that describes the task:
### Input:
Check whether some modules need to be reloaded.
### Response:
def check(self, check_all=False):
"""Check whether some modules need to be reloaded."""
if not self.enabled and not check_all:
return
if check_all or self.check_all:
modules = sys.modules.keys()
else:
modules = self.modules.keys()
for modname in modules:
m = sys.modules.get(modname, None)
if modname in self.skip_modules:
continue
if not hasattr(m, '__file__'):
continue
if m.__name__ == '__main__':
# we cannot reload(__main__)
continue
filename = m.__file__
path, ext = os.path.splitext(filename)
if ext.lower() == '.py':
ext = PY_COMPILED_EXT
pyc_filename = pyfile.cache_from_source(filename)
py_filename = filename
else:
pyc_filename = filename
try:
py_filename = pyfile.source_from_cache(filename)
except ValueError:
continue
try:
pymtime = os.stat(py_filename).st_mtime
if pymtime <= os.stat(pyc_filename).st_mtime:
continue
if self.failed.get(py_filename, None) == pymtime:
continue
except OSError:
continue
try:
superreload(m, reload, self.old_objects)
if py_filename in self.failed:
del self.failed[py_filename]
except:
print >> sys.stderr, "[autoreload of %s failed: %s]" % (
modname, traceback.format_exc(1))
self.failed[py_filename] = pymtime |
def _list_remote(store, maildir, verbose=False):
"""List the a maildir.
store is an abstract representation of the source maildir.
maildir is the local maildir to which mail will be pulled.
This is a generator for a reason. Because of the way ssh
multi-mastering works a single open TCP connection allows multiple
virtual ssh connections. So the encryption and tcp only has to be
done once.
If this command returned a list then the ssh list command would
have finished and the ssh connection for each message would have
to be made again.
"""
# This command produces a list of all files in the maildir like:
# base-filename timestamp container-directory
command = """echo {maildir}/{{cur,new}} | tr ' ' '\\n' | while read path ; do ls -1Ugo --time-style=+%s $path | sed -rne "s|[a-zA-Z-]+[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+([0-9]+)[ \t]+([0-9]+\\.[A-Za-z0-9]+)(\\.([.A-Za-z0-9-]+))*(:[2],([PRSTDF]*))*|\\2 \\1 $path|p";done"""
stdout = store.cmd(command, verbose)
lines = stdout.split("\n")
for line in lines:
parts = line.split(" ")
if len(parts) >= 3:
yield parts[0:3] | List the a maildir.
store is an abstract representation of the source maildir.
maildir is the local maildir to which mail will be pulled.
This is a generator for a reason. Because of the way ssh
multi-mastering works a single open TCP connection allows multiple
virtual ssh connections. So the encryption and tcp only has to be
done once.
If this command returned a list then the ssh list command would
have finished and the ssh connection for each message would have
to be made again. | Below is the the instruction that describes the task:
### Input:
List the a maildir.
store is an abstract representation of the source maildir.
maildir is the local maildir to which mail will be pulled.
This is a generator for a reason. Because of the way ssh
multi-mastering works a single open TCP connection allows multiple
virtual ssh connections. So the encryption and tcp only has to be
done once.
If this command returned a list then the ssh list command would
have finished and the ssh connection for each message would have
to be made again.
### Response:
def _list_remote(store, maildir, verbose=False):
"""List the a maildir.
store is an abstract representation of the source maildir.
maildir is the local maildir to which mail will be pulled.
This is a generator for a reason. Because of the way ssh
multi-mastering works a single open TCP connection allows multiple
virtual ssh connections. So the encryption and tcp only has to be
done once.
If this command returned a list then the ssh list command would
have finished and the ssh connection for each message would have
to be made again.
"""
# This command produces a list of all files in the maildir like:
# base-filename timestamp container-directory
command = """echo {maildir}/{{cur,new}} | tr ' ' '\\n' | while read path ; do ls -1Ugo --time-style=+%s $path | sed -rne "s|[a-zA-Z-]+[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+([0-9]+)[ \t]+([0-9]+\\.[A-Za-z0-9]+)(\\.([.A-Za-z0-9-]+))*(:[2],([PRSTDF]*))*|\\2 \\1 $path|p";done"""
stdout = store.cmd(command, verbose)
lines = stdout.split("\n")
for line in lines:
parts = line.split(" ")
if len(parts) >= 3:
yield parts[0:3] |
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, str) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isna(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return maybe_downcast_to_dtype(result, dtype) | try to cast the result to our original type, we may have
roundtripped thru object in the mean-time | Below is the the instruction that describes the task:
### Input:
try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
### Response:
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type, we may have
roundtripped thru object in the mean-time
"""
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, str) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isna(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return maybe_downcast_to_dtype(result, dtype) |
def close_shell(self, shell_id):
"""
Close the shell
@param string shell_id: The shell id on the remote machine.
See #open_shell
@returns This should have more error checking but it just returns true
for now.
@rtype bool
"""
message_id = uuid.uuid4()
req = {'env:Envelope': self._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', # NOQA
action='http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete',
shell_id=shell_id,
message_id=message_id)}
# SOAP message requires empty env:Body
req['env:Envelope'].setdefault('env:Body', {})
res = self.send_message(xmltodict.unparse(req))
root = ET.fromstring(res)
relates_to = next(
node for node in root.findall('.//*')
if node.tag.endswith('RelatesTo')).text
# TODO change assert into user-friendly exception
assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id | Close the shell
@param string shell_id: The shell id on the remote machine.
See #open_shell
@returns This should have more error checking but it just returns true
for now.
@rtype bool | Below is the the instruction that describes the task:
### Input:
Close the shell
@param string shell_id: The shell id on the remote machine.
See #open_shell
@returns This should have more error checking but it just returns true
for now.
@rtype bool
### Response:
def close_shell(self, shell_id):
"""
Close the shell
@param string shell_id: The shell id on the remote machine.
See #open_shell
@returns This should have more error checking but it just returns true
for now.
@rtype bool
"""
message_id = uuid.uuid4()
req = {'env:Envelope': self._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd', # NOQA
action='http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete',
shell_id=shell_id,
message_id=message_id)}
# SOAP message requires empty env:Body
req['env:Envelope'].setdefault('env:Body', {})
res = self.send_message(xmltodict.unparse(req))
root = ET.fromstring(res)
relates_to = next(
node for node in root.findall('.//*')
if node.tag.endswith('RelatesTo')).text
# TODO change assert into user-friendly exception
assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id |
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode() | Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include. | Below is the the instruction that describes the task:
### Input:
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
### Response:
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode() |
def cv(data, units=False):
"""
Calculate coefficient of variation (cv) of data. Mean and standard deviation
are computed across time.
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time.
units : bool
Average `cv`.
Returns
-------
numpy.ndarray
If units=False, series of unit `cv`s.
float
If units=True, mean `cv` across units.
Examples
--------
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]))
array([ 0.48795004, 0.63887656])
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True)
0.56341330073710316
"""
mu = mean(data, time=True)
var = variance(data, time=True)
cv = np.sqrt(var) / mu
if units is True:
return np.mean(cv)
else:
return cv | Calculate coefficient of variation (cv) of data. Mean and standard deviation
are computed across time.
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time.
units : bool
Average `cv`.
Returns
-------
numpy.ndarray
If units=False, series of unit `cv`s.
float
If units=True, mean `cv` across units.
Examples
--------
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]))
array([ 0.48795004, 0.63887656])
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True)
0.56341330073710316 | Below is the the instruction that describes the task:
### Input:
Calculate coefficient of variation (cv) of data. Mean and standard deviation
are computed across time.
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time.
units : bool
Average `cv`.
Returns
-------
numpy.ndarray
If units=False, series of unit `cv`s.
float
If units=True, mean `cv` across units.
Examples
--------
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]))
array([ 0.48795004, 0.63887656])
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True)
0.56341330073710316
### Response:
def cv(data, units=False):
"""
Calculate coefficient of variation (cv) of data. Mean and standard deviation
are computed across time.
Parameters
----------
data : numpy.ndarray
1st axis unit, 2nd axis time.
units : bool
Average `cv`.
Returns
-------
numpy.ndarray
If units=False, series of unit `cv`s.
float
If units=True, mean `cv` across units.
Examples
--------
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]))
array([ 0.48795004, 0.63887656])
>>> cv(np.array([[1, 2, 3, 4, 5, 6], [11, 2, 3, 3, 4, 5]]), units=True)
0.56341330073710316
"""
mu = mean(data, time=True)
var = variance(data, time=True)
cv = np.sqrt(var) / mu
if units is True:
return np.mean(cv)
else:
return cv |
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) - events consumed: {2:d} - running: '
'{3!s}\n').format(
worker_status.identifier, worker_status.pid,
worker_status.number_of_consumed_events,
worker_status.status not in definitions.ERROR_STATUS_INDICATORS)
self._output_writer.Write(status_line) | Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status. | Below is the the instruction that describes the task:
### Input:
Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
### Response:
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) - events consumed: {2:d} - running: '
'{3!s}\n').format(
worker_status.identifier, worker_status.pid,
worker_status.number_of_consumed_events,
worker_status.status not in definitions.ERROR_STATUS_INDICATORS)
self._output_writer.Write(status_line) |
def draw_graph(G: nx.DiGraph, filename: str):
""" Draw a networkx graph with Pygraphviz. """
A = to_agraph(G)
A.graph_attr["rankdir"] = "LR"
A.draw(filename, prog="dot") | Draw a networkx graph with Pygraphviz. | Below is the the instruction that describes the task:
### Input:
Draw a networkx graph with Pygraphviz.
### Response:
def draw_graph(G: nx.DiGraph, filename: str):
""" Draw a networkx graph with Pygraphviz. """
A = to_agraph(G)
A.graph_attr["rankdir"] = "LR"
A.draw(filename, prog="dot") |
def shift_up(self, times=1):
"""
Finds Location shifted up by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e) | Finds Location shifted up by 1
:rtype: Location | Below is the the instruction that describes the task:
### Input:
Finds Location shifted up by 1
:rtype: Location
### Response:
def shift_up(self, times=1):
"""
Finds Location shifted up by 1
:rtype: Location
"""
try:
return Location(self._rank + times, self._file)
except IndexError as e:
raise IndexError(e) |
def set_handler(self, handler):
""" Set transport handler
@param handler: Handler, should derive from the
C{sockjs.cyclone.transports.base.BaseTransportMixin}
"""
if self.handler is not None:
raise Exception('Attempted to overwrite BaseSession handler')
self.handler = handler
self.transport_name = self.handler.name
if self.conn_info is None:
self.conn_info = handler.get_conn_info()
self.stats.sessionOpened(self.transport_name)
return True | Set transport handler
@param handler: Handler, should derive from the
C{sockjs.cyclone.transports.base.BaseTransportMixin} | Below is the the instruction that describes the task:
### Input:
Set transport handler
@param handler: Handler, should derive from the
C{sockjs.cyclone.transports.base.BaseTransportMixin}
### Response:
def set_handler(self, handler):
""" Set transport handler
@param handler: Handler, should derive from the
C{sockjs.cyclone.transports.base.BaseTransportMixin}
"""
if self.handler is not None:
raise Exception('Attempted to overwrite BaseSession handler')
self.handler = handler
self.transport_name = self.handler.name
if self.conn_info is None:
self.conn_info = handler.get_conn_info()
self.stats.sessionOpened(self.transport_name)
return True |
def _submit_gauges_from_histogram(
self, name, metric, send_histograms_buckets=True, custom_tags=None, hostname=None
):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
if custom_tags is None:
custom_tags = []
# histograms do not have a value attribute
val = getattr(metric, self.METRIC_TYPES[4]).sample_count
if self._is_value_valid(val):
self._submit_gauge("{}.count".format(name), val, metric, custom_tags)
else:
self.log.debug("Metric value is not supported for metric {}.count.".format(name))
val = getattr(metric, self.METRIC_TYPES[4]).sample_sum
if self._is_value_valid(val):
self._submit_gauge("{}.sum".format(name), val, metric, custom_tags)
else:
self.log.debug("Metric value is not supported for metric {}.sum.".format(name))
if send_histograms_buckets:
for bucket in getattr(metric, self.METRIC_TYPES[4]).bucket:
val = bucket.cumulative_count
limit = bucket.upper_bound
if self._is_value_valid(val):
self._submit_gauge(
"{}.count".format(name),
val,
metric,
custom_tags=custom_tags + ["upper_bound:{}".format(limit)],
hostname=hostname,
)
else:
self.log.debug("Metric value is not supported for metric {}.count.".format(name)) | Extracts metrics from a prometheus histogram and sends them as gauges | Below is the the instruction that describes the task:
### Input:
Extracts metrics from a prometheus histogram and sends them as gauges
### Response:
def _submit_gauges_from_histogram(
self, name, metric, send_histograms_buckets=True, custom_tags=None, hostname=None
):
"""
Extracts metrics from a prometheus histogram and sends them as gauges
"""
if custom_tags is None:
custom_tags = []
# histograms do not have a value attribute
val = getattr(metric, self.METRIC_TYPES[4]).sample_count
if self._is_value_valid(val):
self._submit_gauge("{}.count".format(name), val, metric, custom_tags)
else:
self.log.debug("Metric value is not supported for metric {}.count.".format(name))
val = getattr(metric, self.METRIC_TYPES[4]).sample_sum
if self._is_value_valid(val):
self._submit_gauge("{}.sum".format(name), val, metric, custom_tags)
else:
self.log.debug("Metric value is not supported for metric {}.sum.".format(name))
if send_histograms_buckets:
for bucket in getattr(metric, self.METRIC_TYPES[4]).bucket:
val = bucket.cumulative_count
limit = bucket.upper_bound
if self._is_value_valid(val):
self._submit_gauge(
"{}.count".format(name),
val,
metric,
custom_tags=custom_tags + ["upper_bound:{}".format(limit)],
hostname=hostname,
)
else:
self.log.debug("Metric value is not supported for metric {}.count.".format(name)) |
def on_taskend(self, task):
""" Play sounds at task end.
"""
key = 'timer' if task.elapsed else 'end'
filename = self.files.get(key)
if filename:
self._play_sound(filename) | Play sounds at task end. | Below is the the instruction that describes the task:
### Input:
Play sounds at task end.
### Response:
def on_taskend(self, task):
""" Play sounds at task end.
"""
key = 'timer' if task.elapsed else 'end'
filename = self.files.get(key)
if filename:
self._play_sound(filename) |
def _bind_args(sig, param_matchers, args, kwargs):
'''
Attempt to bind the args to the type signature. First try to just bind
to the signature, then ensure that all arguments match the parameter
types.
'''
#Bind to signature. May throw its own TypeError
bound = sig.bind(*args, **kwargs)
if not all(param_matcher(bound.arguments[param_name])
for param_name, param_matcher in param_matchers):
raise TypeError
return bound | Attempt to bind the args to the type signature. First try to just bind
to the signature, then ensure that all arguments match the parameter
types. | Below is the the instruction that describes the task:
### Input:
Attempt to bind the args to the type signature. First try to just bind
to the signature, then ensure that all arguments match the parameter
types.
### Response:
def _bind_args(sig, param_matchers, args, kwargs):
'''
Attempt to bind the args to the type signature. First try to just bind
to the signature, then ensure that all arguments match the parameter
types.
'''
#Bind to signature. May throw its own TypeError
bound = sig.bind(*args, **kwargs)
if not all(param_matcher(bound.arguments[param_name])
for param_name, param_matcher in param_matchers):
raise TypeError
return bound |
def get_entity(
self, entity_type, entity_id, history_index=-1, connected=True):
"""Return an object instance for the given entity_type and id.
By default the object state matches the most recent state from
Juju. To get an instance of the object in an older state, pass
history_index, an index into the history deque for the entity.
"""
if history_index < 0 and history_index != -1:
history_index += len(self.entity_history(entity_type, entity_id))
if history_index < 0:
return None
try:
self.entity_data(entity_type, entity_id, history_index)
except IndexError:
return None
entity_class = get_entity_class(entity_type)
return entity_class(
entity_id, self.model, history_index=history_index,
connected=connected) | Return an object instance for the given entity_type and id.
By default the object state matches the most recent state from
Juju. To get an instance of the object in an older state, pass
history_index, an index into the history deque for the entity. | Below is the the instruction that describes the task:
### Input:
Return an object instance for the given entity_type and id.
By default the object state matches the most recent state from
Juju. To get an instance of the object in an older state, pass
history_index, an index into the history deque for the entity.
### Response:
def get_entity(
self, entity_type, entity_id, history_index=-1, connected=True):
"""Return an object instance for the given entity_type and id.
By default the object state matches the most recent state from
Juju. To get an instance of the object in an older state, pass
history_index, an index into the history deque for the entity.
"""
if history_index < 0 and history_index != -1:
history_index += len(self.entity_history(entity_type, entity_id))
if history_index < 0:
return None
try:
self.entity_data(entity_type, entity_id, history_index)
except IndexError:
return None
entity_class = get_entity_class(entity_type)
return entity_class(
entity_id, self.model, history_index=history_index,
connected=connected) |
def start_output (self):
"""
Write start of checking info as sql comment.
"""
super(SQLLogger, self).start_output()
if self.has_part("intro"):
self.write_intro()
self.writeln()
self.flush() | Write start of checking info as sql comment. | Below is the the instruction that describes the task:
### Input:
Write start of checking info as sql comment.
### Response:
def start_output (self):
"""
Write start of checking info as sql comment.
"""
super(SQLLogger, self).start_output()
if self.has_part("intro"):
self.write_intro()
self.writeln()
self.flush() |
def create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_end_value, warmup_duration,
save_history=False,
output_simulated_values=None):
"""
Helper method to create a LR scheduler with a linear warm-up.
Args:
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): LR scheduler after
the warm-up.
warmup_start_value (float): LR start value of the warm-up phase.
warmup_end_value (float): LR end value of the warm-up phase.
warmup_duration (int): warm-up phase duration, number of events.
save_history (bool, optional): whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values (list, optional): optional output of simulated LR values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated LR values.
Returns:
ConcatScheduler: LR scheduler with linear warm-up.
.. code-block:: python
torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
lr_values = [None] * 100
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10,
output_simulated_values=lr_values)
lr_values = np.array(lr_values)
# Plot simulated values
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
"""
if not isinstance(lr_scheduler, (ParamScheduler, _LRScheduler)):
raise TypeError("Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler or "
"ParamScheduler, but given {}".format(type(lr_scheduler)))
if isinstance(lr_scheduler, _LRScheduler):
lr_scheduler = LRScheduler(lr_scheduler)
dummy_optimizer = {}
warmup_scheduler = LinearCyclicalScheduler(dummy_optimizer, param_name="lr",
start_value=warmup_start_value,
end_value=warmup_end_value,
cycle_size=warmup_duration * 2)
warmup_scheduler.optimizer_param_groups = lr_scheduler.optimizer_param_groups
schedulers = [warmup_scheduler, lr_scheduler]
durations = [warmup_duration, ]
combined_scheduler = ConcatScheduler(schedulers, durations=durations,
save_history=save_history)
if output_simulated_values is not None:
if not isinstance(output_simulated_values, list):
raise TypeError("Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, "
"but given {}.".format(type(output_simulated_values)))
num_events = len(output_simulated_values)
result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations)
for i in range(num_events):
output_simulated_values[i] = result[i]
return combined_scheduler | Helper method to create a LR scheduler with a linear warm-up.
Args:
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): LR scheduler after
the warm-up.
warmup_start_value (float): LR start value of the warm-up phase.
warmup_end_value (float): LR end value of the warm-up phase.
warmup_duration (int): warm-up phase duration, number of events.
save_history (bool, optional): whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values (list, optional): optional output of simulated LR values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated LR values.
Returns:
ConcatScheduler: LR scheduler with linear warm-up.
.. code-block:: python
torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
lr_values = [None] * 100
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10,
output_simulated_values=lr_values)
lr_values = np.array(lr_values)
# Plot simulated values
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler) | Below is the the instruction that describes the task:
### Input:
Helper method to create a LR scheduler with a linear warm-up.
Args:
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): LR scheduler after
the warm-up.
warmup_start_value (float): LR start value of the warm-up phase.
warmup_end_value (float): LR end value of the warm-up phase.
warmup_duration (int): warm-up phase duration, number of events.
save_history (bool, optional): whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values (list, optional): optional output of simulated LR values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated LR values.
Returns:
ConcatScheduler: LR scheduler with linear warm-up.
.. code-block:: python
torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
lr_values = [None] * 100
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10,
output_simulated_values=lr_values)
lr_values = np.array(lr_values)
# Plot simulated values
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
### Response:
def create_lr_scheduler_with_warmup(lr_scheduler, warmup_start_value, warmup_end_value, warmup_duration,
save_history=False,
output_simulated_values=None):
"""
Helper method to create a LR scheduler with a linear warm-up.
Args:
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): LR scheduler after
the warm-up.
warmup_start_value (float): LR start value of the warm-up phase.
warmup_end_value (float): LR end value of the warm-up phase.
warmup_duration (int): warm-up phase duration, number of events.
save_history (bool, optional): whether to log the parameter values to
`engine.state.param_history`, (default=False).
output_simulated_values (list, optional): optional output of simulated LR values.
If output_simulated_values is a list of None, e.g. `[None] * 100`, after the execution it will be filled
by 100 simulated LR values.
Returns:
ConcatScheduler: LR scheduler with linear warm-up.
.. code-block:: python
torch_lr_scheduler = ExponentialLR(optimizer=optimizer, gamma=0.98)
lr_values = [None] * 100
scheduler = create_lr_scheduler_with_warmup(torch_lr_scheduler,
warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10,
output_simulated_values=lr_values)
lr_values = np.array(lr_values)
# Plot simulated values
plt.plot(lr_values[:, 0], lr_values[:, 1], label="learning rate")
# Attach to the trainer
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
"""
if not isinstance(lr_scheduler, (ParamScheduler, _LRScheduler)):
raise TypeError("Argument lr_scheduler should be a subclass of torch.optim.lr_scheduler._LRScheduler or "
"ParamScheduler, but given {}".format(type(lr_scheduler)))
if isinstance(lr_scheduler, _LRScheduler):
lr_scheduler = LRScheduler(lr_scheduler)
dummy_optimizer = {}
warmup_scheduler = LinearCyclicalScheduler(dummy_optimizer, param_name="lr",
start_value=warmup_start_value,
end_value=warmup_end_value,
cycle_size=warmup_duration * 2)
warmup_scheduler.optimizer_param_groups = lr_scheduler.optimizer_param_groups
schedulers = [warmup_scheduler, lr_scheduler]
durations = [warmup_duration, ]
combined_scheduler = ConcatScheduler(schedulers, durations=durations,
save_history=save_history)
if output_simulated_values is not None:
if not isinstance(output_simulated_values, list):
raise TypeError("Argument output_simulated_values should be a list of None, e.g. `[None] * 100`, "
"but given {}.".format(type(output_simulated_values)))
num_events = len(output_simulated_values)
result = ConcatScheduler.simulate_values(num_events=num_events, schedulers=schedulers, durations=durations)
for i in range(num_events):
output_simulated_values[i] = result[i]
return combined_scheduler |
def coords_on_grid(self, x, y):
""" Snap coordinates on the grid with integer coordinates """
if isinstance(x, float):
x = int(self._round(x))
if isinstance(y, float):
y = int(self._round(y))
if not self._y_coord_down:
y = self._extents - y
return x, y | Snap coordinates on the grid with integer coordinates | Below is the the instruction that describes the task:
### Input:
Snap coordinates on the grid with integer coordinates
### Response:
def coords_on_grid(self, x, y):
""" Snap coordinates on the grid with integer coordinates """
if isinstance(x, float):
x = int(self._round(x))
if isinstance(y, float):
y = int(self._round(y))
if not self._y_coord_down:
y = self._extents - y
return x, y |
def from_seed(cls, seed, encoder=encoding.RawEncoder):
"""
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
"""
# decode the seed
seed = encoder.decode(seed)
# Verify the given seed type and size are correct
if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
raise exc.TypeError(("PrivateKey seed must be a {0} bytes long "
"binary sequence").format(cls.SEED_SIZE)
)
# generate a raw keypair from the given seed
raw_pk, raw_sk = nacl.bindings.crypto_box_seed_keypair(seed)
# construct a instance from the raw secret key
return cls(raw_sk) | Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey` | Below is the the instruction that describes the task:
### Input:
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
### Response:
def from_seed(cls, seed, encoder=encoding.RawEncoder):
"""
Generate a PrivateKey using a deterministic construction
starting from a caller-provided seed
.. warning:: The seed **must** be high-entropy; therefore,
its generator **must** be a cryptographic quality
random function like, for example, :func:`~nacl.utils.random`.
.. warning:: The seed **must** be protected and remain secret.
Anyone who knows the seed is really in possession of
the corresponding PrivateKey.
:param seed: The seed used to generate the private key
:rtype: :class:`~nacl.public.PrivateKey`
"""
# decode the seed
seed = encoder.decode(seed)
# Verify the given seed type and size are correct
if not (isinstance(seed, bytes) and len(seed) == cls.SEED_SIZE):
raise exc.TypeError(("PrivateKey seed must be a {0} bytes long "
"binary sequence").format(cls.SEED_SIZE)
)
# generate a raw keypair from the given seed
raw_pk, raw_sk = nacl.bindings.crypto_box_seed_keypair(seed)
# construct a instance from the raw secret key
return cls(raw_sk) |
def split_coords_2d(seq):
"""
:param seq: a flat list with lons and lats
:returns: a validated list of pairs (lon, lat)
>>> split_coords_2d([1.1, 2.1, 2.2, 2.3])
[(1.1, 2.1), (2.2, 2.3)]
"""
lons, lats = [], []
for i, el in enumerate(seq):
if i % 2 == 0:
lons.append(valid.longitude(el))
elif i % 2 == 1:
lats.append(valid.latitude(el))
return list(zip(lons, lats)) | :param seq: a flat list with lons and lats
:returns: a validated list of pairs (lon, lat)
>>> split_coords_2d([1.1, 2.1, 2.2, 2.3])
[(1.1, 2.1), (2.2, 2.3)] | Below is the the instruction that describes the task:
### Input:
:param seq: a flat list with lons and lats
:returns: a validated list of pairs (lon, lat)
>>> split_coords_2d([1.1, 2.1, 2.2, 2.3])
[(1.1, 2.1), (2.2, 2.3)]
### Response:
def split_coords_2d(seq):
"""
:param seq: a flat list with lons and lats
:returns: a validated list of pairs (lon, lat)
>>> split_coords_2d([1.1, 2.1, 2.2, 2.3])
[(1.1, 2.1), (2.2, 2.3)]
"""
lons, lats = [], []
for i, el in enumerate(seq):
if i % 2 == 0:
lons.append(valid.longitude(el))
elif i % 2 == 1:
lats.append(valid.latitude(el))
return list(zip(lons, lats)) |
def _render(self, contexts, partials):
"""render partials"""
try:
partial = partials[self.value]
except KeyError as e:
return self._escape(EMPTYSTRING)
partial = re_insert_indent.sub(r'\1' + ' '*self.indent, partial)
return inner_render(partial, contexts, partials, self.delimiter) | render partials | Below is the the instruction that describes the task:
### Input:
render partials
### Response:
def _render(self, contexts, partials):
"""render partials"""
try:
partial = partials[self.value]
except KeyError as e:
return self._escape(EMPTYSTRING)
partial = re_insert_indent.sub(r'\1' + ' '*self.indent, partial)
return inner_render(partial, contexts, partials, self.delimiter) |
def create_args_parser(description):
""" Create a command-line arguments parser for OSPD. """
parser = argparse.ArgumentParser(description=description)
def network_port(string):
""" Check if provided string is a valid network port. """
value = int(string)
if not 0 < value <= 65535:
raise argparse.ArgumentTypeError(
'port must be in ]0,65535] interval')
return value
def cacert_file(cacert):
""" Check if provided file is a valid CA Certificate """
try:
context = ssl.create_default_context(cafile=cacert)
except AttributeError:
# Python version < 2.7.9
return cacert
except IOError:
raise argparse.ArgumentTypeError('CA Certificate not found')
try:
not_after = context.get_ca_certs()[0]['notAfter']
not_after = ssl.cert_time_to_seconds(not_after)
not_before = context.get_ca_certs()[0]['notBefore']
not_before = ssl.cert_time_to_seconds(not_before)
except (KeyError, IndexError):
raise argparse.ArgumentTypeError('CA Certificate is erroneous')
if not_after < int(time.time()):
raise argparse.ArgumentTypeError('CA Certificate expired')
if not_before > int(time.time()):
raise argparse.ArgumentTypeError('CA Certificate not active yet')
return cacert
def log_level(string):
""" Check if provided string is a valid log level. """
value = getattr(logging, string.upper(), None)
if not isinstance(value, int):
raise argparse.ArgumentTypeError(
'log level must be one of {debug,info,warning,error,critical}')
return value
def filename(string):
""" Check if provided string is a valid file path. """
if not os.path.isfile(string):
raise argparse.ArgumentTypeError(
'%s is not a valid file path' % string)
return string
parser.add_argument('-p', '--port', default=PORT, type=network_port,
help='TCP Port to listen on. Default: {0}'.format(PORT))
parser.add_argument('-b', '--bind-address', default=ADDRESS,
help='Address to listen on. Default: {0}'
.format(ADDRESS))
parser.add_argument('-u', '--unix-socket',
help='Unix file socket to listen on.')
parser.add_argument('-k', '--key-file', type=filename,
help='Server key file. Default: {0}'.format(KEY_FILE))
parser.add_argument('-c', '--cert-file', type=filename,
help='Server cert file. Default: {0}'.format(CERT_FILE))
parser.add_argument('--ca-file', type=cacert_file,
help='CA cert file. Default: {0}'.format(CA_FILE))
parser.add_argument('-L', '--log-level', default='warning', type=log_level,
help='Wished level of logging. Default: WARNING')
parser.add_argument('--foreground', action='store_true',
help='Run in foreground and logs all messages to console.')
parser.add_argument('-l', '--log-file', type=filename,
help='Path to the logging file.')
parser.add_argument('--version', action='store_true',
help='Print version then exit.')
return parser | Create a command-line arguments parser for OSPD. | Below is the the instruction that describes the task:
### Input:
Create a command-line arguments parser for OSPD.
### Response:
def create_args_parser(description):
""" Create a command-line arguments parser for OSPD. """
parser = argparse.ArgumentParser(description=description)
def network_port(string):
""" Check if provided string is a valid network port. """
value = int(string)
if not 0 < value <= 65535:
raise argparse.ArgumentTypeError(
'port must be in ]0,65535] interval')
return value
def cacert_file(cacert):
""" Check if provided file is a valid CA Certificate """
try:
context = ssl.create_default_context(cafile=cacert)
except AttributeError:
# Python version < 2.7.9
return cacert
except IOError:
raise argparse.ArgumentTypeError('CA Certificate not found')
try:
not_after = context.get_ca_certs()[0]['notAfter']
not_after = ssl.cert_time_to_seconds(not_after)
not_before = context.get_ca_certs()[0]['notBefore']
not_before = ssl.cert_time_to_seconds(not_before)
except (KeyError, IndexError):
raise argparse.ArgumentTypeError('CA Certificate is erroneous')
if not_after < int(time.time()):
raise argparse.ArgumentTypeError('CA Certificate expired')
if not_before > int(time.time()):
raise argparse.ArgumentTypeError('CA Certificate not active yet')
return cacert
def log_level(string):
""" Check if provided string is a valid log level. """
value = getattr(logging, string.upper(), None)
if not isinstance(value, int):
raise argparse.ArgumentTypeError(
'log level must be one of {debug,info,warning,error,critical}')
return value
def filename(string):
""" Check if provided string is a valid file path. """
if not os.path.isfile(string):
raise argparse.ArgumentTypeError(
'%s is not a valid file path' % string)
return string
parser.add_argument('-p', '--port', default=PORT, type=network_port,
help='TCP Port to listen on. Default: {0}'.format(PORT))
parser.add_argument('-b', '--bind-address', default=ADDRESS,
help='Address to listen on. Default: {0}'
.format(ADDRESS))
parser.add_argument('-u', '--unix-socket',
help='Unix file socket to listen on.')
parser.add_argument('-k', '--key-file', type=filename,
help='Server key file. Default: {0}'.format(KEY_FILE))
parser.add_argument('-c', '--cert-file', type=filename,
help='Server cert file. Default: {0}'.format(CERT_FILE))
parser.add_argument('--ca-file', type=cacert_file,
help='CA cert file. Default: {0}'.format(CA_FILE))
parser.add_argument('-L', '--log-level', default='warning', type=log_level,
help='Wished level of logging. Default: WARNING')
parser.add_argument('--foreground', action='store_true',
help='Run in foreground and logs all messages to console.')
parser.add_argument('-l', '--log-file', type=filename,
help='Path to the logging file.')
parser.add_argument('--version', action='store_true',
help='Print version then exit.')
return parser |
def dropEdges(grph, minWeight = - float('inf'), maxWeight = float('inf'), parameterName = 'weight', ignoreUnweighted = False, dropSelfLoops = False):
"""Modifies _grph_ by dropping edges whose weight is not within the inclusive bounds of _minWeight_ and _maxWeight_, i.e after running _grph_ will only have edges whose weights meet the following inequality: _minWeight_ <= edge's weight <= _maxWeight_. A `Keyerror` will be raised if the graph is unweighted unless _ignoreUnweighted_ is `True`, the weight is determined by examining the attribute _parameterName_.
**Note**: none of the default options will result in _grph_ being modified so only specify the relevant ones, e.g. `dropEdges(G, dropSelfLoops = True)` will remove only the self loops from `G`.
# Parameters
_grph_ : `networkx Graph`
> The graph to be modified.
_minWeight_ : `optional [int or double]`
> default `-inf`, the minimum weight for an edge to be kept in the graph.
_maxWeight_ : `optional [int or double]`
> default `inf`, the maximum weight for an edge to be kept in the graph.
_parameterName_ : `optional [str]`
> default `'weight'`, key to weight field in the edge's attribute dictionary, the default is the same as networkx and metaknowledge so is likely to be correct
_ignoreUnweighted_ : `optional [bool]`
> default `False`, if `True` unweighted edges will kept
_dropSelfLoops_ : `optional [bool]`
> default `False`, if `True` self loops will be removed regardless of their weight
"""
count = 0
total = len(grph.edges())
if metaknowledge.VERBOSE_MODE:
progArgs = (0, "Dropping edges")
progKwargs = {}
else:
progArgs = (0, "Dropping edges")
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if dropSelfLoops:
slps = list(grph.selfloop_edges())
PBar.updateVal(0, "Dropping self {} loops".format(len(slps)))
for e in slps:
grph.remove_edge(e[0], e[1])
edgesToDrop = []
if minWeight != - float('inf') or maxWeight != float('inf'):
for e in grph.edges(data = True):
try:
val = e[2][parameterName]
except KeyError:
if not ignoreUnweighted:
raise KeyError("One or more Edges do not have weight or " + str(parameterName), " is not the name of the weight")
else:
pass
else:
count += 1
if count % 100000 == 0:
PBar.updateVal(count/ total, str(count) + " edges analysed and " + str(total -len(grph.edges())) + " edges dropped")
if val > maxWeight or val < minWeight:
edgesToDrop.append((e[0], e[1]))
grph.remove_edges_from(edgesToDrop)
PBar.finish(str(total - len(grph.edges())) + " edges out of " + str(total) + " dropped, " + str(len(grph.edges())) + " returned") | Modifies _grph_ by dropping edges whose weight is not within the inclusive bounds of _minWeight_ and _maxWeight_, i.e after running _grph_ will only have edges whose weights meet the following inequality: _minWeight_ <= edge's weight <= _maxWeight_. A `Keyerror` will be raised if the graph is unweighted unless _ignoreUnweighted_ is `True`, the weight is determined by examining the attribute _parameterName_.
**Note**: none of the default options will result in _grph_ being modified so only specify the relevant ones, e.g. `dropEdges(G, dropSelfLoops = True)` will remove only the self loops from `G`.
# Parameters
_grph_ : `networkx Graph`
> The graph to be modified.
_minWeight_ : `optional [int or double]`
> default `-inf`, the minimum weight for an edge to be kept in the graph.
_maxWeight_ : `optional [int or double]`
> default `inf`, the maximum weight for an edge to be kept in the graph.
_parameterName_ : `optional [str]`
> default `'weight'`, key to weight field in the edge's attribute dictionary, the default is the same as networkx and metaknowledge so is likely to be correct
_ignoreUnweighted_ : `optional [bool]`
> default `False`, if `True` unweighted edges will kept
_dropSelfLoops_ : `optional [bool]`
> default `False`, if `True` self loops will be removed regardless of their weight | Below is the the instruction that describes the task:
### Input:
Modifies _grph_ by dropping edges whose weight is not within the inclusive bounds of _minWeight_ and _maxWeight_, i.e after running _grph_ will only have edges whose weights meet the following inequality: _minWeight_ <= edge's weight <= _maxWeight_. A `Keyerror` will be raised if the graph is unweighted unless _ignoreUnweighted_ is `True`, the weight is determined by examining the attribute _parameterName_.
**Note**: none of the default options will result in _grph_ being modified so only specify the relevant ones, e.g. `dropEdges(G, dropSelfLoops = True)` will remove only the self loops from `G`.
# Parameters
_grph_ : `networkx Graph`
> The graph to be modified.
_minWeight_ : `optional [int or double]`
> default `-inf`, the minimum weight for an edge to be kept in the graph.
_maxWeight_ : `optional [int or double]`
> default `inf`, the maximum weight for an edge to be kept in the graph.
_parameterName_ : `optional [str]`
> default `'weight'`, key to weight field in the edge's attribute dictionary, the default is the same as networkx and metaknowledge so is likely to be correct
_ignoreUnweighted_ : `optional [bool]`
> default `False`, if `True` unweighted edges will kept
_dropSelfLoops_ : `optional [bool]`
> default `False`, if `True` self loops will be removed regardless of their weight
### Response:
def dropEdges(grph, minWeight = - float('inf'), maxWeight = float('inf'), parameterName = 'weight', ignoreUnweighted = False, dropSelfLoops = False):
"""Modifies _grph_ by dropping edges whose weight is not within the inclusive bounds of _minWeight_ and _maxWeight_, i.e after running _grph_ will only have edges whose weights meet the following inequality: _minWeight_ <= edge's weight <= _maxWeight_. A `Keyerror` will be raised if the graph is unweighted unless _ignoreUnweighted_ is `True`, the weight is determined by examining the attribute _parameterName_.
**Note**: none of the default options will result in _grph_ being modified so only specify the relevant ones, e.g. `dropEdges(G, dropSelfLoops = True)` will remove only the self loops from `G`.
# Parameters
_grph_ : `networkx Graph`
> The graph to be modified.
_minWeight_ : `optional [int or double]`
> default `-inf`, the minimum weight for an edge to be kept in the graph.
_maxWeight_ : `optional [int or double]`
> default `inf`, the maximum weight for an edge to be kept in the graph.
_parameterName_ : `optional [str]`
> default `'weight'`, key to weight field in the edge's attribute dictionary, the default is the same as networkx and metaknowledge so is likely to be correct
_ignoreUnweighted_ : `optional [bool]`
> default `False`, if `True` unweighted edges will kept
_dropSelfLoops_ : `optional [bool]`
> default `False`, if `True` self loops will be removed regardless of their weight
"""
count = 0
total = len(grph.edges())
if metaknowledge.VERBOSE_MODE:
progArgs = (0, "Dropping edges")
progKwargs = {}
else:
progArgs = (0, "Dropping edges")
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if dropSelfLoops:
slps = list(grph.selfloop_edges())
PBar.updateVal(0, "Dropping self {} loops".format(len(slps)))
for e in slps:
grph.remove_edge(e[0], e[1])
edgesToDrop = []
if minWeight != - float('inf') or maxWeight != float('inf'):
for e in grph.edges(data = True):
try:
val = e[2][parameterName]
except KeyError:
if not ignoreUnweighted:
raise KeyError("One or more Edges do not have weight or " + str(parameterName), " is not the name of the weight")
else:
pass
else:
count += 1
if count % 100000 == 0:
PBar.updateVal(count/ total, str(count) + " edges analysed and " + str(total -len(grph.edges())) + " edges dropped")
if val > maxWeight or val < minWeight:
edgesToDrop.append((e[0], e[1]))
grph.remove_edges_from(edgesToDrop)
PBar.finish(str(total - len(grph.edges())) + " edges out of " + str(total) + " dropped, " + str(len(grph.edges())) + " returned") |
def import_contacts(self, email, password, include_name=False):
"""
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
"""
data = {'email': email,
'password': password}
if include_name:
data['names'] = 1
return self.api_post('contacts', data) | Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail. | Below is the the instruction that describes the task:
### Input:
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
### Response:
def import_contacts(self, email, password, include_name=False):
"""
Fetch email contacts from a user's address book on one of the major email websites. Currently supports AOL, Gmail, Hotmail, and Yahoo! Mail.
"""
data = {'email': email,
'password': password}
if include_name:
data['names'] = 1
return self.api_post('contacts', data) |
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:T
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
"""
if not self.isNonAxi:
phi= 0.
self._compute_xyzforces(R,z,phi,t)
return np.cos(phi)*self._cached_Fx+np.sin(phi)*self._cached_Fy | NAME:
_Rforce
PURPOSE:T
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force | Below is the the instruction that describes the task:
### Input:
NAME:
_Rforce
PURPOSE:T
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
### Response:
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:T
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
"""
if not self.isNonAxi:
phi= 0.
self._compute_xyzforces(R,z,phi,t)
return np.cos(phi)*self._cached_Fx+np.sin(phi)*self._cached_Fy |
def cmd_arp_ping(ip, iface, verbose):
"""
Send ARP packets to check if a host it's alive in the local network.
Example:
\b
# habu.arp.ping 192.168.0.1
Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2)
for _, pkt in res:
if verbose:
print(pkt.show())
else:
print(pkt.summary()) | Send ARP packets to check if a host it's alive in the local network.
Example:
\b
# habu.arp.ping 192.168.0.1
Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding | Below is the the instruction that describes the task:
### Input:
Send ARP packets to check if a host it's alive in the local network.
Example:
\b
# habu.arp.ping 192.168.0.1
Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding
### Response:
def cmd_arp_ping(ip, iface, verbose):
"""
Send ARP packets to check if a host it's alive in the local network.
Example:
\b
# habu.arp.ping 192.168.0.1
Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2)
for _, pkt in res:
if verbose:
print(pkt.show())
else:
print(pkt.summary()) |
def get_request_body(self):
"""
Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax.
"""
if self._request_body is None:
settings = get_settings(self.application, force_instance=True)
content_type_header = headers.parse_content_type(
self.request.headers.get('Content-Type',
settings.default_content_type))
content_type = '/'.join([content_type_header.content_type,
content_type_header.content_subtype])
if content_type_header.content_suffix is not None:
content_type = '+'.join([content_type,
content_type_header.content_suffix])
try:
handler = settings[content_type]
except KeyError:
raise web.HTTPError(415, 'cannot decode body of type %s',
content_type)
try:
self._request_body = handler.from_bytes(self.request.body)
except Exception:
self._logger.exception('failed to decode request body')
raise web.HTTPError(400, 'failed to decode request')
return self._request_body | Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax. | Below is the the instruction that describes the task:
### Input:
Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax.
### Response:
def get_request_body(self):
"""
Fetch (and cache) the request body as a dictionary.
:raise web.HTTPError:
- if the content type cannot be matched, then the status code
is set to 415 Unsupported Media Type.
- if decoding the content body fails, then the status code is
set to 400 Bad Syntax.
"""
if self._request_body is None:
settings = get_settings(self.application, force_instance=True)
content_type_header = headers.parse_content_type(
self.request.headers.get('Content-Type',
settings.default_content_type))
content_type = '/'.join([content_type_header.content_type,
content_type_header.content_subtype])
if content_type_header.content_suffix is not None:
content_type = '+'.join([content_type,
content_type_header.content_suffix])
try:
handler = settings[content_type]
except KeyError:
raise web.HTTPError(415, 'cannot decode body of type %s',
content_type)
try:
self._request_body = handler.from_bytes(self.request.body)
except Exception:
self._logger.exception('failed to decode request body')
raise web.HTTPError(400, 'failed to decode request')
return self._request_body |
def remove_token(self, *, payer_id, credit_card_token_id):
"""
This feature allows you to delete a tokenized credit card register.
Args:
payer_id:
credit_card_token_id:
Returns:
"""
payload = {
"language": self.client.language.value,
"command": PaymentCommand.REMOVE_TOKEN.value,
"merchant": {
"apiLogin": self.client.api_login,
"apiKey": self.client.api_key
},
"removeCreditCardToken": {
"payerId": payer_id,
"creditCardTokenId": credit_card_token_id
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload) | This feature allows you to delete a tokenized credit card register.
Args:
payer_id:
credit_card_token_id:
Returns: | Below is the the instruction that describes the task:
### Input:
This feature allows you to delete a tokenized credit card register.
Args:
payer_id:
credit_card_token_id:
Returns:
### Response:
def remove_token(self, *, payer_id, credit_card_token_id):
"""
This feature allows you to delete a tokenized credit card register.
Args:
payer_id:
credit_card_token_id:
Returns:
"""
payload = {
"language": self.client.language.value,
"command": PaymentCommand.REMOVE_TOKEN.value,
"merchant": {
"apiLogin": self.client.api_login,
"apiKey": self.client.api_key
},
"removeCreditCardToken": {
"payerId": payer_id,
"creditCardTokenId": credit_card_token_id
},
"test": self.client.is_test
}
return self.client._post(self.url, json=payload) |
def main():
"""Main method."""
args = parse_cmd_arguments()
html_file = args.file
try:
json.loads(args.add_tags or '{}')
json.loads(args.exc_tags or '{}')
except ValueError:
print('\033[91m' + 'Invalid json string: please provide a valid json '
'string e.g {}'.format('\'{"img": "data-url"}\'') + '\033[0m')
sys.exit(1)
staticfied = staticfy(html_file, args=args).encode('utf-8')
file_ops(staticfied, args=args) | Main method. | Below is the the instruction that describes the task:
### Input:
Main method.
### Response:
def main():
"""Main method."""
args = parse_cmd_arguments()
html_file = args.file
try:
json.loads(args.add_tags or '{}')
json.loads(args.exc_tags or '{}')
except ValueError:
print('\033[91m' + 'Invalid json string: please provide a valid json '
'string e.g {}'.format('\'{"img": "data-url"}\'') + '\033[0m')
sys.exit(1)
staticfied = staticfy(html_file, args=args).encode('utf-8')
file_ops(staticfied, args=args) |
def getData(self, pos):
"""
Returns dictionary with input and target given pos.
"""
retval = {}
if pos >= len(self.inputs):
raise IndexError('getData() pattern beyond range.', pos)
if self.verbosity >= 1: print("Getting input", pos, "...")
if len(self.inputMap) == 0:
if type(self.inputs[pos]) == dict: # allow inputs to be a dict
retval.update(self.inputs[pos])
else:
retval[self.layers[0].name] = self.inputs[pos]
else: # mapInput set manually
for vals in self.inputMap:
(name, offset) = vals
retval[name] = self.getDataMap("input", pos, name, offset)
if self.verbosity > 1: print("Loading target", pos, "...")
if len(self.targets) == 0:
pass # ok, no targets
elif len(self.targetMap) == 0:
if type(self.targets[pos]) == dict: # allow targets to be a dict
retval.update(self.targets[pos])
else:
retval[self.layers[len(self.layers)-1].name] = self.targets[pos]
else: # set manually
for vals in self.targetMap:
(name, offset) = vals
retval[name] = self.getDataMap("target", pos, name, offset)
return retval | Returns dictionary with input and target given pos. | Below is the the instruction that describes the task:
### Input:
Returns dictionary with input and target given pos.
### Response:
def getData(self, pos):
"""
Returns dictionary with input and target given pos.
"""
retval = {}
if pos >= len(self.inputs):
raise IndexError('getData() pattern beyond range.', pos)
if self.verbosity >= 1: print("Getting input", pos, "...")
if len(self.inputMap) == 0:
if type(self.inputs[pos]) == dict: # allow inputs to be a dict
retval.update(self.inputs[pos])
else:
retval[self.layers[0].name] = self.inputs[pos]
else: # mapInput set manually
for vals in self.inputMap:
(name, offset) = vals
retval[name] = self.getDataMap("input", pos, name, offset)
if self.verbosity > 1: print("Loading target", pos, "...")
if len(self.targets) == 0:
pass # ok, no targets
elif len(self.targetMap) == 0:
if type(self.targets[pos]) == dict: # allow targets to be a dict
retval.update(self.targets[pos])
else:
retval[self.layers[len(self.layers)-1].name] = self.targets[pos]
else: # set manually
for vals in self.targetMap:
(name, offset) = vals
retval[name] = self.getDataMap("target", pos, name, offset)
return retval |
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM | Indicate if this Monitor is currently recording. | Below is the the instruction that describes the task:
### Input:
Indicate if this Monitor is currently recording.
### Response:
def is_recording(self) -> Optional[bool]:
"""Indicate if this Monitor is currently recording."""
status_response = self._client.get_state(
'api/monitors/alarm/id:{}/command:status.json'.format(
self._monitor_id
)
)
if not status_response:
_LOGGER.warning('Could not get status for monitor {}'.format(
self._monitor_id
))
return None
status = status_response.get('status')
# ZoneMinder API returns an empty string to indicate that this monitor
# cannot record right now
if status == '':
return False
return int(status) == STATE_ALARM |
def run(self, hosts, function, attempts=1):
"""
Add the given function to a queue, and call it once for each host
according to the threading options.
Use decorators.bind() if you also want to pass additional
arguments to the callback function.
Returns an object that represents the queued task, and that may be
passed to is_completed() to check the status.
:type hosts: string|list(string)|Host|list(Host)
:param hosts: A hostname or Host object, or a list of them.
:type function: function
:param function: The function to execute.
:type attempts: int
:param attempts: The number of attempts on failure.
:rtype: object
:return: An object representing the task.
"""
return self._run(hosts, function, self.workqueue.enqueue, attempts) | Add the given function to a queue, and call it once for each host
according to the threading options.
Use decorators.bind() if you also want to pass additional
arguments to the callback function.
Returns an object that represents the queued task, and that may be
passed to is_completed() to check the status.
:type hosts: string|list(string)|Host|list(Host)
:param hosts: A hostname or Host object, or a list of them.
:type function: function
:param function: The function to execute.
:type attempts: int
:param attempts: The number of attempts on failure.
:rtype: object
:return: An object representing the task. | Below is the the instruction that describes the task:
### Input:
Add the given function to a queue, and call it once for each host
according to the threading options.
Use decorators.bind() if you also want to pass additional
arguments to the callback function.
Returns an object that represents the queued task, and that may be
passed to is_completed() to check the status.
:type hosts: string|list(string)|Host|list(Host)
:param hosts: A hostname or Host object, or a list of them.
:type function: function
:param function: The function to execute.
:type attempts: int
:param attempts: The number of attempts on failure.
:rtype: object
:return: An object representing the task.
### Response:
def run(self, hosts, function, attempts=1):
"""
Add the given function to a queue, and call it once for each host
according to the threading options.
Use decorators.bind() if you also want to pass additional
arguments to the callback function.
Returns an object that represents the queued task, and that may be
passed to is_completed() to check the status.
:type hosts: string|list(string)|Host|list(Host)
:param hosts: A hostname or Host object, or a list of them.
:type function: function
:param function: The function to execute.
:type attempts: int
:param attempts: The number of attempts on failure.
:rtype: object
:return: An object representing the task.
"""
return self._run(hosts, function, self.workqueue.enqueue, attempts) |
def get_color_theme(theme):
'''
Return the color theme to use
'''
# Keep the heavy lifting out of the module space
import salt.utils.data
import salt.utils.files
import salt.utils.yaml
if not os.path.isfile(theme):
log.warning('The named theme %s if not available', theme)
try:
with salt.utils.files.fopen(theme, 'rb') as fp_:
colors = salt.utils.data.decode(salt.utils.yaml.safe_load(fp_))
ret = {}
for color in colors:
ret[color] = '\033[{0}m'.format(colors[color])
if not isinstance(colors, dict):
log.warning('The theme file %s is not a dict', theme)
return {}
return ret
except Exception:
log.warning('Failed to read the color theme %s', theme)
return {} | Return the color theme to use | Below is the the instruction that describes the task:
### Input:
Return the color theme to use
### Response:
def get_color_theme(theme):
'''
Return the color theme to use
'''
# Keep the heavy lifting out of the module space
import salt.utils.data
import salt.utils.files
import salt.utils.yaml
if not os.path.isfile(theme):
log.warning('The named theme %s if not available', theme)
try:
with salt.utils.files.fopen(theme, 'rb') as fp_:
colors = salt.utils.data.decode(salt.utils.yaml.safe_load(fp_))
ret = {}
for color in colors:
ret[color] = '\033[{0}m'.format(colors[color])
if not isinstance(colors, dict):
log.warning('The theme file %s is not a dict', theme)
return {}
return ret
except Exception:
log.warning('Failed to read the color theme %s', theme)
return {} |
def parseDEI(self,
xbrl,
ignore_errors=0):
"""
Parse DEI from our XBRL soup and return a DEI object.
"""
dei_obj = DEI()
if ignore_errors == 2:
logging.basicConfig(filename='/tmp/xbrl.log',
level=logging.ERROR,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
else:
logger = None
trading_symbol = xbrl.find_all(name=re.compile("(dei:tradingsymbol)",
re.IGNORECASE | re.MULTILINE))
dei_obj.trading_symbol = \
self.data_processing(trading_symbol, xbrl,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
company_name = xbrl.find_all(name=re.compile("(dei:entityregistrantname)",
re.IGNORECASE | re.MULTILINE))
dei_obj.company_name = \
self.data_processing(company_name, xbrl,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
shares_outstanding = xbrl.find_all(name=re.compile("(dei:entitycommonstocksharesoutstanding)",
re.IGNORECASE | re.MULTILINE))
dei_obj.shares_outstanding = \
self.data_processing(shares_outstanding, xbrl,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
public_float = xbrl.find_all(name=re.compile("(dei:entitypublicfloat)",
re.IGNORECASE | re.MULTILINE))
dei_obj.public_float = \
self.data_processing(public_float, xbrl,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
return dei_obj | Parse DEI from our XBRL soup and return a DEI object. | Below is the the instruction that describes the task:
### Input:
Parse DEI from our XBRL soup and return a DEI object.
### Response:
def parseDEI(self,
xbrl,
ignore_errors=0):
"""
Parse DEI from our XBRL soup and return a DEI object.
"""
dei_obj = DEI()
if ignore_errors == 2:
logging.basicConfig(filename='/tmp/xbrl.log',
level=logging.ERROR,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
else:
logger = None
trading_symbol = xbrl.find_all(name=re.compile("(dei:tradingsymbol)",
re.IGNORECASE | re.MULTILINE))
dei_obj.trading_symbol = \
self.data_processing(trading_symbol, xbrl,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
company_name = xbrl.find_all(name=re.compile("(dei:entityregistrantname)",
re.IGNORECASE | re.MULTILINE))
dei_obj.company_name = \
self.data_processing(company_name, xbrl,
ignore_errors, logger,
options={'type': 'String',
'no_context': True})
shares_outstanding = xbrl.find_all(name=re.compile("(dei:entitycommonstocksharesoutstanding)",
re.IGNORECASE | re.MULTILINE))
dei_obj.shares_outstanding = \
self.data_processing(shares_outstanding, xbrl,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
public_float = xbrl.find_all(name=re.compile("(dei:entitypublicfloat)",
re.IGNORECASE | re.MULTILINE))
dei_obj.public_float = \
self.data_processing(public_float, xbrl,
ignore_errors, logger,
options={'type': 'Number',
'no_context': True})
return dei_obj |
def copy(self, props=None, value=None):
"""
Copy the Overlay possibly overriding props.
"""
return Overlay(self.text,
(self.start, self.end),
props=props or self.props,
value=value or self.value) | Copy the Overlay possibly overriding props. | Below is the the instruction that describes the task:
### Input:
Copy the Overlay possibly overriding props.
### Response:
def copy(self, props=None, value=None):
"""
Copy the Overlay possibly overriding props.
"""
return Overlay(self.text,
(self.start, self.end),
props=props or self.props,
value=value or self.value) |
def kernel_restarted_message(self, msg):
"""Show kernel restarted/died messages."""
if not self.is_error_shown:
# If there are kernel creation errors, jupyter_client will
# try to restart the kernel and qtconsole prints a
# message about it.
# So we read the kernel's stderr_file and display its
# contents in the client instead of the usual message shown
# by qtconsole.
try:
stderr = self._read_stderr()
except Exception:
stderr = None
if stderr:
self.show_kernel_error('<tt>%s</tt>' % stderr)
else:
self.shellwidget._append_html("<br>%s<hr><br>" % msg,
before_prompt=False) | Show kernel restarted/died messages. | Below is the the instruction that describes the task:
### Input:
Show kernel restarted/died messages.
### Response:
def kernel_restarted_message(self, msg):
"""Show kernel restarted/died messages."""
if not self.is_error_shown:
# If there are kernel creation errors, jupyter_client will
# try to restart the kernel and qtconsole prints a
# message about it.
# So we read the kernel's stderr_file and display its
# contents in the client instead of the usual message shown
# by qtconsole.
try:
stderr = self._read_stderr()
except Exception:
stderr = None
if stderr:
self.show_kernel_error('<tt>%s</tt>' % stderr)
else:
self.shellwidget._append_html("<br>%s<hr><br>" % msg,
before_prompt=False) |
def protein_header_split_generator(elements, headers, ns):
"""Loop through proteins of each PSM/peptide. If a protein does not
match any of headers, discard PSM/peptide immediately"""
for el in elements:
header_not_matching = False
for protein in el.findall('{%s}protein_id' % ns['xmlns']):
if not any((re.search(h, protein.text) for h in headers)):
header_not_matching = True
break
if header_not_matching:
formatting.clear_el(el)
else:
yield formatting.string_and_clear(el, ns) | Loop through proteins of each PSM/peptide. If a protein does not
match any of headers, discard PSM/peptide immediately | Below is the the instruction that describes the task:
### Input:
Loop through proteins of each PSM/peptide. If a protein does not
match any of headers, discard PSM/peptide immediately
### Response:
def protein_header_split_generator(elements, headers, ns):
"""Loop through proteins of each PSM/peptide. If a protein does not
match any of headers, discard PSM/peptide immediately"""
for el in elements:
header_not_matching = False
for protein in el.findall('{%s}protein_id' % ns['xmlns']):
if not any((re.search(h, protein.text) for h in headers)):
header_not_matching = True
break
if header_not_matching:
formatting.clear_el(el)
else:
yield formatting.string_and_clear(el, ns) |
def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
reduced_site_sym,
symprec):
"""
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
"""
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces'])
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
# Shift positions according to set atom1 is at origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2 | dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...] | Below is the the instruction that describes the task:
### Input:
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
### Response:
def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
reduced_site_sym,
symprec):
"""
dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...]
"""
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces'])
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
# Shift positions according to set atom1 is at origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2 |
def ssh_sa_ssh_server_ssh_vrf_cont_use_vrf_use_vrf_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
ssh_vrf_cont = ET.SubElement(server, "ssh-vrf-cont")
use_vrf = ET.SubElement(ssh_vrf_cont, "use-vrf")
use_vrf_name = ET.SubElement(use_vrf, "use-vrf-name")
use_vrf_name.text = kwargs.pop('use_vrf_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ssh_sa_ssh_server_ssh_vrf_cont_use_vrf_use_vrf_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ssh_sa = ET.SubElement(config, "ssh-sa", xmlns="urn:brocade.com:mgmt:brocade-sec-services")
ssh = ET.SubElement(ssh_sa, "ssh")
server = ET.SubElement(ssh, "server")
ssh_vrf_cont = ET.SubElement(server, "ssh-vrf-cont")
use_vrf = ET.SubElement(ssh_vrf_cont, "use-vrf")
use_vrf_name = ET.SubElement(use_vrf, "use-vrf-name")
use_vrf_name.text = kwargs.pop('use_vrf_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _matrix_adjust(self, X):
"""Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
"""
data_matrix = X.data if sparse.issparse(X) else X
# Shift all values to specially encode for NAN/infinity/OTHER and 0
# Old value New Value
# --------- ---------
# N (0..int_max) N + 3
# np.NaN 2
# infinity 2
# *other* 1
#
# A value of 0 is reserved, as that is specially handled in sparse
# matrices.
data_matrix += len(SPARSE_ENCODINGS) + 1
data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN']
return X | Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities. | Below is the the instruction that describes the task:
### Input:
Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
### Response:
def _matrix_adjust(self, X):
"""Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
"""
data_matrix = X.data if sparse.issparse(X) else X
# Shift all values to specially encode for NAN/infinity/OTHER and 0
# Old value New Value
# --------- ---------
# N (0..int_max) N + 3
# np.NaN 2
# infinity 2
# *other* 1
#
# A value of 0 is reserved, as that is specially handled in sparse
# matrices.
data_matrix += len(SPARSE_ENCODINGS) + 1
data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN']
return X |
def inverse(self):
"""
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
"""
inv = self.__class__()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
if (inv.has_edge(each)):
inv.del_edge(each)
return inv | Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph. | Below is the the instruction that describes the task:
### Input:
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
### Response:
def inverse(self):
"""
Return the inverse of the graph.
@rtype: graph
@return: Complement graph for the graph.
"""
inv = self.__class__()
inv.add_nodes(self.nodes())
inv.complete()
for each in self.edges():
if (inv.has_edge(each)):
inv.del_edge(each)
return inv |
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text) | Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters. | Below is the the instruction that describes the task:
### Input:
Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
### Response:
def replace_characters(self, text, characters, replacement=''):
"""Remove characters from text.
Removes custom characters from input text or replaces them
with a string if specified.
Args:
text: The text to be processed.
characters: Characters that will be replaced.
replacement: New text that will replace the custom characters.
Returns:
The text without the given characters.
"""
if not characters:
return text
characters = ''.join(sorted(characters))
if characters in self._characters_regexes:
characters_regex = self._characters_regexes[characters]
else:
characters_regex = re.compile("[%s]" % re.escape(characters))
self._characters_regexes[characters] = characters_regex
return characters_regex.sub(replacement, text) |
def pop(stack, op_id):
"""Pop a value from the stack (i.e. read it from the tape).
Args:
stack: The stack to pop from.
op_id: A unique variable that is also passed into the matching push.
Allows optimization passes to track pairs of pushes and pops.
Returns:
The last value.
"""
if __debug__:
pushed_value, pushed_op_id = stack.pop()
assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)
else:
pushed_value = stack.pop()
return pushed_value | Pop a value from the stack (i.e. read it from the tape).
Args:
stack: The stack to pop from.
op_id: A unique variable that is also passed into the matching push.
Allows optimization passes to track pairs of pushes and pops.
Returns:
The last value. | Below is the the instruction that describes the task:
### Input:
Pop a value from the stack (i.e. read it from the tape).
Args:
stack: The stack to pop from.
op_id: A unique variable that is also passed into the matching push.
Allows optimization passes to track pairs of pushes and pops.
Returns:
The last value.
### Response:
def pop(stack, op_id):
"""Pop a value from the stack (i.e. read it from the tape).
Args:
stack: The stack to pop from.
op_id: A unique variable that is also passed into the matching push.
Allows optimization passes to track pairs of pushes and pops.
Returns:
The last value.
"""
if __debug__:
pushed_value, pushed_op_id = stack.pop()
assert pushed_op_id == op_id, 'Wanted %s, got %s' % (op_id, pushed_op_id)
else:
pushed_value = stack.pop()
return pushed_value |
def list_group_maintainers(self, name):
"""
Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
return self.project_service.list_group_maintainers(name) | Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure. | Below is the the instruction that describes the task:
### Input:
Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure.
### Response:
def list_group_maintainers(self, name):
"""
Get the maintainers of a group.
Args:
name (string): Name of group to query.
Returns:
(list[string]): List of maintainer names.
Raises:
requests.HTTPError on failure.
"""
self.project_service.set_auth(self._token_project)
return self.project_service.list_group_maintainers(name) |
def click(self, force_no_call=False, milis=None):
"""
Call when the button is pressed. This start the callback function in a thread
If :milis is given, will release the button after :milis miliseconds
"""
if self.clicked:
return False
if not force_no_call and self.flags & self.CALL_ON_PRESS:
if self.flags & self.THREADED_CALL:
start_new_thread(self.func, ())
else:
self.func()
super().click()
if milis is not None:
start_new_thread(self.release, (), {'milis': milis}) | Call when the button is pressed. This start the callback function in a thread
If :milis is given, will release the button after :milis miliseconds | Below is the the instruction that describes the task:
### Input:
Call when the button is pressed. This start the callback function in a thread
If :milis is given, will release the button after :milis miliseconds
### Response:
def click(self, force_no_call=False, milis=None):
"""
Call when the button is pressed. This start the callback function in a thread
If :milis is given, will release the button after :milis miliseconds
"""
if self.clicked:
return False
if not force_no_call and self.flags & self.CALL_ON_PRESS:
if self.flags & self.THREADED_CALL:
start_new_thread(self.func, ())
else:
self.func()
super().click()
if milis is not None:
start_new_thread(self.release, (), {'milis': milis}) |
def find_one(cls, pattern, string, flags=0):
"""JS-like match object. Use index number to get groups, if not match or no group, will return ''.
Basic Usage::
>>> from torequests.utils import find_one
>>> string = "abcd"
>>> find_one("a.*", string)
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> find_one("a.*", string)[0]
'abcd'
>>> find_one("a.*", string)[1]
''
>>> find_one("a(.)", string)[0]
'ab'
>>> find_one("a(.)", string)[1]
'b'
>>> find_one("a(.)", string)[2] or "default"
'default'
>>> import re
>>> item = find_one("a(B)(C)", string, flags=re.I | re.S)
>>> item
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> item[0]
'abc'
>>> item[1]
'b'
>>> item[2]
'c'
>>> item[3]
''
>>> # import re
>>> # re.findone = find_one
>>> register_re_findone()
>>> re.findone('a(b)', 'abcd')[1] or 'default'
'b'
"""
item = re.search(pattern, string, flags=flags)
return cls(item) | JS-like match object. Use index number to get groups, if not match or no group, will return ''.
Basic Usage::
>>> from torequests.utils import find_one
>>> string = "abcd"
>>> find_one("a.*", string)
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> find_one("a.*", string)[0]
'abcd'
>>> find_one("a.*", string)[1]
''
>>> find_one("a(.)", string)[0]
'ab'
>>> find_one("a(.)", string)[1]
'b'
>>> find_one("a(.)", string)[2] or "default"
'default'
>>> import re
>>> item = find_one("a(B)(C)", string, flags=re.I | re.S)
>>> item
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> item[0]
'abc'
>>> item[1]
'b'
>>> item[2]
'c'
>>> item[3]
''
>>> # import re
>>> # re.findone = find_one
>>> register_re_findone()
>>> re.findone('a(b)', 'abcd')[1] or 'default'
'b' | Below is the the instruction that describes the task:
### Input:
JS-like match object. Use index number to get groups, if not match or no group, will return ''.
Basic Usage::
>>> from torequests.utils import find_one
>>> string = "abcd"
>>> find_one("a.*", string)
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> find_one("a.*", string)[0]
'abcd'
>>> find_one("a.*", string)[1]
''
>>> find_one("a(.)", string)[0]
'ab'
>>> find_one("a(.)", string)[1]
'b'
>>> find_one("a(.)", string)[2] or "default"
'default'
>>> import re
>>> item = find_one("a(B)(C)", string, flags=re.I | re.S)
>>> item
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> item[0]
'abc'
>>> item[1]
'b'
>>> item[2]
'c'
>>> item[3]
''
>>> # import re
>>> # re.findone = find_one
>>> register_re_findone()
>>> re.findone('a(b)', 'abcd')[1] or 'default'
'b'
### Response:
def find_one(cls, pattern, string, flags=0):
"""JS-like match object. Use index number to get groups, if not match or no group, will return ''.
Basic Usage::
>>> from torequests.utils import find_one
>>> string = "abcd"
>>> find_one("a.*", string)
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> find_one("a.*", string)[0]
'abcd'
>>> find_one("a.*", string)[1]
''
>>> find_one("a(.)", string)[0]
'ab'
>>> find_one("a(.)", string)[1]
'b'
>>> find_one("a(.)", string)[2] or "default"
'default'
>>> import re
>>> item = find_one("a(B)(C)", string, flags=re.I | re.S)
>>> item
<torequests.utils.RegMatch object at 0x0705F1D0>
>>> item[0]
'abc'
>>> item[1]
'b'
>>> item[2]
'c'
>>> item[3]
''
>>> # import re
>>> # re.findone = find_one
>>> register_re_findone()
>>> re.findone('a(b)', 'abcd')[1] or 'default'
'b'
"""
item = re.search(pattern, string, flags=flags)
return cls(item) |
def get_usage(self, start=None, end=None):
"""
Return the usage records for this load balancer. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on
the end time. These times should be Python datetime.datetime objects,
Python datetime.date objects, or strings in the format:
"YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD".
"""
return self.manager.get_usage(self, start=start, end=end) | Return the usage records for this load balancer. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on
the end time. These times should be Python datetime.datetime objects,
Python datetime.date objects, or strings in the format:
"YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD". | Below is the the instruction that describes the task:
### Input:
Return the usage records for this load balancer. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on
the end time. These times should be Python datetime.datetime objects,
Python datetime.date objects, or strings in the format:
"YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD".
### Response:
def get_usage(self, start=None, end=None):
"""
Return the usage records for this load balancer. You may optionally
include a start datetime or an end datetime, or both, which will limit
the records to those on or after the start time, and those before or on
the end time. These times should be Python datetime.datetime objects,
Python datetime.date objects, or strings in the format:
"YYYY-MM-DD HH:MM:SS" or "YYYY-MM-DD".
"""
return self.manager.get_usage(self, start=start, end=end) |
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
probs[indices] = 1.0 # lead only to b
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
# 1-11 shold lean to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3) | Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]} | Below is the instruction that describes the task:
### Input:
Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
### Response:
def _generateModel1(numCategories):
""" Generate the initial, first order, and second order transition
probabilities for 'model1'. For this model, we generate the following
set of sequences:
0-10-15 (1X)
0-11-16 (1X)
0-12-17 (1X)
0-13-18 (1X)
0-14-19 (1X)
1-10-20 (1X)
1-11-21 (1X)
1-12-22 (1X)
1-13-23 (1X)
1-14-24 (1X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]}
"""
# --------------------------------------------------------------------
# Initial probabilities, 0 and 1 equally likely
initProb = numpy.zeros(numCategories)
initProb[0] = 0.5
initProb[1] = 0.5
# --------------------------------------------------------------------
# 1st order transitions
# both 0 and 1 should lead to 10,11,12,13,14 with equal probability
firstOrder = dict()
for catIdx in range(numCategories):
key = str([catIdx])
probs = numpy.ones(numCategories) / numCategories
if catIdx == 0 or catIdx == 1:
indices = numpy.array([10,11,12,13,14])
probs.fill(0)
probs[indices] = 1.0 # lead only to b
probs /= probs.sum()
firstOrder[key] = probs
# --------------------------------------------------------------------
# 2nd order transitions
# 0-10 should lead to 15
# 0-11 to 16
# ...
# 1-10 should lead to 20
# 1-11 shold lean to 21
# ...
secondOrder = dict()
for firstIdx in range(numCategories):
for secondIdx in range(numCategories):
key = str([firstIdx, secondIdx])
probs = numpy.ones(numCategories) / numCategories
if key == str([0,10]):
probs.fill(0)
probs[15] = 1
elif key == str([0,11]):
probs.fill(0)
probs[16] = 1
elif key == str([0,12]):
probs.fill(0)
probs[17] = 1
elif key == str([0,13]):
probs.fill(0)
probs[18] = 1
elif key == str([0,14]):
probs.fill(0)
probs[19] = 1
elif key == str([1,10]):
probs.fill(0)
probs[20] = 1
elif key == str([1,11]):
probs.fill(0)
probs[21] = 1
elif key == str([1,12]):
probs.fill(0)
probs[22] = 1
elif key == str([1,13]):
probs.fill(0)
probs[23] = 1
elif key == str([1,14]):
probs.fill(0)
probs[24] = 1
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3) |
def read_bytes(self, length) -> bytes:
"""
Read the specified number of bytes from the stream.
Args:
length (int): number of bytes to read.
Returns:
bytes: `length` number of bytes.
"""
value = self.stream.read(length)
return value | Read the specified number of bytes from the stream.
Args:
length (int): number of bytes to read.
Returns:
bytes: `length` number of bytes. | Below is the instruction that describes the task:
### Input:
Read the specified number of bytes from the stream.
Args:
length (int): number of bytes to read.
Returns:
bytes: `length` number of bytes.
### Response:
def read_bytes(self, length) -> bytes:
"""
Read the specified number of bytes from the stream.
Args:
length (int): number of bytes to read.
Returns:
bytes: `length` number of bytes.
"""
value = self.stream.read(length)
return value |
def best(self):
"""
Returns the element with the highest probability.
"""
b = (-1e999999, None)
for k, c in iteritems(self.counts):
b = max(b, (c, k))
return b[1] | Returns the element with the highest probability. | Below is the instruction that describes the task:
### Input:
Returns the element with the highest probability.
### Response:
def best(self):
"""
Returns the element with the highest probability.
"""
b = (-1e999999, None)
for k, c in iteritems(self.counts):
b = max(b, (c, k))
return b[1] |
def as_text(self, is_proof=True, is_pretty=False):
"""Return the DDO as a JSON text.
:param if is_proof: if False then do not include the 'proof' element.
:param is_pretty: If True return dictionary in a prettier way, bool
:return: str
"""
data = self.as_dictionary(is_proof)
if is_pretty:
return json.dumps(data, indent=2, separators=(',', ': '))
return json.dumps(data) | Return the DDO as a JSON text.
:param if is_proof: if False then do not include the 'proof' element.
:param is_pretty: If True return dictionary in a prettier way, bool
:return: str | Below is the instruction that describes the task:
### Input:
Return the DDO as a JSON text.
:param if is_proof: if False then do not include the 'proof' element.
:param is_pretty: If True return dictionary in a prettier way, bool
:return: str
### Response:
def as_text(self, is_proof=True, is_pretty=False):
"""Return the DDO as a JSON text.
:param if is_proof: if False then do not include the 'proof' element.
:param is_pretty: If True return dictionary in a prettier way, bool
:return: str
"""
data = self.as_dictionary(is_proof)
if is_pretty:
return json.dumps(data, indent=2, separators=(',', ': '))
return json.dumps(data) |
def add(self, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
"""
Adds a process to the simulation. The process is embodied by a function, which will be called with the given
positional and keyword parameters when the simulation runs. As a process, this function runs on a special green
thread, and thus will be able to call functions `now()`, `advance()`, `pause()` and `stop()` to articulate its
events across the simulated timeline and control the simulation's flow.
"""
return self.add_in(0.0, fn_process, *args, **kwargs) | Adds a process to the simulation. The process is embodied by a function, which will be called with the given
positional and keyword parameters when the simulation runs. As a process, this function runs on a special green
thread, and thus will be able to call functions `now()`, `advance()`, `pause()` and `stop()` to articulate its
events across the simulated timeline and control the simulation's flow. | Below is the instruction that describes the task:
### Input:
Adds a process to the simulation. The process is embodied by a function, which will be called with the given
positional and keyword parameters when the simulation runs. As a process, this function runs on a special green
thread, and thus will be able to call functions `now()`, `advance()`, `pause()` and `stop()` to articulate its
events across the simulated timeline and control the simulation's flow.
### Response:
def add(self, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
"""
Adds a process to the simulation. The process is embodied by a function, which will be called with the given
positional and keyword parameters when the simulation runs. As a process, this function runs on a special green
thread, and thus will be able to call functions `now()`, `advance()`, `pause()` and `stop()` to articulate its
events across the simulated timeline and control the simulation's flow.
"""
return self.add_in(0.0, fn_process, *args, **kwargs) |
def print_long(filename, stat, print_func):
"""Prints detailed information about the file passed in."""
size = stat_size(stat)
mtime = stat_mtime(stat)
file_mtime = time.localtime(mtime)
curr_time = time.time()
if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS):
print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]],
file_mtime[2], file_mtime[0],
decorated_filename(filename, stat)))
else:
print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]],
file_mtime[2], file_mtime[3], file_mtime[4],
decorated_filename(filename, stat))) | Prints detailed information about the file passed in. | Below is the instruction that describes the task:
### Input:
Prints detailed information about the file passed in.
### Response:
def print_long(filename, stat, print_func):
"""Prints detailed information about the file passed in."""
size = stat_size(stat)
mtime = stat_mtime(stat)
file_mtime = time.localtime(mtime)
curr_time = time.time()
if mtime > (curr_time + SIX_MONTHS) or mtime < (curr_time - SIX_MONTHS):
print_func('%6d %s %2d %04d %s' % (size, MONTH[file_mtime[1]],
file_mtime[2], file_mtime[0],
decorated_filename(filename, stat)))
else:
print_func('%6d %s %2d %02d:%02d %s' % (size, MONTH[file_mtime[1]],
file_mtime[2], file_mtime[3], file_mtime[4],
decorated_filename(filename, stat))) |
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles | *Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files | Below is the instruction that describes the task:
### Input:
*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
### Response:
def table(
self,
dirPath=None):
"""*Render the results as an ascii table*
**Key Arguments:**
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `tableSources` -- the top-level transient data
- `tablePhot` -- all photometry associated with the transients
- `tableSpec` -- all spectral data associated with the transients
- `tableFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in ascii table format:
.. code-block:: python
tableSources, tablePhot, tableSpec, tableFiles = tns.table()
print tableSources
.. code-block:: text
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| TNSId | TNSName | discoveryName | discSurvey | raSex | decSex | raDeg | decDeg | transRedshift | specType | discMag | discMagFilter | discDate | objectUrl | hostName | hostRedshift | separationArcsec | separationNorthArcsec | separationEastArcsec |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
| 2016asf | SN2016asf | ASASSN-16cs | ASAS-SN | 06:50:36.73 | +31:06:45.36 | 102.6530 | 31.1126 | 0.021 | SN Ia | 17.1 | V-Johnson | 2016-03-06 08:09:36 | http://wis-tns.weizmann.ac.il/object/2016asf | KUG 0647+311 | | 0.66 | 0.65 | -0.13 |
+----------+------------+----------------+-------------+--------------+---------------+-----------+----------+----------------+-----------+----------+----------------+----------------------+-----------------------------------------------+---------------+---------------+-------------------+------------------------+-----------------------+
You can save the results to file by passing in a directory path within which to save the files to. The four flavours of data (sources, photometry, spectra and files) are saved to separate files but all data can be assoicated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.table("~/tns")
.. image:: https://i.imgur.com/m09M0ho.png
:width: 800px
:alt: ascii files
"""
if dirPath:
p = self._file_prefix()
tableSources = self.sourceResults.table(
filepath=dirPath + "/" + p + "sources.ascii")
tablePhot = self.photResults.table(
filepath=dirPath + "/" + p + "phot.ascii")
tableSpec = self.specResults.table(
filepath=dirPath + "/" + p + "spec.ascii")
tableFiles = self.relatedFilesResults.table(
filepath=dirPath + "/" + p + "relatedFiles.ascii")
else:
tableSources = self.sourceResults.table()
tablePhot = self.photResults.table()
tableSpec = self.specResults.table()
tableFiles = self.relatedFilesResults.table()
return tableSources, tablePhot, tableSpec, tableFiles |
def get_LR(self,in_data):
"""
Splits incoming packed stereo data into separate left and right channels
and returns an array of left samples and an array of right samples
Parameters
----------
in_data : input data from the streaming object in the callback function.
Returns
-------
left_in : array of incoming left channel samples
right_in : array of incoming right channel samples
"""
for i in range(0,self.frame_length*2):
if i % 2:
self.right_in[(int)(i/2)] = in_data[i]
else:
self.left_in[(int)(i/2)] = in_data[i]
return self.left_in, self.right_in | Splits incoming packed stereo data into separate left and right channels
and returns an array of left samples and an array of right samples
Parameters
----------
in_data : input data from the streaming object in the callback function.
Returns
-------
left_in : array of incoming left channel samples
right_in : array of incoming right channel samples | Below is the instruction that describes the task:
### Input:
Splits incoming packed stereo data into separate left and right channels
and returns an array of left samples and an array of right samples
Parameters
----------
in_data : input data from the streaming object in the callback function.
Returns
-------
left_in : array of incoming left channel samples
right_in : array of incoming right channel samples
### Response:
def get_LR(self,in_data):
"""
Splits incoming packed stereo data into separate left and right channels
and returns an array of left samples and an array of right samples
Parameters
----------
in_data : input data from the streaming object in the callback function.
Returns
-------
left_in : array of incoming left channel samples
right_in : array of incoming right channel samples
"""
for i in range(0,self.frame_length*2):
if i % 2:
self.right_in[(int)(i/2)] = in_data[i]
else:
self.left_in[(int)(i/2)] = in_data[i]
return self.left_in, self.right_in |
def configure_logging(debug=False):
"""Configure logging
The function configures log messages. By default, log messages
are sent to stderr. Set the parameter `debug` to activate the
debug mode.
:param debug: set the debug mode
"""
if not debug:
logging.basicConfig(level=logging.INFO,
format=LOG_FORMAT)
else:
logging.basicConfig(level=logging.DEBUG,
format=DEBUG_LOG_FORMAT) | Configure logging
The function configures log messages. By default, log messages
are sent to stderr. Set the parameter `debug` to activate the
debug mode.
:param debug: set the debug mode | Below is the instruction that describes the task:
### Input:
Configure logging
The function configures log messages. By default, log messages
are sent to stderr. Set the parameter `debug` to activate the
debug mode.
:param debug: set the debug mode
### Response:
def configure_logging(debug=False):
"""Configure logging
The function configures log messages. By default, log messages
are sent to stderr. Set the parameter `debug` to activate the
debug mode.
:param debug: set the debug mode
"""
if not debug:
logging.basicConfig(level=logging.INFO,
format=LOG_FORMAT)
else:
logging.basicConfig(level=logging.DEBUG,
format=DEBUG_LOG_FORMAT) |
def text(self, x, y, txt=''):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=''):
s+=' '+self._dounderline(x,y,txt)
if(self.color_flag):
s='q '+self.text_color+' '+s+' Q'
self._out(s) | Output a string | Below is the instruction that describes the task:
### Input:
Output a string
### Response:
def text(self, x, y, txt=''):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=''):
s+=' '+self._dounderline(x,y,txt)
if(self.color_flag):
s='q '+self.text_color+' '+s+' Q'
self._out(s) |
def parse_host(cls, host):
'''Parse the host and return hostname and port.'''
if host.endswith(']'):
return cls.parse_hostname(host), None
else:
hostname, sep, port = host.rpartition(':')
if sep:
port = int(port)
if port < 0 or port > 65535:
raise ValueError('Port number invalid')
else:
hostname = port
port = None
return cls.parse_hostname(hostname), port | Parse the host and return hostname and port. | Below is the instruction that describes the task:
### Input:
Parse the host and return hostname and port.
### Response:
def parse_host(cls, host):
'''Parse the host and return hostname and port.'''
if host.endswith(']'):
return cls.parse_hostname(host), None
else:
hostname, sep, port = host.rpartition(':')
if sep:
port = int(port)
if port < 0 or port > 65535:
raise ValueError('Port number invalid')
else:
hostname = port
port = None
return cls.parse_hostname(hostname), port |
def is_installed_extension(name,
user=None,
host=None,
port=None,
maintenance_db=None,
password=None,
runas=None):
'''
Test if a specific extension is installed
CLI Example:
.. code-block:: bash
salt '*' postgres.is_installed_extension
'''
installed_ext = get_installed_extension(
name,
user=user,
host=host,
port=port,
maintenance_db=maintenance_db,
password=password,
runas=runas)
return bool(installed_ext) | Test if a specific extension is installed
CLI Example:
.. code-block:: bash
salt '*' postgres.is_installed_extension | Below is the instruction that describes the task:
### Input:
Test if a specific extension is installed
CLI Example:
.. code-block:: bash
salt '*' postgres.is_installed_extension
### Response:
def is_installed_extension(name,
                           user=None,
                           host=None,
                           port=None,
                           maintenance_db=None,
                           password=None,
                           runas=None):
    '''
    Test if a specific extension is installed
    CLI Example:
    .. code-block:: bash
    salt '*' postgres.is_installed_extension
    '''
    # Any truthy metadata returned by the lookup means the extension exists.
    return bool(get_installed_extension(
        name,
        user=user,
        host=host,
        port=port,
        maintenance_db=maintenance_db,
        password=password,
        runas=runas))
def convert_timeval(seconds_since_epoch):
"""Convert time into C style timeval."""
frac, whole = math.modf(seconds_since_epoch)
microseconds = math.floor(frac * 1000000)
seconds = math.floor(whole)
return seconds, microseconds | Convert time into C style timeval. | Below is the instruction that describes the task:
### Input:
Convert time into C style timeval.
### Response:
def convert_timeval(seconds_since_epoch):
    """Convert time into C style timeval."""
    # Truncate toward zero (exactly what math.modf's integral part does),
    # then floor the sub-second remainder expressed in microseconds.
    seconds = int(seconds_since_epoch)
    microseconds = math.floor((seconds_since_epoch - seconds) * 1000000)
    return seconds, microseconds
def run():
"""CLI main entry point."""
# Use print() instead of logging when running in CLI mode:
set_pyftpsync_logger(None)
parser = argparse.ArgumentParser(
description="Synchronize folders over FTP.",
epilog="See also https://github.com/mar10/pyftpsync",
parents=[verbose_parser],
)
# Note: we want to allow --version to be combined with --verbose. However
# on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
if check_cli_verbose(3) > 3:
version_info = "pyftpsync/{} Python/{} {}".format(
__version__, PYTHON_VERSION, platform.platform()
)
else:
version_info = "{}".format(__version__)
parser.add_argument("-V", "--version", action="version", version=version_info)
subparsers = parser.add_subparsers(help="sub-command help")
# --- Create the parser for the "upload" command ---------------------------
sp = subparsers.add_parser(
"upload",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files to remote folder",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite remote files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["local", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove remote files if they don't exist locally",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove remote files if they don't exist locally "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="upload")
# --- Create the parser for the "download" command -------------------------
sp = subparsers.add_parser(
"download",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="copy new and modified files from remote folder to local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--force",
action="store_true",
help="overwrite local files, even if the target is newer "
"(but no conflict was detected)",
)
sp.add_argument(
"--resolve",
default="ask",
choices=["remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.add_argument(
"--delete",
action="store_true",
help="remove local files if they don't exist on remote target",
)
sp.add_argument(
"--delete-unmatched",
action="store_true",
help="remove local files if they don't exist on remote target "
"or don't match the current filter (implies '--delete' option)",
)
sp.set_defaults(command="download")
# --- Create the parser for the "sync" command -----------------------------
sp = subparsers.add_parser(
"sync",
parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
help="synchronize new and modified files between remote folder and local target",
)
sp.add_argument(
"local",
metavar="LOCAL",
default=".",
help="path to local folder (default: %(default)s)",
)
sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
sp.add_argument(
"--resolve",
default="ask",
choices=["old", "new", "local", "remote", "skip", "ask"],
help="conflict resolving strategy (default: '%(default)s')",
)
sp.set_defaults(command="sync")
# --- Create the parser for the "run" command -----------------------------
add_run_parser(subparsers)
# --- Create the parser for the "scan" command -----------------------------
add_scan_parser(subparsers)
# --- Parse command line ---------------------------------------------------
args = parser.parse_args()
args.verbose -= args.quiet
del args.quiet
# print("verbose", args.verbose)
ftp_debug = 0
if args.verbose >= 6:
ftp_debug = 1
# Modify the `args` from the `pyftpsync.yaml` config:
if getattr(args, "command", None) == "run":
handle_run_command(parser, args)
if callable(getattr(args, "command", None)):
# scan_handler
try:
return args.command(parser, args)
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
elif not hasattr(args, "command"):
parser.error(
"missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
)
# Post-process and check arguments
if hasattr(args, "delete_unmatched") and args.delete_unmatched:
args.delete = True
args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
if args.remote == ".":
parser.error("'.' is expected to be the local target (not remote)")
args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
if not isinstance(args.local_target, FsTarget) and isinstance(
args.remote_target, FsTarget
):
parser.error("a file system target is expected to be local")
# Let the command handler do its thing
opts = namespace_to_dict(args)
if args.command == "upload":
s = UploadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "download":
s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
elif args.command == "sync":
s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
else:
parser.error("unknown command '{}'".format(args.command))
s.is_script = True
try:
s.run()
except KeyboardInterrupt:
print("\nAborted by user.", file=sys.stderr)
sys.exit(3)
finally:
# Prevent sporadic exceptions in ftplib, when closing in __del__
s.local.close()
s.remote.close()
stats = s.get_stats()
if args.verbose >= 5:
pprint(stats)
elif args.verbose >= 1:
if args.dry_run:
print("(DRY-RUN) ", end="")
print(
"Wrote {}/{} files in {} directories, skipped: {}.".format(
stats["files_written"],
stats["local_files"],
stats["local_dirs"],
stats["conflict_files_skipped"],
),
end="",
)
if stats["interactive_ask"]:
print()
else:
print(" Elap: {}.".format(stats["elap_str"]))
return | CLI main entry point. | Below is the instruction that describes the task:
### Input:
CLI main entry point.
### Response:
def run():
    """CLI main entry point.

    Builds the argparse tree for the upload/download/sync/run/scan
    sub-commands, dispatches to the matching synchronizer, and prints a
    summary.  Exits with status 3 when the user aborts with Ctrl-C.
    """
    # Use print() instead of logging when running in CLI mode:
    set_pyftpsync_logger(None)
    parser = argparse.ArgumentParser(
        description="Synchronize folders over FTP.",
        epilog="See also https://github.com/mar10/pyftpsync",
        parents=[verbose_parser],
    )
    # Note: we want to allow --version to be combined with --verbose. However
    # on Py2, argparse makes sub-commands mandatory, unless `action="version"` is used.
    if check_cli_verbose(3) > 3:
        version_info = "pyftpsync/{} Python/{} {}".format(
            __version__, PYTHON_VERSION, platform.platform()
        )
    else:
        version_info = "{}".format(__version__)
    parser.add_argument("-V", "--version", action="version", version=version_info)
    subparsers = parser.add_subparsers(help="sub-command help")
    # --- Create the parser for the "upload" command ---------------------------
    sp = subparsers.add_parser(
        "upload",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files to remote folder",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite remote files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["local", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove remote files if they don't exist locally",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove remote files if they don't exist locally "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="upload")
    # --- Create the parser for the "download" command -------------------------
    sp = subparsers.add_parser(
        "download",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="copy new and modified files from remote folder to local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--force",
        action="store_true",
        help="overwrite local files, even if the target is newer "
        "(but no conflict was detected)",
    )
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.add_argument(
        "--delete",
        action="store_true",
        help="remove local files if they don't exist on remote target",
    )
    sp.add_argument(
        "--delete-unmatched",
        action="store_true",
        help="remove local files if they don't exist on remote target "
        "or don't match the current filter (implies '--delete' option)",
    )
    sp.set_defaults(command="download")
    # --- Create the parser for the "sync" command -----------------------------
    sp = subparsers.add_parser(
        "sync",
        parents=[verbose_parser, common_parser, matcher_parser, creds_parser],
        help="synchronize new and modified files between remote folder and local target",
    )
    sp.add_argument(
        "local",
        metavar="LOCAL",
        default=".",
        help="path to local folder (default: %(default)s)",
    )
    sp.add_argument("remote", metavar="REMOTE", help="path to remote folder")
    sp.add_argument(
        "--resolve",
        default="ask",
        choices=["old", "new", "local", "remote", "skip", "ask"],
        help="conflict resolving strategy (default: '%(default)s')",
    )
    sp.set_defaults(command="sync")
    # --- Create the parser for the "run" command -----------------------------
    add_run_parser(subparsers)
    # --- Create the parser for the "scan" command -----------------------------
    add_scan_parser(subparsers)
    # --- Parse command line ---------------------------------------------------
    args = parser.parse_args()
    # Each -q lowers the verbosity accumulated by -v flags.
    args.verbose -= args.quiet
    del args.quiet
    # print("verbose", args.verbose)
    ftp_debug = 0
    if args.verbose >= 6:
        ftp_debug = 1
    # Modify the `args` from the `pyftpsync.yaml` config:
    if getattr(args, "command", None) == "run":
        handle_run_command(parser, args)
    if callable(getattr(args, "command", None)):
        # scan_handler
        try:
            return args.command(parser, args)
        except KeyboardInterrupt:
            print("\nAborted by user.", file=sys.stderr)
            sys.exit(3)
    elif not hasattr(args, "command"):
        parser.error(
            "missing command (choose from 'upload', 'download', 'run', 'sync', 'scan')"
        )
    # Post-process and check arguments
    if hasattr(args, "delete_unmatched") and args.delete_unmatched:
        args.delete = True
    args.local_target = make_target(args.local, {"ftp_debug": ftp_debug})
    if args.remote == ".":
        parser.error("'.' is expected to be the local target (not remote)")
    args.remote_target = make_target(args.remote, {"ftp_debug": ftp_debug})
    if not isinstance(args.local_target, FsTarget) and isinstance(
        args.remote_target, FsTarget
    ):
        parser.error("a file system target is expected to be local")
    # Let the command handler do its thing
    opts = namespace_to_dict(args)
    if args.command == "upload":
        s = UploadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "download":
        s = DownloadSynchronizer(args.local_target, args.remote_target, opts)
    elif args.command == "sync":
        s = BiDirSynchronizer(args.local_target, args.remote_target, opts)
    else:
        parser.error("unknown command '{}'".format(args.command))
    s.is_script = True
    try:
        s.run()
    except KeyboardInterrupt:
        print("\nAborted by user.", file=sys.stderr)
        sys.exit(3)
    finally:
        # Prevent sporadic exceptions in ftplib, when closing in __del__
        s.local.close()
        s.remote.close()
    # Summarize what happened (suppressed at verbosity 0).
    stats = s.get_stats()
    if args.verbose >= 5:
        pprint(stats)
    elif args.verbose >= 1:
        if args.dry_run:
            print("(DRY-RUN) ", end="")
        print(
            "Wrote {}/{} files in {} directories, skipped: {}.".format(
                stats["files_written"],
                stats["local_files"],
                stats["local_dirs"],
                stats["conflict_files_skipped"],
            ),
            end="",
        )
        if stats["interactive_ask"]:
            print()
        else:
            print(" Elap: {}.".format(stats["elap_str"]))
    return
def _format_type(cls):
"""Format a type name for printing."""
if cls.__module__ == _BUILTIN_MODULE:
return cls.__name__
else:
return '%s.%s' % (cls.__module__, cls.__name__) | Format a type name for printing. | Below is the instruction that describes the task:
### Input:
Format a type name for printing.
### Response:
def _format_type(cls):
    """Format a type name for printing."""
    # Builtins print bare ("int"); anything else is qualified with the
    # module that defines it ("pkg.mod.Widget").
    if cls.__module__ != _BUILTIN_MODULE:
        return '%s.%s' % (cls.__module__, cls.__name__)
    return cls.__name__
def datetime_from_ordinal_float(days):
"""Inverse of `ordinal_float()`, converts a float number of days back to a `datetime` object
>>> dt = datetime.datetime(1970, 1, 1)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True
>>> dt = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True
"""
if isinstance(days, (float, int)):
if np.isnan(days) or days in set((float('nan'), float('inf'), float('-inf'))):
return days
dt = datetime.datetime.fromordinal(int(days))
seconds = (days - int(days)) * 3600. * 24.
microseconds = (seconds - int(seconds)) * 1000000
return dt + datetime.timedelta(days=0, seconds=int(seconds), microseconds=int(round(microseconds)))
return [datetime_from_ordinal_float(d) for d in days] | Inverse of `ordinal_float()`, converts a float number of days back to a `datetime` object
>>> dt = datetime.datetime(1970, 1, 1)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True
>>> dt = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True | Below is the instruction that describes the task:
### Input:
Inverse of `ordinal_float()`, converts a float number of days back to a `datetime` object
>>> dt = datetime.datetime(1970, 1, 1)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True
>>> dt = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
>>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
True
### Response:
def datetime_from_ordinal_float(days):
    """Inverse of `ordinal_float()`, converts a float number of days back to a `datetime` object
    >>> dt = datetime.datetime(1970, 1, 1)
    >>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
    True
    >>> dt = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
    >>> datetime_from_ordinal_float(ordinal_float(dt)) == dt
    True
    """
    if not isinstance(days, (float, int)):
        # Sequence input: convert element-wise.
        return [datetime_from_ordinal_float(d) for d in days]
    if np.isnan(days) or days in set((float('nan'), float('inf'), float('-inf'))):
        # Non-finite values pass through unchanged.
        return days
    base = datetime.datetime.fromordinal(int(days))
    frac_seconds = (days - int(days)) * 3600. * 24.
    frac_micros = (frac_seconds - int(frac_seconds)) * 1000000
    return base + datetime.timedelta(days=0, seconds=int(frac_seconds),
                                     microseconds=int(round(frac_micros)))
def version(self, flag=None, cmd=None, path=None):
"""
Generates and logs a hash to distinguish this particular installation
of the program (on a certain host, with a certain compiler, program
version, etc.)
Specify the optional 'binary' argument if the wrapper name is not
actually the program, e.g. if your program has a Perl wrapper script.
Set 'binary' to the binary program that is likely to change between
versions.
Specify the optional 'cmd' argument if the command to run for version
information is different than what will be invoked by `run` (e.g.
if the program has a perl wrapper script, but you want to version an
underlying binary executable).
"""
# Setup the command to run.
if not cmd:
cmd = list(self.cmd)
if flag:
cmd.append(flag)
# Run the command.
try:
vstring = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
vstring = e.output
except OSError as e:
utils.failed_executable(cmd[0], e)
if not path:
path = cmd[0]
# Generate a hash.
vhash = diagnostics.log_program_version(self.name, vstring, path)
if vhash:
diagnostics.prefix.append(self.name)
diagnostics.log('version', vhash)
diagnostics.prefix.pop() | Generates and logs a hash to distinguish this particular installation
of the program (on a certain host, with a certain compiler, program
version, etc.)
Specify the optional 'binary' argument if the wrapper name is not
actually the program, e.g. if your program has a Perl wrapper script.
Set 'binary' to the binary program that is likely to change between
versions.
Specify the optional 'cmd' argument if the command to run for version
information is different than what will be invoked by `run` (e.g.
if the program has a perl wrapper script, but you want to version an
underlying binary executable). | Below is the instruction that describes the task:
### Input:
Generates and logs a hash to distinguish this particular installation
of the program (on a certain host, with a certain compiler, program
version, etc.)
Specify the optional 'binary' argument if the wrapper name is not
actually the program, e.g. if your program has a Perl wrapper script.
Set 'binary' to the binary program that is likely to change between
versions.
Specify the optional 'cmd' argument if the command to run for version
information is different than what will be invoked by `run` (e.g.
if the program has a perl wrapper script, but you want to version an
underlying binary executable).
### Response:
def version(self, flag=None, cmd=None, path=None):
    """Log a version hash identifying this installation of the program.

    The hash distinguishes a particular installation (host, compiler,
    program version, etc.).

    flag -- optional flag appended to the command to elicit version output
            (e.g. '--version').
    cmd  -- optional explicit argv list to run instead of self.cmd; use it
            when the command that reports the version differs from the one
            invoked by `run` (e.g. the tool has a wrapper script but the
            underlying binary should be versioned).
    path -- optional path recorded with the version; defaults to the
            executable actually run (cmd[0]).

    NOTE(review): the previous docstring described a 'binary' argument that
    does not exist in the signature; the real parameters are documented
    above.
    """
    # Setup the command to run.
    if not cmd:
        cmd = list(self.cmd)
    if flag:
        cmd.append(flag)
    # Run the command.
    try:
        vstring = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # A non-zero exit may still have printed version info; keep it.
        vstring = e.output
    except OSError as e:
        utils.failed_executable(cmd[0], e)
    if not path:
        path = cmd[0]
    # Generate a hash.
    vhash = diagnostics.log_program_version(self.name, vstring, path)
    if vhash:
        diagnostics.prefix.append(self.name)
        diagnostics.log('version', vhash)
        diagnostics.prefix.pop()
def K2OSiO2(self, Left=35, Right=79, X0=30, X1=90, X_Gap=7, Base=0,
                Top=19, Y0=1, Y1=19, Y_Gap=19, FontSize=12, xlabel=r'$SiO_2 wt\%$', ylabel=r'$K_2O wt\%$', width=12,
                height=12, dpi=300):
        """Plot the K2O-SiO2 classification diagram for the current dataset.

        NOTE(review): the Left/Right/X0...dpi parameters are accepted but not
        referenced in this body -- presumably kept for signature parity with
        the sibling diagram methods; confirm before removing.
        """
        self.setWindowTitle('K2OSiO2 diagram ')
        self.axes.clear()
        #self.axes.axis('off')
        self.axes.set_xlabel(self.xlabel)
        self.axes.set_ylabel(self.ylabel)
        self.axes.spines['right'].set_color('none')
        self.axes.spines['top'].set_color('none')
        '''
        self.axes.set_xticks([30,40,50,60,70,80,90])
        self.axes.set_xticklabels([30,40,50,60,70,80,90])
        self.axes.set_yticks([0, 5, 10, 15, 20])
        self.axes.set_yticklabels([0, 5, 10, 15, 20])
        self.axes.set_ylim(bottom=0)
        '''
        # Collect the distinct label/style combinations present in the data.
        all_labels=[]
        all_colors=[]
        all_markers=[]
        all_alpha=[]
        for i in range(len(self._df)):
            target = self._df.at[i, 'Label']
            color = self._df.at[i, 'Color']
            marker = self._df.at[i, 'Marker']
            alpha = self._df.at[i, 'Alpha']
            if target not in self.SVM_labels:
                self.SVM_labels.append(target)
            if target not in all_labels:
                all_labels.append(target)
                all_colors.append(color)
                all_markers.append(marker)
                all_alpha.append(alpha)
        self.whole_labels = all_labels
        PointLabels = []
        PointColors = []
        x = []
        y = []
        title = 'K2O-SiO2diagram'
        self.setWindowTitle(title)
        self.textbox.setText(self.reference)
        # Boundary lines separating the high-/medium-/low-K fields,
        # linearly extrapolated from (48, y) and (68, y) out to the plot edges.
        k_1=(2.9-1.2)/(68-48)
        y_1= 1.2+ (85-48)*k_1
        y_0= 1.2+ (45-48)*k_1
        self.DrawLine([(45, y_0),(48, 1.2), (68,2.9),(85,y_1)])
        k_2=(1.2-0.3)/(68-48)
        y_2= 0.3+ (85-48)*k_2
        y_3= 0.3+ (45-48)*k_2
        self.DrawLine([(45, y_3),(48, 0.3), (68, 1.2),(85,y_2)])
        Labels=['High K','Medium K','Low K']
        Locations=[(80,5),(80,3),(80,1)]
        X_offset, Y_offset=0,0
        for k in range(len(Labels)):
            self.axes.annotate(Labels[k], Locations[k], xycoords='data', xytext=(X_offset, Y_offset),
                               textcoords='offset points',
                               fontsize=9, color='grey', alpha=0.8)
        self.Check()
        if self.OutPutCheck==True:
            pass
        if (self._changed):
            df = self.CleanDataFile(self._df)
            # Scatter every sample and record which classification polygon
            # (self.SelectDic) each point falls into.
            for i in range(len(df)):
                TmpLabel = ''
                if (df.at[i, 'Label'] in PointLabels or df.at[i, 'Label'] == ''):
                    TmpLabel = ''
                else:
                    PointLabels.append(df.at[i, 'Label'])
                    TmpLabel = df.at[i, 'Label']
                TmpColor = ''
                if (df.at[i, 'Color'] in PointColors or df.at[i, 'Color'] == ''):
                    TmpColor = ''
                else:
                    PointColors.append(df.at[i, 'Color'])
                    TmpColor = df.at[i, 'Color']
                x.append(df.at[i, 'SiO2'])
                y.append(df.at[i, 'K2O'])
                Size = df.at[i, 'Size']
                Color = df.at[i, 'Color']
                # print(Color, df.at[i, 'SiO2'], (df.at[i, 'Na2O'] + df.at[i, 'K2O']))
                Alpha = df.at[i, 'Alpha']
                Marker = df.at[i, 'Marker']
                Label = df.at[i, 'Label']
                xtest=df.at[i, 'SiO2']
                ytest=df.at[i, 'K2O']
                for j in self.ItemNames:
                    if self.SelectDic[j].contains_point([xtest,ytest]):
                        self.LabelList.append(Label)
                        self.TypeList.append(j)
                        break
                    pass
                self.axes.scatter(df.at[i, 'SiO2'], df.at[i, 'K2O'], marker=df.at[i, 'Marker'],
                                  s=df.at[i, 'Size'], color=df.at[i, 'Color'], alpha=df.at[i, 'Alpha'], label=TmpLabel)
            XtoFit = {}
            YtoFit = {}
            SVM_X=[]
            SVM_Y=[]
            for i in PointLabels:
                XtoFit[i]=[]
                YtoFit[i]=[]
            for i in range(len(df)):
                Alpha = df.at[i, 'Alpha']
                Marker = df.at[i, 'Marker']
                Label = df.at[i, 'Label']
                xtest=df.at[i, 'SiO2']
                ytest=df.at[i, 'K2O']
                XtoFit[Label].append(xtest)
                YtoFit[Label].append(ytest)
                SVM_X.append(xtest)
                SVM_Y.append(ytest)
            if (self.shape_cb.isChecked()):
                # Optional per-label density contours via Gaussian KDE.
                for i in PointLabels:
                    if XtoFit[i] != YtoFit[i]:
                        xmin, xmax = min(XtoFit[i]), max(XtoFit[i])
                        ymin, ymax = min(YtoFit[i]), max(YtoFit[i])
                        DensityColorMap = 'Greys'
                        DensityAlpha = 0.1
                        DensityLineColor = PointColors[PointLabels.index(i)]
                        DensityLineAlpha = 0.3
                        # Perform the kernel density estimate
                        xx, yy = np.mgrid[xmin:xmax:200j, ymin:ymax:200j]
                        # print(self.ShapeGroups)
                        # command='''xx, yy = np.mgrid[xmin:xmax:'''+str(self.ShapeGroups)+ '''j, ymin:ymax:''' +str(self.ShapeGroups)+'''j]'''
                        # exec(command)
                        # print(xx, yy)
                        positions = np.vstack([xx.ravel(), yy.ravel()])
                        values = np.vstack([XtoFit[i], YtoFit[i]])
                        kernelstatus = True
                        try:
                            st.gaussian_kde(values)
                        except Exception as e:
                            self.ErrorEvent(text=repr(e))
                            kernelstatus = False
                        if kernelstatus == True:
                            kernel = st.gaussian_kde(values)
                            f = np.reshape(kernel(positions).T, xx.shape)
                            # Contourf plot
                            cfset = self.axes.contourf(xx, yy, f, cmap=DensityColorMap, alpha=DensityAlpha)
                            ## Or kernel density estimate plot instead of the contourf plot
                            # self.axes.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
                            # Contour plot
                            cset = self.axes.contour(xx, yy, f, colors=DensityLineColor, alpha=DensityLineAlpha)
                            # Label plot
                            #self.axes.clabel(cset, inline=1, fontsize=10)
        # Overlay externally loaded data (self.data_to_test), classifying
        # each of its points against the same polygons.
        if (len(self.data_to_test) > 0):
            contained = True
            missing = 'Miss setting infor:'
            for i in ['Label', 'Color', 'Marker', 'Alpha']:
                if i not in self.data_to_test.columns.values.tolist():
                    contained = False
                    missing = missing + '\n' + i
            if contained == True:
                for i in self.data_to_test.columns.values.tolist():
                    if i not in self._df.columns.values.tolist():
                        self.data_to_test = self.data_to_test.drop(columns=i)
                # print(self.data_to_test)
                test_labels = []
                test_colors = []
                test_markers = []
                test_alpha = []
                for i in range(len(self.data_to_test)):
                    # print(self.data_to_test.at[i, 'Label'])
                    target = self.data_to_test.at[i, 'Label']
                    color = self.data_to_test.at[i, 'Color']
                    marker = self.data_to_test.at[i, 'Marker']
                    alpha = self.data_to_test.at[i, 'Alpha']
                    if target not in test_labels and target not in all_labels:
                        test_labels.append(target)
                        test_colors.append(color)
                        test_markers.append(marker)
                        test_alpha.append(alpha)
                self.whole_labels = self.whole_labels + test_labels
                self.load_settings_backup = self.data_to_test
                Load_ItemsToTest = ['Label', 'Number', 'Tag', 'Name', 'Author', 'DataType', 'Marker', 'Color',
                                    'Size',
                                    'Alpha',
                                    'Style', 'Width']
                for i in self.data_to_test.columns.values.tolist():
                    if i not in Load_ItemsToTest:
                        self.load_settings_backup = self.load_settings_backup.drop(i, 1)
                print(self.load_settings_backup, self.data_to_test)
                print(self.load_settings_backup.shape, self.data_to_test.shape)
                try:
                    for i in range(len(self.data_to_test)):
                        target = self.data_to_test.at[i, 'Label']
                        if target not in all_labels:
                            all_labels.append(target)
                            tmp_label = self.data_to_test.at[i, 'Label']
                        else:
                            tmp_label=''
                        x_load_test = self.data_to_test.at[i, 'SiO2']
                        y_load_test = self.data_to_test.at[i, 'K2O']
                        for j in self.ItemNames:
                            if self.SelectDic[j].contains_point([x_load_test, y_load_test]):
                                self.LabelList.append(self.data_to_test.at[i, 'Label'])
                                self.TypeList.append(j)
                                break
                            pass
                        if (self.show_load_data_cb.isChecked()):
                            self.axes.scatter(self.data_to_test.at[i, 'SiO2'],self.data_to_test.at[i, 'K2O'],
                                              marker=self.data_to_test.at[i, 'Marker'],
                                              s=self.data_to_test.at[i, 'Size'],
                                              color=self.data_to_test.at[i, 'Color'],
                                              alpha=self.data_to_test.at[i, 'Alpha'],
                                              label=tmp_label)
                except Exception as e:
                    self.ErrorEvent(text=repr(e))
        if (self.legend_cb.isChecked()):
            self.axes.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0, prop=fontprop)
        self.All_X=SVM_X
        self.All_Y=SVM_Y
        # Optional linear-SVM decision-surface overlay across all points.
        if (self.hyperplane_cb.isChecked()):
            clf = svm.SVC(C=1.0, kernel='linear',probability= True)
            svm_x= SVM_X
            svm_y= SVM_Y
            print(len(svm_x),len(svm_y),len(df.index))
            xx, yy = np.meshgrid(np.arange(min(svm_x), max(svm_x), np.ptp(svm_x) / 500),
                                 np.arange(min(svm_y), max(svm_y), np.ptp(svm_y) / 500))
            le = LabelEncoder()
            le.fit(self._df.Label)
            class_label = le.transform(self._df.Label)
            svm_train= pd.concat([pd.DataFrame(svm_x),pd.DataFrame(svm_y)], axis=1)
            svm_train=svm_train.values
            clf.fit(svm_train,class_label)
            Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
            Z = Z.reshape(xx.shape)
            self.axes.contourf(xx, yy, Z, cmap='hot', alpha=0.2)
        if (self.show_data_index_cb.isChecked()):
            if 'Index' in self._df.columns.values:
                for i in range(len(self._df)):
                    self.axes.annotate(self._df.at[i, 'Index'],
                                       xy=(self.All_X[i],
                                           self.All_Y[i]),
                                       color=self._df.at[i, 'Color'],
                                       alpha=self._df.at[i, 'Alpha'])
            else:
                for i in range(len(self._df)):
                    self.axes.annotate('No' + str(i + 1),
                                       xy=(self.All_X[i],
                                           self.All_Y[i]),
                                       color=self._df.at[i, 'Color'],
                                       alpha=self._df.at[i, 'Alpha'])
        self.canvas.draw()
        self.OutPutTitle='K2OSiO2'
        self.OutPutData = pd.DataFrame(
            {'Label': self.LabelList,
             'RockType': self.TypeList
             })
self.OutPutFig=self.fig | self.axes.set_xticks([30,40,50,60,70,80,90])
self.axes.set_xticklabels([30,40,50,60,70,80,90])
self.axes.set_yticks([0, 5, 10, 15, 20])
self.axes.set_yticklabels([0, 5, 10, 15, 20])
self.axes.set_ylim(bottom=0) | Below is the instruction that describes the task:
### Input:
self.axes.set_xticks([30,40,50,60,70,80,90])
self.axes.set_xticklabels([30,40,50,60,70,80,90])
self.axes.set_yticks([0, 5, 10, 15, 20])
self.axes.set_yticklabels([0, 5, 10, 15, 20])
self.axes.set_ylim(bottom=0)
### Response:
def K2OSiO2(self, Left=35, Right=79, X0=30, X1=90, X_Gap=7, Base=0,
Top=19, Y0=1, Y1=19, Y_Gap=19, FontSize=12, xlabel=r'$SiO_2 wt\%$', ylabel=r'$K_2O wt\%$', width=12,
height=12, dpi=300):
self.setWindowTitle('K2OSiO2 diagram ')
self.axes.clear()
#self.axes.axis('off')
self.axes.set_xlabel(self.xlabel)
self.axes.set_ylabel(self.ylabel)
self.axes.spines['right'].set_color('none')
self.axes.spines['top'].set_color('none')
'''
self.axes.set_xticks([30,40,50,60,70,80,90])
self.axes.set_xticklabels([30,40,50,60,70,80,90])
self.axes.set_yticks([0, 5, 10, 15, 20])
self.axes.set_yticklabels([0, 5, 10, 15, 20])
self.axes.set_ylim(bottom=0)
'''
all_labels=[]
all_colors=[]
all_markers=[]
all_alpha=[]
for i in range(len(self._df)):
target = self._df.at[i, 'Label']
color = self._df.at[i, 'Color']
marker = self._df.at[i, 'Marker']
alpha = self._df.at[i, 'Alpha']
if target not in self.SVM_labels:
self.SVM_labels.append(target)
if target not in all_labels:
all_labels.append(target)
all_colors.append(color)
all_markers.append(marker)
all_alpha.append(alpha)
self.whole_labels = all_labels
PointLabels = []
PointColors = []
x = []
y = []
title = 'K2O-SiO2diagram'
self.setWindowTitle(title)
self.textbox.setText(self.reference)
k_1=(2.9-1.2)/(68-48)
y_1= 1.2+ (85-48)*k_1
y_0= 1.2+ (45-48)*k_1
self.DrawLine([(45, y_0),(48, 1.2), (68,2.9),(85,y_1)])
k_2=(1.2-0.3)/(68-48)
y_2= 0.3+ (85-48)*k_2
y_3= 0.3+ (45-48)*k_2
self.DrawLine([(45, y_3),(48, 0.3), (68, 1.2),(85,y_2)])
Labels=['High K','Medium K','Low K']
Locations=[(80,5),(80,3),(80,1)]
X_offset, Y_offset=0,0
for k in range(len(Labels)):
self.axes.annotate(Labels[k], Locations[k], xycoords='data', xytext=(X_offset, Y_offset),
textcoords='offset points',
fontsize=9, color='grey', alpha=0.8)
self.Check()
if self.OutPutCheck==True:
pass
if (self._changed):
df = self.CleanDataFile(self._df)
for i in range(len(df)):
TmpLabel = ''
if (df.at[i, 'Label'] in PointLabels or df.at[i, 'Label'] == ''):
TmpLabel = ''
else:
PointLabels.append(df.at[i, 'Label'])
TmpLabel = df.at[i, 'Label']
TmpColor = ''
if (df.at[i, 'Color'] in PointColors or df.at[i, 'Color'] == ''):
TmpColor = ''
else:
PointColors.append(df.at[i, 'Color'])
TmpColor = df.at[i, 'Color']
x.append(df.at[i, 'SiO2'])
y.append(df.at[i, 'K2O'])
Size = df.at[i, 'Size']
Color = df.at[i, 'Color']
# print(Color, df.at[i, 'SiO2'], (df.at[i, 'Na2O'] + df.at[i, 'K2O']))
Alpha = df.at[i, 'Alpha']
Marker = df.at[i, 'Marker']
Label = df.at[i, 'Label']
xtest=df.at[i, 'SiO2']
ytest=df.at[i, 'K2O']
for j in self.ItemNames:
if self.SelectDic[j].contains_point([xtest,ytest]):
self.LabelList.append(Label)
self.TypeList.append(j)
break
pass
self.axes.scatter(df.at[i, 'SiO2'], df.at[i, 'K2O'], marker=df.at[i, 'Marker'],
s=df.at[i, 'Size'], color=df.at[i, 'Color'], alpha=df.at[i, 'Alpha'], label=TmpLabel)
XtoFit = {}
YtoFit = {}
SVM_X=[]
SVM_Y=[]
for i in PointLabels:
XtoFit[i]=[]
YtoFit[i]=[]
for i in range(len(df)):
Alpha = df.at[i, 'Alpha']
Marker = df.at[i, 'Marker']
Label = df.at[i, 'Label']
xtest=df.at[i, 'SiO2']
ytest=df.at[i, 'K2O']
XtoFit[Label].append(xtest)
YtoFit[Label].append(ytest)
SVM_X.append(xtest)
SVM_Y.append(ytest)
if (self.shape_cb.isChecked()):
for i in PointLabels:
if XtoFit[i] != YtoFit[i]:
xmin, xmax = min(XtoFit[i]), max(XtoFit[i])
ymin, ymax = min(YtoFit[i]), max(YtoFit[i])
DensityColorMap = 'Greys'
DensityAlpha = 0.1
DensityLineColor = PointColors[PointLabels.index(i)]
DensityLineAlpha = 0.3
# Peform the kernel density estimate
xx, yy = np.mgrid[xmin:xmax:200j, ymin:ymax:200j]
# print(self.ShapeGroups)
# command='''xx, yy = np.mgrid[xmin:xmax:'''+str(self.ShapeGroups)+ '''j, ymin:ymax:''' +str(self.ShapeGroups)+'''j]'''
# exec(command)
# print(xx, yy)
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([XtoFit[i], YtoFit[i]])
kernelstatus = True
try:
st.gaussian_kde(values)
except Exception as e:
self.ErrorEvent(text=repr(e))
kernelstatus = False
if kernelstatus == True:
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
# Contourf plot
cfset = self.axes.contourf(xx, yy, f, cmap=DensityColorMap, alpha=DensityAlpha)
## Or kernel density estimate plot instead of the contourf plot
# self.axes.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
# Contour plot
cset = self.axes.contour(xx, yy, f, colors=DensityLineColor, alpha=DensityLineAlpha)
# Label plot
#self.axes.clabel(cset, inline=1, fontsize=10)
if (len(self.data_to_test) > 0):
contained = True
missing = 'Miss setting infor:'
for i in ['Label', 'Color', 'Marker', 'Alpha']:
if i not in self.data_to_test.columns.values.tolist():
contained = False
missing = missing + '\n' + i
if contained == True:
for i in self.data_to_test.columns.values.tolist():
if i not in self._df.columns.values.tolist():
self.data_to_test = self.data_to_test.drop(columns=i)
# print(self.data_to_test)
test_labels = []
test_colors = []
test_markers = []
test_alpha = []
for i in range(len(self.data_to_test)):
# print(self.data_to_test.at[i, 'Label'])
target = self.data_to_test.at[i, 'Label']
color = self.data_to_test.at[i, 'Color']
marker = self.data_to_test.at[i, 'Marker']
alpha = self.data_to_test.at[i, 'Alpha']
if target not in test_labels and target not in all_labels:
test_labels.append(target)
test_colors.append(color)
test_markers.append(marker)
test_alpha.append(alpha)
self.whole_labels = self.whole_labels + test_labels
self.load_settings_backup = self.data_to_test
Load_ItemsToTest = ['Label', 'Number', 'Tag', 'Name', 'Author', 'DataType', 'Marker', 'Color',
'Size',
'Alpha',
'Style', 'Width']
for i in self.data_to_test.columns.values.tolist():
if i not in Load_ItemsToTest:
self.load_settings_backup = self.load_settings_backup.drop(i, 1)
print(self.load_settings_backup, self.data_to_test)
print(self.load_settings_backup.shape, self.data_to_test.shape)
try:
for i in range(len(self.data_to_test)):
target = self.data_to_test.at[i, 'Label']
if target not in all_labels:
all_labels.append(target)
tmp_label = self.data_to_test.at[i, 'Label']
else:
tmp_label=''
x_load_test = self.data_to_test.at[i, 'SiO2']
y_load_test = self.data_to_test.at[i, 'K2O']
for j in self.ItemNames:
if self.SelectDic[j].contains_point([x_load_test, y_load_test]):
self.LabelList.append(self.data_to_test.at[i, 'Label'])
self.TypeList.append(j)
break
pass
if (self.show_load_data_cb.isChecked()):
self.axes.scatter(self.data_to_test.at[i, 'SiO2'],self.data_to_test.at[i, 'K2O'],
marker=self.data_to_test.at[i, 'Marker'],
s=self.data_to_test.at[i, 'Size'],
color=self.data_to_test.at[i, 'Color'],
alpha=self.data_to_test.at[i, 'Alpha'],
label=tmp_label)
except Exception as e:
self.ErrorEvent(text=repr(e))
if (self.legend_cb.isChecked()):
self.axes.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0, prop=fontprop)
self.All_X=SVM_X
self.All_Y=SVM_Y
if (self.hyperplane_cb.isChecked()):
clf = svm.SVC(C=1.0, kernel='linear',probability= True)
svm_x= SVM_X
svm_y= SVM_Y
print(len(svm_x),len(svm_y),len(df.index))
xx, yy = np.meshgrid(np.arange(min(svm_x), max(svm_x), np.ptp(svm_x) / 500),
np.arange(min(svm_y), max(svm_y), np.ptp(svm_y) / 500))
le = LabelEncoder()
le.fit(self._df.Label)
class_label = le.transform(self._df.Label)
svm_train= pd.concat([pd.DataFrame(svm_x),pd.DataFrame(svm_y)], axis=1)
svm_train=svm_train.values
clf.fit(svm_train,class_label)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
self.axes.contourf(xx, yy, Z, cmap='hot', alpha=0.2)
if (self.show_data_index_cb.isChecked()):
if 'Index' in self._df.columns.values:
for i in range(len(self._df)):
self.axes.annotate(self._df.at[i, 'Index'],
xy=(self.All_X[i],
self.All_Y[i]),
color=self._df.at[i, 'Color'],
alpha=self._df.at[i, 'Alpha'])
else:
for i in range(len(self._df)):
self.axes.annotate('No' + str(i + 1),
xy=(self.All_X[i],
self.All_Y[i]),
color=self._df.at[i, 'Color'],
alpha=self._df.at[i, 'Alpha'])
self.canvas.draw()
self.OutPutTitle='K2OSiO2'
self.OutPutData = pd.DataFrame(
{'Label': self.LabelList,
'RockType': self.TypeList
})
self.OutPutFig=self.fig |
def forward(self, sequence):
    """Propagate a batch of sequences through the network, stateless.

    Only the final sequence position (and, for a bidirectional RNN, the
    first position of the backward direction) is fed to the linear head.
    """
    data = self.input_block(sequence)

    # Recurrent stack: each layer returns (output, state); state is discarded.
    for layer_idx, rnn_layer in enumerate(self.rnn_layers):
        data, _ = rnn_layer(data)
        if self.rnn_dropout_layers:
            data = self.rnn_dropout_layers[layer_idx](data)

    # Keep only the last element of the sequence (plus the first element of
    # the backward half when the RNN is bidirectional).
    if self.bidirectional:
        hidden = self.rnn_layers_sizes[-1]
        forward_last = data[:, -1, :hidden]
        backward_last = data[:, 0, hidden:]
        data = torch.cat([forward_last, backward_last], dim=1)
    else:
        data = data[:, -1]

    # Fully-connected head with ReLU activations and optional dropout.
    for layer_idx, _ in enumerate(self.linear_layers_sizes):
        data = F.relu(self.linear_layers[layer_idx](data))
        if self.linear_dropout_layers:
            data = self.linear_dropout_layers[layer_idx](data)

    data = self.output_layer(data)
    return self.output_activation(data)
return self.output_activation(data) | Forward propagate batch of sequences through the network, without accounting for the state | Below is the the instruction that describes the task:
### Input:
Forward propagate batch of sequences through the network, without accounting for the state
### Response:
def forward(self, sequence):
""" Forward propagate batch of sequences through the network, without accounting for the state """
data = self.input_block(sequence)
for idx in range(len(self.rnn_layers)):
data, _ = self.rnn_layers[idx](data)
if self.rnn_dropout_layers:
data = self.rnn_dropout_layers[idx](data)
# We are interested only in the last element of the sequence
if self.bidirectional:
last_hidden_size = self.rnn_layers_sizes[-1]
data = torch.cat([data[:, -1, :last_hidden_size], data[:, 0, last_hidden_size:]], dim=1)
else:
data = data[:, -1]
for idx in range(len(self.linear_layers_sizes)):
data = F.relu(self.linear_layers[idx](data))
if self.linear_dropout_layers:
data = self.linear_dropout_layers[idx](data)
data = self.output_layer(data)
return self.output_activation(data) |
def check_name_availability_local(
        self, location, name, type, custom_headers=None, raw=False, **operation_config):
    """Verify that a resource name is globally unique.

    :param location: The location in which uniqueness will be verified.
    :type location: str
    :param name: Resource Name To Verify
    :type name: str
    :param type: Fully qualified resource type which includes provider
     namespace
    :type type: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: CheckNameAvailabilityResponse or ClientRawResponse if
     raw=true
    :rtype: ~azure.mgmt.mixedreality.models.CheckNameAvailabilityResponse
     or ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>`
    """
    availability_request = models.CheckNameAvailabilityRequest(name=name, type=type)

    # Build the request URL from the operation's URL template.
    url = self.check_name_availability_local.metadata['url']
    url_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'location': self._serialize.url("location", location, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$')
    }
    url = self._client.format_url(url, **url_arguments)

    # Query string.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Headers: JSON in and out, plus optional client-request-id, caller
    # overrides, and accept-language.
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json; charset=utf-8',
    }
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialized request body.
    body = self._serialize.body(availability_request, 'CheckNameAvailabilityRequest')

    # Issue the POST and validate the status code.
    http_request = self._client.post(url, query, headers, body)
    http_response = self._client.send(http_request, stream=False, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, http_response)

    result = None
    if http_response.status_code == 200:
        result = self._deserialize('CheckNameAvailabilityResponse', http_response)

    if raw:
        return ClientRawResponse(result, http_response)
    return result
return deserialized | Check Name Availability for global uniqueness.
:param location: The location in which uniqueness will be verified.
:type location: str
:param name: Resource Name To Verify
:type name: str
:param type: Fully qualified resource type which includes provider
namespace
:type type: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameAvailabilityResponse or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.mixedreality.models.CheckNameAvailabilityResponse
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>` | Below is the the instruction that describes the task:
### Input:
Check Name Availability for global uniqueness.
:param location: The location in which uniqueness will be verified.
:type location: str
:param name: Resource Name To Verify
:type name: str
:param type: Fully qualified resource type which includes provider
namespace
:type type: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameAvailabilityResponse or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.mixedreality.models.CheckNameAvailabilityResponse
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>`
### Response:
def check_name_availability_local(
self, location, name, type, custom_headers=None, raw=False, **operation_config):
"""Check Name Availability for global uniqueness.
:param location: The location in which uniqueness will be verified.
:type location: str
:param name: Resource Name To Verify
:type name: str
:param type: Fully qualified resource type which includes provider
namespace
:type type: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: CheckNameAvailabilityResponse or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.mixedreality.models.CheckNameAvailabilityResponse
or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.mixedreality.models.ErrorResponseException>`
"""
check_name_availability = models.CheckNameAvailabilityRequest(name=name, type=type)
# Construct URL
url = self.check_name_availability_local.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(check_name_availability, 'CheckNameAvailabilityRequest')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized |
async def get_match(self, m_id, force_update=False) -> Match:
    """ get a single match by id

    |methcoro|

    Args:
        m_id: match id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Match

    Raises:
        APIException

    """
    match = self._find_match(m_id)
    if match is None or force_update:
        # Refresh the local cache from the Challonge API, then look again.
        await self.get_matches()
        match = self._find_match(m_id)
    return match
return found_m | get a single match by id
|methcoro|
Args:
m_id: match id
force_update (default=False): True to force an update to the Challonge API
Returns:
Match
Raises:
APIException | Below is the instruction that describes the task:
### Input:
get a single match by id
|methcoro|
Args:
m_id: match id
force_update (default=False): True to force an update to the Challonge API
Returns:
Match
Raises:
APIException
### Response:
async def get_match(self, m_id, force_update=False) -> Match:
""" get a single match by id
|methcoro|
Args:
m_id: match id
force_update (default=False): True to force an update to the Challonge API
Returns:
Match
Raises:
APIException
"""
found_m = self._find_match(m_id)
if force_update or found_m is None:
await self.get_matches()
found_m = self._find_match(m_id)
return found_m |
def _modem_sm(self):
    """Handle modem response state machine.

    Loops while the serial port object exists, reading modem responses
    line by line.  Updates the caller-ID fields (self.cid_name,
    self.cid_number, self.cid_time) and the overall modem state, and
    notifies listeners through self.incomingcallnotificationfunc.
    """
    # Imported lazily inside the thread body rather than at module scope.
    import datetime
    # Long timeout while idle; switched to the shorter ring timeout while
    # call activity is in progress.
    read_timeout = READ_IDLE_TIMEOUT
    while self.ser:
        try:
            resp = self.read(read_timeout)
        except (serial.SerialException, SystemExit, TypeError):
            _LOGGER.debug('Unable to read from port %s', self.port)
            break
        # An empty read (timeout) while not idle means call activity has
        # ceased: drop back to the idle state and the idle timeout.
        if self.state != self.STATE_IDLE and len(resp) == 0:
            read_timeout = READ_IDLE_TIMEOUT
            self.set_state(self.STATE_IDLE)
            self.incomingcallnotificationfunc(self.state)
            continue
        resp = resp.decode()
        resp = resp.strip('\r\n')
        # While a command is still awaiting its final result code, collect
        # every intermediate line for the command issuer.
        if self.cmd_response == '':
            self.cmd_responselines.append(resp)
        _LOGGER.debug('mdm: %s', resp)
        # Final result codes terminate the pending command.
        if resp in ['OK', 'ERROR']:
            self.cmd_response = resp
            continue
        # RING marks an incoming call; reset caller-ID data on the first
        # ring of a new call only (state still idle).
        if resp in ['RING']:
            if self.state == self.STATE_IDLE:
                self.cid_name = ''
                self.cid_number = ''
                self.cid_time = datetime.datetime.now()
                self.set_state(self.STATE_RING)
                self.incomingcallnotificationfunc(self.state)
            read_timeout = READ_RING_TIMOUT
            continue
        # Remaining lines of interest look like 'FIELD = value' caller-ID
        # records; skip short lines and lines without '='.
        if len(resp) <= 4 or resp.find('=') == -1:
            continue
        read_timeout = READ_RING_TIMOUT
        cid_field, cid_data = resp.split('=')
        cid_field = cid_field.strip()
        cid_data = cid_data.strip()
        if cid_field in ['DATE']:
            # Timestamp from the local clock, not the modem-reported date.
            self.cid_time = datetime.datetime.now()
            continue
        if cid_field in ['NMBR']:
            self.cid_number = cid_data
            continue
        if cid_field in ['NAME']:
            # NAME triggers publication of the collected caller-ID record.
            self.cid_name = cid_data
            self.set_state(self.STATE_CALLERID)
            self.incomingcallnotificationfunc(self.state)
            _LOGGER.debug('CID: %s %s %s',
                          self.cid_time.strftime("%I:%M %p"),
                          self.cid_name,
                          self.cid_number)
            try:
                # presumably re-arms caller-ID reporting on the modem -- TODO confirm
                self.write(self.cmd_callerid)
            except serial.SerialException:
                _LOGGER.error('Unable to write to port %s', self.port)
                break
            continue
        # Unrecognized response line: flag the modem as failed.
        self.set_state(self.STATE_FAILED)
    _LOGGER.debug('Exiting modem state machine')
return | Handle modem response state machine. | Below is the the instruction that describes the task:
### Input:
Handle modem response state machine.
### Response:
def _modem_sm(self):
"""Handle modem response state machine."""
import datetime
read_timeout = READ_IDLE_TIMEOUT
while self.ser:
try:
resp = self.read(read_timeout)
except (serial.SerialException, SystemExit, TypeError):
_LOGGER.debug('Unable to read from port %s', self.port)
break
if self.state != self.STATE_IDLE and len(resp) == 0:
read_timeout = READ_IDLE_TIMEOUT
self.set_state(self.STATE_IDLE)
self.incomingcallnotificationfunc(self.state)
continue
resp = resp.decode()
resp = resp.strip('\r\n')
if self.cmd_response == '':
self.cmd_responselines.append(resp)
_LOGGER.debug('mdm: %s', resp)
if resp in ['OK', 'ERROR']:
self.cmd_response = resp
continue
if resp in ['RING']:
if self.state == self.STATE_IDLE:
self.cid_name = ''
self.cid_number = ''
self.cid_time = datetime.datetime.now()
self.set_state(self.STATE_RING)
self.incomingcallnotificationfunc(self.state)
read_timeout = READ_RING_TIMOUT
continue
if len(resp) <= 4 or resp.find('=') == -1:
continue
read_timeout = READ_RING_TIMOUT
cid_field, cid_data = resp.split('=')
cid_field = cid_field.strip()
cid_data = cid_data.strip()
if cid_field in ['DATE']:
self.cid_time = datetime.datetime.now()
continue
if cid_field in ['NMBR']:
self.cid_number = cid_data
continue
if cid_field in ['NAME']:
self.cid_name = cid_data
self.set_state(self.STATE_CALLERID)
self.incomingcallnotificationfunc(self.state)
_LOGGER.debug('CID: %s %s %s',
self.cid_time.strftime("%I:%M %p"),
self.cid_name,
self.cid_number)
try:
self.write(self.cmd_callerid)
except serial.SerialException:
_LOGGER.error('Unable to write to port %s', self.port)
break
continue
self.set_state(self.STATE_FAILED)
_LOGGER.debug('Exiting modem state machine')
return |
def do_requests_to_getall(self, uri, requested_count):
    """Collect every resource for ``get_all`` by following pagination.

    Issues a GET for the starting URI, then keeps requesting each
    pagination URI the server reports until no further page is returned,
    accumulating the members of every page.
    """
    collected = []
    next_uri = uri
    while next_uri:
        logger.debug('Making HTTP request to get all resources. Uri: {0}'.format(next_uri))
        response = self._connection.get(next_uri)
        page_members = self.get_members(response)
        collected.extend(page_members)
        logger.debug("Response getAll: nextPageUri = {0}, members list length: {1}".format(next_uri, str(len(page_members))))
        next_uri = self.get_next_page(response, collected, requested_count)
    logger.debug('Total # of members found = {0}'.format(str(len(collected))))
    return collected
return items | Helps to make http request for get_all method.
Note:
This method will be checking for the pagination URI in the response
and make request to pagination URI to get all the resources. | Below is the instruction that describes the task:
### Input:
Helps to make http request for get_all method.
Note:
This method will be checking for the pagination URI in the response
and make request to pagination URI to get all the resources.
### Response:
def do_requests_to_getall(self, uri, requested_count):
"""Helps to make http request for get_all method.
Note:
This method will be checking for the pagination URI in the response
and make request to pagination URI to get all the resources.
"""
items = []
while uri:
logger.debug('Making HTTP request to get all resources. Uri: {0}'.format(uri))
response = self._connection.get(uri)
members = self.get_members(response)
items += members
logger.debug("Response getAll: nextPageUri = {0}, members list length: {1}".format(uri, str(len(members))))
uri = self.get_next_page(response, items, requested_count)
logger.debug('Total # of members found = {0}'.format(str(len(items))))
return items |
def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
                        profile=None):
    '''
    Check to see if an RDS option group exists.

    CLI example::

        salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        described = conn.describe_option_groups(OptionGroupName=name)
    except ClientError as err:
        return {'error': __utils__['boto3.get_error'](err)}
    return {'exists': bool(described)}
return {'error': __utils__['boto3.get_error'](e)} | Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1 | Below is the the instruction that describes the task:
### Input:
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
### Response:
def option_group_exists(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Check to see if an RDS option group exists.
CLI example::
salt myminion boto_rds.option_group_exists myoptiongr region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_option_groups(OptionGroupName=name)
return {'exists': bool(rds)}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)} |
def get_comment_init(request, obj):
    """Build the initial-values dict for a comment model/form.

    :param request: the incoming HTTP request
    :param obj: the object the comment is being attached to
    :return: dict of initial values; username and email are prefilled
        when the requesting user is authenticated
    """
    init = {'obj': obj}
    if request.user.is_authenticated():
        init['username'] = request.user.username
        init['email'] = request.user.email
    return init
return init | Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return: | Below is the instruction that describes the task:
### Input:
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
### Response:
def get_comment_init(request, obj):
"""
Возвращает словарь для инициализации начальных значений модели комментария
:param request: запрос
:param obj: объект к которому добавляется комментарий
:return:
"""
if request.user.is_authenticated():
init = {'obj': obj, 'username': request.user.username, 'email': request.user.email}
else:
init = {'obj': obj}
return init |
def pairwise_intersection(boxlist1, boxlist2):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxlist1: Nx4 floatbox
        boxlist2: Mx4

    Returns:
        a tensor with shape [N, M] representing pairwise intersections
    """
    xmin1, ymin1, xmax1, ymax1 = tf.split(boxlist1, 4, axis=1)
    xmin2, ymin2, xmax2, ymax2 = tf.split(boxlist2, 4, axis=1)
    # Overlap along y; disjoint pairs clip to zero.
    heights = tf.maximum(
        0.0, tf.minimum(ymax1, tf.transpose(ymax2)) - tf.maximum(ymin1, tf.transpose(ymin2)))
    # Overlap along x, clipped the same way.
    widths = tf.maximum(
        0.0, tf.minimum(xmax1, tf.transpose(xmax2)) - tf.maximum(xmin1, tf.transpose(xmin2)))
    return heights * widths
return intersect_heights * intersect_widths | Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections | Below is the instruction that describes the task:
### Input:
Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections
### Response:
def pairwise_intersection(boxlist1, boxlist2):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: Nx4 floatbox
boxlist2: Mx4
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
x_min1, y_min1, x_max1, y_max1 = tf.split(boxlist1, 4, axis=1)
x_min2, y_min2, x_max2, y_max2 = tf.split(boxlist2, 4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths |
def get_buys(self, account_id, **params):
    """List an account's buys.

    https://developers.coinbase.com/api/v2#list-buys
    """
    return self._make_api_object(
        self._get('v2', 'accounts', account_id, 'buys', params=params), Buy)
return self._make_api_object(response, Buy) | https://developers.coinbase.com/api/v2#list-buys | Below is the the instruction that describes the task:
### Input:
https://developers.coinbase.com/api/v2#list-buys
### Response:
def get_buys(self, account_id, **params):
"""https://developers.coinbase.com/api/v2#list-buys"""
response = self._get('v2', 'accounts', account_id, 'buys', params=params)
return self._make_api_object(response, Buy) |
def validate(self, value, model_instance):
    """
    Use custom validation for when using a multiple countries field.
    """
    # Single-country fields use the stock Field validation.
    if not self.multiple:
        return super(CountryField, self).validate(value, model_instance)
    # Non-editable fields are never validated.
    if not self.editable:
        return
    if value:
        allowed = [code for code, label in self.choices]
        for entry in value:
            if entry not in allowed:
                raise exceptions.ValidationError(
                    self.error_messages["invalid_choice"],
                    code="invalid_choice",
                    params={"value": entry},
                )
    if not self.blank and value in self.empty_values:
        raise exceptions.ValidationError(self.error_messages["blank"], code="blank")
raise exceptions.ValidationError(self.error_messages["blank"], code="blank") | Use custom validation for when using a multiple countries field. | Below is the the instruction that describes the task:
### Input:
Use custom validation for when using a multiple countries field.
### Response:
def validate(self, value, model_instance):
"""
Use custom validation for when using a multiple countries field.
"""
if not self.multiple:
return super(CountryField, self).validate(value, model_instance)
if not self.editable:
# Skip validation for non-editable fields.
return
if value:
choices = [option_key for option_key, option_value in self.choices]
for single_value in value:
if single_value not in choices:
raise exceptions.ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": single_value},
)
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages["blank"], code="blank") |
def encode_network(root):
    """Yield ref-containing obj table entries from object network.

    Walks the object graph starting at *root*, assigning table indices to
    Container/FixedObject values and rewriting nested values as Ref()
    placeholders.  Returns the list of object-table entries (root first).
    """
    # Objects given a _tmp_index, so it can be stripped again at the end.
    orig_objects = []
    # The object table under construction; Ref indices are 1-based.
    objects = []
    def get_ref(value, objects=objects):
        """Returns the index of the given object in the object table,
        adding it if needed.
        """
        value = PythonicAdapter(Pass)._encode(value, None)
        # Convert strs to FixedObjects here to make sure they get encoded
        # correctly
        if isinstance(value, (Container, FixedObject)):
            if getattr(value, '_tmp_index', None):
                index = value._tmp_index
            else:
                objects.append(value)
                index = len(objects)
                value._tmp_index = index
                orig_objects.append(value) # save the object so we can
                                           # strip the _tmp_indexes later
            return Ref(index)
        else:
            return value # Inline value
    def fix_fields(obj):
        # Rewrite one table entry so every nested value is either an inline
        # value or a Ref into the object table.
        obj = PythonicAdapter(Pass)._encode(obj, None)
        # Convert strs to FixedObjects here to make sure they get encoded
        # correctly
        if isinstance(obj, Container):
            obj.update((k, get_ref(v)) for (k, v) in obj.items()
                       if k != 'class_name')
            fixed_obj = obj
        elif isinstance(obj, Dictionary):
            fixed_obj = obj.__class__(dict(
                (get_ref(field), get_ref(value))
                for (field, value) in obj.value.items()
            ))
        elif isinstance(obj, dict):
            fixed_obj = dict(
                (get_ref(field), get_ref(value))
                for (field, value) in obj.items()
            )
        elif isinstance(obj, list):
            fixed_obj = [get_ref(field) for field in obj]
        elif isinstance(obj, Form):
            fixed_obj = obj.__class__(**dict(
                (field, get_ref(value))
                for (field, value) in obj.value.items()
            ))
        elif isinstance(obj, ContainsRefs):
            fixed_obj = obj.__class__([get_ref(field)
                                       for field in obj.value])
        else:
            # Not a container type: already an inline value, leave as-is.
            return obj
        fixed_obj._made_from = obj
        return fixed_obj
    root = PythonicAdapter(Pass)._encode(root, None)
    i = 0
    # NOTE(review): this rebinds the local name `objects`, but get_ref
    # captured the ORIGINAL empty list as its default argument, so new
    # entries from get_ref go to that first list, not this one -- confirm
    # this is intentional before relying on multi-object tables here.
    objects = [root]
    root._tmp_index = 1
    while i < len(objects):
        objects[i] = fix_fields(objects[i])
        i += 1
    for obj in orig_objects:
        obj._tmp_index = None
    # Strip indexes off objects in case we save again later
return objects | Yield ref-containing obj table entries from object network | Below is the the instruction that describes the task:
### Input:
Yield ref-containing obj table entries from object network
### Response:
def encode_network(root):
"""Yield ref-containing obj table entries from object network"""
orig_objects = []
objects = []
def get_ref(value, objects=objects):
"""Returns the index of the given object in the object table,
adding it if needed.
"""
value = PythonicAdapter(Pass)._encode(value, None)
# Convert strs to FixedObjects here to make sure they get encoded
# correctly
if isinstance(value, (Container, FixedObject)):
if getattr(value, '_tmp_index', None):
index = value._tmp_index
else:
objects.append(value)
index = len(objects)
value._tmp_index = index
orig_objects.append(value) # save the object so we can
# strip the _tmp_indexes later
return Ref(index)
else:
return value # Inline value
def fix_fields(obj):
obj = PythonicAdapter(Pass)._encode(obj, None)
# Convert strs to FixedObjects here to make sure they get encoded
# correctly
if isinstance(obj, Container):
obj.update((k, get_ref(v)) for (k, v) in obj.items()
if k != 'class_name')
fixed_obj = obj
elif isinstance(obj, Dictionary):
fixed_obj = obj.__class__(dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, dict):
fixed_obj = dict(
(get_ref(field), get_ref(value))
for (field, value) in obj.items()
)
elif isinstance(obj, list):
fixed_obj = [get_ref(field) for field in obj]
elif isinstance(obj, Form):
fixed_obj = obj.__class__(**dict(
(field, get_ref(value))
for (field, value) in obj.value.items()
))
elif isinstance(obj, ContainsRefs):
fixed_obj = obj.__class__([get_ref(field)
for field in obj.value])
else:
return obj
fixed_obj._made_from = obj
return fixed_obj
root = PythonicAdapter(Pass)._encode(root, None)
i = 0
objects = [root]
root._tmp_index = 1
while i < len(objects):
objects[i] = fix_fields(objects[i])
i += 1
for obj in orig_objects:
obj._tmp_index = None
# Strip indexes off objects in case we save again later
return objects |
def update_courses(self, event, account_id, course_ids):
    """
    Update courses.

    Update multiple courses in an account. Operates asynchronously; use the
    {api:ProgressController#show progress endpoint} to query the status of
    an operation.

    ``event`` is the action to take on each course and must be one of
    'offer', 'conclude', 'delete', or 'undelete':

    * 'offer' makes a course visible to students ("publish" on the web site).
    * 'conclude' prevents future enrollments and makes a course read-only
      for all participants; the course still appears in prior-enrollment
      lists.
    * 'delete' completely removes the course from the web site (including
      course menus and prior-enrollment lists); all enrollments are deleted
      and content may be physically deleted later.
    * 'undelete' attempts to recover a deleted course (recovery is not
      guaranteed); the recovered course is unpublished and deleted
      enrollments are not recovered.

    ``course_ids`` lists the courses to update; at most 500 per call.
    """
    path = {"account_id": account_id}
    params = {}

    # Reject unknown actions before issuing the request.
    self._validate_enum(event, ["offer", "conclude", "delete", "undelete"])
    data = {"course_ids": course_ids, "event": event}

    self.logger.debug("PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, single_item=True)
return self.generic_request("PUT", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, single_item=True) | Update courses.
Update multiple courses in an account. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
The action to take on each course. Must be one of 'offer', 'conclude', 'delete', or 'undelete'.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. (Recovery is not guaranteed; please conclude
rather than delete a course if there is any possibility the course will be used again.) The recovered course
will be unpublished. Deleted enrollments will not be recovered. | Below is the the instruction that describes the task:
### Input:
Update courses.
Update multiple courses in an account. Operates asynchronously; use the {api:ProgressController#show progress endpoint}
to query the status of an operation.
The action to take on each course. Must be one of 'offer', 'conclude', 'delete', or 'undelete'.
* 'offer' makes a course visible to students. This action is also called "publish" on the web site.
* 'conclude' prevents future enrollments and makes a course read-only for all participants. The course still appears
in prior-enrollment lists.
* 'delete' completely removes the course from the web site (including course menus and prior-enrollment lists).
All enrollments are deleted. Course content may be physically deleted at a future date.
* 'undelete' attempts to recover a course that has been deleted. (Recovery is not guaranteed; please conclude
rather than delete a course if there is any possibility the course will be used again.) The recovered course
will be unpublished. Deleted enrollments will not be recovered.
### Response:
def update_courses(self, event, account_id, course_ids):
    """
    Update courses.
    Apply one action to many courses of an account in a single call.  The
    operation runs asynchronously; poll the Progress API to track it.
    ``event`` selects the action and must be one of:
    * ``offer``    -- publish the course (visible to students).
    * ``conclude`` -- freeze the course read-only; no new enrollments.
    * ``delete``   -- remove the course and all of its enrollments.
    * ``undelete`` -- best-effort recovery of a deleted course; it comes
      back unpublished and deleted enrollments are not restored.
    """
    # Assemble the request pieces: URL path fields, query args, form body.
    path = {"account_id": account_id}
    params = {}
    # At most 500 course ids may be updated in one call.
    data = {"course_ids": course_ids}
    # Reject unknown actions before any network traffic happens.
    self._validate_enum(event, ["offer", "conclude", "delete", "undelete"])
    data["event"] = event
    self.logger.debug("PUT /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, single_item=True)
def __getFormat(self, format):
    """
    Defaults to JSON [ps: 'RDF' is the native rdflib representation]
    """
    # self.sparql is presumably a SPARQLWrapper instance (setReturnFormat
    # plus the XML/RDF/JSON constants) -- TODO confirm against the class.
    # Any value other than "XML" or "RDF" falls through to JSON.
    if format == "XML":
        self.sparql.setReturnFormat(XML)
        self.format = "XML"
    elif format == "RDF":
        self.sparql.setReturnFormat(RDF)
        self.format = "RDF"
    else:
        self.sparql.setReturnFormat(JSON)
        self.format = "JSON" | Defaults to JSON [ps: 'RDF' is the native rdflib representation] | Below is the instruction that describes the task:
### Input:
Defaults to JSON [ps: 'RDF' is the native rdflib representation]
### Response:
def __getFormat(self, format):
    """
    Select the return format; defaults to JSON [ps: 'RDF' is the native
    rdflib representation]
    """
    # Resolve the requested name to a format constant and its label;
    # anything unrecognised falls back to JSON.
    if format == "XML":
        chosen, label = XML, "XML"
    elif format == "RDF":
        chosen, label = RDF, "RDF"
    else:
        chosen, label = JSON, "JSON"
    self.sparql.setReturnFormat(chosen)
    self.format = label
def for_me(conditions, myself):
    """ Am I among the intended audiences """
    # `conditions` looks like a parsed SAML <Conditions> element -- TODO
    # confirm.  No AudienceRestriction at all means anyone may consume it.
    if not conditions.audience_restriction: # No audience restriction
        return True
    for restriction in conditions.audience_restriction:
        if not restriction.audience:
            continue
        for audience in restriction.audience:
            # Exact match (after stripping whitespace) decides acceptance.
            if audience.text.strip() == myself:
                return True
            else:
                # print("Not for me: %s != %s" % (audience.text.strip(),
                # myself))
                pass
    return False | Am I among the intended audiences | Below is the instruction that describes the task:
### Input:
Am I among the intended audiences
### Response:
def for_me(conditions, myself):
    """Return True when *myself* is among the intended audiences."""
    restrictions = conditions.audience_restriction
    if not restrictions:
        # No audience restriction at all: addressed to everyone.
        return True
    for restriction in restrictions:
        audiences = restriction.audience
        if not audiences:
            continue
        # Accept as soon as any listed audience matches exactly.
        if any(entry.text.strip() == myself for entry in audiences):
            return True
    return False
def centroids(self, instrument, min_abundance=1e-4, points_per_fwhm=25):
    """
    Estimates centroided peaks for a given instrument model.
    :param instrument: instrument model
    :param min_abundance: minimum abundance for including a peak
    :param points_per_fwhm: grid density used for envelope calculation
    :returns: peaks visible with the instrument used
    :rtype: TheoreticalSpectrum
    """
    # Guard against operating on a spectrum whose native handle is gone.
    assert self.ptr != ffi.NULL
    centroids = ims.spectrum_envelope_centroids(self.ptr, instrument.ptr,
                                                min_abundance, points_per_fwhm)
return _new_spectrum(CentroidedSpectrum, centroids) | Estimates centroided peaks for a given instrument model.
:param instrument: instrument model
:param min_abundance: minimum abundance for including a peak
:param points_per_fwhm: grid density used for envelope calculation
:returns: peaks visible with the instrument used
:rtype: TheoreticalSpectrum | Below is the the instruction that describes the task:
### Input:
Estimates centroided peaks for a given instrument model.
:param instrument: instrument model
:param min_abundance: minimum abundance for including a peak
:param points_per_fwhm: grid density used for envelope calculation
:returns: peaks visible with the instrument used
:rtype: TheoreticalSpectrum
### Response:
def centroids(self, instrument, min_abundance=1e-4, points_per_fwhm=25):
    """
    Estimate the centroided peaks seen by a given instrument model.
    :param instrument: instrument model
    :param min_abundance: minimum abundance for including a peak
    :param points_per_fwhm: grid density used for envelope calculation
    :returns: peaks visible with the instrument used
    :rtype: TheoreticalSpectrum
    """
    # A NULL handle would mean the native spectrum was already released.
    assert self.ptr != ffi.NULL
    raw = ims.spectrum_envelope_centroids(
        self.ptr, instrument.ptr, min_abundance, points_per_fwhm)
    return _new_spectrum(CentroidedSpectrum, raw)
def create_user(self, account_id, pseudonym_unique_id, communication_channel_address=None, communication_channel_confirmation_url=None, communication_channel_skip_confirmation=None, communication_channel_type=None, enable_sis_reactivation=None, force_validations=None, pseudonym_authentication_provider_id=None, pseudonym_force_self_registration=None, pseudonym_integration_id=None, pseudonym_password=None, pseudonym_send_confirmation=None, pseudonym_sis_user_id=None, user_birthdate=None, user_locale=None, user_name=None, user_short_name=None, user_skip_registration=None, user_sortable_name=None, user_terms_of_use=None, user_time_zone=None):
    """
    Create a user.
    Create and return a new user and pseudonym for an account.
    If you don't have the "Modify login details for users" permission, but
    self-registration is enabled on the account, you can still use this
    endpoint to register new users. Certain fields will be required, and
    others will be ignored (see below).
    """
    # URL path and query-string pieces; this endpoint takes no query args.
    path = {"account_id": account_id}
    params = {}
    # The only always-required form field: the user's login id (must be a
    # valid email address when self-registering).
    data = {"pseudonym[unique_id]": pseudonym_unique_id}
    # Each optional argument maps 1:1 onto a Canvas form-field name and is
    # sent only when the caller actually supplied a value.
    optional_fields = (
        ("user[name]", user_name),
        ("user[short_name]", user_short_name),
        ("user[sortable_name]", user_sortable_name),
        ("user[time_zone]", user_time_zone),
        ("user[locale]", user_locale),
        ("user[birthdate]", user_birthdate),
        ("user[terms_of_use]", user_terms_of_use),
        ("user[skip_registration]", user_skip_registration),
        ("pseudonym[password]", pseudonym_password),
        ("pseudonym[sis_user_id]", pseudonym_sis_user_id),
        ("pseudonym[integration_id]", pseudonym_integration_id),
        ("pseudonym[send_confirmation]", pseudonym_send_confirmation),
        ("pseudonym[force_self_registration]", pseudonym_force_self_registration),
        ("pseudonym[authentication_provider_id]", pseudonym_authentication_provider_id),
        ("communication_channel[type]", communication_channel_type),
        ("communication_channel[address]", communication_channel_address),
        ("communication_channel[confirmation_url]", communication_channel_confirmation_url),
        ("communication_channel[skip_confirmation]", communication_channel_skip_confirmation),
        ("force_validations", force_validations),
        ("enable_sis_reactivation", enable_sis_reactivation),
    )
    for field_name, value in optional_fields:
        if value is not None:
            data[field_name] = value
    self.logger.debug("POST /api/v1/accounts/{account_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/users".format(**path), data=data, params=params, single_item=True) | Create a user.
Create and return a new user and pseudonym for an account.
If you don't have the "Modify login details for users" permission, but
self-registration is enabled on the account, you can still use this
endpoint to register new users. Certain fields will be required, and
others will be ignored (see below). | Below is the the instruction that describes the task:
### Input:
Create a user.
Create and return a new user and pseudonym for an account.
If you don't have the "Modify login details for users" permission, but
self-registration is enabled on the account, you can still use this
endpoint to register new users. Certain fields will be required, and
others will be ignored (see below).
### Response:
def create_user(self, account_id, pseudonym_unique_id, communication_channel_address=None, communication_channel_confirmation_url=None, communication_channel_skip_confirmation=None, communication_channel_type=None, enable_sis_reactivation=None, force_validations=None, pseudonym_authentication_provider_id=None, pseudonym_force_self_registration=None, pseudonym_integration_id=None, pseudonym_password=None, pseudonym_send_confirmation=None, pseudonym_sis_user_id=None, user_birthdate=None, user_locale=None, user_name=None, user_short_name=None, user_skip_registration=None, user_sortable_name=None, user_terms_of_use=None, user_time_zone=None):
    """
    Create a user.
    Create and return a new user and pseudonym for an account.
    If you don't have the "Modify login details for users" permission, but
    self-registration is enabled on the account, you can still use this
    endpoint to register new users. Certain fields will be required, and
    others will be ignored (see below).
    """
    # URL path and query-string pieces; this endpoint takes no query args.
    path = {"account_id": account_id}
    params = {}
    # The only always-required form field: the user's login id (must be a
    # valid email address when self-registering).
    data = {"pseudonym[unique_id]": pseudonym_unique_id}
    # Each optional argument maps 1:1 onto a Canvas form-field name and is
    # sent only when the caller actually supplied a value.
    optional_fields = (
        ("user[name]", user_name),
        ("user[short_name]", user_short_name),
        ("user[sortable_name]", user_sortable_name),
        ("user[time_zone]", user_time_zone),
        ("user[locale]", user_locale),
        ("user[birthdate]", user_birthdate),
        ("user[terms_of_use]", user_terms_of_use),
        ("user[skip_registration]", user_skip_registration),
        ("pseudonym[password]", pseudonym_password),
        ("pseudonym[sis_user_id]", pseudonym_sis_user_id),
        ("pseudonym[integration_id]", pseudonym_integration_id),
        ("pseudonym[send_confirmation]", pseudonym_send_confirmation),
        ("pseudonym[force_self_registration]", pseudonym_force_self_registration),
        ("pseudonym[authentication_provider_id]", pseudonym_authentication_provider_id),
        ("communication_channel[type]", communication_channel_type),
        ("communication_channel[address]", communication_channel_address),
        ("communication_channel[confirmation_url]", communication_channel_confirmation_url),
        ("communication_channel[skip_confirmation]", communication_channel_skip_confirmation),
        ("force_validations", force_validations),
        ("enable_sis_reactivation", enable_sis_reactivation),
    )
    for field_name, value in optional_fields:
        if value is not None:
            data[field_name] = value
    self.logger.debug("POST /api/v1/accounts/{account_id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/accounts/{account_id}/users".format(**path), data=data, params=params, single_item=True)
def _set_blob_properties(self, ud):
    # type: (Uploader, blobxfer.models.upload.Descriptor) -> None
    """Set blob properties (md5, cache control)
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    """
    # Attach the precomputed MD5 only when the upload is unencrypted and
    # flagged as requiring one; otherwise no digest is sent.
    if ud.requires_non_encrypted_md5_put:
        digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
    else:
        digest = None
    blobxfer.operations.azure.blob.set_blob_properties(ud.entity, digest)
    # Mirror the same properties onto every replica target, if any.
    if blobxfer.util.is_not_empty(ud.entity.replica_targets):
        for ase in ud.entity.replica_targets:
blobxfer.operations.azure.blob.set_blob_properties(ase, digest) | Set blob properties (md5, cache control)
:param Uploader self: this
:param blobxfer.models.upload.Descriptor ud: upload descriptor | Below is the the instruction that describes the task:
### Input:
Set blob properties (md5, cache control)
:param Uploader self: this
:param blobxfer.models.upload.Descriptor ud: upload descriptor
### Response:
def _set_blob_properties(self, ud):
    # type: (Uploader, blobxfer.models.upload.Descriptor) -> None
    """Set blob properties (md5, cache control)
    :param Uploader self: this
    :param blobxfer.models.upload.Descriptor ud: upload descriptor
    """
    # Only unencrypted uploads that ask for it get an MD5 attached.
    digest = None
    if ud.requires_non_encrypted_md5_put:
        digest = blobxfer.util.base64_encode_as_string(ud.md5.digest())
    # Apply to the primary entity, then mirror onto each replica target.
    blobxfer.operations.azure.blob.set_blob_properties(ud.entity, digest)
    if blobxfer.util.is_not_empty(ud.entity.replica_targets):
        for ase in ud.entity.replica_targets:
            blobxfer.operations.azure.blob.set_blob_properties(ase, digest)
def is_field_unique_by_group(df, field_col, group_col):
    ''' Determine if field is constant by group in df
    '''
    # Count the distinct values of field_col within each group_col group;
    # the field is "constant by group" when every count equals one.
    def num_unique(x):
        return len(pd.unique(x))
    num_distinct = df.groupby(group_col)[field_col].agg(num_unique)
    return all(num_distinct == 1) | Determine if field is constant by group in df | Below is the instruction that describes the task:
### Input:
Determine if field is constant by group in df
### Response:
def is_field_unique_by_group(df, field_col, group_col):
    '''Return True when *field_col* takes exactly one value within every
    *group_col* group of *df*.
    '''
    # One distinct value per group <=> the field is constant by group.
    distinct_counts = df.groupby(group_col)[field_col].agg(
        lambda values: len(pd.unique(values)))
    return all(distinct_counts == 1)
def startLoop():
    """
    Use nested asyncio event loop for Jupyter notebooks.
    """
    def _ipython_loop_asyncio(kernel):
        '''
        Use asyncio event loop for the given IPython kernel.
        '''
        loop = asyncio.get_event_loop()
        def kernel_handler():
            # Run one kernel iteration, then reschedule this handler so the
            # kernel keeps being serviced while the loop runs.
            kernel.do_one_iteration()
            loop.call_later(kernel._poll_interval, kernel_handler)
        loop.call_soon(kernel_handler)
        try:
            if not loop.is_running():
                loop.run_forever()
        finally:
            # Only tear the loop down if nothing else is still driving it.
            if not loop.is_running():
                loop.run_until_complete(loop.shutdown_asyncgens())
                loop.close()
    patchAsyncio()
    loop = asyncio.get_event_loop()
    if not loop.is_running():
        # Register the asyncio integration with IPython and activate it.
        from ipykernel.eventloops import register_integration, enable_gui
        register_integration('asyncio')(_ipython_loop_asyncio)
        enable_gui('asyncio') | Use nested asyncio event loop for Jupyter notebooks. | Below is the instruction that describes the task:
### Input:
Use nested asyncio event loop for Jupyter notebooks.
### Response:
def startLoop():
    """
    Use nested asyncio event loop for Jupyter notebooks.
    """
    def _ipython_loop_asyncio(kernel):
        """Drive the given IPython kernel from the asyncio event loop."""
        loop = asyncio.get_event_loop()
        def step():
            # Service the kernel once, then reschedule this callback.
            kernel.do_one_iteration()
            loop.call_later(kernel._poll_interval, step)
        loop.call_soon(step)
        try:
            if not loop.is_running():
                loop.run_forever()
        finally:
            # Only tear the loop down if nothing else is still driving it.
            if not loop.is_running():
                loop.run_until_complete(loop.shutdown_asyncgens())
                loop.close()
    patchAsyncio()
    if not asyncio.get_event_loop().is_running():
        # Register the asyncio integration with IPython and activate it.
        from ipykernel.eventloops import register_integration, enable_gui
        register_integration('asyncio')(_ipython_loop_asyncio)
        enable_gui('asyncio')
def determine_packages_to_sync(self):
    """
    Update the self.packages_to_sync to contain packages that need to be
    synced.
    Also sets self.target_serial to the changelog serial this sync aims
    for.  Three cases, in priority order: resume an interrupted sync from
    the on-disk todo list, do a full sync when no serial has ever been
    synced, or do an incremental sync from the master's changelog.
    """
    # In case we don't find any changes we will stay on the currently
    # synced serial.
    self.target_serial = self.synced_serial
    self.packages_to_sync = {}
    logger.info(f"Current mirror serial: {self.synced_serial}")
    if self.todolist.exists():
        # We started a sync previously and left a todo list as well as the
        # targetted serial. We'll try to keep going through the todo list
        # and then mark the targetted serial as done.
        logger.info("Resuming interrupted sync from local todo list.")
        # Close the todo file deterministically instead of leaking the
        # handle until garbage collection.
        with open(self.todolist, encoding="utf-8") as todo_file:
            saved_todo = iter(todo_file)
            self.target_serial = int(next(saved_todo).strip())
            for line in saved_todo:
                package, serial = line.strip().split()
                self.packages_to_sync[package] = int(serial)
    elif not self.synced_serial:
        logger.info("Syncing all packages.")
        # First get the current serial, then start to sync. This makes us
        # more defensive in case something changes on the server between
        # those two calls.
        self.packages_to_sync.update(self.master.all_packages())
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
    else:
        logger.info("Syncing based on changelog.")
        self.packages_to_sync.update(
            self.master.changed_packages(self.synced_serial)
        )
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
    # We can avoid downloading the main index page if we don't have
    # anything todo at all during a changelog-based sync.
    self.need_index_sync = bool(self.packages_to_sync)
    self._filter_packages()
    logger.info(f"Trying to reach serial: {self.target_serial}")
    pkg_count = len(self.packages_to_sync)
logger.info(f"{pkg_count} packages to sync.") | Update the self.packages_to_sync to contain packages that need to be
synced. | Below is the instruction that describes the task:
### Input:
Update the self.packages_to_sync to contain packages that need to be
synced.
### Response:
def determine_packages_to_sync(self):
    """
    Update the self.packages_to_sync to contain packages that need to be
    synced.
    Also sets self.target_serial to the changelog serial this sync aims
    for.  Three cases, in priority order: resume an interrupted sync from
    the on-disk todo list, do a full sync when no serial has ever been
    synced, or do an incremental sync from the master's changelog.
    """
    # In case we don't find any changes we will stay on the currently
    # synced serial.
    self.target_serial = self.synced_serial
    self.packages_to_sync = {}
    logger.info(f"Current mirror serial: {self.synced_serial}")
    if self.todolist.exists():
        # We started a sync previously and left a todo list as well as the
        # targetted serial. We'll try to keep going through the todo list
        # and then mark the targetted serial as done.
        logger.info("Resuming interrupted sync from local todo list.")
        # Close the todo file deterministically instead of leaking the
        # handle until garbage collection.
        with open(self.todolist, encoding="utf-8") as todo_file:
            saved_todo = iter(todo_file)
            self.target_serial = int(next(saved_todo).strip())
            for line in saved_todo:
                package, serial = line.strip().split()
                self.packages_to_sync[package] = int(serial)
    elif not self.synced_serial:
        logger.info("Syncing all packages.")
        # First get the current serial, then start to sync. This makes us
        # more defensive in case something changes on the server between
        # those two calls.
        self.packages_to_sync.update(self.master.all_packages())
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
    else:
        logger.info("Syncing based on changelog.")
        self.packages_to_sync.update(
            self.master.changed_packages(self.synced_serial)
        )
        self.target_serial = max(
            [self.synced_serial] + list(self.packages_to_sync.values())
        )
    # We can avoid downloading the main index page if we don't have
    # anything todo at all during a changelog-based sync.
    self.need_index_sync = bool(self.packages_to_sync)
    self._filter_packages()
    logger.info(f"Trying to reach serial: {self.target_serial}")
    pkg_count = len(self.packages_to_sync)
    logger.info(f"{pkg_count} packages to sync.")
def get(self, value):
    """
    Get an enumeration item for an enumeration value.
    :param unicode value: Enumeration value.
    :raise InvalidEnumItem: If ``value`` does not match any known
    enumeration value.
    :rtype: EnumItem
    """
    # A fresh object() sentinel distinguishes "key absent" from a stored
    # None value in the _values mapping.
    _nothing = object()
    item = self._values.get(value, _nothing)
    if item is _nothing:
        raise InvalidEnumItem(value)
return item | Get an enumeration item for an enumeration value.
:param unicode value: Enumeration value.
:raise InvalidEnumItem: If ``value`` does not match any known
enumeration value.
:rtype: EnumItem | Below is the instruction that describes the task:
### Input:
Get an enumeration item for an enumeration value.
:param unicode value: Enumeration value.
:raise InvalidEnumItem: If ``value`` does not match any known
enumeration value.
:rtype: EnumItem
### Response:
def get(self, value):
    """
    Look up the enumeration item matching an enumeration value.
    :param unicode value: Enumeration value.
    :raise InvalidEnumItem: If ``value`` does not match any known
    enumeration value.
    :rtype: EnumItem
    """
    # A private sentinel distinguishes "missing" from a stored None.
    sentinel = object()
    item = self._values.get(value, sentinel)
    if item is sentinel:
        raise InvalidEnumItem(value)
    return item
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.
    .. deprecated:: 0.7
    Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
    instead.
    :param list stop_chars: list of characters
    """
    warnings.warn("Method set_stop_chars is deprecated, "
                  "use `set_stop_chars_left` or "
                  "`set_stop_chars_right` instead", DeprecationWarning)
    # NOTE: all three attributes intentionally alias the same set object,
    # so the legacy API drives both directions at once.
    self._stop_chars = set(stop_chars)
    self._stop_chars_left = self._stop_chars
self._stop_chars_right = self._stop_chars | Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters | Below is the instruction that describes the task:
### Input:
Set stop characters used when determining end of URL.
.. deprecated:: 0.7
Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
instead.
:param list stop_chars: list of characters
### Response:
def set_stop_chars(self, stop_chars):
    """
    Set stop characters used when determining end of URL.
    .. deprecated:: 0.7
        Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right`
        instead.
    :param list stop_chars: list of characters
    """
    warnings.warn(
        "Method set_stop_chars is deprecated, "
        "use `set_stop_chars_left` or "
        "`set_stop_chars_right` instead",
        DeprecationWarning,
    )
    shared = set(stop_chars)
    # All three attributes deliberately alias the same set object,
    # matching the legacy behaviour of this deprecated setter.
    self._stop_chars = shared
    self._stop_chars_left = shared
    self._stop_chars_right = shared
def newsnr_sgveto_psdvar(snr, bchisq, sgchisq, psd_var_val):
""" Combined SNR derived from NewSNR, Sine-Gaussian Chisq and PSD
variation statistic """
nsnr = numpy.array(newsnr_sgveto(snr, bchisq, sgchisq), ndmin=1)
psd_var_val = numpy.array(psd_var_val, ndmin=1)
lgc = psd_var_val >= 1.8
nsnr[lgc] = nsnr[lgc] / numpy.sqrt(psd_var_val[lgc])
# If snr input is float, return a float. Otherwise return numpy array.
if hasattr(snr, '__len__'):
return nsnr
else:
return nsnr[0] | Combined SNR derived from NewSNR, Sine-Gaussian Chisq and PSD
variation statistic | Below is the instruction that describes the task:
### Input:
Combined SNR derived from NewSNR, Sine-Gaussian Chisq and PSD
variation statistic
### Response:
def newsnr_sgveto_psdvar(snr, bchisq, sgchisq, psd_var_val):
    """Combined SNR derived from NewSNR, Sine-Gaussian Chisq and PSD
    variation statistic"""
    stat = numpy.array(newsnr_sgveto(snr, bchisq, sgchisq), ndmin=1)
    variation = numpy.array(psd_var_val, ndmin=1)
    # Downweight entries recorded while the PSD variation statistic is
    # elevated (threshold 1.8, per the original implementation).
    noisy = variation >= 1.8
    stat[noisy] = stat[noisy] / numpy.sqrt(variation[noisy])
    # Mirror the input type: scalar snr in -> scalar out, sequence -> array.
    if hasattr(snr, '__len__'):
        return stat
    return stat[0]
def process(cls, post, render=True):
"""
This method takes the post data and renders it
:param post:
:param render:
:return:
"""
post["slug"] = cls.create_slug(post["title"])
post["editable"] = cls.is_author(post, current_user)
post["url"] = cls.construct_url(post)
post["priority"] = 0.8
if render:
cls.render_text(post)
post["meta"]["images"] = cls.extract_images(post) | This method takes the post data and renders it
:param post:
:param render:
:return: | Below is the instruction that describes the task:
### Input:
This method takes the post data and renders it
:param post:
:param render:
:return:
### Response:
def process(cls, post, render=True):
    """
    Enrich a post mapping in place and optionally render its text.
    :param dict post: Post data; must provide ``"title"`` and a ``"meta"``
        mapping. Gains ``slug``, ``editable``, ``url`` and ``priority``
        entries as a side effect.
    :param bool render: When True, ``cls.render_text`` is applied to the
        post before image extraction.
    :return: None -- ``post`` is mutated in place.
    """
    post["slug"] = cls.create_slug(post["title"])
    # NOTE(review): editability is judged against ``current_user`` --
    # presumably the logged-in requester; confirm against the caller.
    post["editable"] = cls.is_author(post, current_user)
    post["url"] = cls.construct_url(post)
    # Fixed priority for every processed post (presumably a
    # sitemap-style weight -- TODO confirm).
    post["priority"] = 0.8
    if render:
        cls.render_text(post)
    # Image extraction runs after the optional render step.
    post["meta"]["images"] = cls.extract_images(post)
def get_sort_on(allowed_indexes=None):
""" returns the 'sort_on' from the request
"""
sort_on = get("sort_on")
if allowed_indexes and sort_on not in allowed_indexes:
logger.warn("Index '{}' is not in allowed_indexes".format(sort_on))
return None
    return sort_on | returns the 'sort_on' from the request | Below is the instruction that describes the task:
### Input:
returns the 'sort_on' from the request
### Response:
def get_sort_on(allowed_indexes=None):
    """ returns the 'sort_on' from the request
    :param allowed_indexes: optional collection of permitted index names;
        when given, a ``sort_on`` value outside it is rejected.
    :return: the requested sort index, or None when it is not allowed
    """
    sort_on = get("sort_on")
    if allowed_indexes and sort_on not in allowed_indexes:
        # ``Logger.warn`` is a deprecated alias; ``warning`` is the
        # supported spelling.
        logger.warning("Index '{}' is not in allowed_indexes".format(sort_on))
        return None
    return sort_on
def write(self, outfile=None, section=None):
"""Write the current config to a file (defaults to user config).
:param str outfile: The path to the file to write to.
:param None/str section: The config section to write, or :data:`None`
to write the entire config.
"""
with io.open(outfile or self.user_config_file(), 'wb') as f:
self.data.write(outfile=f, section=section) | Write the current config to a file (defaults to user config).
:param str outfile: The path to the file to write to.
:param None/str section: The config section to write, or :data:`None`
to write the entire config. | Below is the instruction that describes the task:
### Input:
Write the current config to a file (defaults to user config).
:param str outfile: The path to the file to write to.
:param None/str section: The config section to write, or :data:`None`
to write the entire config.
### Response:
def write(self, outfile=None, section=None):
    """Write the current config to a file (defaults to user config).
    :param str outfile: The path to the file to write to.
    :param None/str section: The config section to write, or :data:`None`
        to write the entire config.
    """
    # Fall back to the per-user config file when no target is given.
    target = outfile or self.user_config_file()
    with io.open(target, 'wb') as handle:
        self.data.write(outfile=handle, section=section)
def findDataItems(self, parentPath=None, ancestorPath=None,
type=None, id=None):
"""
You can use this operation to search through the various data
items registered in the server's data store.
Inputs:
parentPath - The path of the parent under which to find items
ancestorPath - The path of the ancestor under which to find
items.
type - A filter for the type of the items
id - A filter to search by the ID of the item
Output:
dictionary
"""
params = {
"f" : "json",
}
if parentPath is not None:
params['parentPath'] = parentPath
if ancestorPath is not None:
params['ancestorPath'] = ancestorPath
if type is not None:
params['type'] = type
if id is not None:
params['id'] = id
fURL = self._url + "/findItems"
return self._post(url=fURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port) | You can use this operation to search through the various data
items registered in the server's data store.
Inputs:
parentPath - The path of the parent under which to find items
ancestorPath - The path of the ancestor under which to find
items.
type - A filter for the type of the items
id - A filter to search by the ID of the item
Output:
dictionary | Below is the instruction that describes the task:
### Input:
You can use this operation to search through the various data
items registered in the server's data store.
Inputs:
parentPath - The path of the parent under which to find items
ancestorPath - The path of the ancestor under which to find
items.
type - A filter for the type of the items
id - A filter to search by the ID of the item
Output:
dictionary
### Response:
def findDataItems(self, parentPath=None, ancestorPath=None,
                  type=None, id=None):
    """
    You can use this operation to search through the various data
    items registered in the server's data store.
    Inputs:
       parentPath - The path of the parent under which to find items
       ancestorPath - The path of the ancestor under which to find
        items.
       type - A filter for the type of the items
       id - A filter to search by the ID of the item
    Output:
       dictionary
    """
    params = {"f": "json"}
    # Only forward the filters the caller actually supplied.
    for key, val in (("parentPath", parentPath),
                     ("ancestorPath", ancestorPath),
                     ("type", type),
                     ("id", id)):
        if val is not None:
            params[key] = val
    return self._post(url=self._url + "/findItems",
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed):
"""
Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message)
"""
return self.send.host_add(f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
f_asset_group, f_confirmed) | Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message) | Below is the instruction that describes the task:
### Input:
Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message)
### Response:
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed):
    """
    Add a t_hosts record
    :param f_ipaddr: IP address
    :param f_macaddr: MAC Address
    :param f_hostname: Hostname
    :param f_netbios_name: NetBIOS Name
    :param f_engineer: Engineer username
    :param f_asset_group: Asset group
    :param f_confirmed: Confirmed boolean
    :return: (True/False, t_hosts.id or response message)
    """
    # Pure delegation: the transport object (``self.send``) owns the call.
    return self.send.host_add(
        f_ipaddr,
        f_macaddr,
        f_hostname,
        f_netbios_name,
        f_engineer,
        f_asset_group,
        f_confirmed,
    )
def domain(host, port, username, password, avail_timeout=240.):
"""
Endpoint domain.
:param str host:
Endpoint host.
:param int port:
Endpoint port.
:param str username:
Endpoint username.
:param str password:
Endpoint password.
:param float avail_timeout:
Availability check timeout in seconds.
:rtype:
Domain
:return:
Domain for the given endpoint parameters.
"""
return Domain(Endpoint(host, port, username, password), avail_timeout) | Endpoint domain.
:param str host:
Endpoint host.
:param int port:
Endpoint port.
:param str username:
Endpoint username.
:param str password:
Endpoint password.
:param float avail_timeout:
Availability check timeout in seconds.
:rtype:
Domain
:return:
Domain for the given endpoint parameters. | Below is the instruction that describes the task:
### Input:
Endpoint domain.
:param str host:
Endpoint host.
:param int port:
Endpoint port.
:param str username:
Endpoint username.
:param str password:
Endpoint password.
:param float avail_timeout:
Availability check timeout in seconds.
:rtype:
Domain
:return:
Domain for the given endpoint parameters.
### Response:
def domain(host, port, username, password, avail_timeout=240.):
    """
    Endpoint domain.
    :param str host: Endpoint host.
    :param int port: Endpoint port.
    :param str username: Endpoint username.
    :param str password: Endpoint password.
    :param float avail_timeout: Availability check timeout in seconds.
    :rtype: Domain
    :return: Domain for the given endpoint parameters.
    """
    endpoint = Endpoint(host, port, username, password)
    return Domain(endpoint, avail_timeout)
def _get_function_matches(attributes_a, attributes_b, filter_set_a=None, filter_set_b=None):
"""
:param attributes_a: A dict of functions to their attributes
:param attributes_b: A dict of functions to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the functions in this set.
:param filter_set_b: A set to limit attributes_b to the functions in this set.
:returns: A list of tuples of matching objects.
"""
# get the attributes that are in the sets
if filter_set_a is None:
filtered_attributes_a = {k: v for k, v in attributes_a.items()}
else:
filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
if filter_set_b is None:
filtered_attributes_b = {k: v for k, v in attributes_b.items()}
else:
filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
# get closest
closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
# a match (x,y) is good if x is the closest to y and y is the closest to x
matches = []
for a in closest_a:
if len(closest_a[a]) == 1:
match = closest_a[a][0]
if len(closest_b[match]) == 1 and closest_b[match][0] == a:
matches.append((a, match))
return matches | :param attributes_a: A dict of functions to their attributes
:param attributes_b: A dict of functions to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the functions in this set.
:param filter_set_b: A set to limit attributes_b to the functions in this set.
:returns: A list of tuples of matching objects. | Below is the instruction that describes the task:
### Input:
:param attributes_a: A dict of functions to their attributes
:param attributes_b: A dict of functions to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the functions in this set.
:param filter_set_b: A set to limit attributes_b to the functions in this set.
:returns: A list of tuples of matching objects.
### Response:
def _get_function_matches(attributes_a, attributes_b, filter_set_a=None, filter_set_b=None):
    """
    :param attributes_a: A dict of functions to their attributes
    :param attributes_b: A dict of functions to their attributes
    The following parameters are optional.
    :param filter_set_a: A set to limit attributes_a to the functions in this set.
    :param filter_set_b: A set to limit attributes_b to the functions in this set.
    :returns: A list of tuples of matching objects.
    """
    def _filtered(attributes, keep):
        # Plain dict() copy when unfiltered; otherwise keep only ``keep``.
        if keep is None:
            return dict(attributes)
        return {k: v for k, v in attributes.items() if k in keep}

    filtered_attributes_a = _filtered(attributes_a, filter_set_a)
    filtered_attributes_b = _filtered(attributes_b, filter_set_b)

    # Nearest neighbours computed in both directions.
    closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
    closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)

    # A pair (x, y) matches iff each is the unique closest match of the other.
    matches = []
    for a, candidates in closest_a.items():
        if len(candidates) != 1:
            continue
        match = candidates[0]
        if len(closest_b[match]) == 1 and closest_b[match][0] == a:
            matches.append((a, match))
    return matches
def get_image(vm_):
'''
Return the image object to use
'''
vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
'ascii', 'salt-cloud-force-ascii'
)
images = avail_images()
for key, value in six.iteritems(images):
if vm_image and vm_image in (images[key]['id'], images[key]['name']):
return images[key]
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
    ) | Return the image object to use | Below is the instruction that describes the task:
### Input:
Return the image object to use
### Response:
def get_image(vm_):
    '''
    Return the image object to use
    Resolves the profile's ``image`` setting against the provider's
    available images, matching on either id or name.
    Raises SaltCloudNotFound when no image matches.
    '''
    vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
        'ascii', 'salt-cloud-force-ascii'
    )
    # Iterate the image records directly instead of re-indexing the dict
    # for every comparison; the keys themselves were never used.
    for image in six.itervalues(avail_images()):
        if vm_image and vm_image in (image['id'], image['name']):
            return image
    raise SaltCloudNotFound(
        'The specified image, \'{0}\', could not be found.'.format(vm_image)
    )
def on_any_event(self, event):
"""On any event method"""
for delegate in self.delegates:
if hasattr(delegate, "on_any_event"):
            delegate.on_any_event(event) | On any event method | Below is the instruction that describes the task:
### Input:
On any event method
### Response:
def on_any_event(self, event):
    """On any event method: fan the event out to every delegate that
    implements an ``on_any_event`` handler."""
    interested = (d for d in self.delegates if hasattr(d, "on_any_event"))
    for delegate in interested:
        delegate.on_any_event(event)
def absolute_url_for(endpoint, **values):
"""Absolute url for endpoint."""
config = current_app.config
site_domain = config.get('SITE_DOMAIN')
relative_url = url_for(endpoint, **values)
    return join_url(site_domain, relative_url) | Absolute url for endpoint. | Below is the instruction that describes the task:
### Input:
Absolute url for endpoint.
### Response:
def absolute_url_for(endpoint, **values):
    """Absolute url for endpoint: the configured site domain joined with
    the relative url resolved for ``endpoint``."""
    site_domain = current_app.config.get('SITE_DOMAIN')
    return join_url(site_domain, url_for(endpoint, **values))
def find_host_network_interfaces_of_type(self, type_p):
"""Searches through all host network interfaces and returns a list of interfaces of the specified type
in type_p of type :class:`HostNetworkInterfaceType`
type of the host network interfaces to search for.
return network_interfaces of type :class:`IHostNetworkInterface`
Found host network interface objects.
"""
if not isinstance(type_p, HostNetworkInterfaceType):
raise TypeError("type_p can only be an instance of type HostNetworkInterfaceType")
network_interfaces = self._call("findHostNetworkInterfacesOfType",
in_p=[type_p])
network_interfaces = [IHostNetworkInterface(a) for a in network_interfaces]
return network_interfaces | Searches through all host network interfaces and returns a list of interfaces of the specified type
in type_p of type :class:`HostNetworkInterfaceType`
type of the host network interfaces to search for.
return network_interfaces of type :class:`IHostNetworkInterface`
Found host network interface objects. | Below is the instruction that describes the task:
### Input:
Searches through all host network interfaces and returns a list of interfaces of the specified type
in type_p of type :class:`HostNetworkInterfaceType`
type of the host network interfaces to search for.
return network_interfaces of type :class:`IHostNetworkInterface`
Found host network interface objects.
### Response:
def find_host_network_interfaces_of_type(self, type_p):
    """Searches through all host network interfaces and returns a list of interfaces of the specified type
    in type_p of type :class:`HostNetworkInterfaceType`
        type of the host network interfaces to search for.
    return network_interfaces of type :class:`IHostNetworkInterface`
        Found host network interface objects.
    """
    if not isinstance(type_p, HostNetworkInterfaceType):
        raise TypeError("type_p can only be an instance of type HostNetworkInterfaceType")
    raw = self._call("findHostNetworkInterfacesOfType",
                     in_p=[type_p])
    # Wrap each raw result in its interface class before returning.
    return [IHostNetworkInterface(entry) for entry in raw]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.