code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def encode_produce_request(cls, client_id, correlation_id,
payloads=None, acks=1, timeout=1000):
"""
Encode some ProduceRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of ProduceRequest
acks: How "acky" you want the request to be
0: immediate response
1: written to disk by the leader
2+: waits for this many number of replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.PRODUCE_KEY))
message.append(struct.pack('>hii', acks, timeout,
len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(struct.pack('>h%dsi' % len(topic), len(topic), topic,
len(topic_payloads)))
for partition, payload in topic_payloads.items():
msg_set = KafkaProtocol._encode_message_set(payload.messages)
message.append(struct.pack('>ii%ds' % len(msg_set), partition,
len(msg_set), msg_set))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) | Encode some ProduceRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of ProduceRequest
acks: How "acky" you want the request to be
0: immediate response
1: written to disk by the leader
2+: waits for this many number of replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout | Below is the the instruction that describes the task:
### Input:
Encode some ProduceRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of ProduceRequest
acks: How "acky" you want the request to be
0: immediate response
1: written to disk by the leader
2+: waits for this many number of replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout
### Response:
def encode_produce_request(cls, client_id, correlation_id,
payloads=None, acks=1, timeout=1000):
"""
Encode some ProduceRequest structs
Arguments:
client_id: string
correlation_id: int
payloads: list of ProduceRequest
acks: How "acky" you want the request to be
0: immediate response
1: written to disk by the leader
2+: waits for this many number of replicas to sync
-1: waits for all replicas to be in sync
timeout: Maximum time the server will wait for acks from replicas.
This is _not_ a socket timeout
"""
payloads = [] if payloads is None else payloads
grouped_payloads = group_by_topic_and_partition(payloads)
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.PRODUCE_KEY))
message.append(struct.pack('>hii', acks, timeout,
len(grouped_payloads)))
for topic, topic_payloads in grouped_payloads.items():
message.append(struct.pack('>h%dsi' % len(topic), len(topic), topic,
len(topic_payloads)))
for partition, payload in topic_payloads.items():
msg_set = KafkaProtocol._encode_message_set(payload.messages)
message.append(struct.pack('>ii%ds' % len(msg_set), partition,
len(msg_set), msg_set))
msg = b''.join(message)
return struct.pack('>i%ds' % len(msg), len(msg), msg) |
def disconnect_all(self, message=""):
"""Disconnects all connections."""
with self.mutex:
for conn in self.connections:
conn.disconnect(message) | Disconnects all connections. | Below is the the instruction that describes the task:
### Input:
Disconnects all connections.
### Response:
def disconnect_all(self, message=""):
"""Disconnects all connections."""
with self.mutex:
for conn in self.connections:
conn.disconnect(message) |
def changememokey(ctx, key, account):
""" Change the memo key of an account
"""
pprint(ctx.blockchain.update_memo_key(key, account=account)) | Change the memo key of an account | Below is the the instruction that describes the task:
### Input:
Change the memo key of an account
### Response:
def changememokey(ctx, key, account):
""" Change the memo key of an account
"""
pprint(ctx.blockchain.update_memo_key(key, account=account)) |
def _validate_fname(fname, arg_name):
"""Validate that a string is a valid file name."""
if fname is not None:
msg = "Argument `{0}` is not valid".format(arg_name)
if (not isinstance(fname, str)) or (isinstance(fname, str) and ("\0" in fname)):
raise RuntimeError(msg)
try:
if not os.path.exists(fname):
os.access(fname, os.W_OK)
except (TypeError, ValueError): # pragma: no cover
raise RuntimeError(msg) | Validate that a string is a valid file name. | Below is the the instruction that describes the task:
### Input:
Validate that a string is a valid file name.
### Response:
def _validate_fname(fname, arg_name):
"""Validate that a string is a valid file name."""
if fname is not None:
msg = "Argument `{0}` is not valid".format(arg_name)
if (not isinstance(fname, str)) or (isinstance(fname, str) and ("\0" in fname)):
raise RuntimeError(msg)
try:
if not os.path.exists(fname):
os.access(fname, os.W_OK)
except (TypeError, ValueError): # pragma: no cover
raise RuntimeError(msg) |
def query(self, req, timeout=None, metadata=None, credentials=None):
"""Runs query operation."""
return self.stub.Query(req, timeout=timeout, metadata=metadata,
credentials=credentials) | Runs query operation. | Below is the the instruction that describes the task:
### Input:
Runs query operation.
### Response:
def query(self, req, timeout=None, metadata=None, credentials=None):
"""Runs query operation."""
return self.stub.Query(req, timeout=timeout, metadata=metadata,
credentials=credentials) |
def load_ui_from_file(name: str):
"""
Returns a tuple from uic.loadUiType(), loading the ui file with the given name.
:param name:
:return:
"""
ui_file = _get_ui_qfile(name)
try:
base_type = uic.loadUiType(ui_file, from_imports=True)
finally:
ui_file.close()
return base_type | Returns a tuple from uic.loadUiType(), loading the ui file with the given name.
:param name:
:return: | Below is the the instruction that describes the task:
### Input:
Returns a tuple from uic.loadUiType(), loading the ui file with the given name.
:param name:
:return:
### Response:
def load_ui_from_file(name: str):
"""
Returns a tuple from uic.loadUiType(), loading the ui file with the given name.
:param name:
:return:
"""
ui_file = _get_ui_qfile(name)
try:
base_type = uic.loadUiType(ui_file, from_imports=True)
finally:
ui_file.close()
return base_type |
def set_source(self, propname, pores):
r"""
Applies a given source term to the specified pores
Parameters
----------
propname : string
The property name of the source term model to be applied
pores : array_like
The pore indices where the source term should be applied
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised.
"""
locs = self.tomask(pores=pores)
if (not np.all(np.isnan(self['pore.bc_value'][locs]))) or \
(not np.all(np.isnan(self['pore.bc_rate'][locs]))):
raise Exception('Boundary conditions already present in given ' +
'pores, cannot also assign source terms')
self[propname] = locs
self.settings['sources'].append(propname) | r"""
Applies a given source term to the specified pores
Parameters
----------
propname : string
The property name of the source term model to be applied
pores : array_like
The pore indices where the source term should be applied
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised. | Below is the the instruction that describes the task:
### Input:
r"""
Applies a given source term to the specified pores
Parameters
----------
propname : string
The property name of the source term model to be applied
pores : array_like
The pore indices where the source term should be applied
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised.
### Response:
def set_source(self, propname, pores):
r"""
Applies a given source term to the specified pores
Parameters
----------
propname : string
The property name of the source term model to be applied
pores : array_like
The pore indices where the source term should be applied
Notes
-----
Source terms cannot be applied in pores where boundary conditions have
already been set. Attempting to do so will result in an error being
raised.
"""
locs = self.tomask(pores=pores)
if (not np.all(np.isnan(self['pore.bc_value'][locs]))) or \
(not np.all(np.isnan(self['pore.bc_rate'][locs]))):
raise Exception('Boundary conditions already present in given ' +
'pores, cannot also assign source terms')
self[propname] = locs
self.settings['sources'].append(propname) |
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties | Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor) | Below is the the instruction that describes the task:
### Input:
Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)
### Response:
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.editor import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties |
def settle(ctx, symbol, amount, account):
""" Fund the fee pool of an asset
"""
print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account)) | Fund the fee pool of an asset | Below is the the instruction that describes the task:
### Input:
Fund the fee pool of an asset
### Response:
def settle(ctx, symbol, amount, account):
""" Fund the fee pool of an asset
"""
print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account)) |
def register_frame(self, frame):
"""
Register the Frame that owns this Widget.
:param frame: The owning Frame.
"""
self._frame = frame
self.string_len = wcswidth if self._frame.canvas.unicode_aware else len | Register the Frame that owns this Widget.
:param frame: The owning Frame. | Below is the the instruction that describes the task:
### Input:
Register the Frame that owns this Widget.
:param frame: The owning Frame.
### Response:
def register_frame(self, frame):
"""
Register the Frame that owns this Widget.
:param frame: The owning Frame.
"""
self._frame = frame
self.string_len = wcswidth if self._frame.canvas.unicode_aware else len |
def matches(self, string, context=None):
"""
Search for all matches with current configuration against input_string
:param string: string to search into
:type string: str
:param context: context to use
:type context: dict
:return: A custom list of matches
:rtype: Matches
"""
matches = Matches(input_string=string)
if context is None:
context = {}
self._matches_patterns(matches, context)
self._execute_rules(matches, context)
return matches | Search for all matches with current configuration against input_string
:param string: string to search into
:type string: str
:param context: context to use
:type context: dict
:return: A custom list of matches
:rtype: Matches | Below is the the instruction that describes the task:
### Input:
Search for all matches with current configuration against input_string
:param string: string to search into
:type string: str
:param context: context to use
:type context: dict
:return: A custom list of matches
:rtype: Matches
### Response:
def matches(self, string, context=None):
"""
Search for all matches with current configuration against input_string
:param string: string to search into
:type string: str
:param context: context to use
:type context: dict
:return: A custom list of matches
:rtype: Matches
"""
matches = Matches(input_string=string)
if context is None:
context = {}
self._matches_patterns(matches, context)
self._execute_rules(matches, context)
return matches |
def _build_model(self, traj, brian_list, network_dict):
"""Builds the neuron groups from `traj`.
Adds the neuron groups to `brian_list` and `network_dict`.
"""
model = traj.parameters.model
# Create the equations for both models
eqs_dict = self._build_model_eqs(traj)
# Create inhibitory neurons
eqs_i = eqs_dict['i']
neurons_i = NeuronGroup(N=model.N_i,
model = eqs_i,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
method='Euler')
# Create excitatory neurons
eqs_e = eqs_dict['e']
neurons_e = NeuronGroup(N=model.N_e,
model = eqs_e,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
method='Euler')
# Set the bias terms
neurons_e.mu =rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
neurons_i.mu =rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min
# Set initial membrane potentials
neurons_e.V = rand(model.N_e)
neurons_i.V = rand(model.N_i)
# Add both groups to the `brian_list` and the `network_dict`
brian_list.append(neurons_i)
brian_list.append(neurons_e)
network_dict['neurons_e']=neurons_e
network_dict['neurons_i']=neurons_i | Builds the neuron groups from `traj`.
Adds the neuron groups to `brian_list` and `network_dict`. | Below is the the instruction that describes the task:
### Input:
Builds the neuron groups from `traj`.
Adds the neuron groups to `brian_list` and `network_dict`.
### Response:
def _build_model(self, traj, brian_list, network_dict):
"""Builds the neuron groups from `traj`.
Adds the neuron groups to `brian_list` and `network_dict`.
"""
model = traj.parameters.model
# Create the equations for both models
eqs_dict = self._build_model_eqs(traj)
# Create inhibitory neurons
eqs_i = eqs_dict['i']
neurons_i = NeuronGroup(N=model.N_i,
model = eqs_i,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
method='Euler')
# Create excitatory neurons
eqs_e = eqs_dict['e']
neurons_e = NeuronGroup(N=model.N_e,
model = eqs_e,
threshold=model.V_th,
reset=model.reset_func,
refractory=model.refractory,
method='Euler')
# Set the bias terms
neurons_e.mu =rand(model.N_e) * (model.mu_e_max - model.mu_e_min) + model.mu_e_min
neurons_i.mu =rand(model.N_i) * (model.mu_i_max - model.mu_i_min) + model.mu_i_min
# Set initial membrane potentials
neurons_e.V = rand(model.N_e)
neurons_i.V = rand(model.N_i)
# Add both groups to the `brian_list` and the `network_dict`
brian_list.append(neurons_i)
brian_list.append(neurons_e)
network_dict['neurons_e']=neurons_e
network_dict['neurons_i']=neurons_i |
def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so the filtered table contains only observations that are located the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.darray`
A mask boolean array
"""
# the requested order for filtering the astropy observations table is more precise than the order
# of the TimeMoc object
current_max_order = self.max_order
new_max_order = TimeMOC.time_resolution_to_order(delta_t)
if new_max_order > current_max_order:
message = 'Requested time resolution filtering cannot be applied.\n' \
'Filtering is applied with a time resolution of {0} sec.'.format(
TimeMOC.order_to_time_resolution(current_max_order).sec)
warnings.warn(message, UserWarning)
rough_tmoc = self.degrade_to_order(new_max_order)
pix_arr = (times.jd * TimeMOC.DAY_MICRO_SEC)
pix_arr = pix_arr.astype(int)
intervals_arr = rough_tmoc._interval_set._intervals
inf_arr = np.vstack([pix_arr[i] >= intervals_arr[:, 0] for i in range(pix_arr.shape[0])])
sup_arr = np.vstack([pix_arr[i] <= intervals_arr[:, 1] for i in range(pix_arr.shape[0])])
if keep_inside:
res = inf_arr & sup_arr
filtered_rows = np.any(res, axis=1)
else:
res = ~inf_arr | ~sup_arr
filtered_rows = np.all(res, axis=1)
return filtered_rows | Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so the filtered table contains only observations that are located the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.darray`
A mask boolean array | Below is the the instruction that describes the task:
### Input:
Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so the filtered table contains only observations that are located the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.darray`
A mask boolean array
### Response:
def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME):
"""
Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the
TMOC instance.
Parameters
----------
times : `astropy.time.Time`
astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
True by default. If so the filtered table contains only observations that are located the MOC.
If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
the duration of one observation. It is set to 30 min by default. This data is used to compute the
more efficient TimeMOC order to represent the observations (Best order = the less precise order which
is able to discriminate two observations separated by ``delta_t``).
Returns
-------
array : `~numpy.darray`
A mask boolean array
"""
# the requested order for filtering the astropy observations table is more precise than the order
# of the TimeMoc object
current_max_order = self.max_order
new_max_order = TimeMOC.time_resolution_to_order(delta_t)
if new_max_order > current_max_order:
message = 'Requested time resolution filtering cannot be applied.\n' \
'Filtering is applied with a time resolution of {0} sec.'.format(
TimeMOC.order_to_time_resolution(current_max_order).sec)
warnings.warn(message, UserWarning)
rough_tmoc = self.degrade_to_order(new_max_order)
pix_arr = (times.jd * TimeMOC.DAY_MICRO_SEC)
pix_arr = pix_arr.astype(int)
intervals_arr = rough_tmoc._interval_set._intervals
inf_arr = np.vstack([pix_arr[i] >= intervals_arr[:, 0] for i in range(pix_arr.shape[0])])
sup_arr = np.vstack([pix_arr[i] <= intervals_arr[:, 1] for i in range(pix_arr.shape[0])])
if keep_inside:
res = inf_arr & sup_arr
filtered_rows = np.any(res, axis=1)
else:
res = ~inf_arr | ~sup_arr
filtered_rows = np.all(res, axis=1)
return filtered_rows |
def tuple(data, field_name):
"""
RETURN LIST OF TUPLES
"""
if isinstance(data, Cube):
Log.error("not supported yet")
if isinstance(data, FlatList):
Log.error("not supported yet")
if is_data(field_name) and "value" in field_name:
# SIMPLIFY {"value":value} AS STRING
field_name = field_name["value"]
# SIMPLE PYTHON ITERABLE ASSUMED
if is_text(field_name):
if len(split_field(field_name)) == 1:
return [(d[field_name],) for d in data]
else:
path = split_field(field_name)
output = []
flat_list._tuple1(data, path, 0, output)
return output
elif is_list(field_name):
paths = [_select_a_field(f) for f in field_name]
output = FlatList()
_tuple((), unwrap(data), paths, 0, output)
return output
else:
paths = [_select_a_field(field_name)]
output = FlatList()
_tuple((), data, paths, 0, output)
return output | RETURN LIST OF TUPLES | Below is the the instruction that describes the task:
### Input:
RETURN LIST OF TUPLES
### Response:
def tuple(data, field_name):
"""
RETURN LIST OF TUPLES
"""
if isinstance(data, Cube):
Log.error("not supported yet")
if isinstance(data, FlatList):
Log.error("not supported yet")
if is_data(field_name) and "value" in field_name:
# SIMPLIFY {"value":value} AS STRING
field_name = field_name["value"]
# SIMPLE PYTHON ITERABLE ASSUMED
if is_text(field_name):
if len(split_field(field_name)) == 1:
return [(d[field_name],) for d in data]
else:
path = split_field(field_name)
output = []
flat_list._tuple1(data, path, 0, output)
return output
elif is_list(field_name):
paths = [_select_a_field(f) for f in field_name]
output = FlatList()
_tuple((), unwrap(data), paths, 0, output)
return output
else:
paths = [_select_a_field(field_name)]
output = FlatList()
_tuple((), data, paths, 0, output)
return output |
def _main_loop(self):
'''
Main loop for the stats collector
'''
while self.active:
self.expire()
if self.roll and self.is_expired():
self.start_time = self.start_time + self.window
self._set_key()
self.purge_old()
time.sleep(self.cycle_time)
self._clean_up() | Main loop for the stats collector | Below is the the instruction that describes the task:
### Input:
Main loop for the stats collector
### Response:
def _main_loop(self):
'''
Main loop for the stats collector
'''
while self.active:
self.expire()
if self.roll and self.is_expired():
self.start_time = self.start_time + self.window
self._set_key()
self.purge_old()
time.sleep(self.cycle_time)
self._clean_up() |
def fling_forward_horizontally(self, *args, **selectors):
"""
Perform fling forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
"""
return self.device(**selectors).fling.horiz.forward() | Perform fling forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not. | Below is the the instruction that describes the task:
### Input:
Perform fling forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
### Response:
def fling_forward_horizontally(self, *args, **selectors):
"""
Perform fling forward (horizontally)action on the object which has *selectors* attributes.
Return whether the object can be fling or not.
"""
return self.device(**selectors).fling.horiz.forward() |
def forebrain():
"""Developing human forebrain.
Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.
Returns
-------
Returns `adata` object
"""
filename = 'data/ForebrainGlut/hgForebrainGlut.loom'
url = 'http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom'
adata = read(filename, backup_url=url, cleanup=True, sparse=True, cache=True)
adata.var_names_make_unique()
return adata | Developing human forebrain.
Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.
Returns
-------
Returns `adata` object | Below is the the instruction that describes the task:
### Input:
Developing human forebrain.
Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.
Returns
-------
Returns `adata` object
### Response:
def forebrain():
"""Developing human forebrain.
Forebrain tissue of a week 10 embryo, focusing on the glutamatergic neuronal lineage.
Returns
-------
Returns `adata` object
"""
filename = 'data/ForebrainGlut/hgForebrainGlut.loom'
url = 'http://pklab.med.harvard.edu/velocyto/hgForebrainGlut/hgForebrainGlut.loom'
adata = read(filename, backup_url=url, cleanup=True, sparse=True, cache=True)
adata.var_names_make_unique()
return adata |
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path)) | Build a package | Below is the the instruction that describes the task:
### Input:
Build a package
### Response:
def _build(self, args):
'''
Build a package
'''
if len(args) < 2:
raise SPMInvocationError('A path to a formula must be specified')
self.abspath = args[1].rstrip('/')
comps = self.abspath.split('/')
self.relpath = comps[-1]
formula_path = '{0}/FORMULA'.format(self.abspath)
if not os.path.exists(formula_path):
raise SPMPackageError('Formula file {0} not found'.format(formula_path))
with salt.utils.files.fopen(formula_path) as fp_:
formula_conf = salt.utils.yaml.safe_load(fp_)
for field in ('name', 'version', 'release', 'summary', 'description'):
if field not in formula_conf:
raise SPMPackageError('Invalid package: a {0} must be defined'.format(field))
out_path = '{0}/{1}-{2}-{3}.spm'.format(
self.opts['spm_build_dir'],
formula_conf['name'],
formula_conf['version'],
formula_conf['release'],
)
if not os.path.exists(self.opts['spm_build_dir']):
os.mkdir(self.opts['spm_build_dir'])
self.formula_conf = formula_conf
formula_tar = tarfile.open(out_path, 'w:bz2')
if 'files' in formula_conf:
# This allows files to be added to the SPM file in a specific order.
# It also allows for files to be tagged as a certain type, as with
# RPM files. This tag is ignored here, but is used when installing
# the SPM file.
if isinstance(formula_conf['files'], list):
formula_dir = tarfile.TarInfo(formula_conf['name'])
formula_dir.type = tarfile.DIRTYPE
formula_tar.addfile(formula_dir)
for file_ in formula_conf['files']:
for ftype in FILE_TYPES:
if file_.startswith('{0}|'.format(ftype)):
file_ = file_.lstrip('{0}|'.format(ftype))
formula_tar.add(
os.path.join(os.getcwd(), file_),
os.path.join(formula_conf['name'], file_),
)
else:
# If no files are specified, then the whole directory will be added.
try:
formula_tar.add(formula_path, formula_conf['name'], filter=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], filter=self._exclude)
except TypeError:
formula_tar.add(formula_path, formula_conf['name'], exclude=self._exclude)
formula_tar.add(self.abspath, formula_conf['name'], exclude=self._exclude)
formula_tar.close()
self.ui.status('Built package {0}'.format(out_path)) |
def start(self):
"""Start transmitting message (add to list if needed)."""
if self._index is None:
self._index = ctypes.c_uint32()
_canlib.canSchedulerAddMessage(self._scheduler,
self._msg,
self._index)
_canlib.canSchedulerStartMessage(self._scheduler,
self._index,
self._count) | Start transmitting message (add to list if needed). | Below is the the instruction that describes the task:
### Input:
Start transmitting message (add to list if needed).
### Response:
def start(self):
"""Start transmitting message (add to list if needed)."""
if self._index is None:
self._index = ctypes.c_uint32()
_canlib.canSchedulerAddMessage(self._scheduler,
self._msg,
self._index)
_canlib.canSchedulerStartMessage(self._scheduler,
self._index,
self._count) |
def page_for(self, member, page_size=DEFAULT_PAGE_SIZE):
'''
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
'''
return self.page_for_in(self.leaderboard_name, member, page_size) | Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard. | Below is the the instruction that describes the task:
### Input:
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
### Response:
def page_for(self, member, page_size=DEFAULT_PAGE_SIZE):
'''
Determine the page where a member falls in the leaderboard.
@param member [String] Member name.
@param page_size [int] Page size to be used in determining page location.
@return the page where a member falls in the leaderboard.
'''
return self.page_for_in(self.leaderboard_name, member, page_size) |
def decompose(
val: TValue,
*,
intercepting_decomposer: Callable[['cirq.Operation'],
Union[None,
NotImplementedType,
'cirq.OP_TREE']] = None,
fallback_decomposer: Callable[['cirq.Operation'],
Union[None,
NotImplementedType,
'cirq.OP_TREE']] = None,
keep: Callable[['cirq.Operation'], bool] = None,
on_stuck_raise: Union[None,
Exception,
Callable[['cirq.Operation'],
Union[None, Exception]]]
= _value_error_describing_bad_operation
) -> List['cirq.Operation']:
"""Recursively decomposes a value into `cirq.Operation`s meeting a criteria.
Args:
val: The value to decompose into operations.
intercepting_decomposer: An optional method that is called before the
default decomposer (the value's `_decompose_` method). If
`intercepting_decomposer` is specified and returns a result that
isn't `NotImplemented` or `None`, that result is used. Otherwise the
decomposition falls back to the default decomposer.
Note that `val` will be passed into `intercepting_decomposer`, even
if `val` isn't a `cirq.Operation`.
fallback_decomposer: An optional decomposition that used after the
`intercepting_decomposer` and the default decomposer (the value's
`_decompose_` method) both fail.
keep: A predicate that determines if the initial operation or
intermediate decomposed operations should be kept or else need to be
decomposed further. If `keep` isn't specified, it defaults to "value
can't be decomposed anymore".
on_stuck_raise: If there is an operation that can't be decomposed and
also can't be kept, `on_stuck_raise` is used to determine what error
to raise. `on_stuck_raise` can either directly be an `Exception`, or
a method that takes the problematic operation and returns an
`Exception`. If `on_stuck_raise` is set to `None` or a method that
returns `None`, undecomposable operations are simply silently kept.
`on_stuck_raise` defaults to a `ValueError` describing the unwanted
undecomposable operation.
Returns:
A list of operations that the given value was decomposed into. If
`on_stuck_raise` isn't set to None, all operations in the list will
satisfy the predicate specified by `keep`.
Raises:
TypeError:
`val` isn't a `cirq.Operation` and can't be decomposed even once.
(So it's not possible to return a list of operations.)
ValueError:
Default type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
TError:
Custom type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
"""
from cirq import ops # HACK: Avoids circular dependencies.
if (on_stuck_raise is not _value_error_describing_bad_operation and
keep is None):
raise ValueError(
"Must specify 'keep' if specifying 'on_stuck_raise', because it's "
"not possible to get stuck if you don't have a criteria on what's "
"acceptable to keep.")
decomposers = [d
for d in [intercepting_decomposer,
_default_decomposer,
fallback_decomposer]
if d]
def decomposer(op):
for d in decomposers:
r = d(op)
if r is not NotImplemented and r is not None:
return r
return NotImplemented
output = []
queue = [val] # type: List[Any]
while queue:
item = queue.pop(0)
if isinstance(item, ops.Operation) and keep is not None and keep(item):
output.append(item)
continue
decomposed = decomposer(item)
if decomposed is not NotImplemented and decomposed is not None:
queue[:0] = ops.flatten_op_tree(decomposed)
continue
if (not isinstance(item, ops.Operation) and
isinstance(item, collections.Iterable)):
queue[:0] = ops.flatten_op_tree(item)
continue
if keep is not None and on_stuck_raise is not None:
if isinstance(on_stuck_raise, Exception):
raise on_stuck_raise
elif callable(on_stuck_raise):
error = on_stuck_raise(item)
if error is not None:
raise error
output.append(item)
return output | Recursively decomposes a value into `cirq.Operation`s meeting a criteria.
Args:
val: The value to decompose into operations.
intercepting_decomposer: An optional method that is called before the
default decomposer (the value's `_decompose_` method). If
`intercepting_decomposer` is specified and returns a result that
isn't `NotImplemented` or `None`, that result is used. Otherwise the
decomposition falls back to the default decomposer.
Note that `val` will be passed into `intercepting_decomposer`, even
if `val` isn't a `cirq.Operation`.
fallback_decomposer: An optional decomposition that used after the
`intercepting_decomposer` and the default decomposer (the value's
`_decompose_` method) both fail.
keep: A predicate that determines if the initial operation or
intermediate decomposed operations should be kept or else need to be
decomposed further. If `keep` isn't specified, it defaults to "value
can't be decomposed anymore".
on_stuck_raise: If there is an operation that can't be decomposed and
also can't be kept, `on_stuck_raise` is used to determine what error
to raise. `on_stuck_raise` can either directly be an `Exception`, or
a method that takes the problematic operation and returns an
`Exception`. If `on_stuck_raise` is set to `None` or a method that
returns `None`, undecomposable operations are simply silently kept.
`on_stuck_raise` defaults to a `ValueError` describing the unwanted
undecomposable operation.
Returns:
A list of operations that the given value was decomposed into. If
`on_stuck_raise` isn't set to None, all operations in the list will
satisfy the predicate specified by `keep`.
Raises:
TypeError:
`val` isn't a `cirq.Operation` and can't be decomposed even once.
(So it's not possible to return a list of operations.)
ValueError:
Default type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
TError:
Custom type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate. | Below is the the instruction that describes the task:
### Input:
Recursively decomposes a value into `cirq.Operation`s meeting a criteria.
Args:
val: The value to decompose into operations.
intercepting_decomposer: An optional method that is called before the
default decomposer (the value's `_decompose_` method). If
`intercepting_decomposer` is specified and returns a result that
isn't `NotImplemented` or `None`, that result is used. Otherwise the
decomposition falls back to the default decomposer.
Note that `val` will be passed into `intercepting_decomposer`, even
if `val` isn't a `cirq.Operation`.
fallback_decomposer: An optional decomposition that used after the
`intercepting_decomposer` and the default decomposer (the value's
`_decompose_` method) both fail.
keep: A predicate that determines if the initial operation or
intermediate decomposed operations should be kept or else need to be
decomposed further. If `keep` isn't specified, it defaults to "value
can't be decomposed anymore".
on_stuck_raise: If there is an operation that can't be decomposed and
also can't be kept, `on_stuck_raise` is used to determine what error
to raise. `on_stuck_raise` can either directly be an `Exception`, or
a method that takes the problematic operation and returns an
`Exception`. If `on_stuck_raise` is set to `None` or a method that
returns `None`, undecomposable operations are simply silently kept.
`on_stuck_raise` defaults to a `ValueError` describing the unwanted
undecomposable operation.
Returns:
A list of operations that the given value was decomposed into. If
`on_stuck_raise` isn't set to None, all operations in the list will
satisfy the predicate specified by `keep`.
Raises:
TypeError:
`val` isn't a `cirq.Operation` and can't be decomposed even once.
(So it's not possible to return a list of operations.)
ValueError:
Default type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
TError:
Custom type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
### Response:
def decompose(
val: TValue,
*,
intercepting_decomposer: Callable[['cirq.Operation'],
Union[None,
NotImplementedType,
'cirq.OP_TREE']] = None,
fallback_decomposer: Callable[['cirq.Operation'],
Union[None,
NotImplementedType,
'cirq.OP_TREE']] = None,
keep: Callable[['cirq.Operation'], bool] = None,
on_stuck_raise: Union[None,
Exception,
Callable[['cirq.Operation'],
Union[None, Exception]]]
= _value_error_describing_bad_operation
) -> List['cirq.Operation']:
"""Recursively decomposes a value into `cirq.Operation`s meeting a criteria.
Args:
val: The value to decompose into operations.
intercepting_decomposer: An optional method that is called before the
default decomposer (the value's `_decompose_` method). If
`intercepting_decomposer` is specified and returns a result that
isn't `NotImplemented` or `None`, that result is used. Otherwise the
decomposition falls back to the default decomposer.
Note that `val` will be passed into `intercepting_decomposer`, even
if `val` isn't a `cirq.Operation`.
fallback_decomposer: An optional decomposition that used after the
`intercepting_decomposer` and the default decomposer (the value's
`_decompose_` method) both fail.
keep: A predicate that determines if the initial operation or
intermediate decomposed operations should be kept or else need to be
decomposed further. If `keep` isn't specified, it defaults to "value
can't be decomposed anymore".
on_stuck_raise: If there is an operation that can't be decomposed and
also can't be kept, `on_stuck_raise` is used to determine what error
to raise. `on_stuck_raise` can either directly be an `Exception`, or
a method that takes the problematic operation and returns an
`Exception`. If `on_stuck_raise` is set to `None` or a method that
returns `None`, undecomposable operations are simply silently kept.
`on_stuck_raise` defaults to a `ValueError` describing the unwanted
undecomposable operation.
Returns:
A list of operations that the given value was decomposed into. If
`on_stuck_raise` isn't set to None, all operations in the list will
satisfy the predicate specified by `keep`.
Raises:
TypeError:
`val` isn't a `cirq.Operation` and can't be decomposed even once.
(So it's not possible to return a list of operations.)
ValueError:
Default type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
TError:
Custom type of error raised if there's an undecomposable operation
that doesn't satisfy the given `keep` predicate.
"""
from cirq import ops # HACK: Avoids circular dependencies.
if (on_stuck_raise is not _value_error_describing_bad_operation and
keep is None):
raise ValueError(
"Must specify 'keep' if specifying 'on_stuck_raise', because it's "
"not possible to get stuck if you don't have a criteria on what's "
"acceptable to keep.")
decomposers = [d
for d in [intercepting_decomposer,
_default_decomposer,
fallback_decomposer]
if d]
def decomposer(op):
for d in decomposers:
r = d(op)
if r is not NotImplemented and r is not None:
return r
return NotImplemented
output = []
queue = [val] # type: List[Any]
while queue:
item = queue.pop(0)
if isinstance(item, ops.Operation) and keep is not None and keep(item):
output.append(item)
continue
decomposed = decomposer(item)
if decomposed is not NotImplemented and decomposed is not None:
queue[:0] = ops.flatten_op_tree(decomposed)
continue
if (not isinstance(item, ops.Operation) and
isinstance(item, collections.Iterable)):
queue[:0] = ops.flatten_op_tree(item)
continue
if keep is not None and on_stuck_raise is not None:
if isinstance(on_stuck_raise, Exception):
raise on_stuck_raise
elif callable(on_stuck_raise):
error = on_stuck_raise(item)
if error is not None:
raise error
output.append(item)
return output |
def _convert_to_folder(self, packages):
"""
Silverstripe's page contains a list of composer packages. This
function converts those to folder names. These may be different due
to installer-name.
Implemented exponential backoff in order to prevent packager from
being overly sensitive about the number of requests I was making.
@see: https://github.com/composer/installers#custom-install-names
@see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7
"""
url = 'http://packagist.org/p/%s.json'
with ThreadPoolExecutor(max_workers=12) as executor:
futures = []
for package in packages:
future = executor.submit(self._get, url, package)
futures.append({
'future': future,
'package': package
})
folders = []
for i, future in enumerate(futures, start=1):
r = future['future'].result()
package = future['package']
if not 'installer-name' in r.text:
folder_name = package.split('/')[1]
else:
splat = list(filter(None, re.split(r'[^a-zA-Z0-9-_.,]', r.text)))
folder_name = splat[splat.index('installer-name') + 1]
if not folder_name in folders:
folders.append(folder_name)
else:
print("Folder %s is duplicated (current %s, previous %s)" % (folder_name,
package, folders.index(folder_name)))
if i % 25 == 0:
print("Done %s." % i)
return folders | Silverstripe's page contains a list of composer packages. This
function converts those to folder names. These may be different due
to installer-name.
Implemented exponential backoff in order to prevent packager from
being overly sensitive about the number of requests I was making.
@see: https://github.com/composer/installers#custom-install-names
@see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7 | Below is the the instruction that describes the task:
### Input:
Silverstripe's page contains a list of composer packages. This
function converts those to folder names. These may be different due
to installer-name.
Implemented exponential backoff in order to prevent packager from
being overly sensitive about the number of requests I was making.
@see: https://github.com/composer/installers#custom-install-names
@see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7
### Response:
def _convert_to_folder(self, packages):
"""
Silverstripe's page contains a list of composer packages. This
function converts those to folder names. These may be different due
to installer-name.
Implemented exponential backoff in order to prevent packager from
being overly sensitive about the number of requests I was making.
@see: https://github.com/composer/installers#custom-install-names
@see: https://github.com/richardsjoqvist/silverstripe-localdate/issues/7
"""
url = 'http://packagist.org/p/%s.json'
with ThreadPoolExecutor(max_workers=12) as executor:
futures = []
for package in packages:
future = executor.submit(self._get, url, package)
futures.append({
'future': future,
'package': package
})
folders = []
for i, future in enumerate(futures, start=1):
r = future['future'].result()
package = future['package']
if not 'installer-name' in r.text:
folder_name = package.split('/')[1]
else:
splat = list(filter(None, re.split(r'[^a-zA-Z0-9-_.,]', r.text)))
folder_name = splat[splat.index('installer-name') + 1]
if not folder_name in folders:
folders.append(folder_name)
else:
print("Folder %s is duplicated (current %s, previous %s)" % (folder_name,
package, folders.index(folder_name)))
if i % 25 == 0:
print("Done %s." % i)
return folders |
def msgconvert(email):
"""
Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
"""
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]
try:
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
message = "To use this function you must install 'msgconvert' tool"
log.exception(message)
raise MailParserOSError(message)
else:
stdoutdata, _ = out.communicate()
return temp, stdoutdata.decode("utf-8").strip()
finally:
os.close(temph) | Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3) | Below is the the instruction that describes the task:
### Input:
Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
### Response:
def msgconvert(email):
"""
Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
"""
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]
try:
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
message = "To use this function you must install 'msgconvert' tool"
log.exception(message)
raise MailParserOSError(message)
else:
stdoutdata, _ = out.communicate()
return temp, stdoutdata.decode("utf-8").strip()
finally:
os.close(temph) |
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t', 'g']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors', 'graphs']:
if list(getattr(a, f)):
raise NotImplementedError("Filed {} is not supported in mxnet.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs | Convert a list of AttributeProto to a dict, with names as keys. | Below is the the instruction that describes the task:
### Input:
Convert a list of AttributeProto to a dict, with names as keys.
### Response:
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t', 'g']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors', 'graphs']:
if list(getattr(a, f)):
raise NotImplementedError("Filed {} is not supported in mxnet.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs |
def get_urls(self):
"""
Add the entries view to urls.
"""
urls = super(FormAdmin, self).get_urls()
extra_urls = [
re_path("^(?P<form_id>\d+)/entries/$",
self.admin_site.admin_view(self.entries_view),
name="form_entries"),
re_path("^(?P<form_id>\d+)/entries/show/$",
self.admin_site.admin_view(self.entries_view),
{"show": True}, name="form_entries_show"),
re_path("^(?P<form_id>\d+)/entries/export/$",
self.admin_site.admin_view(self.entries_view),
{"export": True}, name="form_entries_export"),
re_path("^file/(?P<field_entry_id>\d+)/$",
self.admin_site.admin_view(self.file_view),
name="form_file"),
]
return extra_urls + urls | Add the entries view to urls. | Below is the the instruction that describes the task:
### Input:
Add the entries view to urls.
### Response:
def get_urls(self):
"""
Add the entries view to urls.
"""
urls = super(FormAdmin, self).get_urls()
extra_urls = [
re_path("^(?P<form_id>\d+)/entries/$",
self.admin_site.admin_view(self.entries_view),
name="form_entries"),
re_path("^(?P<form_id>\d+)/entries/show/$",
self.admin_site.admin_view(self.entries_view),
{"show": True}, name="form_entries_show"),
re_path("^(?P<form_id>\d+)/entries/export/$",
self.admin_site.admin_view(self.entries_view),
{"export": True}, name="form_entries_export"),
re_path("^file/(?P<field_entry_id>\d+)/$",
self.admin_site.admin_view(self.file_view),
name="form_file"),
]
return extra_urls + urls |
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
"""Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding)
"""
adata = data.copy() if copy else data
if vkey + '_genes' not in adata.var.keys(): velocity(data, vkey)
adata.var[vkey + '_genes'] = np.array(adata.var[vkey + '_genes'], dtype=bool) & (adata.var[vkey + '_r2'] > min_r2)
if highly_variable and 'highly_variable' in adata.var.keys():
adata.var[vkey + '_genes'] &= adata.var['highly_variable']
logg.info('Number of obtained velocity_genes:', np.sum(adata.var[vkey + '_genes']))
return adata if copy else None | Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding) | Below is the the instruction that describes the task:
### Input:
Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding)
### Response:
def velocity_genes(data, vkey='velocity', min_r2=0.01, highly_variable=None, copy=False):
"""Estimates velocities in a gene-specific manner
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` (default: `'velocity'`)
Name under which to refer to the computed velocities for `velocity_graph` and `velocity_embedding`.
min_r2: `float` (default: 0.01)
Minimum threshold for coefficient of determination
highly_variable: `bool` (default: `None`)
Whether to include highly variable genes only.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Updates `adata` attributes
velocity_genes: `.var`
genes to be used for further velocity analysis (velocity graph and embedding)
"""
adata = data.copy() if copy else data
if vkey + '_genes' not in adata.var.keys(): velocity(data, vkey)
adata.var[vkey + '_genes'] = np.array(adata.var[vkey + '_genes'], dtype=bool) & (adata.var[vkey + '_r2'] > min_r2)
if highly_variable and 'highly_variable' in adata.var.keys():
adata.var[vkey + '_genes'] &= adata.var['highly_variable']
logg.info('Number of obtained velocity_genes:', np.sum(adata.var[vkey + '_genes']))
return adata if copy else None |
def pprint_value_string(self, value):
"""Pretty print the dimension value and unit.
Args:
value: Dimension value to format
Returns:
Formatted dimension value string with unit
"""
unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)
value = self.pprint_value(value)
return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit) | Pretty print the dimension value and unit.
Args:
value: Dimension value to format
Returns:
Formatted dimension value string with unit | Below is the the instruction that describes the task:
### Input:
Pretty print the dimension value and unit.
Args:
value: Dimension value to format
Returns:
Formatted dimension value string with unit
### Response:
def pprint_value_string(self, value):
"""Pretty print the dimension value and unit.
Args:
value: Dimension value to format
Returns:
Formatted dimension value string with unit
"""
unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit)
value = self.pprint_value(value)
return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit) |
def use(backend):
"""Use the given database backend, e.g. 'MySQLdb', 'psycopg2',
'MySQLdb', etc.
"""
global Backend
try:
Backend = importlib.import_module(backend)
except ImportError:
msg = 'Could not import (load) database.backend: %s' % backend
raise cfg.AitConfigError(msg) | Use the given database backend, e.g. 'MySQLdb', 'psycopg2',
'MySQLdb', etc. | Below is the the instruction that describes the task:
### Input:
Use the given database backend, e.g. 'MySQLdb', 'psycopg2',
'MySQLdb', etc.
### Response:
def use(backend):
"""Use the given database backend, e.g. 'MySQLdb', 'psycopg2',
'MySQLdb', etc.
"""
global Backend
try:
Backend = importlib.import_module(backend)
except ImportError:
msg = 'Could not import (load) database.backend: %s' % backend
raise cfg.AitConfigError(msg) |
def configurar_interface_de_rede(self, configuracao):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
resp = self._http_post('configurarinterfacederede',
configuracao=configuracao.documento())
conteudo = resp.json()
return RespostaSAT.configurar_interface_de_rede(conteudo.get('retorno')) | Sobrepõe :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT | Below is the the instruction that describes the task:
### Input:
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
### Response:
def configurar_interface_de_rede(self, configuracao):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
resp = self._http_post('configurarinterfacederede',
configuracao=configuracao.documento())
conteudo = resp.json()
return RespostaSAT.configurar_interface_de_rede(conteudo.get('retorno')) |
def identity(self, surface):
"""
Fast scale operation that does not sample the results
"""
return self._pygame.transform.scale(surface, self._output_size) | Fast scale operation that does not sample the results | Below is the the instruction that describes the task:
### Input:
Fast scale operation that does not sample the results
### Response:
def identity(self, surface):
"""
Fast scale operation that does not sample the results
"""
return self._pygame.transform.scale(surface, self._output_size) |
def inputhook_glut():
"""Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
signal.signal(signal.SIGINT, glut_int_handler)
try:
t = clock()
# Make sure the default window is set after a window has been closed
if glut.glutGetWindow() == 0:
glut.glutSetWindow( 1 )
glutMainLoopEvent()
return 0
while not stdin_ready():
glutMainLoopEvent()
# We need to sleep at this point to keep the idle CPU load
# low. However, if sleep to long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 5*60.0:
# print 'Sleep for 5 s' # dbg
time.sleep(5.0)
elif used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0 | Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance. | Below is the the instruction that describes the task:
### Input:
Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance.
### Response:
def inputhook_glut():
"""Run the pyglet event loop by processing pending events only.
This keeps processing pending events until stdin is ready. After
processing all pending events, a call to time.sleep is inserted. This is
needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
though for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
signal.signal(signal.SIGINT, glut_int_handler)
try:
t = clock()
# Make sure the default window is set after a window has been closed
if glut.glutGetWindow() == 0:
glut.glutSetWindow( 1 )
glutMainLoopEvent()
return 0
while not stdin_ready():
glutMainLoopEvent()
# We need to sleep at this point to keep the idle CPU load
# low. However, if sleep to long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 5*60.0:
# print 'Sleep for 5 s' # dbg
time.sleep(5.0)
elif used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
except KeyboardInterrupt:
pass
return 0 |
def flatten_rules(self, declarations):
"""
Flatten returned import rules from regex.
Because import rules can contains multiple items in the same rule
(called multiline import rule), the regex ``REGEX_IMPORT_RULE``
return a list of unquoted items for each rule.
Args:
declarations (list): A SCSS source.
Returns:
list: Given SCSS source with all comments removed.
"""
rules = []
for protocole, paths in declarations:
# If there is a protocole (like 'url), drop it
if protocole:
continue
# Unquote and possibly split multiple rule in the same declaration
rules.extend([self.strip_quotes(v.strip())
for v in paths.split(',')])
return list(filter(self.filter_rules, rules)) | Flatten returned import rules from regex.
Because import rules can contains multiple items in the same rule
(called multiline import rule), the regex ``REGEX_IMPORT_RULE``
return a list of unquoted items for each rule.
Args:
declarations (list): A SCSS source.
Returns:
list: Given SCSS source with all comments removed. | Below is the the instruction that describes the task:
### Input:
Flatten returned import rules from regex.
Because import rules can contains multiple items in the same rule
(called multiline import rule), the regex ``REGEX_IMPORT_RULE``
return a list of unquoted items for each rule.
Args:
declarations (list): A SCSS source.
Returns:
list: Given SCSS source with all comments removed.
### Response:
def flatten_rules(self, declarations):
"""
Flatten returned import rules from regex.
Because import rules can contains multiple items in the same rule
(called multiline import rule), the regex ``REGEX_IMPORT_RULE``
return a list of unquoted items for each rule.
Args:
declarations (list): A SCSS source.
Returns:
list: Given SCSS source with all comments removed.
"""
rules = []
for protocole, paths in declarations:
# If there is a protocole (like 'url), drop it
if protocole:
continue
# Unquote and possibly split multiple rule in the same declaration
rules.extend([self.strip_quotes(v.strip())
for v in paths.split(',')])
return list(filter(self.filter_rules, rules)) |
def get_curve_name_by_oid(oid):
"""Return curve name matching specified OID, or raise KeyError."""
for curve_name, info in SUPPORTED_CURVES.items():
if info['oid'] == oid:
return curve_name
raise KeyError('Unknown OID: {!r}'.format(oid)) | Return curve name matching specified OID, or raise KeyError. | Below is the the instruction that describes the task:
### Input:
Return curve name matching specified OID, or raise KeyError.
### Response:
def get_curve_name_by_oid(oid):
"""Return curve name matching specified OID, or raise KeyError."""
for curve_name, info in SUPPORTED_CURVES.items():
if info['oid'] == oid:
return curve_name
raise KeyError('Unknown OID: {!r}'.format(oid)) |
def accented_syllable_to_numbered(s):
"""Convert accented Pinyin syllable *s* to a numbered Pinyin syllable."""
if s[0] == '\u00B7':
lowercase_syllable, case_memory = _lower_case(s[1:])
lowercase_syllable = '\u00B7' + lowercase_syllable
else:
lowercase_syllable, case_memory = _lower_case(s)
numbered_syllable, tone = _parse_accented_syllable(lowercase_syllable)
return _restore_case(numbered_syllable, case_memory) + tone | Convert accented Pinyin syllable *s* to a numbered Pinyin syllable. | Below is the the instruction that describes the task:
### Input:
Convert accented Pinyin syllable *s* to a numbered Pinyin syllable.
### Response:
def accented_syllable_to_numbered(s):
"""Convert accented Pinyin syllable *s* to a numbered Pinyin syllable."""
if s[0] == '\u00B7':
lowercase_syllable, case_memory = _lower_case(s[1:])
lowercase_syllable = '\u00B7' + lowercase_syllable
else:
lowercase_syllable, case_memory = _lower_case(s)
numbered_syllable, tone = _parse_accented_syllable(lowercase_syllable)
return _restore_case(numbered_syllable, case_memory) + tone |
async def packet_receiver(queue):
""" Asynchronous function that processes queue until None is posted in queue """
LOG.info("Entering packet_receiver")
while True:
packet = await queue.get()
if packet is None:
break
LOG.info("Framenumber %s", packet.framenumber)
LOG.info("Exiting packet_receiver") | Asynchronous function that processes queue until None is posted in queue | Below is the the instruction that describes the task:
### Input:
Asynchronous function that processes queue until None is posted in queue
### Response:
async def packet_receiver(queue):
""" Asynchronous function that processes queue until None is posted in queue """
LOG.info("Entering packet_receiver")
while True:
packet = await queue.get()
if packet is None:
break
LOG.info("Framenumber %s", packet.framenumber)
LOG.info("Exiting packet_receiver") |
def split_sources(srcs):
"""
:param srcs: sources
:returns: a pair (split sources, split time) or just the split_sources
"""
from openquake.hazardlib.source import splittable
sources = []
split_time = {} # src.id -> time
for src in srcs:
t0 = time.time()
mag_a, mag_b = src.get_min_max_mag()
min_mag = src.min_mag
if mag_b < min_mag: # discard the source completely
continue
has_serial = hasattr(src, 'serial')
if has_serial:
src.serial = numpy.arange(
src.serial, src.serial + src.num_ruptures)
if not splittable(src):
sources.append(src)
split_time[src.id] = time.time() - t0
continue
if min_mag:
splits = []
for s in src:
s.min_mag = min_mag
mag_a, mag_b = s.get_min_max_mag()
if mag_b < min_mag:
continue
s.num_ruptures = s.count_ruptures()
if s.num_ruptures:
splits.append(s)
else:
splits = list(src)
split_time[src.id] = time.time() - t0
sources.extend(splits)
has_samples = hasattr(src, 'samples')
if len(splits) > 1:
start = 0
for i, split in enumerate(splits):
split.source_id = '%s:%s' % (src.source_id, i)
split.src_group_id = src.src_group_id
split.id = src.id
if has_serial:
nr = split.num_ruptures
split.serial = src.serial[start:start + nr]
start += nr
if has_samples:
split.samples = src.samples
elif splits: # single source
splits[0].id = src.id
if has_serial:
splits[0].serial = src.serial
if has_samples:
splits[0].samples = src.samples
return sources, split_time | :param srcs: sources
:returns: a pair (split sources, split time) or just the split_sources | Below is the the instruction that describes the task:
### Input:
:param srcs: sources
:returns: a pair (split sources, split time) or just the split_sources
### Response:
def split_sources(srcs):
"""
:param srcs: sources
:returns: a pair (split sources, split time) or just the split_sources
"""
from openquake.hazardlib.source import splittable
sources = []
split_time = {} # src.id -> time
for src in srcs:
t0 = time.time()
mag_a, mag_b = src.get_min_max_mag()
min_mag = src.min_mag
if mag_b < min_mag: # discard the source completely
continue
has_serial = hasattr(src, 'serial')
if has_serial:
src.serial = numpy.arange(
src.serial, src.serial + src.num_ruptures)
if not splittable(src):
sources.append(src)
split_time[src.id] = time.time() - t0
continue
if min_mag:
splits = []
for s in src:
s.min_mag = min_mag
mag_a, mag_b = s.get_min_max_mag()
if mag_b < min_mag:
continue
s.num_ruptures = s.count_ruptures()
if s.num_ruptures:
splits.append(s)
else:
splits = list(src)
split_time[src.id] = time.time() - t0
sources.extend(splits)
has_samples = hasattr(src, 'samples')
if len(splits) > 1:
start = 0
for i, split in enumerate(splits):
split.source_id = '%s:%s' % (src.source_id, i)
split.src_group_id = src.src_group_id
split.id = src.id
if has_serial:
nr = split.num_ruptures
split.serial = src.serial[start:start + nr]
start += nr
if has_samples:
split.samples = src.samples
elif splits: # single source
splits[0].id = src.id
if has_serial:
splits[0].serial = src.serial
if has_samples:
splits[0].samples = src.samples
return sources, split_time |
async def export_chat_invite_link(self, chat_id: typing.Union[base.Integer, base.String]) -> base.String:
"""
Use this method to generate a new invite link for a chat; any previously generated link is revoked.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#exportchatinvitelink
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns exported invite link as String on success
:rtype: :obj:`base.String`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.EXPORT_CHAT_INVITE_LINK, payload)
return result | Use this method to generate a new invite link for a chat; any previously generated link is revoked.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#exportchatinvitelink
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns exported invite link as String on success
:rtype: :obj:`base.String` | Below is the the instruction that describes the task:
### Input:
Use this method to generate a new invite link for a chat; any previously generated link is revoked.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#exportchatinvitelink
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns exported invite link as String on success
:rtype: :obj:`base.String`
### Response:
async def export_chat_invite_link(self, chat_id: typing.Union[base.Integer, base.String]) -> base.String:
"""
Use this method to generate a new invite link for a chat; any previously generated link is revoked.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Source: https://core.telegram.org/bots/api#exportchatinvitelink
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:return: Returns exported invite link as String on success
:rtype: :obj:`base.String`
"""
payload = generate_payload(**locals())
result = await self.request(api.Methods.EXPORT_CHAT_INVITE_LINK, payload)
return result |
def __convert_node(node, default_value='', default_flags=vsflags()):
"""Converts a XML node to a JSON equivalent."""
name = __get_attribute(node, 'Name')
logging.debug('Found %s named %s', node.tagName, name)
converted = {}
converted['name'] = name
converted['switch'] = __get_attribute(node, 'Switch')
converted['comment'] = __get_attribute(node, 'DisplayName')
converted['value'] = default_value
# Check for the Flags attribute in case it was created during preprocessing
flags = __get_attribute(node, 'Flags')
if flags:
flags = flags.split(',')
else:
flags = default_flags
converted['flags'] = flags
return converted | Converts a XML node to a JSON equivalent. | Below is the the instruction that describes the task:
### Input:
Converts a XML node to a JSON equivalent.
### Response:
def __convert_node(node, default_value='', default_flags=vsflags()):
"""Converts a XML node to a JSON equivalent."""
name = __get_attribute(node, 'Name')
logging.debug('Found %s named %s', node.tagName, name)
converted = {}
converted['name'] = name
converted['switch'] = __get_attribute(node, 'Switch')
converted['comment'] = __get_attribute(node, 'DisplayName')
converted['value'] = default_value
# Check for the Flags attribute in case it was created during preprocessing
flags = __get_attribute(node, 'Flags')
if flags:
flags = flags.split(',')
else:
flags = default_flags
converted['flags'] = flags
return converted |
def describe_ip(ip_address, source='whatismyip'):
''' a method to get the details associated with an ip address '''
# determine url
if source == 'nekudo':
source_url = 'https://geoip.nekudo.com/api/%s' % ip_address
elif source == 'geoip':
source_url = 'https://freegeoip.net/json/%s' % ip_address
elif source == 'whatismyip':
# http://whatismyipaddress.com/ip-lookup
source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address
else:
raise Exception('describe_ip currently only supports queries to nekudo')
# TODO incorporate geoip module and c dependencies with local database
# http://tech.marksblogg.com/ip-address-lookups-in-python.html
# send request
ip_details = {
'accuracy_radius': 0,
'asn': '',
'assignment': '',
'city': '',
'continent': '',
'country': '',
'hostname': '',
'ip': '',
'isp': '',
'latitude': 0.0,
'longitude': 0.0,
'organization': '',
'postal_code': '',
'region': '',
'timezone': '',
'type': ''
}
import requests
try:
response = requests.get(url=source_url)
except Exception as err:
from labpack.handlers.requests import handle_requests
from requests import Request
request_object = Request(method='GET', url=source_url)
request_details = handle_requests(request_object)
raise Exception(request_details['error'])
# extract response
if source == 'whatismyip':
import re
response_text = response.content.decode()
table_regex = re.compile('<table>\n<tr><th>IP.*?</table>\n<span\sstyle', re.S)
table_search = table_regex.findall(response_text)
if table_search:
table_text = table_search[0]
field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code']
for field in field_list:
field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S)
field_search = field_regex.findall(table_text)
if field_search:
ip_details[field.lower().replace(' ','_')] = field_search[0]
for field in ('longitude', 'latitude'):
if field in ip_details.keys():
coord_regex = re.compile('\-?\d+\.\d+')
coord_search = coord_regex.findall(ip_details[field])
if coord_search:
ip_details[field] = float(coord_search[0])
if 'country' in ip_details.keys():
country_regex = re.compile('([\w\s]+?)($|\s<img)')
country_search = country_regex.findall(ip_details['country'])
if country_search:
ip_details['country'] = country_search[0][0]
for field in ('type', 'assignment'):
if field in ip_details.keys():
link_regex = re.compile('>(.*?)<')
link_search = link_regex.findall(ip_details[field])
if link_search:
ip_details[field] = link_search[0]
if 'state/region' in ip_details.keys():
ip_details['region'] = ip_details['state/region']
del ip_details['state/region']
elif source == 'nekudo':
response_details = response.json()
ip_details['country'] = response_details['country']['name']
ip_details['latitude'] = response_details['location']['latitude']
ip_details['longitude'] = response_details['location']['longitude']
ip_details['accuracy_radius'] = response_details['location']['accuracy_radius']
if response_details['city']:
ip_details['city'] = response_details['city']
ip_details['ip'] = response_details['ip']
for key in response_details.keys():
if key not in ip_details.keys() and key != 'location':
ip_details[key] = response_details[key]
else:
response_details = response.json()
for field in ('city', 'ip', 'latitude', 'longitude'):
ip_details[field] = response_details[field]
ip_details['country'] = response_details['country_name']
ip_details['region'] = response_details['region_name']
ip_details['postal_code'] = response_details['zip_code']
ip_details['timezone'] = response_details['time_zone']
return ip_details | a method to get the details associated with an ip address | Below is the the instruction that describes the task:
### Input:
a method to get the details associated with an ip address
### Response:
def describe_ip(ip_address, source='whatismyip'):
''' a method to get the details associated with an ip address '''
# determine url
if source == 'nekudo':
source_url = 'https://geoip.nekudo.com/api/%s' % ip_address
elif source == 'geoip':
source_url = 'https://freegeoip.net/json/%s' % ip_address
elif source == 'whatismyip':
# http://whatismyipaddress.com/ip-lookup
source_url = 'https://whatismyipaddress.com/ip/%s' % ip_address
else:
raise Exception('describe_ip currently only supports queries to nekudo')
# TODO incorporate geoip module and c dependencies with local database
# http://tech.marksblogg.com/ip-address-lookups-in-python.html
# send request
ip_details = {
'accuracy_radius': 0,
'asn': '',
'assignment': '',
'city': '',
'continent': '',
'country': '',
'hostname': '',
'ip': '',
'isp': '',
'latitude': 0.0,
'longitude': 0.0,
'organization': '',
'postal_code': '',
'region': '',
'timezone': '',
'type': ''
}
import requests
try:
response = requests.get(url=source_url)
except Exception as err:
from labpack.handlers.requests import handle_requests
from requests import Request
request_object = Request(method='GET', url=source_url)
request_details = handle_requests(request_object)
raise Exception(request_details['error'])
# extract response
if source == 'whatismyip':
import re
response_text = response.content.decode()
table_regex = re.compile('<table>\n<tr><th>IP.*?</table>\n<span\sstyle', re.S)
table_search = table_regex.findall(response_text)
if table_search:
table_text = table_search[0]
field_list = [ 'IP', 'Hostname', 'ISP', 'Organization', 'Type', 'ASN', 'Assignment', 'Continent', 'Country', 'State/Region', 'City', 'Latitude', 'Longitude', 'Postal Code']
for field in field_list:
field_regex = re.compile('<tr><th>%s:</th><td>(.*?)</td>' % field, re.S)
field_search = field_regex.findall(table_text)
if field_search:
ip_details[field.lower().replace(' ','_')] = field_search[0]
for field in ('longitude', 'latitude'):
if field in ip_details.keys():
coord_regex = re.compile('\-?\d+\.\d+')
coord_search = coord_regex.findall(ip_details[field])
if coord_search:
ip_details[field] = float(coord_search[0])
if 'country' in ip_details.keys():
country_regex = re.compile('([\w\s]+?)($|\s<img)')
country_search = country_regex.findall(ip_details['country'])
if country_search:
ip_details['country'] = country_search[0][0]
for field in ('type', 'assignment'):
if field in ip_details.keys():
link_regex = re.compile('>(.*?)<')
link_search = link_regex.findall(ip_details[field])
if link_search:
ip_details[field] = link_search[0]
if 'state/region' in ip_details.keys():
ip_details['region'] = ip_details['state/region']
del ip_details['state/region']
elif source == 'nekudo':
response_details = response.json()
ip_details['country'] = response_details['country']['name']
ip_details['latitude'] = response_details['location']['latitude']
ip_details['longitude'] = response_details['location']['longitude']
ip_details['accuracy_radius'] = response_details['location']['accuracy_radius']
if response_details['city']:
ip_details['city'] = response_details['city']
ip_details['ip'] = response_details['ip']
for key in response_details.keys():
if key not in ip_details.keys() and key != 'location':
ip_details[key] = response_details[key]
else:
response_details = response.json()
for field in ('city', 'ip', 'latitude', 'longitude'):
ip_details[field] = response_details[field]
ip_details['country'] = response_details['country_name']
ip_details['region'] = response_details['region_name']
ip_details['postal_code'] = response_details['zip_code']
ip_details['timezone'] = response_details['time_zone']
return ip_details |
def predict(self, X):
r"""Predict labels.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Feature vector.
Returns
-------
pred : numpy array, shape=(n_samples, n_labels)
Predicted labels of given feature vector.
"""
X = np.asarray(X)
if self.clfs_ is None:
raise ValueError("Train before prediction")
if X.shape[1] != self.n_features_:
raise ValueError('Given feature size does not match')
pred = np.zeros((X.shape[0], self.n_labels_))
for i in range(self.n_labels_):
pred[:, i] = self.clfs_[i].predict(X)
return pred.astype(int) | r"""Predict labels.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Feature vector.
Returns
-------
pred : numpy array, shape=(n_samples, n_labels)
Predicted labels of given feature vector. | Below is the the instruction that describes the task:
### Input:
r"""Predict labels.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Feature vector.
Returns
-------
pred : numpy array, shape=(n_samples, n_labels)
Predicted labels of given feature vector.
### Response:
def predict(self, X):
r"""Predict labels.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Feature vector.
Returns
-------
pred : numpy array, shape=(n_samples, n_labels)
Predicted labels of given feature vector.
"""
X = np.asarray(X)
if self.clfs_ is None:
raise ValueError("Train before prediction")
if X.shape[1] != self.n_features_:
raise ValueError('Given feature size does not match')
pred = np.zeros((X.shape[0], self.n_labels_))
for i in range(self.n_labels_):
pred[:, i] = self.clfs_[i].predict(X)
return pred.astype(int) |
def set_state(self, onoff, channel=None):
"""Turn state on/off"""
try:
onoff = bool(onoff)
except Exception as err:
LOG.debug("HelperActorState.set_state: Exception %s" % (err,))
return False
self.writeNodeData("STATE", onoff, channel) | Turn state on/off | Below is the the instruction that describes the task:
### Input:
Turn state on/off
### Response:
def set_state(self, onoff, channel=None):
"""Turn state on/off"""
try:
onoff = bool(onoff)
except Exception as err:
LOG.debug("HelperActorState.set_state: Exception %s" % (err,))
return False
self.writeNodeData("STATE", onoff, channel) |
def verify_jwt_in_request():
"""
Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid.
"""
if request.method not in config.exempt_methods:
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
verify_token_claims(jwt_data)
_load_user(jwt_data[config.identity_claim_key]) | Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid. | Below is the the instruction that describes the task:
### Input:
Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid.
### Response:
def verify_jwt_in_request():
"""
Ensure that the requester has a valid access token. This does not check the
freshness of the access token. Raises an appropiate exception there is
no token or if the token is invalid.
"""
if request.method not in config.exempt_methods:
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
verify_token_claims(jwt_data)
_load_user(jwt_data[config.identity_claim_key]) |
def to_next_pedalboard(self):
"""
Change the current :class:`.Pedalboard` for the next pedalboard.
If the current pedalboard is the last in the current :class:`.Bank`,
the current pedalboard is will be the **first of the current Bank**
.. warning::
If the current :attr:`.pedalboard` is ``None``, a :class:`.CurrentPedalboardError` is raised.
"""
if self.pedalboard is None:
raise CurrentPedalboardError('The current pedalboard is None')
next_index = self.pedalboard.index + 1
if next_index == len(self.bank.pedalboards):
next_index = 0
self.set_pedalboard(self.bank.pedalboards[next_index]) | Change the current :class:`.Pedalboard` for the next pedalboard.
If the current pedalboard is the last in the current :class:`.Bank`,
the current pedalboard is will be the **first of the current Bank**
.. warning::
If the current :attr:`.pedalboard` is ``None``, a :class:`.CurrentPedalboardError` is raised. | Below is the the instruction that describes the task:
### Input:
Change the current :class:`.Pedalboard` for the next pedalboard.
If the current pedalboard is the last in the current :class:`.Bank`,
the current pedalboard is will be the **first of the current Bank**
.. warning::
If the current :attr:`.pedalboard` is ``None``, a :class:`.CurrentPedalboardError` is raised.
### Response:
def to_next_pedalboard(self):
"""
Change the current :class:`.Pedalboard` for the next pedalboard.
If the current pedalboard is the last in the current :class:`.Bank`,
the current pedalboard is will be the **first of the current Bank**
.. warning::
If the current :attr:`.pedalboard` is ``None``, a :class:`.CurrentPedalboardError` is raised.
"""
if self.pedalboard is None:
raise CurrentPedalboardError('The current pedalboard is None')
next_index = self.pedalboard.index + 1
if next_index == len(self.bank.pedalboards):
next_index = 0
self.set_pedalboard(self.bank.pedalboards[next_index]) |
def pointOnCircle(cx, cy, radius, angle):
"""
Calculates the coordinates of a point on a circle given the center point,
radius, and angle.
"""
angle = math.radians(angle) - (math.pi / 2)
x = cx + radius * math.cos(angle)
if x < cx:
x = math.ceil(x)
else:
x = math.floor(x)
y = cy + radius * math.sin(angle)
if y < cy:
y = math.ceil(y)
else:
y = math.floor(y)
return (int(x), int(y)) | Calculates the coordinates of a point on a circle given the center point,
radius, and angle. | Below is the the instruction that describes the task:
### Input:
Calculates the coordinates of a point on a circle given the center point,
radius, and angle.
### Response:
def pointOnCircle(cx, cy, radius, angle):
"""
Calculates the coordinates of a point on a circle given the center point,
radius, and angle.
"""
angle = math.radians(angle) - (math.pi / 2)
x = cx + radius * math.cos(angle)
if x < cx:
x = math.ceil(x)
else:
x = math.floor(x)
y = cy + radius * math.sin(angle)
if y < cy:
y = math.ceil(y)
else:
y = math.floor(y)
return (int(x), int(y)) |
def check_dataset(self, dataset_id, project_id=None):
"""Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
True if dataset at `dataset_id` exists, else Fasle
"""
dataset = self.get_dataset(dataset_id, project_id)
return bool(dataset) | Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
True if dataset at `dataset_id` exists, else Fasle | Below is the the instruction that describes the task:
### Input:
Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
True if dataset at `dataset_id` exists, else Fasle
### Response:
def check_dataset(self, dataset_id, project_id=None):
"""Check to see if a dataset exists.
Parameters
----------
dataset_id : str
Dataset unique id
project_id: str, optional
The project the dataset is in
Returns
-------
bool
True if dataset at `dataset_id` exists, else Fasle
"""
dataset = self.get_dataset(dataset_id, project_id)
return bool(dataset) |
def read(self, request, pk=None):
"""
Mark the message as read (i.e. delete from inbox)
"""
from .settings import stored_messages_settings
backend = stored_messages_settings.STORAGE_BACKEND()
try:
backend.inbox_delete(request.user, pk)
except MessageDoesNotExist as e:
return Response(e.message, status='404')
return Response({'status': 'message marked as read'}) | Mark the message as read (i.e. delete from inbox) | Below is the the instruction that describes the task:
### Input:
Mark the message as read (i.e. delete from inbox)
### Response:
def read(self, request, pk=None):
"""
Mark the message as read (i.e. delete from inbox)
"""
from .settings import stored_messages_settings
backend = stored_messages_settings.STORAGE_BACKEND()
try:
backend.inbox_delete(request.user, pk)
except MessageDoesNotExist as e:
return Response(e.message, status='404')
return Response({'status': 'message marked as read'}) |
def _maybe_call_volatility_fn_and_grads(volatility_fn,
state,
volatility_fn_results=None,
grads_volatility_fn=None,
sample_shape=None,
parallel_iterations=10):
"""Helper which computes `volatility_fn` results and grads, if needed."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
needs_volatility_fn_gradients = grads_volatility_fn is None
# Convert `volatility_fn_results` to a list
if volatility_fn_results is None:
volatility_fn_results = volatility_fn(*state_parts)
volatility_fn_results = (list(volatility_fn_results)
if mcmc_util.is_list_like(volatility_fn_results)
else [volatility_fn_results])
if len(volatility_fn_results) == 1:
volatility_fn_results *= len(state_parts)
if len(state_parts) != len(volatility_fn_results):
raise ValueError('`volatility_fn` should return a tensor or a list '
'of the same length as `current_state`.')
# The shape of 'volatility_parts' needs to have the number of chains as a
# leading dimension. For determinism we broadcast 'volatility_parts' to the
# shape of `state_parts` since each dimension of `state_parts` could have a
# different volatility value.
volatility_fn_results = _maybe_broadcast_volatility(volatility_fn_results,
state_parts)
if grads_volatility_fn is None:
[
_,
grads_volatility_fn,
] = diag_jacobian(
xs=state_parts,
ys=volatility_fn_results,
sample_shape=sample_shape,
parallel_iterations=parallel_iterations,
fn=volatility_fn)
# Compute gradient of `volatility_parts**2`
if needs_volatility_fn_gradients:
grads_volatility_fn = [
2. * g * volatility if g is not None else tf.zeros_like(
fn_arg, dtype=fn_arg.dtype.base_dtype)
for g, volatility, fn_arg in zip(
grads_volatility_fn, volatility_fn_results, state_parts)
]
return volatility_fn_results, grads_volatility_fn | Helper which computes `volatility_fn` results and grads, if needed. | Below is the the instruction that describes the task:
### Input:
Helper which computes `volatility_fn` results and grads, if needed.
### Response:
def _maybe_call_volatility_fn_and_grads(volatility_fn,
state,
volatility_fn_results=None,
grads_volatility_fn=None,
sample_shape=None,
parallel_iterations=10):
"""Helper which computes `volatility_fn` results and grads, if needed."""
state_parts = list(state) if mcmc_util.is_list_like(state) else [state]
needs_volatility_fn_gradients = grads_volatility_fn is None
# Convert `volatility_fn_results` to a list
if volatility_fn_results is None:
volatility_fn_results = volatility_fn(*state_parts)
volatility_fn_results = (list(volatility_fn_results)
if mcmc_util.is_list_like(volatility_fn_results)
else [volatility_fn_results])
if len(volatility_fn_results) == 1:
volatility_fn_results *= len(state_parts)
if len(state_parts) != len(volatility_fn_results):
raise ValueError('`volatility_fn` should return a tensor or a list '
'of the same length as `current_state`.')
# The shape of 'volatility_parts' needs to have the number of chains as a
# leading dimension. For determinism we broadcast 'volatility_parts' to the
# shape of `state_parts` since each dimension of `state_parts` could have a
# different volatility value.
volatility_fn_results = _maybe_broadcast_volatility(volatility_fn_results,
state_parts)
if grads_volatility_fn is None:
[
_,
grads_volatility_fn,
] = diag_jacobian(
xs=state_parts,
ys=volatility_fn_results,
sample_shape=sample_shape,
parallel_iterations=parallel_iterations,
fn=volatility_fn)
# Compute gradient of `volatility_parts**2`
if needs_volatility_fn_gradients:
grads_volatility_fn = [
2. * g * volatility if g is not None else tf.zeros_like(
fn_arg, dtype=fn_arg.dtype.base_dtype)
for g, volatility, fn_arg in zip(
grads_volatility_fn, volatility_fn_results, state_parts)
]
return volatility_fn_results, grads_volatility_fn |
def reset(self):
"""" Reset the state as new """
self.last_usage = None
self.last_collect = None
self.last_metrics = None
self.snapshot_countdown = 0
self.run() | Reset the state as new | Below is the the instruction that describes the task:
### Input:
Reset the state as new
### Response:
def reset(self):
"""" Reset the state as new """
self.last_usage = None
self.last_collect = None
self.last_metrics = None
self.snapshot_countdown = 0
self.run() |
def combine_reports(original, new):
"""Combines two gcov reports for a file into one by adding the number of hits on each line
"""
if original is None:
return new
report = {}
report['name'] = original['name']
report['source_digest'] = original['source_digest']
coverage = []
for original_num, new_num in zip(original['coverage'], new['coverage']):
if original_num is None:
coverage.append(new_num)
elif new_num is None:
coverage.append(original_num)
else:
coverage.append(original_num + new_num)
report['coverage'] = coverage
return report | Combines two gcov reports for a file into one by adding the number of hits on each line | Below is the the instruction that describes the task:
### Input:
Combines two gcov reports for a file into one by adding the number of hits on each line
### Response:
def combine_reports(original, new):
"""Combines two gcov reports for a file into one by adding the number of hits on each line
"""
if original is None:
return new
report = {}
report['name'] = original['name']
report['source_digest'] = original['source_digest']
coverage = []
for original_num, new_num in zip(original['coverage'], new['coverage']):
if original_num is None:
coverage.append(new_num)
elif new_num is None:
coverage.append(original_num)
else:
coverage.append(original_num + new_num)
report['coverage'] = coverage
return report |
def reply(cls, name, *args, **kwargs):
"""Helper method for creating reply messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword Arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REPLY, name, args, mid) | Helper method for creating reply messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword Arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID | Below is the the instruction that describes the task:
### Input:
Helper method for creating reply messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword Arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
### Response:
def reply(cls, name, *args, **kwargs):
"""Helper method for creating reply messages.
Parameters
----------
name : str
The name of the message.
args : list of strings
The message arguments.
Keyword Arguments
-----------------
mid : str or None
Message ID to use or None (default) for no Message ID
"""
mid = kwargs.pop('mid', None)
if len(kwargs) > 0:
raise TypeError('Invalid keyword argument(s): %r' % kwargs)
return cls(cls.REPLY, name, args, mid) |
def loads(s, **kwargs):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
"""
if not isinstance(s, string_type):
raise TypeError("Expected s to be a str, got %s" % type(s))
try:
fp = unicodeIO(s)
except TypeError:
fp = strIO(s)
return parse(fp, **kwargs) | Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object. | Below is the the instruction that describes the task:
### Input:
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
### Response:
def loads(s, **kwargs):
"""
Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
"""
if not isinstance(s, string_type):
raise TypeError("Expected s to be a str, got %s" % type(s))
try:
fp = unicodeIO(s)
except TypeError:
fp = strIO(s)
return parse(fp, **kwargs) |
def McNelly(rhol, rhog, kl, Cpl, Hvap, sigma, P, Te=None, q=None):
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = \left(0.225\left(\frac{\Delta T_e C_{p,l}}{H_{vap}}\right)^{0.69}
\left(\frac{P k_L}{\sigma}\right)^{0.31}
\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}\right)^{1/0.31}
With `q` specified:
.. math::
h = 0.225\left(\frac{q C_{p,l}}{H_{vap}}\right)^{0.69} \left(\frac{P
k_L}{\sigma}\right)^{0.31}\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
P : float
Saturation pressure of fluid, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Further examples for this function are desired.
Examples
--------
Water boiling, with excess temperature of 4.3 K.
>>> McNelly(Te=4.3, P=101325, Cpl=4180., kl=0.688, sigma=0.0588,
... Hvap=2.25E6, rhol=958., rhog=0.597)
533.8056972951352
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] McNelly M. J.: "A correlation of the rates of heat transfer to n
ucleate boiling liquids," J. Imp Coll. Chem Eng Soc 7:18, 1953.
'''
if Te:
return (0.225*(Te*Cpl/Hvap)**0.69*(P*kl/sigma)**0.31*(rhol/rhog-1.)**0.33
)**(1./0.31)
elif q:
return 0.225*(q*Cpl/Hvap)**0.69*(P*kl/sigma)**0.31*(rhol/rhog-1.)**0.33
else:
raise Exception('Either q or Te is needed for this correlation') | r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = \left(0.225\left(\frac{\Delta T_e C_{p,l}}{H_{vap}}\right)^{0.69}
\left(\frac{P k_L}{\sigma}\right)^{0.31}
\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}\right)^{1/0.31}
With `q` specified:
.. math::
h = 0.225\left(\frac{q C_{p,l}}{H_{vap}}\right)^{0.69} \left(\frac{P
k_L}{\sigma}\right)^{0.31}\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
P : float
Saturation pressure of fluid, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Further examples for this function are desired.
Examples
--------
Water boiling, with excess temperature of 4.3 K.
>>> McNelly(Te=4.3, P=101325, Cpl=4180., kl=0.688, sigma=0.0588,
... Hvap=2.25E6, rhol=958., rhog=0.597)
533.8056972951352
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] McNelly M. J.: "A correlation of the rates of heat transfer to n
ucleate boiling liquids," J. Imp Coll. Chem Eng Soc 7:18, 1953. | Below is the the instruction that describes the task:
### Input:
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = \left(0.225\left(\frac{\Delta T_e C_{p,l}}{H_{vap}}\right)^{0.69}
\left(\frac{P k_L}{\sigma}\right)^{0.31}
\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}\right)^{1/0.31}
With `q` specified:
.. math::
h = 0.225\left(\frac{q C_{p,l}}{H_{vap}}\right)^{0.69} \left(\frac{P
k_L}{\sigma}\right)^{0.31}\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
P : float
Saturation pressure of fluid, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Further examples for this function are desired.
Examples
--------
Water boiling, with excess temperature of 4.3 K.
>>> McNelly(Te=4.3, P=101325, Cpl=4180., kl=0.688, sigma=0.0588,
... Hvap=2.25E6, rhol=958., rhog=0.597)
533.8056972951352
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] McNelly M. J.: "A correlation of the rates of heat transfer to n
ucleate boiling liquids," J. Imp Coll. Chem Eng Soc 7:18, 1953.
### Response:
def McNelly(rhol, rhog, kl, Cpl, Hvap, sigma, P, Te=None, q=None):
r'''Calculates heat transfer coefficient for a evaporator operating
in the nucleate boiling regime according to [2]_ as presented in [1]_.
Either heat flux or excess temperature is required.
With `Te` specified:
.. math::
h = \left(0.225\left(\frac{\Delta T_e C_{p,l}}{H_{vap}}\right)^{0.69}
\left(\frac{P k_L}{\sigma}\right)^{0.31}
\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}\right)^{1/0.31}
With `q` specified:
.. math::
h = 0.225\left(\frac{q C_{p,l}}{H_{vap}}\right)^{0.69} \left(\frac{P
k_L}{\sigma}\right)^{0.31}\left(\frac{\rho_L}{\rho_V}-1\right)^{0.33}
Parameters
----------
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the produced gas [kg/m^3]
kl : float
Thermal conductivity of liquid [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Hvap : float
Heat of vaporization of the fluid at P, [J/kg]
sigma : float
Surface tension of liquid [N/m]
P : float
Saturation pressure of fluid, [Pa]
Te : float, optional
Excess wall temperature, [K]
q : float, optional
Heat flux, [W/m^2]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
Further examples for this function are desired.
Examples
--------
Water boiling, with excess temperature of 4.3 K.
>>> McNelly(Te=4.3, P=101325, Cpl=4180., kl=0.688, sigma=0.0588,
... Hvap=2.25E6, rhol=958., rhog=0.597)
533.8056972951352
References
----------
.. [1] Cao, Eduardo. Heat Transfer in Process Engineering.
McGraw Hill Professional, 2009.
.. [2] McNelly M. J.: "A correlation of the rates of heat transfer to n
ucleate boiling liquids," J. Imp Coll. Chem Eng Soc 7:18, 1953.
'''
if Te:
return (0.225*(Te*Cpl/Hvap)**0.69*(P*kl/sigma)**0.31*(rhol/rhog-1.)**0.33
)**(1./0.31)
elif q:
return 0.225*(q*Cpl/Hvap)**0.69*(P*kl/sigma)**0.31*(rhol/rhog-1.)**0.33
else:
raise Exception('Either q or Te is needed for this correlation') |
def calculate_overlap(self):
"""Create the array that describes how junctions overlap"""
overs = []
if not self.tx_obj1.range.overlaps(self.tx_obj2.range): return [] # if they dont overlap wont find anything
for i in range(0,len(self.j1)):
for j in range(0,len(self.j2)):
if self.j1[i].overlaps(self.j2[j],tolerance=self.tolerance):
overs.append([i,j])
return overs | Create the array that describes how junctions overlap | Below is the the instruction that describes the task:
### Input:
Create the array that describes how junctions overlap
### Response:
def calculate_overlap(self):
"""Create the array that describes how junctions overlap"""
overs = []
if not self.tx_obj1.range.overlaps(self.tx_obj2.range): return [] # if they dont overlap wont find anything
for i in range(0,len(self.j1)):
for j in range(0,len(self.j2)):
if self.j1[i].overlaps(self.j2[j],tolerance=self.tolerance):
overs.append([i,j])
return overs |
def _maybe_handle(self, prefix, handler, path, params, data=None):
"""Apply the handler if the prefix matches."""
if path.startswith(prefix):
relpath = path[len(prefix):]
if data:
handler(relpath, params, data)
else:
handler(relpath, params)
return True
else:
return False | Apply the handler if the prefix matches. | Below is the the instruction that describes the task:
### Input:
Apply the handler if the prefix matches.
### Response:
def _maybe_handle(self, prefix, handler, path, params, data=None):
"""Apply the handler if the prefix matches."""
if path.startswith(prefix):
relpath = path[len(prefix):]
if data:
handler(relpath, params, data)
else:
handler(relpath, params)
return True
else:
return False |
def _validate_fasta_vs_seqres(self):
'''Check that the FASTA and SEQRES sequences agree (they sometimes differ)'''
pdb_id = self.pdb_id
for chain_id, sequence in self.pdb.seqres_sequences.iteritems():
if str(sequence) != self.FASTA[pdb_id][chain_id]:
if self.pdb_id in use_seqres_sequence_for_fasta_sequence:
self.FASTA.replace_sequence(self.pdb_id, chain_id, str(sequence))
elif self.pdb_id in use_fasta_sequence_for_seqres_sequence:
self.pdb.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, self.FASTA[pdb_id][chain_id], self.sequence_types[chain_id])
sequence = self.FASTA[pdb_id][chain_id]
if str(sequence) != self.FASTA[pdb_id][chain_id]:
raise colortext.Exception("The SEQRES and FASTA sequences disagree for chain %s in %s. This can happen but special-case handling (use_seqres_sequence_for_fasta_sequence) should be added to the file containing the %s class." % (chain_id, pdb_id, self.__class__.__name__)) | Check that the FASTA and SEQRES sequences agree (they sometimes differ) | Below is the the instruction that describes the task:
### Input:
Check that the FASTA and SEQRES sequences agree (they sometimes differ)
### Response:
def _validate_fasta_vs_seqres(self):
'''Check that the FASTA and SEQRES sequences agree (they sometimes differ)'''
pdb_id = self.pdb_id
for chain_id, sequence in self.pdb.seqres_sequences.iteritems():
if str(sequence) != self.FASTA[pdb_id][chain_id]:
if self.pdb_id in use_seqres_sequence_for_fasta_sequence:
self.FASTA.replace_sequence(self.pdb_id, chain_id, str(sequence))
elif self.pdb_id in use_fasta_sequence_for_seqres_sequence:
self.pdb.seqres_sequences[chain_id] = Sequence.from_sequence(chain_id, self.FASTA[pdb_id][chain_id], self.sequence_types[chain_id])
sequence = self.FASTA[pdb_id][chain_id]
if str(sequence) != self.FASTA[pdb_id][chain_id]:
raise colortext.Exception("The SEQRES and FASTA sequences disagree for chain %s in %s. This can happen but special-case handling (use_seqres_sequence_for_fasta_sequence) should be added to the file containing the %s class." % (chain_id, pdb_id, self.__class__.__name__)) |
def return_current_uri_page_only(self):
"""
Args:
* None
Returns:
String - Returns the full postpath & semantic components
*NOTE* may not contain the server & port numbers. That depends on
what was provided to the parser.
"""
uri = post_slash("%s%s" % (post_slash(self.current_dir()),
self.slots['page']))
return uri | Args:
* None
Returns:
String - Returns the full postpath & semantic components
*NOTE* may not contain the server & port numbers. That depends on
what was provided to the parser. | Below is the the instruction that describes the task:
### Input:
Args:
* None
Returns:
String - Returns the full postpath & semantic components
*NOTE* may not contain the server & port numbers. That depends on
what was provided to the parser.
### Response:
def return_current_uri_page_only(self):
"""
Args:
* None
Returns:
String - Returns the full postpath & semantic components
*NOTE* may not contain the server & port numbers. That depends on
what was provided to the parser.
"""
uri = post_slash("%s%s" % (post_slash(self.current_dir()),
self.slots['page']))
return uri |
def send_response(self, response_dict):
"""
Encode a response according to the request.
:param dict response_dict: the response to send
:raises: :class:`tornado.web.HTTPError` if no acceptable content
type exists
This method will encode `response_dict` using the most appropriate
encoder based on the :mailheader:`Accept` request header and the
available encoders. The result is written to the client by calling
``self.write`` after setting the response content type using
``self.set_header``.
"""
accept = headers.parse_http_accept_header(
self.request.headers.get('Accept', '*/*'))
try:
selected, _ = algorithms.select_content_type(
accept, _content_types.values())
except errors.NoMatch:
raise web.HTTPError(406,
'no acceptable content type for %s in %r',
accept, _content_types.values(),
reason='Content Type Not Acceptable')
LOGGER.debug('selected %s as outgoing content type', selected)
handler = _content_handlers[str(selected)]
accept = self.request.headers.get('Accept-Charset', '*')
charsets = headers.parse_accept_charset(accept)
charset = charsets[0] if charsets[0] != '*' else None
LOGGER.debug('encoding response body using %r with encoding %s',
handler, charset)
encoding, response_bytes = handler.pack_bytes(response_dict,
encoding=charset)
if encoding: # don't overwrite the value in _content_types
copied = datastructures.ContentType(selected.content_type,
selected.content_subtype,
selected.parameters)
copied.parameters['charset'] = encoding
selected = copied
self.set_header('Content-Type', str(selected))
self.write(response_bytes) | Encode a response according to the request.
:param dict response_dict: the response to send
:raises: :class:`tornado.web.HTTPError` if no acceptable content
type exists
This method will encode `response_dict` using the most appropriate
encoder based on the :mailheader:`Accept` request header and the
available encoders. The result is written to the client by calling
``self.write`` after setting the response content type using
``self.set_header``. | Below is the the instruction that describes the task:
### Input:
Encode a response according to the request.
:param dict response_dict: the response to send
:raises: :class:`tornado.web.HTTPError` if no acceptable content
type exists
This method will encode `response_dict` using the most appropriate
encoder based on the :mailheader:`Accept` request header and the
available encoders. The result is written to the client by calling
``self.write`` after setting the response content type using
``self.set_header``.
### Response:
def send_response(self, response_dict):
"""
Encode a response according to the request.
:param dict response_dict: the response to send
:raises: :class:`tornado.web.HTTPError` if no acceptable content
type exists
This method will encode `response_dict` using the most appropriate
encoder based on the :mailheader:`Accept` request header and the
available encoders. The result is written to the client by calling
``self.write`` after setting the response content type using
``self.set_header``.
"""
accept = headers.parse_http_accept_header(
self.request.headers.get('Accept', '*/*'))
try:
selected, _ = algorithms.select_content_type(
accept, _content_types.values())
except errors.NoMatch:
raise web.HTTPError(406,
'no acceptable content type for %s in %r',
accept, _content_types.values(),
reason='Content Type Not Acceptable')
LOGGER.debug('selected %s as outgoing content type', selected)
handler = _content_handlers[str(selected)]
accept = self.request.headers.get('Accept-Charset', '*')
charsets = headers.parse_accept_charset(accept)
charset = charsets[0] if charsets[0] != '*' else None
LOGGER.debug('encoding response body using %r with encoding %s',
handler, charset)
encoding, response_bytes = handler.pack_bytes(response_dict,
encoding=charset)
if encoding: # don't overwrite the value in _content_types
copied = datastructures.ContentType(selected.content_type,
selected.content_subtype,
selected.parameters)
copied.parameters['charset'] = encoding
selected = copied
self.set_header('Content-Type', str(selected))
self.write(response_bytes) |
def perform_pilon(self):
"""
Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs
"""
for sample in self.metadata:
try:
if sample[self.analysistype].num_contigs > 500 or sample.confindr.contam_status == 'Contaminated':
sample.general.polish = False
else:
sample.general.polish = True
except AttributeError:
sample.general.polish = True | Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs | Below is the the instruction that describes the task:
### Input:
Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs
### Response:
def perform_pilon(self):
"""
Determine if pilon polishing should be attempted. Do not perform polishing if confindr determines that the
sample is contaminated or if there are > 500 contigs
"""
for sample in self.metadata:
try:
if sample[self.analysistype].num_contigs > 500 or sample.confindr.contam_status == 'Contaminated':
sample.general.polish = False
else:
sample.general.polish = True
except AttributeError:
sample.general.polish = True |
def get_capabilities(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n796.
Positional arguments:
data -- bytearray data to read.
Returns:
List.
"""
answers = list()
for i in range(len(data)):
base = i * 8
for bit in range(8):
if not data[i] & (1 << bit):
continue
answers.append(CAPA.get(bit + base, bit))
return answers | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n796.
Positional arguments:
data -- bytearray data to read.
Returns:
List. | Below is the the instruction that describes the task:
### Input:
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n796.
Positional arguments:
data -- bytearray data to read.
Returns:
List.
### Response:
def get_capabilities(_, data):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n796.
Positional arguments:
data -- bytearray data to read.
Returns:
List.
"""
answers = list()
for i in range(len(data)):
base = i * 8
for bit in range(8):
if not data[i] & (1 << bit):
continue
answers.append(CAPA.get(bit + base, bit))
return answers |
def ROL(self, a):
"""
Rotates all bits of the register one place left through the C (carry)
bit. This is a 9-bit rotation.
source code forms: ROL Q; ROLA; ROLB
CC bits "HNZVC": -aaas
"""
r = (a << 1) | self.C
self.clear_NZVC()
self.update_NZVC_8(a, a, r)
return r | Rotates all bits of the register one place left through the C (carry)
bit. This is a 9-bit rotation.
source code forms: ROL Q; ROLA; ROLB
CC bits "HNZVC": -aaas | Below is the the instruction that describes the task:
### Input:
Rotates all bits of the register one place left through the C (carry)
bit. This is a 9-bit rotation.
source code forms: ROL Q; ROLA; ROLB
CC bits "HNZVC": -aaas
### Response:
def ROL(self, a):
"""
Rotates all bits of the register one place left through the C (carry)
bit. This is a 9-bit rotation.
source code forms: ROL Q; ROLA; ROLB
CC bits "HNZVC": -aaas
"""
r = (a << 1) | self.C
self.clear_NZVC()
self.update_NZVC_8(a, a, r)
return r |
def images():
"""Upload images via REST interface
Check if file upload was successful and sanatize user input.
TODO: return file URL instead of filename
"""
if request.method == 'POST':
file_upload = request.files['file']
if file_upload:
image = dict()
image['filename'] = secure_filename(file_upload.filename)
full_path = os.path.join(session['img_input_dir'],
image['filename'])
file_upload.save(full_path)
image['uid'] = session['image_uid_counter']
session['image_uid_counter'] += 1
current_app.logger.debug('File %d is saved as %s',
image['uid'],
image['filename'])
session['image_list'].append(image)
return jsonify(ok="true", file=image['filename'], uid=image['uid'])
return jsonify(ok="false")
if request.method == 'GET':
return jsonify(images=session['image_list']) | Upload images via REST interface
Check if file upload was successful and sanatize user input.
TODO: return file URL instead of filename | Below is the the instruction that describes the task:
### Input:
Upload images via REST interface
Check if file upload was successful and sanatize user input.
TODO: return file URL instead of filename
### Response:
def images():
"""Upload images via REST interface
Check if file upload was successful and sanatize user input.
TODO: return file URL instead of filename
"""
if request.method == 'POST':
file_upload = request.files['file']
if file_upload:
image = dict()
image['filename'] = secure_filename(file_upload.filename)
full_path = os.path.join(session['img_input_dir'],
image['filename'])
file_upload.save(full_path)
image['uid'] = session['image_uid_counter']
session['image_uid_counter'] += 1
current_app.logger.debug('File %d is saved as %s',
image['uid'],
image['filename'])
session['image_list'].append(image)
return jsonify(ok="true", file=image['filename'], uid=image['uid'])
return jsonify(ok="false")
if request.method == 'GET':
return jsonify(images=session['image_list']) |
def customer_source_webhook_handler(event):
"""Handle updates to customer payment-source objects.
Docs: https://stripe.com/docs/api#customer_object-sources.
"""
customer_data = event.data.get("object", {})
source_type = customer_data.get("object", {})
# TODO: handle other types of sources (https://stripe.com/docs/api#customer_object-sources)
if source_type == SourceType.card:
if event.verb.endswith("deleted") and customer_data:
# On customer.source.deleted, we do not delete the object, we merely unlink it.
# customer = Customer.objects.get(id=customer_data["id"])
# NOTE: for now, customer.sources still points to Card
# Also, https://github.com/dj-stripe/dj-stripe/issues/576
models.Card.objects.filter(id=customer_data.get("id", "")).delete()
models.DjstripePaymentMethod.objects.filter(id=customer_data.get("id", "")).delete()
else:
_handle_crud_like_event(target_cls=models.Card, event=event) | Handle updates to customer payment-source objects.
Docs: https://stripe.com/docs/api#customer_object-sources. | Below is the the instruction that describes the task:
### Input:
Handle updates to customer payment-source objects.
Docs: https://stripe.com/docs/api#customer_object-sources.
### Response:
def customer_source_webhook_handler(event):
"""Handle updates to customer payment-source objects.
Docs: https://stripe.com/docs/api#customer_object-sources.
"""
customer_data = event.data.get("object", {})
source_type = customer_data.get("object", {})
# TODO: handle other types of sources (https://stripe.com/docs/api#customer_object-sources)
if source_type == SourceType.card:
if event.verb.endswith("deleted") and customer_data:
# On customer.source.deleted, we do not delete the object, we merely unlink it.
# customer = Customer.objects.get(id=customer_data["id"])
# NOTE: for now, customer.sources still points to Card
# Also, https://github.com/dj-stripe/dj-stripe/issues/576
models.Card.objects.filter(id=customer_data.get("id", "")).delete()
models.DjstripePaymentMethod.objects.filter(id=customer_data.get("id", "")).delete()
else:
_handle_crud_like_event(target_cls=models.Card, event=event) |
def vaddg(v1, v2, ndim):
""" Add two n-dimensional vectors
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vaddg_c.html
:param v1: First vector to be added.
:type v1: list[ndim]
:param v2: Second vector to be added.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: v1+v2
:rtype: list[ndim]
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
vout = stypes.emptyDoubleVector(ndim)
ndim = ctypes.c_int(ndim)
libspice.vaddg_c(v1, v2, ndim, vout)
return stypes.cVectorToPython(vout) | Add two n-dimensional vectors
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vaddg_c.html
:param v1: First vector to be added.
:type v1: list[ndim]
:param v2: Second vector to be added.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: v1+v2
:rtype: list[ndim] | Below is the the instruction that describes the task:
### Input:
Add two n-dimensional vectors
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vaddg_c.html
:param v1: First vector to be added.
:type v1: list[ndim]
:param v2: Second vector to be added.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: v1+v2
:rtype: list[ndim]
### Response:
def vaddg(v1, v2, ndim):
""" Add two n-dimensional vectors
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vaddg_c.html
:param v1: First vector to be added.
:type v1: list[ndim]
:param v2: Second vector to be added.
:type v2: list[ndim]
:param ndim: Dimension of v1 and v2.
:type ndim: int
:return: v1+v2
:rtype: list[ndim]
"""
v1 = stypes.toDoubleVector(v1)
v2 = stypes.toDoubleVector(v2)
vout = stypes.emptyDoubleVector(ndim)
ndim = ctypes.c_int(ndim)
libspice.vaddg_c(v1, v2, ndim, vout)
return stypes.cVectorToPython(vout) |
def play(self, name, streamUrl, encoding = ''):
""" use a multimedia player to play a stream """
self.close()
self.name = name
self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
self.muted = False
self.show_volume = True
self.title_prefix = ''
self.playback_is_on = False
self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock)
if logger.isEnabledFor(logging.INFO):
logger.info('Selected Station: "{}"'.format(name))
if encoding:
self._station_encoding = encoding
else:
self._station_encoding = 'utf-8'
opts = []
isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls']
opts = self._buildStartOpts(streamUrl, isPlayList)
self.process = subprocess.Popen(opts, shell=False,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, ))
t.start()
# start playback check timer thread
try:
self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler)
self.connection_timeout_thread.start()
except:
self.connection_timeout_thread = None
if (logger.isEnabledFor(logging.ERROR)):
logger.error("playback detection thread start failed")
if logger.isEnabledFor(logging.INFO):
logger.info("Player started") | use a multimedia player to play a stream | Below is the the instruction that describes the task:
### Input:
use a multimedia player to play a stream
### Response:
def play(self, name, streamUrl, encoding = ''):
""" use a multimedia player to play a stream """
self.close()
self.name = name
self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''}
self.muted = False
self.show_volume = True
self.title_prefix = ''
self.playback_is_on = False
self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock)
if logger.isEnabledFor(logging.INFO):
logger.info('Selected Station: "{}"'.format(name))
if encoding:
self._station_encoding = encoding
else:
self._station_encoding = 'utf-8'
opts = []
isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls']
opts = self._buildStartOpts(streamUrl, isPlayList)
self.process = subprocess.Popen(opts, shell=False,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, ))
t.start()
# start playback check timer thread
try:
self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler)
self.connection_timeout_thread.start()
except:
self.connection_timeout_thread = None
if (logger.isEnabledFor(logging.ERROR)):
logger.error("playback detection thread start failed")
if logger.isEnabledFor(logging.INFO):
logger.info("Player started") |
def draw(self, x, y, char=None, colour=7, bg=0, thin=False):
"""
Draw a line from drawing cursor to the specified position.
This uses a modified Bressenham algorithm, interpolating twice as many points to
render down to anti-aliased characters when no character is specified,
or uses standard algorithm plotting with the specified character.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
:param char: Optional character to use to draw the line.
:param colour: Optional colour for plotting the line.
:param bg: Optional background colour for plotting the line.
:param thin: Optional width of anti-aliased line.
"""
# Decide what type of line drawing to use.
line_chars = (self._uni_line_chars if self._unicode_aware else
self._line_chars)
# Define line end points.
x0 = self._x
y0 = self._y
x1 = int(round(x * 2, 0))
y1 = int(round(y * 2, 0))
# Remember last point for next line.
self._x = x1
self._y = y1
# Don't bother drawing anything if we're guaranteed to be off-screen
if ((x0 < 0 and x1 < 0) or (x0 >= self.width * 2 and x1 >= self.width * 2) or
(y0 < 0 and y1 < 0) or (y0 >= self.height * 2 and y1 >= self.height * 2)):
return
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = -1 if x0 > x1 else 1
sy = -1 if y0 > y1 else 1
def _get_start_char(cx, cy):
needle = self.get_from(cx, cy)
if needle is not None:
letter, cfg, _, cbg = needle
if colour == cfg and bg == cbg and chr(letter) in line_chars:
return line_chars.find(chr(letter))
return 0
def _fast_fill(start_x, end_x, iy):
next_char = -1
for ix in range(start_x, end_x):
if ix % 2 == 0 or next_char == -1:
next_char = _get_start_char(ix // 2, iy // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
if ix % 2 == 1:
self.print_at(line_chars[next_char], ix // 2, iy // 2, colour, bg=bg)
if end_x % 2 == 1:
self.print_at(line_chars[next_char], end_x // 2, iy // 2, colour, bg=bg)
def _draw_on_x(ix, iy):
err = dx
px = ix - 2
py = iy - 2
next_char = 0
while ix != x1:
if ix < px or ix - px >= 2 or iy < py or iy - py >= 2:
px = ix & ~1
py = iy & ~1
next_char = _get_start_char(px // 2, py // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
err -= 2 * dy
if err < 0:
iy += sy
err += 2 * dx
ix += sx
if char is None:
self.print_at(line_chars[next_char],
px // 2, py // 2, colour, bg=bg)
else:
self.print_at(char, px // 2, py // 2, colour, bg=bg)
def _draw_on_y(ix, iy):
err = dy
px = ix - 2
py = iy - 2
next_char = 0
while iy != y1:
if ix < px or ix - px >= 2 or iy < py or iy - py >= 2:
px = ix & ~1
py = iy & ~1
next_char = _get_start_char(px // 2, py // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
err -= 2 * dx
if err < 0:
ix += sx
err += 2 * dy
iy += sy
if char is None:
self.print_at(line_chars[next_char],
px // 2, py // 2, colour, bg=bg)
else:
self.print_at(char, px // 2, py // 2, colour, bg=bg)
if dy == 0 and thin and char is None:
# Fast-path for polygon filling
_fast_fill(min(x0, x1), max(x0, x1), y0)
elif dx > dy:
_draw_on_x(x0, y0)
if not thin:
_draw_on_x(x0, y0 + 1)
else:
_draw_on_y(x0, y0)
if not thin:
_draw_on_y(x0 + 1, y0) | Draw a line from drawing cursor to the specified position.
This uses a modified Bressenham algorithm, interpolating twice as many points to
render down to anti-aliased characters when no character is specified,
or uses standard algorithm plotting with the specified character.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
:param char: Optional character to use to draw the line.
:param colour: Optional colour for plotting the line.
:param bg: Optional background colour for plotting the line.
:param thin: Optional width of anti-aliased line. | Below is the the instruction that describes the task:
### Input:
Draw a line from drawing cursor to the specified position.
This uses a modified Bressenham algorithm, interpolating twice as many points to
render down to anti-aliased characters when no character is specified,
or uses standard algorithm plotting with the specified character.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
:param char: Optional character to use to draw the line.
:param colour: Optional colour for plotting the line.
:param bg: Optional background colour for plotting the line.
:param thin: Optional width of anti-aliased line.
### Response:
def draw(self, x, y, char=None, colour=7, bg=0, thin=False):
"""
Draw a line from drawing cursor to the specified position.
This uses a modified Bressenham algorithm, interpolating twice as many points to
render down to anti-aliased characters when no character is specified,
or uses standard algorithm plotting with the specified character.
:param x: The column (x coord) for the location to check.
:param y: The line (y coord) for the location to check.
:param char: Optional character to use to draw the line.
:param colour: Optional colour for plotting the line.
:param bg: Optional background colour for plotting the line.
:param thin: Optional width of anti-aliased line.
"""
# Decide what type of line drawing to use.
line_chars = (self._uni_line_chars if self._unicode_aware else
self._line_chars)
# Define line end points.
x0 = self._x
y0 = self._y
x1 = int(round(x * 2, 0))
y1 = int(round(y * 2, 0))
# Remember last point for next line.
self._x = x1
self._y = y1
# Don't bother drawing anything if we're guaranteed to be off-screen
if ((x0 < 0 and x1 < 0) or (x0 >= self.width * 2 and x1 >= self.width * 2) or
(y0 < 0 and y1 < 0) or (y0 >= self.height * 2 and y1 >= self.height * 2)):
return
dx = abs(x1 - x0)
dy = abs(y1 - y0)
sx = -1 if x0 > x1 else 1
sy = -1 if y0 > y1 else 1
def _get_start_char(cx, cy):
needle = self.get_from(cx, cy)
if needle is not None:
letter, cfg, _, cbg = needle
if colour == cfg and bg == cbg and chr(letter) in line_chars:
return line_chars.find(chr(letter))
return 0
def _fast_fill(start_x, end_x, iy):
next_char = -1
for ix in range(start_x, end_x):
if ix % 2 == 0 or next_char == -1:
next_char = _get_start_char(ix // 2, iy // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
if ix % 2 == 1:
self.print_at(line_chars[next_char], ix // 2, iy // 2, colour, bg=bg)
if end_x % 2 == 1:
self.print_at(line_chars[next_char], end_x // 2, iy // 2, colour, bg=bg)
def _draw_on_x(ix, iy):
err = dx
px = ix - 2
py = iy - 2
next_char = 0
while ix != x1:
if ix < px or ix - px >= 2 or iy < py or iy - py >= 2:
px = ix & ~1
py = iy & ~1
next_char = _get_start_char(px // 2, py // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
err -= 2 * dy
if err < 0:
iy += sy
err += 2 * dx
ix += sx
if char is None:
self.print_at(line_chars[next_char],
px // 2, py // 2, colour, bg=bg)
else:
self.print_at(char, px // 2, py // 2, colour, bg=bg)
def _draw_on_y(ix, iy):
err = dy
px = ix - 2
py = iy - 2
next_char = 0
while iy != y1:
if ix < px or ix - px >= 2 or iy < py or iy - py >= 2:
px = ix & ~1
py = iy & ~1
next_char = _get_start_char(px // 2, py // 2)
next_char |= 2 ** abs(ix % 2) * 4 ** (iy % 2)
err -= 2 * dx
if err < 0:
ix += sx
err += 2 * dy
iy += sy
if char is None:
self.print_at(line_chars[next_char],
px // 2, py // 2, colour, bg=bg)
else:
self.print_at(char, px // 2, py // 2, colour, bg=bg)
if dy == 0 and thin and char is None:
# Fast-path for polygon filling
_fast_fill(min(x0, x1), max(x0, x1), y0)
elif dx > dy:
_draw_on_x(x0, y0)
if not thin:
_draw_on_x(x0, y0 + 1)
else:
_draw_on_y(x0, y0)
if not thin:
_draw_on_y(x0 + 1, y0) |
def illumf(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint):
"""
Compute the illumination angles---phase, incidence, and
emission---at a specified point on a target body. Return logical
flags indicating whether the surface point is visible from
the observer's position and whether the surface point is
illuminated.
The target body's surface is represented using topographic data
provided by DSK files, or by a reference ellipsoid.
The illumination source is a specified ephemeris object.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
Visibility flag, Illumination flag
:rtype: tuple
"""
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
ilusrc = stypes.stringToCharP(ilusrc)
et = ctypes.c_double(et)
fixref = stypes.stringToCharP(fixref)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
spoint = stypes.toDoubleVector(spoint)
trgepc = ctypes.c_double(0)
srfvec = stypes.emptyDoubleVector(3)
phase = ctypes.c_double(0)
incdnc = ctypes.c_double(0)
emissn = ctypes.c_double(0)
visibl = ctypes.c_int()
lit = ctypes.c_int()
libspice.illumf_c(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint,
ctypes.byref(trgepc), srfvec, ctypes.byref(phase),
ctypes.byref(incdnc), ctypes.byref(emissn),
ctypes.byref(visibl), ctypes.byref(lit))
return trgepc.value, stypes.cVectorToPython(srfvec), \
phase.value, incdnc.value, emissn.value, bool(visibl.value), bool(lit.value) | Compute the illumination angles---phase, incidence, and
emission---at a specified point on a target body. Return logical
flags indicating whether the surface point is visible from
the observer's position and whether the surface point is
illuminated.
The target body's surface is represented using topographic data
provided by DSK files, or by a reference ellipsoid.
The illumination source is a specified ephemeris object.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
Visibility flag, Illumination flag
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Compute the illumination angles---phase, incidence, and
emission---at a specified point on a target body. Return logical
flags indicating whether the surface point is visible from
the observer's position and whether the surface point is
illuminated.
The target body's surface is represented using topographic data
provided by DSK files, or by a reference ellipsoid.
The illumination source is a specified ephemeris object.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
Visibility flag, Illumination flag
:rtype: tuple
### Response:
def illumf(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint):
"""
Compute the illumination angles---phase, incidence, and
emission---at a specified point on a target body. Return logical
flags indicating whether the surface point is visible from
the observer's position and whether the surface point is
illuminated.
The target body's surface is represented using topographic data
provided by DSK files, or by a reference ellipsoid.
The illumination source is a specified ephemeris object.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html
:param method: Computation method.
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
Visibility flag, Illumination flag
:rtype: tuple
"""
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
ilusrc = stypes.stringToCharP(ilusrc)
et = ctypes.c_double(et)
fixref = stypes.stringToCharP(fixref)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
spoint = stypes.toDoubleVector(spoint)
trgepc = ctypes.c_double(0)
srfvec = stypes.emptyDoubleVector(3)
phase = ctypes.c_double(0)
incdnc = ctypes.c_double(0)
emissn = ctypes.c_double(0)
visibl = ctypes.c_int()
lit = ctypes.c_int()
libspice.illumf_c(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint,
ctypes.byref(trgepc), srfvec, ctypes.byref(phase),
ctypes.byref(incdnc), ctypes.byref(emissn),
ctypes.byref(visibl), ctypes.byref(lit))
return trgepc.value, stypes.cVectorToPython(srfvec), \
phase.value, incdnc.value, emissn.value, bool(visibl.value), bool(lit.value) |
def _fetch_rrd_meta(self, connection, rrd_path_root, whitelist, field_names, tags):
''' Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname, device_name, rrd_path)
'''
def _in_whitelist(rrd):
path = rrd.replace('<path_rra>/', '')
for p in whitelist:
if fnmatch(path, p):
return True
return False
c = connection.cursor()
and_parameters = " OR ".join(["hsc.field_name = '%s'" % field_name for field_name in field_names])
# Check for the existence of the `host_snmp_cache` table
rrd_query = """
SELECT
h.hostname as hostname,
hsc.field_value as device_name,
dt.data_source_path as rrd_path
FROM data_local dl
JOIN host h on dl.host_id = h.id
JOIN data_template_data dt on dt.local_data_id = dl.id
LEFT JOIN host_snmp_cache hsc on h.id = hsc.host_id
AND dl.snmp_index = hsc.snmp_index
WHERE dt.data_source_path IS NOT NULL
AND dt.data_source_path != ''
AND ({} OR hsc.field_name is NULL) """.format(
and_parameters
)
c.execute(rrd_query)
res = []
for hostname, device_name, rrd_path in c.fetchall():
if not whitelist or _in_whitelist(rrd_path):
if hostname in ('localhost', '127.0.0.1'):
hostname = self.hostname
rrd_path = rrd_path.replace('<path_rra>', rrd_path_root)
device_name = device_name or None
res.append((hostname, device_name, rrd_path))
# Collect stats
num_hosts = len(set([r[0] for r in res]))
self.gauge('cacti.rrd.count', len(res), tags=tags)
self.gauge('cacti.hosts.count', num_hosts, tags=tags)
return res | Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname, device_name, rrd_path) | Below is the the instruction that describes the task:
### Input:
Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname, device_name, rrd_path)
### Response:
def _fetch_rrd_meta(self, connection, rrd_path_root, whitelist, field_names, tags):
''' Fetch metadata about each RRD in this Cacti DB, returning a list of
tuples of (hostname, device_name, rrd_path)
'''
def _in_whitelist(rrd):
path = rrd.replace('<path_rra>/', '')
for p in whitelist:
if fnmatch(path, p):
return True
return False
c = connection.cursor()
and_parameters = " OR ".join(["hsc.field_name = '%s'" % field_name for field_name in field_names])
# Check for the existence of the `host_snmp_cache` table
rrd_query = """
SELECT
h.hostname as hostname,
hsc.field_value as device_name,
dt.data_source_path as rrd_path
FROM data_local dl
JOIN host h on dl.host_id = h.id
JOIN data_template_data dt on dt.local_data_id = dl.id
LEFT JOIN host_snmp_cache hsc on h.id = hsc.host_id
AND dl.snmp_index = hsc.snmp_index
WHERE dt.data_source_path IS NOT NULL
AND dt.data_source_path != ''
AND ({} OR hsc.field_name is NULL) """.format(
and_parameters
)
c.execute(rrd_query)
res = []
for hostname, device_name, rrd_path in c.fetchall():
if not whitelist or _in_whitelist(rrd_path):
if hostname in ('localhost', '127.0.0.1'):
hostname = self.hostname
rrd_path = rrd_path.replace('<path_rra>', rrd_path_root)
device_name = device_name or None
res.append((hostname, device_name, rrd_path))
# Collect stats
num_hosts = len(set([r[0] for r in res]))
self.gauge('cacti.rrd.count', len(res), tags=tags)
self.gauge('cacti.hosts.count', num_hosts, tags=tags)
return res |
def in_hours(self, office=None, when=None):
"""
Finds if it is business hours in the given office.
:param office: Office ID to look up, or None to check if any office is in business hours.
:type office: str or None
:param datetime.datetime when: When to check the office is open, or None for now.
:returns: True if it is business hours, False otherwise.
:rtype: bool
:raises KeyError: If the office is unknown.
"""
if when == None:
when = datetime.now(tz=utc)
if office == None:
for office in self.offices.itervalues():
if office.in_hours(when):
return True
return False
else:
# check specific office
return self.offices[office].in_hours(when) | Finds if it is business hours in the given office.
:param office: Office ID to look up, or None to check if any office is in business hours.
:type office: str or None
:param datetime.datetime when: When to check the office is open, or None for now.
:returns: True if it is business hours, False otherwise.
:rtype: bool
:raises KeyError: If the office is unknown. | Below is the the instruction that describes the task:
### Input:
Finds if it is business hours in the given office.
:param office: Office ID to look up, or None to check if any office is in business hours.
:type office: str or None
:param datetime.datetime when: When to check the office is open, or None for now.
:returns: True if it is business hours, False otherwise.
:rtype: bool
:raises KeyError: If the office is unknown.
### Response:
def in_hours(self, office=None, when=None):
"""
Finds if it is business hours in the given office.
:param office: Office ID to look up, or None to check if any office is in business hours.
:type office: str or None
:param datetime.datetime when: When to check the office is open, or None for now.
:returns: True if it is business hours, False otherwise.
:rtype: bool
:raises KeyError: If the office is unknown.
"""
if when == None:
when = datetime.now(tz=utc)
if office == None:
for office in self.offices.itervalues():
if office.in_hours(when):
return True
return False
else:
# check specific office
return self.offices[office].in_hours(when) |
def start(self, listen_ip=LISTEN_IP, listen_port=0):
"""Start discovery task."""
coro = self.loop.create_datagram_endpoint(
lambda: self, local_addr=(listen_ip, listen_port))
self.task = self.loop.create_task(coro)
return self.task | Start discovery task. | Below is the the instruction that describes the task:
### Input:
Start discovery task.
### Response:
def start(self, listen_ip=LISTEN_IP, listen_port=0):
"""Start discovery task."""
coro = self.loop.create_datagram_endpoint(
lambda: self, local_addr=(listen_ip, listen_port))
self.task = self.loop.create_task(coro)
return self.task |
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
# make sequences into comma-separated stings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if isinstance(value, string_types):
if PY2 and isinstance(value, unicode):
return value.encode("utf-8")
if not PY2 and isinstance(value, str):
return value.encode("utf-8")
return str(value) | Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first. | Below is the the instruction that describes the task:
### Input:
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
### Response:
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
# make sequences into comma-separated stings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if isinstance(value, string_types):
if PY2 and isinstance(value, unicode):
return value.encode("utf-8")
if not PY2 and isinstance(value, str):
return value.encode("utf-8")
return str(value) |
def make_app(httpd_config):
""" Factory for the monitoring webapp.
"""
#mimetypes.add_type('image/vnd.microsoft.icon', '.ico')
# Default paths to serve static file from
htdocs_paths = [
os.path.realpath(os.path.join(config.config_dir, "htdocs")),
os.path.join(os.path.dirname(config.__file__), "data", "htdocs"),
]
return (Router()
.add_route("/", controller=redirect, to="/static/index.html")
.add_route("/favicon.ico", controller=redirect, to="/static/favicon.ico")
.add_route("/static/{filepath:.+}", controller=StaticFolders(htdocs_paths))
.add_route("/json/{action}", controller=JsonController(**httpd_config.json))
) | Factory for the monitoring webapp. | Below is the the instruction that describes the task:
### Input:
Factory for the monitoring webapp.
### Response:
def make_app(httpd_config):
""" Factory for the monitoring webapp.
"""
#mimetypes.add_type('image/vnd.microsoft.icon', '.ico')
# Default paths to serve static file from
htdocs_paths = [
os.path.realpath(os.path.join(config.config_dir, "htdocs")),
os.path.join(os.path.dirname(config.__file__), "data", "htdocs"),
]
return (Router()
.add_route("/", controller=redirect, to="/static/index.html")
.add_route("/favicon.ico", controller=redirect, to="/static/favicon.ico")
.add_route("/static/{filepath:.+}", controller=StaticFolders(htdocs_paths))
.add_route("/json/{action}", controller=JsonController(**httpd_config.json))
) |
def pendingvalidation(self, warnonly=None):
"""Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr)
if warnonly is None and self and self.version:
warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
if self.textvalidation:
while self.offsetvalidationbuffer:
structureelement, textclass = self.offsetvalidationbuffer.pop()
if self.debug: print("[PyNLPl FoLiA DEBUG] Performing offset validation on " + repr(structureelement) + " textclass " + textclass,file=stderr)
#validate offsets
tc = structureelement.textcontent(textclass)
if tc.offset is not None:
try:
tc.getreference(validate=True)
except UnresolvableTextContent:
msg = "Text for " + structureelement.__class__.__name__ + ", ID " + str(structureelement.id) + ", textclass " + textclass + ", has incorrect offset " + str(tc.offset) + " or invalid reference"
print("TEXT VALIDATION ERROR: " + msg,file=sys.stderr)
if not warnonly:
raise | Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool | Below is the the instruction that describes the task:
### Input:
Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
### Response:
def pendingvalidation(self, warnonly=None):
"""Perform any pending validations
Parameters:
warnonly (bool): Warn only (True) or raise exceptions (False). If set to None then this value will be determined based on the document's FoLiA version (Warn only before FoLiA v1.5)
Returns:
bool
"""
if self.debug: print("[PyNLPl FoLiA DEBUG] Processing pending validations (if any)",file=stderr)
if warnonly is None and self and self.version:
warnonly = (checkversion(self.version, '1.5.0') < 0) #warn only for documents older than FoLiA v1.5
if self.textvalidation:
while self.offsetvalidationbuffer:
structureelement, textclass = self.offsetvalidationbuffer.pop()
if self.debug: print("[PyNLPl FoLiA DEBUG] Performing offset validation on " + repr(structureelement) + " textclass " + textclass,file=stderr)
#validate offsets
tc = structureelement.textcontent(textclass)
if tc.offset is not None:
try:
tc.getreference(validate=True)
except UnresolvableTextContent:
msg = "Text for " + structureelement.__class__.__name__ + ", ID " + str(structureelement.id) + ", textclass " + textclass + ", has incorrect offset " + str(tc.offset) + " or invalid reference"
print("TEXT VALIDATION ERROR: " + msg,file=sys.stderr)
if not warnonly:
raise |
def _insert_single_batch_into_database(
batchIndex,
log,
dbTableName,
uniqueKeyList,
dateModified,
replace,
batchSize,
reDatetime,
dateCreated):
"""*summary of function*
**Key Arguments:**
- ``batchIndex`` -- the index of the batch to insert
- ``dbConn`` -- mysql database connection
- ``log`` -- logger
**Return:**
- None
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
usage code
"""
log.debug('starting the ``_insert_single_batch_into_database`` function')
global totalCount
global globalDbConn
global sharedList
batch = sharedList[batchIndex]
reDate = reDatetime
if isinstance(globalDbConn, dict):
# SETUP ALL DATABASE CONNECTIONS
dbConn = database(
log=log,
dbSettings=globalDbConn,
autocommit=False
).connect()
else:
dbConn = globalDbConn
count = batch[1]
if count > totalCount:
count = totalCount
ltotalCount = totalCount
inserted = False
while inserted == False:
if not replace:
insertVerb = "INSERT IGNORE"
else:
insertVerb = "INSERT IGNORE"
uniKeys = set().union(*(d.keys() for d in batch[0]))
tmp = []
tmp[:] = [m.replace(" ", "_").replace(
"-", "_") for m in uniKeys]
uniKeys = tmp
myKeys = '`,`'.join(uniKeys)
vals = [tuple([None if d[k] in ["None", None] else str(d[k])
for k in uniKeys]) for d in batch[0]]
valueString = ("%s, " * len(vals[0]))[:-2]
insertCommand = insertVerb + """ INTO `""" + dbTableName + \
"""` (`""" + myKeys + """`, dateCreated) VALUES (""" + \
valueString + """, NOW())"""
if not dateCreated:
insertCommand = insertCommand.replace(
", dateCreated)", ")").replace(", NOW())", ")")
dup = ""
if replace:
dup = " ON DUPLICATE KEY UPDATE "
for k in uniKeys:
dup = """%(dup)s %(k)s=values(%(k)s),""" % locals()
dup = """%(dup)s updated=1, dateLastModified=NOW()""" % locals()
insertCommand = insertCommand + dup
insertCommand = insertCommand.replace('\\""', '\\" "')
insertCommand = insertCommand.replace('""', "null")
insertCommand = insertCommand.replace('"None"', 'null')
message = ""
# log.debug('adding new data to the %s table; query: %s' %
# (dbTableName, addValue))
try:
message = writequery(
log=log,
sqlQuery=insertCommand,
dbConn=dbConn,
Force=True,
manyValueList=vals
)
except:
theseInserts = []
for aDict in batch[0]:
insertCommand, valueTuple = convert_dictionary_to_mysql_table(
dbConn=dbConn,
log=log,
dictionary=aDict,
dbTableName=dbTableName,
uniqueKeyList=uniqueKeyList,
dateModified=dateModified,
returnInsertOnly=True,
replace=replace,
reDatetime=reDate,
skipChecks=True
)
theseInserts.append(valueTuple)
message = ""
# log.debug('adding new data to the %s table; query: %s' %
# (dbTableName, addValue))
message = writequery(
log=log,
sqlQuery=insertCommand,
dbConn=dbConn,
Force=True,
manyValueList=theseInserts
)
if message == "unknown column":
for aDict in batch:
convert_dictionary_to_mysql_table(
dbConn=dbConn,
log=log,
dictionary=aDict,
dbTableName=dbTableName,
uniqueKeyList=uniqueKeyList,
dateModified=dateModified,
reDatetime=reDate,
replace=replace
)
else:
inserted = True
dbConn.commit()
log.debug('completed the ``_insert_single_batch_into_database`` function')
return "None" | *summary of function*
**Key Arguments:**
- ``batchIndex`` -- the index of the batch to insert
- ``dbConn`` -- mysql database connection
- ``log`` -- logger
**Return:**
- None
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
usage code | Below is the the instruction that describes the task:
### Input:
*summary of function*
**Key Arguments:**
- ``batchIndex`` -- the index of the batch to insert
- ``dbConn`` -- mysql database connection
- ``log`` -- logger
**Return:**
- None
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
usage code
### Response:
def _insert_single_batch_into_database(
batchIndex,
log,
dbTableName,
uniqueKeyList,
dateModified,
replace,
batchSize,
reDatetime,
dateCreated):
"""*summary of function*
**Key Arguments:**
- ``batchIndex`` -- the index of the batch to insert
- ``dbConn`` -- mysql database connection
- ``log`` -- logger
**Return:**
- None
**Usage:**
.. todo::
add usage info
create a sublime snippet for usage
.. code-block:: python
usage code
"""
log.debug('starting the ``_insert_single_batch_into_database`` function')
global totalCount
global globalDbConn
global sharedList
batch = sharedList[batchIndex]
reDate = reDatetime
if isinstance(globalDbConn, dict):
# SETUP ALL DATABASE CONNECTIONS
dbConn = database(
log=log,
dbSettings=globalDbConn,
autocommit=False
).connect()
else:
dbConn = globalDbConn
count = batch[1]
if count > totalCount:
count = totalCount
ltotalCount = totalCount
inserted = False
while inserted == False:
if not replace:
insertVerb = "INSERT IGNORE"
else:
insertVerb = "INSERT IGNORE"
uniKeys = set().union(*(d.keys() for d in batch[0]))
tmp = []
tmp[:] = [m.replace(" ", "_").replace(
"-", "_") for m in uniKeys]
uniKeys = tmp
myKeys = '`,`'.join(uniKeys)
vals = [tuple([None if d[k] in ["None", None] else str(d[k])
for k in uniKeys]) for d in batch[0]]
valueString = ("%s, " * len(vals[0]))[:-2]
insertCommand = insertVerb + """ INTO `""" + dbTableName + \
"""` (`""" + myKeys + """`, dateCreated) VALUES (""" + \
valueString + """, NOW())"""
if not dateCreated:
insertCommand = insertCommand.replace(
", dateCreated)", ")").replace(", NOW())", ")")
dup = ""
if replace:
dup = " ON DUPLICATE KEY UPDATE "
for k in uniKeys:
dup = """%(dup)s %(k)s=values(%(k)s),""" % locals()
dup = """%(dup)s updated=1, dateLastModified=NOW()""" % locals()
insertCommand = insertCommand + dup
insertCommand = insertCommand.replace('\\""', '\\" "')
insertCommand = insertCommand.replace('""', "null")
insertCommand = insertCommand.replace('"None"', 'null')
message = ""
# log.debug('adding new data to the %s table; query: %s' %
# (dbTableName, addValue))
try:
message = writequery(
log=log,
sqlQuery=insertCommand,
dbConn=dbConn,
Force=True,
manyValueList=vals
)
except:
theseInserts = []
for aDict in batch[0]:
insertCommand, valueTuple = convert_dictionary_to_mysql_table(
dbConn=dbConn,
log=log,
dictionary=aDict,
dbTableName=dbTableName,
uniqueKeyList=uniqueKeyList,
dateModified=dateModified,
returnInsertOnly=True,
replace=replace,
reDatetime=reDate,
skipChecks=True
)
theseInserts.append(valueTuple)
message = ""
# log.debug('adding new data to the %s table; query: %s' %
# (dbTableName, addValue))
message = writequery(
log=log,
sqlQuery=insertCommand,
dbConn=dbConn,
Force=True,
manyValueList=theseInserts
)
if message == "unknown column":
for aDict in batch:
convert_dictionary_to_mysql_table(
dbConn=dbConn,
log=log,
dictionary=aDict,
dbTableName=dbTableName,
uniqueKeyList=uniqueKeyList,
dateModified=dateModified,
reDatetime=reDate,
replace=replace
)
else:
inserted = True
dbConn.commit()
log.debug('completed the ``_insert_single_batch_into_database`` function')
return "None" |
def is_elementary_type(elem_type):
"""
Returns True if the type is elementary - not versioned
:param elem_type:
:return:
"""
if not oh.is_type(elem_type, bt.XmrType):
return False
if oh.is_type(elem_type, (bt.UVarintType, bt.IntType, mt.UnicodeType)):
return True
return False | Returns True if the type is elementary - not versioned
:param elem_type:
:return: | Below is the the instruction that describes the task:
### Input:
Returns True if the type is elementary - not versioned
:param elem_type:
:return:
### Response:
def is_elementary_type(elem_type):
"""
Returns True if the type is elementary - not versioned
:param elem_type:
:return:
"""
if not oh.is_type(elem_type, bt.XmrType):
return False
if oh.is_type(elem_type, (bt.UVarintType, bt.IntType, mt.UnicodeType)):
return True
return False |
def get_account_info(self):
"""获得当前账号的信息
查看当前请求方(请求鉴权使用的 AccessKey 的属主)的账号信息。
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回用户信息,失败返回None
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/info'.format(self.host)
return http._get_with_qiniu_mac(url, None, self.auth) | 获得当前账号的信息
查看当前请求方(请求鉴权使用的 AccessKey 的属主)的账号信息。
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回用户信息,失败返回None
- ResponseInfo 请求的Response信息 | Below is the the instruction that describes the task:
### Input:
获得当前账号的信息
查看当前请求方(请求鉴权使用的 AccessKey 的属主)的账号信息。
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回用户信息,失败返回None
- ResponseInfo 请求的Response信息
### Response:
def get_account_info(self):
"""获得当前账号的信息
查看当前请求方(请求鉴权使用的 AccessKey 的属主)的账号信息。
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回用户信息,失败返回None
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/info'.format(self.host)
return http._get_with_qiniu_mac(url, None, self.auth) |
def student_welcome_view(request):
"""Welcome/first run page for students."""
if not request.user.is_student:
return redirect("index")
# context = {"first_login": request.session["first_login"] if "first_login" in request.session else False}
# return render(request, "welcome/old_student.html", context)
return dashboard_view(request, show_welcome=True) | Welcome/first run page for students. | Below is the the instruction that describes the task:
### Input:
Welcome/first run page for students.
### Response:
def student_welcome_view(request):
"""Welcome/first run page for students."""
if not request.user.is_student:
return redirect("index")
# context = {"first_login": request.session["first_login"] if "first_login" in request.session else False}
# return render(request, "welcome/old_student.html", context)
return dashboard_view(request, show_welcome=True) |
def __create_driver_from_browser_config(self):
'''
Reads the config value for browser type.
'''
try:
browser_type = self._config_reader.get(
WebDriverFactory.BROWSER_TYPE_CONFIG)
except KeyError:
_wtflog("%s missing is missing from config file. Using defaults",
WebDriverFactory.BROWSER_TYPE_CONFIG)
browser_type = WebDriverFactory.FIREFOX
# Special Chrome Sauce
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
options.add_argument("always-authorize-plugins")
browser_type_dict = {
self.CHROME: lambda: webdriver.Chrome(
self._config_reader.get(WebDriverFactory.CHROME_DRIVER_PATH),
chrome_options=options),
self.FIREFOX: lambda: webdriver.Firefox(),
self.INTERNETEXPLORER: lambda: webdriver.Ie(),
self.OPERA: lambda: webdriver.Opera(),
self.PHANTOMJS: lambda: self.__create_phantom_js_driver(),
self.SAFARI: lambda: self.__create_safari_driver()
}
try:
return browser_type_dict[browser_type]()
except KeyError:
raise TypeError(
u("Unsupported Browser Type {0}").format(browser_type)) | Reads the config value for browser type. | Below is the the instruction that describes the task:
### Input:
Reads the config value for browser type.
### Response:
def __create_driver_from_browser_config(self):
'''
Reads the config value for browser type.
'''
try:
browser_type = self._config_reader.get(
WebDriverFactory.BROWSER_TYPE_CONFIG)
except KeyError:
_wtflog("%s missing is missing from config file. Using defaults",
WebDriverFactory.BROWSER_TYPE_CONFIG)
browser_type = WebDriverFactory.FIREFOX
# Special Chrome Sauce
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
options.add_argument("always-authorize-plugins")
browser_type_dict = {
self.CHROME: lambda: webdriver.Chrome(
self._config_reader.get(WebDriverFactory.CHROME_DRIVER_PATH),
chrome_options=options),
self.FIREFOX: lambda: webdriver.Firefox(),
self.INTERNETEXPLORER: lambda: webdriver.Ie(),
self.OPERA: lambda: webdriver.Opera(),
self.PHANTOMJS: lambda: self.__create_phantom_js_driver(),
self.SAFARI: lambda: self.__create_safari_driver()
}
try:
return browser_type_dict[browser_type]()
except KeyError:
raise TypeError(
u("Unsupported Browser Type {0}").format(browser_type)) |
async def set_speaker_settings(self, target: str, value: str):
"""Set speaker settings."""
params = {"settings": [{"target": target, "value": value}]}
return await self.services["audio"]["setSpeakerSettings"](params) | Set speaker settings. | Below is the the instruction that describes the task:
### Input:
Set speaker settings.
### Response:
async def set_speaker_settings(self, target: str, value: str):
"""Set speaker settings."""
params = {"settings": [{"target": target, "value": value}]}
return await self.services["audio"]["setSpeakerSettings"](params) |
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))) | Return distribution full name with - replaced with _ | Below is the the instruction that describes the task:
### Input:
Return distribution full name with - replaced with _
### Response:
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
return '-'.join((safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))) |
def _compute_rtfilter_map(self):
"""Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
have advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in.
"""
rtfilter_map = {}
def get_neigh_filter(neigh):
neigh_filter = rtfilter_map.get(neigh)
# Lazy creation of neighbor RT filter
if neigh_filter is None:
neigh_filter = set()
rtfilter_map[neigh] = neigh_filter
return neigh_filter
# Check if we have to use all paths or just best path
if self._common_config.max_path_ext_rtfilter_all:
# We have to look at all paths for a RtDest
for rtcdest in self._table_manager.get_rtc_table().values():
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC
if neigh is None:
continue
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# We iterate over all destination of the RTC table and for iBGP
# peers we use all known paths' RTs for RT filter and for eBGP
# peers we only consider best-paths' RTs for RT filter
for rtcdest in self._table_manager.get_rtc_table().values():
path = rtcdest.best_path
# If this destination does not have any path, we continue
if not path:
continue
neigh = path.source
# Consider only eBGP peers and ignore NC
if neigh and neigh.is_ebgp_peer():
# For eBGP peers we use only best-path to learn RT filter
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# For iBGP peers we use all known paths to learn RT filter
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC, and eBGP peers
if neigh and not neigh.is_ebgp_peer():
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
return rtfilter_map | Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
have advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in. | Below is the the instruction that describes the task:
### Input:
Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
have advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in.
### Response:
def _compute_rtfilter_map(self):
"""Returns neighbor's RT filter (permit/allow filter based on RT).
Walks RT filter tree and computes current RT filters for each peer that
have advertised RT NLRIs.
Returns:
dict of peer, and `set` of rts that a particular neighbor is
interested in.
"""
rtfilter_map = {}
def get_neigh_filter(neigh):
neigh_filter = rtfilter_map.get(neigh)
# Lazy creation of neighbor RT filter
if neigh_filter is None:
neigh_filter = set()
rtfilter_map[neigh] = neigh_filter
return neigh_filter
# Check if we have to use all paths or just best path
if self._common_config.max_path_ext_rtfilter_all:
# We have to look at all paths for a RtDest
for rtcdest in self._table_manager.get_rtc_table().values():
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC
if neigh is None:
continue
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# We iterate over all destination of the RTC table and for iBGP
# peers we use all known paths' RTs for RT filter and for eBGP
# peers we only consider best-paths' RTs for RT filter
for rtcdest in self._table_manager.get_rtc_table().values():
path = rtcdest.best_path
# If this destination does not have any path, we continue
if not path:
continue
neigh = path.source
# Consider only eBGP peers and ignore NC
if neigh and neigh.is_ebgp_peer():
# For eBGP peers we use only best-path to learn RT filter
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
else:
# For iBGP peers we use all known paths to learn RT filter
known_path_list = rtcdest.known_path_list
for path in known_path_list:
neigh = path.source
# We ignore NC, and eBGP peers
if neigh and not neigh.is_ebgp_peer():
neigh_filter = get_neigh_filter(neigh)
neigh_filter.add(path.nlri.route_target)
return rtfilter_map |
def decode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8.
"""
decoded = []
for k, v in params:
decoded.append((
k.decode('utf-8') if isinstance(k, bytes) else k,
v.decode('utf-8') if isinstance(v, bytes) else v))
return decoded | Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8. | Below is the the instruction that describes the task:
### Input:
Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8.
### Response:
def decode_params_utf8(params):
"""Ensures that all parameters in a list of 2-element tuples are decoded to
unicode using UTF-8.
"""
decoded = []
for k, v in params:
decoded.append((
k.decode('utf-8') if isinstance(k, bytes) else k,
v.decode('utf-8') if isinstance(v, bytes) else v))
return decoded |
def stop(ctx, service, editable):
"""Stop a running service daemon."""
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
home = ctx.obj["HOME"]
service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable)
logger.debug("loading {}".format(service))
service = register_service(service_path)
try:
with open(os.path.join(service_path, ARGS_JSON)) as f:
service_args = json.loads(f.read())
except IOError as exc:
logger.debug(str(exc), exc_info=True)
raise click.ClickException("Cannot load service args, are you sure server is running?")
# get our service class instance
service_module = get_service_module(service_path)
service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args)
# prepare runner
runner = myRunner(service_obj,
pidfile=service_path + ".pid",
stdout=open(os.path.join(service_path, "stdout.log"), "ab"),
stderr=open(os.path.join(service_path, "stderr.log"), "ab"))
click.secho("[*] Stopping {}".format(service.name))
try:
runner._stop()
except daemon.runner.DaemonRunnerStopFailureError as exc:
logger.debug(str(exc), exc_info=True)
raise click.ClickException("Unable to stop service, are you sure it is running?") | Stop a running service daemon. | Below is the the instruction that describes the task:
### Input:
Stop a running service daemon.
### Response:
def stop(ctx, service, editable):
"""Stop a running service daemon."""
logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
extra={"command": ctx.command.name, "params": ctx.params})
home = ctx.obj["HOME"]
service_path = plugin_utils.get_plugin_path(home, SERVICES, service, editable)
logger.debug("loading {}".format(service))
service = register_service(service_path)
try:
with open(os.path.join(service_path, ARGS_JSON)) as f:
service_args = json.loads(f.read())
except IOError as exc:
logger.debug(str(exc), exc_info=True)
raise click.ClickException("Cannot load service args, are you sure server is running?")
# get our service class instance
service_module = get_service_module(service_path)
service_obj = service_module.service_class(alert_types=service.alert_types, service_args=service_args)
# prepare runner
runner = myRunner(service_obj,
pidfile=service_path + ".pid",
stdout=open(os.path.join(service_path, "stdout.log"), "ab"),
stderr=open(os.path.join(service_path, "stderr.log"), "ab"))
click.secho("[*] Stopping {}".format(service.name))
try:
runner._stop()
except daemon.runner.DaemonRunnerStopFailureError as exc:
logger.debug(str(exc), exc_info=True)
raise click.ClickException("Unable to stop service, are you sure it is running?") |
def check_cell_type(cell, cell_type):
'''
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
'''
if cell_type == None or cell_type == type(None):
return cell == None or (isinstance(cell, basestring) and not cell)
else:
return isinstance(cell, cell_type) | Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match. | Below is the the instruction that describes the task:
### Input:
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
### Response:
def check_cell_type(cell, cell_type):
'''
Checks the cell type to see if it represents the cell_type passed in.
Args:
cell_type: The type id for a cell match or None for empty match.
'''
if cell_type == None or cell_type == type(None):
return cell == None or (isinstance(cell, basestring) and not cell)
else:
return isinstance(cell, cell_type) |
def _split_params(tag_prefix, tag_suffix):
"Split comma-separated tag_suffix[:-1] and map with _maybe_int"
if tag_suffix[-1:] != ')':
raise ValueError, "unbalanced parenthesis in type %s%s" % (tag_prefix, tag_suffix)
return map(_maybe_int, tag_suffix[:-1].split(',')) | Split comma-separated tag_suffix[:-1] and map with _maybe_int | Below is the the instruction that describes the task:
### Input:
Split comma-separated tag_suffix[:-1] and map with _maybe_int
### Response:
def _split_params(tag_prefix, tag_suffix):
"Split comma-separated tag_suffix[:-1] and map with _maybe_int"
if tag_suffix[-1:] != ')':
raise ValueError, "unbalanced parenthesis in type %s%s" % (tag_prefix, tag_suffix)
return map(_maybe_int, tag_suffix[:-1].split(',')) |
def weather_at_place(self, name):
"""
Queries the OWM Weather API for the currently observed weather at the
specified toponym (eg: "London,uk")
:param name: the location's toponym
:type name: str or unicode
:returns: an *Observation* instance or ``None`` if no weather data is
available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed or *APICallException* when OWM Weather API can not be
reached
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
params = {'q': encoded_name, 'lang': self._language}
uri = http_client.HttpClient.to_url(OBSERVATION_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation'].parse_JSON(json_data) | Queries the OWM Weather API for the currently observed weather at the
specified toponym (eg: "London,uk")
:param name: the location's toponym
:type name: str or unicode
:returns: an *Observation* instance or ``None`` if no weather data is
available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed or *APICallException* when OWM Weather API can not be
reached | Below is the the instruction that describes the task:
### Input:
Queries the OWM Weather API for the currently observed weather at the
specified toponym (eg: "London,uk")
:param name: the location's toponym
:type name: str or unicode
:returns: an *Observation* instance or ``None`` if no weather data is
available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed or *APICallException* when OWM Weather API can not be
reached
### Response:
def weather_at_place(self, name):
"""
Queries the OWM Weather API for the currently observed weather at the
specified toponym (eg: "London,uk")
:param name: the location's toponym
:type name: str or unicode
:returns: an *Observation* instance or ``None`` if no weather data is
available
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed or *APICallException* when OWM Weather API can not be
reached
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
params = {'q': encoded_name, 'lang': self._language}
uri = http_client.HttpClient.to_url(OBSERVATION_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
return self._parsers['observation'].parse_JSON(json_data) |
def _validate_profile(self, bag):
"""
Validate against OCRD BagIt profile (bag-info fields, algos etc)
"""
if not self.profile_validator.validate(bag):
raise Exception(str(self.profile_validator.report)) | Validate against OCRD BagIt profile (bag-info fields, algos etc) | Below is the the instruction that describes the task:
### Input:
Validate against OCRD BagIt profile (bag-info fields, algos etc)
### Response:
def _validate_profile(self, bag):
"""
Validate against OCRD BagIt profile (bag-info fields, algos etc)
"""
if not self.profile_validator.validate(bag):
raise Exception(str(self.profile_validator.report)) |
def _setitem_via_pathlist(external_dict,path_list,value,**kwargs):
'''
y = {'c': {'b': {}}}
_setitem_via_pathlist(y,['c','b'],200)
'''
if('s2n' in kwargs):
s2n = kwargs['s2n']
else:
s2n = 0
if('n2s' in kwargs):
n2s = kwargs['n2s']
else:
n2s = 0
this = external_dict
for i in range(0,path_list.__len__()-1):
key = path_list[i]
if(n2s ==1):
key = str(key)
if(s2n==1):
try:
int(key)
except:
pass
else:
key = int(key)
this = this.__getitem__(key)
this.__setitem__(path_list[-1],value)
return(external_dict) | y = {'c': {'b': {}}}
_setitem_via_pathlist(y,['c','b'],200) | Below is the the instruction that describes the task:
### Input:
y = {'c': {'b': {}}}
_setitem_via_pathlist(y,['c','b'],200)
### Response:
def _setitem_via_pathlist(external_dict,path_list,value,**kwargs):
'''
y = {'c': {'b': {}}}
_setitem_via_pathlist(y,['c','b'],200)
'''
if('s2n' in kwargs):
s2n = kwargs['s2n']
else:
s2n = 0
if('n2s' in kwargs):
n2s = kwargs['n2s']
else:
n2s = 0
this = external_dict
for i in range(0,path_list.__len__()-1):
key = path_list[i]
if(n2s ==1):
key = str(key)
if(s2n==1):
try:
int(key)
except:
pass
else:
key = int(key)
this = this.__getitem__(key)
this.__setitem__(path_list[-1],value)
return(external_dict) |
def bill(request, abbr, session, bill_id):
'''
Context:
- vote_preview_row_template
- abbr
- metadata
- bill
- show_all_sponsors
- sponsors
- sources
- nav_active
Templates:
- billy/web/public/bill.html
- billy/web/public/vote_preview_row.html
'''
# get fixed version
fixed_bill_id = fix_bill_id(bill_id)
# redirect if URL's id isn't fixed id without spaces
if fixed_bill_id.replace(' ', '') != bill_id:
return redirect('bill', abbr=abbr, session=session, bill_id=fixed_bill_id.replace(' ', ''))
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr, 'session': session,
'bill_id': fixed_bill_id})
if bill is None:
raise Http404(u'no bill found {0} {1} {2}'.format(abbr, session, bill_id))
show_all_sponsors = request.GET.get('show_all_sponsors')
if show_all_sponsors:
sponsors = bill.sponsors_manager
else:
sponsors = bill.sponsors_manager.first_fifteen
return render(
request, templatename('bill'),
dict(vote_preview_row_template=templatename('vote_preview_row'),
abbr=abbr,
metadata=Metadata.get_object(abbr),
bill=bill,
show_all_sponsors=show_all_sponsors,
sponsors=sponsors,
sources=bill['sources'],
nav_active='bills')) | Context:
- vote_preview_row_template
- abbr
- metadata
- bill
- show_all_sponsors
- sponsors
- sources
- nav_active
Templates:
- billy/web/public/bill.html
- billy/web/public/vote_preview_row.html | Below is the the instruction that describes the task:
### Input:
Context:
- vote_preview_row_template
- abbr
- metadata
- bill
- show_all_sponsors
- sponsors
- sources
- nav_active
Templates:
- billy/web/public/bill.html
- billy/web/public/vote_preview_row.html
### Response:
def bill(request, abbr, session, bill_id):
'''
Context:
- vote_preview_row_template
- abbr
- metadata
- bill
- show_all_sponsors
- sponsors
- sources
- nav_active
Templates:
- billy/web/public/bill.html
- billy/web/public/vote_preview_row.html
'''
# get fixed version
fixed_bill_id = fix_bill_id(bill_id)
# redirect if URL's id isn't fixed id without spaces
if fixed_bill_id.replace(' ', '') != bill_id:
return redirect('bill', abbr=abbr, session=session, bill_id=fixed_bill_id.replace(' ', ''))
bill = db.bills.find_one({settings.LEVEL_FIELD: abbr, 'session': session,
'bill_id': fixed_bill_id})
if bill is None:
raise Http404(u'no bill found {0} {1} {2}'.format(abbr, session, bill_id))
show_all_sponsors = request.GET.get('show_all_sponsors')
if show_all_sponsors:
sponsors = bill.sponsors_manager
else:
sponsors = bill.sponsors_manager.first_fifteen
return render(
request, templatename('bill'),
dict(vote_preview_row_template=templatename('vote_preview_row'),
abbr=abbr,
metadata=Metadata.get_object(abbr),
bill=bill,
show_all_sponsors=show_all_sponsors,
sponsors=sponsors,
sources=bill['sources'],
nav_active='bills')) |
def make_safe_url(self, url):
"""Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
"""
# Split the URL into scheme, netloc, path, query and fragment
parts = list(urlsplit(url))
# Clear scheme and netloc and rebuild URL
parts[0] = '' # Empty scheme
parts[1] = '' # Empty netloc (hostname:port)
safe_url = urlunsplit(parts)
return safe_url | Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames. | Below is the the instruction that describes the task:
### Input:
Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
### Response:
def make_safe_url(self, url):
"""Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
"""
# Split the URL into scheme, netloc, path, query and fragment
parts = list(urlsplit(url))
# Clear scheme and netloc and rebuild URL
parts[0] = '' # Empty scheme
parts[1] = '' # Empty netloc (hostname:port)
safe_url = urlunsplit(parts)
return safe_url |
def send_location(self, geo_uri, name, thumb_url=None, **thumb_info):
"""Send a location to the room.
See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
for thumb_info
Args:
geo_uri (str): The geo uri representing the location.
name (str): Description for the location.
thumb_url (str): URL to the thumbnail of the location.
thumb_info (): Metadata about the thumbnail, type ImageInfo.
"""
return self.client.api.send_location(self.room_id, geo_uri, name,
thumb_url, thumb_info) | Send a location to the room.
See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
for thumb_info
Args:
geo_uri (str): The geo uri representing the location.
name (str): Description for the location.
thumb_url (str): URL to the thumbnail of the location.
thumb_info (): Metadata about the thumbnail, type ImageInfo. | Below is the the instruction that describes the task:
### Input:
Send a location to the room.
See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
for thumb_info
Args:
geo_uri (str): The geo uri representing the location.
name (str): Description for the location.
thumb_url (str): URL to the thumbnail of the location.
thumb_info (): Metadata about the thumbnail, type ImageInfo.
### Response:
def send_location(self, geo_uri, name, thumb_url=None, **thumb_info):
"""Send a location to the room.
See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
for thumb_info
Args:
geo_uri (str): The geo uri representing the location.
name (str): Description for the location.
thumb_url (str): URL to the thumbnail of the location.
thumb_info (): Metadata about the thumbnail, type ImageInfo.
"""
return self.client.api.send_location(self.room_id, geo_uri, name,
thumb_url, thumb_info) |
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
See: http://developer.github.com/v3/repos/contents/#get-archive-link
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise
"""
resp = None
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, stream=True)
if resp and self._boolean(resp, 200, 404):
stream_response_to_file(resp, path)
return True
return False | Get the tarball or zipball archive for this repo at ref.
See: http://developer.github.com/v3/repos/contents/#get-archive-link
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise | Below is the the instruction that describes the task:
### Input:
Get the tarball or zipball archive for this repo at ref.
See: http://developer.github.com/v3/repos/contents/#get-archive-link
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise
### Response:
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
See: http://developer.github.com/v3/repos/contents/#get-archive-link
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise
"""
resp = None
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, stream=True)
if resp and self._boolean(resp, 200, 404):
stream_response_to_file(resp, path)
return True
return False |
def copyVarStatesFrom(self, particleState, varNames):
"""Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if varName in varNames:
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set it's initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng) | Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy | Below is the the instruction that describes the task:
### Input:
Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
### Response:
def copyVarStatesFrom(self, particleState, varNames):
"""Copy specific variables from particleState into this particle.
Parameters:
--------------------------------------------------------------
particleState: dict produced by a particle's getState() method
varNames: which variables to copy
"""
# Set this to false if you don't want the variable to move anymore
# after we set the state
allowedToMove = True
for varName in particleState['varStates']:
if varName in varNames:
# If this particle doesn't include this field, don't copy it
if varName not in self.permuteVars:
continue
# Set the best position to the copied position
state = copy.deepcopy(particleState['varStates'][varName])
state['_position'] = state['position']
state['bestPosition'] = state['position']
if not allowedToMove:
state['velocity'] = 0
# Set the state now
self.permuteVars[varName].setState(state)
if allowedToMove:
# Let the particle move in both directions from the best position
# it found previously and set it's initial velocity to a known
# fraction of the total distance.
self.permuteVars[varName].resetVelocity(self._rng) |
def rootnode_solver(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard', smooth='energy',
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
max_levels=10, max_coarse=10,
diagonal_dominance=False, keep=False, **kwargs):
"""Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry is 'nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a "root-node" or "center-node" associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn("Implicit conversion of A to CSR",
SparseEfficiencyWarning)
except BaseException:
raise TypeError('Argument A must have type csr_matrix, \
bsr_matrix, or be convertible to csr_matrix')
A = A.asfptype()
if (symmetry != 'symmetric') and (symmetry != 'hermitian') and \
(symmetry != 'nonsymmetric'):
raise ValueError('expected \'symmetric\', \'nonsymmetric\' \
or \'hermitian\' for the symmetry parameter ')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Right near nullspace candidates use constant for each variable as default
if B is None:
B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
np.eye(blocksize(A)))
else:
B = np.asarray(B, dtype=A.dtype)
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes B have incorrect \
dimensions for matrix A')
if B.shape[1] < blocksize(A):
raise ValueError('B.shape[1] must be >= the blocksize of A')
# Left near nullspace candidates
if A.symmetry == 'nonsymmetric':
if BH is None:
BH = B.copy()
else:
BH = np.asarray(BH, dtype=A.dtype)
if len(BH.shape) == 1:
BH = BH.reshape(-1, 1)
if BH.shape[1] != B.shape[1]:
raise ValueError('The number of left and right near \
null-space modes B and BH, must be equal')
if BH.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes BH have \
incorrect dimensions for matrix A')
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
improve_candidates =\
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Construct multilevel structure
levels = []
levels.append(multilevel_solver.level())
levels[-1].A = A # matrix
# Append near nullspace candidates
levels[-1].B = B # right candidates
if A.symmetry == 'nonsymmetric':
levels[-1].BH = BH # left candidates
while len(levels) < max_levels and \
int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
extend_hierarchy(levels, strength, aggregate, smooth,
improve_candidates, diagonal_dominance, keep)
ml = multilevel_solver(levels, **kwargs)
change_smoothers(ml, presmoother, postsmoother)
return ml | Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry is 'nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a "root-node" or "center-node" associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011. | Below is the the instruction that describes the task:
### Input:
Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry is 'nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a "root-node" or "center-node" associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
### Response:
def rootnode_solver(A, B=None, BH=None,
symmetry='hermitian', strength='symmetric',
aggregate='standard', smooth='energy',
presmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
postsmoother=('block_gauss_seidel',
{'sweep': 'symmetric'}),
improve_candidates=('block_gauss_seidel',
{'sweep': 'symmetric',
'iterations': 4}),
max_levels=10, max_coarse=10,
diagonal_dominance=False, keep=False, **kwargs):
"""Create a multilevel solver using root-node based Smoothed Aggregation (SA).
See the notes below, for the major differences with the classical-style
smoothed aggregation solver in aggregation.smoothed_aggregation_solver.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix in CSR or BSR format
B : None, array_like
Right near-nullspace candidates stored in the columns of an NxK array.
K must be >= the blocksize of A (see reference [2011OlScTu]_). The default value
B=None is equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
BH : None, array_like
Left near-nullspace candidates stored in the columns of an NxK array.
BH is only used if symmetry is 'nonsymmetric'. K must be >= the
blocksize of A (see reference [2011OlScTu]_). The default value B=None is
equivalent to choosing the constant over each block-variable,
B=np.kron(np.ones((A.shape[0]/blocksize(A), 1)), np.eye(blocksize(A)))
symmetry : string
'symmetric' refers to both real and complex symmetric
'hermitian' refers to both complex Hermitian and real Hermitian
'nonsymmetric' i.e. nonsymmetric in a hermitian sense
Note that for the strictly real case, symmetric and hermitian are
the same
Note that this flag does not denote definiteness of the operator.
strength : list
Method used to determine the strength of connection between unknowns of
the linear system. Method-specific parameters may be passed in using a
tuple, e.g. strength=('symmetric',{'theta' : 0.25 }). If strength=None,
all nonzero entries of the matrix are considered strong.
aggregate : list
Method used to aggregate nodes.
smooth : list
Method used to smooth the tentative prolongator. Method-specific
parameters may be passed in using a tuple, e.g. smooth=
('energy',{'krylov' : 'gmres'}). Only 'energy' and None are valid
prolongation smoothing options.
presmoother : tuple, string, list
Defines the presmoother for the multilevel cycling. The default block
Gauss-Seidel option defaults to point-wise Gauss-Seidel, if the matrix
is CSR or is a BSR matrix with blocksize of 1. See notes below for
varying this parameter on a per level basis.
postsmoother : tuple, string, list
Same as presmoother, except defines the postsmoother.
improve_candidates : tuple, string, list
The ith entry defines the method used to improve the candidates B on
level i. If the list is shorter than max_levels, then the last entry
will define the method for all levels lower. If tuple or string, then
this single relaxation descriptor defines improve_candidates on all
levels.
The list elements are relaxation descriptors of the form used for
presmoother and postsmoother. A value of None implies no action on B.
max_levels : integer
Maximum number of levels to be used in the multilevel solver.
max_coarse : integer
Maximum number of variables permitted on the coarse grid.
diagonal_dominance : bool, tuple
If True (or the first tuple entry is True), then avoid coarsening
diagonally dominant rows. The second tuple entry requires a
dictionary, where the key value 'theta' is used to tune the diagonal
dominance threshold.
keep : bool
Flag to indicate keeping extra operators in the hierarchy for
diagnostics. For example, if True, then strength of connection (C),
tentative prolongation (T), aggregation (AggOp), and arrays
storing the C-points (Cpts) and F-points (Fpts) are kept at
each level.
Other Parameters
----------------
cycle_type : ['V','W','F']
Structrure of multigrid cycle
coarse_solver : ['splu', 'lu', 'cholesky, 'pinv', 'gauss_seidel', ... ]
Solver used at the coarsest level of the MG hierarchy.
Optionally, may be a tuple (fn, args), where fn is a string such as
['splu', 'lu', ...] or a callable function, and args is a dictionary of
arguments to be passed to fn.
Returns
-------
ml : multilevel_solver
Multigrid hierarchy of matrices and prolongation operators
See Also
--------
multilevel_solver, aggregation.smoothed_aggregation_solver,
classical.ruge_stuben_solver
Notes
-----
- Root-node style SA differs from classical SA primarily by preserving
and identity block in the interpolation operator, P. Each aggregate
has a "root-node" or "center-node" associated with it, and this
root-node is injected from the coarse grid to the fine grid. The
injection corresponds to the identity block.
- Only smooth={'energy', None} is supported for prolongation
smoothing. See reference [2011OlScTu]_ below for more details on why the
'energy' prolongation smoother is the natural counterpart to
root-node style SA.
- The additional parameters are passed through as arguments to
multilevel_solver. Refer to pyamg.multilevel_solver for additional
documentation.
- At each level, four steps are executed in order to define the coarser
level operator.
1. Matrix A is given and used to derive a strength matrix, C.
2. Based on the strength matrix, indices are grouped or aggregated.
3. The aggregates define coarse nodes and a tentative prolongation
operator T is defined by injection
4. The tentative prolongation operator is smoothed by a relaxation
scheme to improve the quality and extent of interpolation from the
aggregates to fine nodes.
- The parameters smooth, strength, aggregate, presmoother, postsmoother
can be varied on a per level basis. For different methods on
different levels, use a list as input so that the i-th entry defines
the method at the i-th level. If there are more levels in the
hierarchy than list entries, the last entry will define the method
for all levels lower.
Examples are:
smooth=[('jacobi', {'omega':1.0}), None, 'jacobi']
presmoother=[('block_gauss_seidel', {'sweep':symmetric}), 'sor']
aggregate=['standard', 'naive']
strength=[('symmetric', {'theta':0.25}), ('symmetric', {'theta':0.08})]
- Predefined strength of connection and aggregation schemes can be
specified. These options are best used together, but aggregation can
be predefined while strength of connection is not.
For predefined strength of connection, use a list consisting of
tuples of the form ('predefined', {'C' : C0}), where C0 is a
csr_matrix and each degree-of-freedom in C0 represents a supernode.
For instance to predefine a three-level hierarchy, use
[('predefined', {'C' : C0}), ('predefined', {'C' : C1}) ].
Similarly for predefined aggregation, use a list of tuples. For
instance to predefine a three-level hierarchy, use [('predefined',
{'AggOp' : Agg0}), ('predefined', {'AggOp' : Agg1}) ], where the
dimensions of A, Agg0 and Agg1 are compatible, i.e. Agg0.shape[1] ==
A.shape[0] and Agg1.shape[1] == Agg0.shape[0]. Each AggOp is a
csr_matrix.
Because this is a root-nodes solver, if a member of the predefined
aggregation list is predefined, it must be of the form
('predefined', {'AggOp' : Agg, 'Cnodes' : Cnodes}).
Examples
--------
>>> from pyamg import rootnode_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import numpy as np
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = np.ones((A.shape[0])) # RHS
>>> ml = rootnode_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
References
----------
.. [1996VaMa] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
.. [2011OlScTu] Olson, L. and Schroder, J. and Tuminaro, R.,
"A general interpolation strategy for algebraic
multigrid using energy minimization", SIAM Journal
on Scientific Computing (SISC), vol. 33, pp.
966--991, 2011.
"""
if not (isspmatrix_csr(A) or isspmatrix_bsr(A)):
try:
A = csr_matrix(A)
warn("Implicit conversion of A to CSR",
SparseEfficiencyWarning)
except BaseException:
raise TypeError('Argument A must have type csr_matrix, \
bsr_matrix, or be convertible to csr_matrix')
A = A.asfptype()
if (symmetry != 'symmetric') and (symmetry != 'hermitian') and \
(symmetry != 'nonsymmetric'):
raise ValueError('expected \'symmetric\', \'nonsymmetric\' \
or \'hermitian\' for the symmetry parameter ')
A.symmetry = symmetry
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
# Right near nullspace candidates use constant for each variable as default
if B is None:
B = np.kron(np.ones((int(A.shape[0]/blocksize(A)), 1), dtype=A.dtype),
np.eye(blocksize(A)))
else:
B = np.asarray(B, dtype=A.dtype)
if len(B.shape) == 1:
B = B.reshape(-1, 1)
if B.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes B have incorrect \
dimensions for matrix A')
if B.shape[1] < blocksize(A):
raise ValueError('B.shape[1] must be >= the blocksize of A')
# Left near nullspace candidates
if A.symmetry == 'nonsymmetric':
if BH is None:
BH = B.copy()
else:
BH = np.asarray(BH, dtype=A.dtype)
if len(BH.shape) == 1:
BH = BH.reshape(-1, 1)
if BH.shape[1] != B.shape[1]:
raise ValueError('The number of left and right near \
null-space modes B and BH, must be equal')
if BH.shape[0] != A.shape[0]:
raise ValueError('The near null-space modes BH have \
incorrect dimensions for matrix A')
# Levelize the user parameters, so that they become lists describing the
# desired user option on each level.
max_levels, max_coarse, strength =\
levelize_strength_or_aggregation(strength, max_levels, max_coarse)
max_levels, max_coarse, aggregate =\
levelize_strength_or_aggregation(aggregate, max_levels, max_coarse)
improve_candidates =\
levelize_smooth_or_improve_candidates(improve_candidates, max_levels)
smooth = levelize_smooth_or_improve_candidates(smooth, max_levels)
# Construct multilevel structure
levels = []
levels.append(multilevel_solver.level())
levels[-1].A = A # matrix
# Append near nullspace candidates
levels[-1].B = B # right candidates
if A.symmetry == 'nonsymmetric':
levels[-1].BH = BH # left candidates
while len(levels) < max_levels and \
int(levels[-1].A.shape[0]/blocksize(levels[-1].A)) > max_coarse:
extend_hierarchy(levels, strength, aggregate, smooth,
improve_candidates, diagonal_dominance, keep)
ml = multilevel_solver(levels, **kwargs)
change_smoothers(ml, presmoother, postsmoother)
return ml |
def minimum_pitch(self):
""" Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
"""
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch) | Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction. | Below is the the instruction that describes the task:
### Input:
Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
### Response:
def minimum_pitch(self):
""" Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
"""
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch) |
def fit_mle(self,
init_vals,
print_res=True,
method="BFGS",
loss_tol=1e-06,
gradient_tol=1e-06,
maxiter=1000,
ridge=None,
*args):
"""
Parameters
----------
init_vals : 1D ndarray.
The initial values to start the optimizatin process with. There
should be one value for each utility coefficient, outside intercept
parameter, shape parameter, and nest parameter being estimated.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
results will be printed as they they are determined.
method : str, optional.
Should be a valid string which can be passed to
scipy.optimize.minimize. Determines the optimization algorithm
which is used for this problem.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next which is needed to determine
convergence. Default == 1e-06.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default == 1e-06.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If an int,
float or long is passed, then that scalar determines the ridge
penalty for the optimization. Default == None.
Returns
-------
None. Saves estimation results to the model instance.
"""
msg = "This model class' fit_mle method has not been constructed."
raise NotImplementedError(msg) | Parameters
----------
init_vals : 1D ndarray.
The initial values to start the optimizatin process with. There
should be one value for each utility coefficient, outside intercept
parameter, shape parameter, and nest parameter being estimated.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
results will be printed as they they are determined.
method : str, optional.
Should be a valid string which can be passed to
scipy.optimize.minimize. Determines the optimization algorithm
which is used for this problem.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next which is needed to determine
convergence. Default == 1e-06.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default == 1e-06.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If an int,
float or long is passed, then that scalar determines the ridge
penalty for the optimization. Default == None.
Returns
-------
None. Saves estimation results to the model instance. | Below is the the instruction that describes the task:
### Input:
Parameters
----------
init_vals : 1D ndarray.
The initial values to start the optimizatin process with. There
should be one value for each utility coefficient, outside intercept
parameter, shape parameter, and nest parameter being estimated.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
results will be printed as they they are determined.
method : str, optional.
Should be a valid string which can be passed to
scipy.optimize.minimize. Determines the optimization algorithm
which is used for this problem.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next which is needed to determine
convergence. Default == 1e-06.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default == 1e-06.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If an int,
float or long is passed, then that scalar determines the ridge
penalty for the optimization. Default == None.
Returns
-------
None. Saves estimation results to the model instance.
### Response:
def fit_mle(self,
init_vals,
print_res=True,
method="BFGS",
loss_tol=1e-06,
gradient_tol=1e-06,
maxiter=1000,
ridge=None,
*args):
"""
Parameters
----------
init_vals : 1D ndarray.
The initial values to start the optimizatin process with. There
should be one value for each utility coefficient, outside intercept
parameter, shape parameter, and nest parameter being estimated.
print_res : bool, optional.
Determines whether the timing and initial and final log likelihood
results will be printed as they they are determined.
method : str, optional.
Should be a valid string which can be passed to
scipy.optimize.minimize. Determines the optimization algorithm
which is used for this problem.
loss_tol : float, optional.
Determines the tolerance on the difference in objective function
values from one iteration to the next which is needed to determine
convergence. Default == 1e-06.
gradient_tol : float, optional.
Determines the tolerance on the difference in gradient values from
one iteration to the next which is needed to determine convergence.
Default == 1e-06.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If an int,
float or long is passed, then that scalar determines the ridge
penalty for the optimization. Default == None.
Returns
-------
None. Saves estimation results to the model instance.
"""
msg = "This model class' fit_mle method has not been constructed."
raise NotImplementedError(msg) |
def kind_name(self):
"e.g. 'Gig' or 'Movie'."
return {k:v for (k,v) in self.KIND_CHOICES}[self.kind] | e.g. 'Gig' or 'Movie'. | Below is the the instruction that describes the task:
### Input:
e.g. 'Gig' or 'Movie'.
### Response:
def kind_name(self):
"e.g. 'Gig' or 'Movie'."
return {k:v for (k,v) in self.KIND_CHOICES}[self.kind] |
def historical_rates(self, date, symbols=None):
"""
Get historical rates for any day since `date`.
:param date: a date
:type date: date or str
:param symbols: currency symbols to request specific exchange rates.
:type symbols: list or tuple
:return: the historical rates for any day since `date`.
:rtype: dict
:raises FixerioException: if any error making a request.
"""
try:
if isinstance(date, datetime.date):
# Convert date to ISO 8601 format.
date = date.isoformat()
symbols = symbols or self.symbols
payload = self._create_payload(symbols)
url = BASE_URL + date
response = requests.get(url, params=payload)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as ex:
raise FixerioException(str(ex)) | Get historical rates for any day since `date`.
:param date: a date
:type date: date or str
:param symbols: currency symbols to request specific exchange rates.
:type symbols: list or tuple
:return: the historical rates for any day since `date`.
:rtype: dict
:raises FixerioException: if any error making a request. | Below is the the instruction that describes the task:
### Input:
Get historical rates for any day since `date`.
:param date: a date
:type date: date or str
:param symbols: currency symbols to request specific exchange rates.
:type symbols: list or tuple
:return: the historical rates for any day since `date`.
:rtype: dict
:raises FixerioException: if any error making a request.
### Response:
def historical_rates(self, date, symbols=None):
"""
Get historical rates for any day since `date`.
:param date: a date
:type date: date or str
:param symbols: currency symbols to request specific exchange rates.
:type symbols: list or tuple
:return: the historical rates for any day since `date`.
:rtype: dict
:raises FixerioException: if any error making a request.
"""
try:
if isinstance(date, datetime.date):
# Convert date to ISO 8601 format.
date = date.isoformat()
symbols = symbols or self.symbols
payload = self._create_payload(symbols)
url = BASE_URL + date
response = requests.get(url, params=payload)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as ex:
raise FixerioException(str(ex)) |
def get_tunnel_statistics_input_filter_type_filter_by_gateway_gw_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_statistics = ET.Element("get_tunnel_statistics")
config = get_tunnel_statistics
input = ET.SubElement(get_tunnel_statistics, "input")
filter_type = ET.SubElement(input, "filter-type")
filter_by_gateway = ET.SubElement(filter_type, "filter-by-gateway")
gw_name = ET.SubElement(filter_by_gateway, "gw-name")
gw_name.text = kwargs.pop('gw_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_tunnel_statistics_input_filter_type_filter_by_gateway_gw_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_tunnel_statistics = ET.Element("get_tunnel_statistics")
config = get_tunnel_statistics
input = ET.SubElement(get_tunnel_statistics, "input")
filter_type = ET.SubElement(input, "filter-type")
filter_by_gateway = ET.SubElement(filter_type, "filter-by-gateway")
gw_name = ET.SubElement(filter_by_gateway, "gw-name")
gw_name.text = kwargs.pop('gw_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def doExperiment(locationModuleWidth,
bumpType,
cellCoordinateOffsets,
initialIncrement,
minAccuracy,
capacityResolution,
capacityPercentageResolution,
featuresPerObject,
objectWidth,
numFeatures,
featureDistribution,
useTrace,
noiseFactor,
moduleNoiseFactor,
numModules,
thresholds,
seed1,
seed2,
anchoringMethod):
"""
Finds the capacity of the specified model and object configuration. The
algorithm has two stages. First it finds an upper bound for the capacity by
repeatedly incrementing the number of objects by initialIncrement. After it
finds a number of objects that is above capacity, it begins the second stage:
performing a binary search over the number of objects to find an exact
capacity.
@param initialIncrement (int)
For example, if this number is 128, this method will test 128 objects, then
256, and so on, until it finds an upper bound, then it will narrow in on the
breaking point. This number can't be incorrect, but the simulation will be
inefficient if it's too low or too high.
@param capacityResolution (int)
The resolution of the capacity. If capacityResolution=1, this method will find
the exact capacity. If the capacityResolution is higher, the method will
return a capacity that is potentially less than the actual capacity.
@param capacityPercentageResolution (float)
An alternate way of specifying the resolution. For example, if
capacityPercentageResolution=0.01, if the actual capacity is 3020 and this
method has reached 3000, it will stop searching, because the next increment
will be less than 1% of 3000.
@param minAccuracy (float)
The recognition success rate that the model must achieve.
"""
if not os.path.exists("traces"):
os.makedirs("traces")
if seed1 != -1:
np.random.seed(seed1)
if seed2 != -1:
random.seed(seed2)
features = [str(i) for i in xrange(numFeatures)]
locationConfigs = []
scale = 40.0
if thresholds == -1:
thresholds = int(math.ceil(numModules * 0.8))
elif thresholds == 0:
thresholds = numModules
perModRange = float((90.0 if bumpType == "square" else 60.0) /
float(numModules))
for i in xrange(numModules):
orientation = (float(i) * perModRange) + (perModRange / 2.0)
config = {
"cellsPerAxis": locationModuleWidth,
"scale": scale,
"orientation": np.radians(orientation),
"activationThreshold": 8,
"initialPermanence": 1.0,
"connectedPermanence": 0.5,
"learningThreshold": 8,
"sampleSize": 10,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.0,
}
if bumpType == "square":
config["cellCoordinateOffsets"] = cellCoordinateOffsets
config["anchoringMethod"] = anchoringMethod
elif bumpType == "gaussian":
config["bumpOverlapMethod"] = "probabilistic"
config["baselineCellsPerAxis"] = 6
else:
raise ValueError("Invalid bumpType", bumpType)
locationConfigs.append(config)
l4Overrides = {
"initialPermanence": 1.0,
"activationThreshold": thresholds,
"reducedBasalThreshold": thresholds,
"minThreshold": numModules,
"sampleSize": numModules,
"cellsPerColumn": 16,
}
numObjects = 0
accuracy = None
allLocationsAreUnique = None
occurrencesConvergenceLog = []
increment = initialIncrement
foundUpperBound = False
while True:
currentNumObjects = numObjects + increment
numFailuresAllowed = currentNumObjects * (1 - minAccuracy)
print "Testing", currentNumObjects
objects = generateObjects(currentNumObjects, featuresPerObject, objectWidth,
numFeatures, featureDistribution)
column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides,
bumpType=bumpType)
exp = PIUNExperiment(column, featureNames=features,
numActiveMinicolumns=10,
noiseFactor=noiseFactor,
moduleNoiseFactor=moduleNoiseFactor)
currentLocsUnique = True
for objectDescription in objects:
objLocsUnique = exp.learnObject(objectDescription)
currentLocsUnique = currentLocsUnique and objLocsUnique
numFailures = 0
try:
if useTrace:
filename = os.path.join(
SCRIPT_DIR,
"traces/capacity-{}-points-{}-cells-{}-objects-{}-feats.html".format(
len(cellCoordinateOffsets)**2, exp.column.L6aModules[0].numberOfCells(),
currentNumObjects, numFeatures)
)
traceFileOut = io.open(filename, "w", encoding="utf8")
traceHandle = trace(traceFileOut, exp, includeSynapses=False)
print "Logging to", filename
for objectDescription in objects:
numSensationsToInference = exp.inferObjectWithRandomMovements(
objectDescription)
if numSensationsToInference is None:
numFailures += 1
if numFailures > numFailuresAllowed:
break
finally:
if useTrace:
traceHandle.__exit__()
traceFileOut.close()
if numFailures < numFailuresAllowed:
numObjects = currentNumObjects
accuracy = float(currentNumObjects - numFailures) / currentNumObjects
allLocationsAreUnique = currentLocsUnique
else:
foundUpperBound = True
if foundUpperBound:
increment /= 2
goalResolution = capacityResolution
if capacityPercentageResolution > 0:
goalResolution = min(goalResolution,
capacityPercentageResolution*numObjects)
if increment < goalResolution:
break
result = {
"numObjects": numObjects,
"accuracy": accuracy,
"allLocationsAreUnique": allLocationsAreUnique,
}
print result
return result | Finds the capacity of the specified model and object configuration. The
algorithm has two stages. First it finds an upper bound for the capacity by
repeatedly incrementing the number of objects by initialIncrement. After it
finds a number of objects that is above capacity, it begins the second stage:
performing a binary search over the number of objects to find an exact
capacity.
@param initialIncrement (int)
For example, if this number is 128, this method will test 128 objects, then
256, and so on, until it finds an upper bound, then it will narrow in on the
breaking point. This number can't be incorrect, but the simulation will be
inefficient if it's too low or too high.
@param capacityResolution (int)
The resolution of the capacity. If capacityResolution=1, this method will find
the exact capacity. If the capacityResolution is higher, the method will
return a capacity that is potentially less than the actual capacity.
@param capacityPercentageResolution (float)
An alternate way of specifying the resolution. For example, if
capacityPercentageResolution=0.01, if the actual capacity is 3020 and this
method has reached 3000, it will stop searching, because the next increment
will be less than 1% of 3000.
@param minAccuracy (float)
The recognition success rate that the model must achieve. | Below is the the instruction that describes the task:
### Input:
Finds the capacity of the specified model and object configuration. The
algorithm has two stages. First it finds an upper bound for the capacity by
repeatedly incrementing the number of objects by initialIncrement. After it
finds a number of objects that is above capacity, it begins the second stage:
performing a binary search over the number of objects to find an exact
capacity.
@param initialIncrement (int)
For example, if this number is 128, this method will test 128 objects, then
256, and so on, until it finds an upper bound, then it will narrow in on the
breaking point. This number can't be incorrect, but the simulation will be
inefficient if it's too low or too high.
@param capacityResolution (int)
The resolution of the capacity. If capacityResolution=1, this method will find
the exact capacity. If the capacityResolution is higher, the method will
return a capacity that is potentially less than the actual capacity.
@param capacityPercentageResolution (float)
An alternate way of specifying the resolution. For example, if
capacityPercentageResolution=0.01, if the actual capacity is 3020 and this
method has reached 3000, it will stop searching, because the next increment
will be less than 1% of 3000.
@param minAccuracy (float)
The recognition success rate that the model must achieve.
### Response:
def doExperiment(locationModuleWidth,
bumpType,
cellCoordinateOffsets,
initialIncrement,
minAccuracy,
capacityResolution,
capacityPercentageResolution,
featuresPerObject,
objectWidth,
numFeatures,
featureDistribution,
useTrace,
noiseFactor,
moduleNoiseFactor,
numModules,
thresholds,
seed1,
seed2,
anchoringMethod):
"""
Finds the capacity of the specified model and object configuration. The
algorithm has two stages. First it finds an upper bound for the capacity by
repeatedly incrementing the number of objects by initialIncrement. After it
finds a number of objects that is above capacity, it begins the second stage:
performing a binary search over the number of objects to find an exact
capacity.
@param initialIncrement (int)
For example, if this number is 128, this method will test 128 objects, then
256, and so on, until it finds an upper bound, then it will narrow in on the
breaking point. This number can't be incorrect, but the simulation will be
inefficient if it's too low or too high.
@param capacityResolution (int)
The resolution of the capacity. If capacityResolution=1, this method will find
the exact capacity. If the capacityResolution is higher, the method will
return a capacity that is potentially less than the actual capacity.
@param capacityPercentageResolution (float)
An alternate way of specifying the resolution. For example, if
capacityPercentageResolution=0.01, if the actual capacity is 3020 and this
method has reached 3000, it will stop searching, because the next increment
will be less than 1% of 3000.
@param minAccuracy (float)
The recognition success rate that the model must achieve.
"""
if not os.path.exists("traces"):
os.makedirs("traces")
if seed1 != -1:
np.random.seed(seed1)
if seed2 != -1:
random.seed(seed2)
features = [str(i) for i in xrange(numFeatures)]
locationConfigs = []
scale = 40.0
if thresholds == -1:
thresholds = int(math.ceil(numModules * 0.8))
elif thresholds == 0:
thresholds = numModules
perModRange = float((90.0 if bumpType == "square" else 60.0) /
float(numModules))
for i in xrange(numModules):
orientation = (float(i) * perModRange) + (perModRange / 2.0)
config = {
"cellsPerAxis": locationModuleWidth,
"scale": scale,
"orientation": np.radians(orientation),
"activationThreshold": 8,
"initialPermanence": 1.0,
"connectedPermanence": 0.5,
"learningThreshold": 8,
"sampleSize": 10,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.0,
}
if bumpType == "square":
config["cellCoordinateOffsets"] = cellCoordinateOffsets
config["anchoringMethod"] = anchoringMethod
elif bumpType == "gaussian":
config["bumpOverlapMethod"] = "probabilistic"
config["baselineCellsPerAxis"] = 6
else:
raise ValueError("Invalid bumpType", bumpType)
locationConfigs.append(config)
l4Overrides = {
"initialPermanence": 1.0,
"activationThreshold": thresholds,
"reducedBasalThreshold": thresholds,
"minThreshold": numModules,
"sampleSize": numModules,
"cellsPerColumn": 16,
}
numObjects = 0
accuracy = None
allLocationsAreUnique = None
occurrencesConvergenceLog = []
increment = initialIncrement
foundUpperBound = False
while True:
currentNumObjects = numObjects + increment
numFailuresAllowed = currentNumObjects * (1 - minAccuracy)
print "Testing", currentNumObjects
objects = generateObjects(currentNumObjects, featuresPerObject, objectWidth,
numFeatures, featureDistribution)
column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides,
bumpType=bumpType)
exp = PIUNExperiment(column, featureNames=features,
numActiveMinicolumns=10,
noiseFactor=noiseFactor,
moduleNoiseFactor=moduleNoiseFactor)
currentLocsUnique = True
for objectDescription in objects:
objLocsUnique = exp.learnObject(objectDescription)
currentLocsUnique = currentLocsUnique and objLocsUnique
numFailures = 0
try:
if useTrace:
filename = os.path.join(
SCRIPT_DIR,
"traces/capacity-{}-points-{}-cells-{}-objects-{}-feats.html".format(
len(cellCoordinateOffsets)**2, exp.column.L6aModules[0].numberOfCells(),
currentNumObjects, numFeatures)
)
traceFileOut = io.open(filename, "w", encoding="utf8")
traceHandle = trace(traceFileOut, exp, includeSynapses=False)
print "Logging to", filename
for objectDescription in objects:
numSensationsToInference = exp.inferObjectWithRandomMovements(
objectDescription)
if numSensationsToInference is None:
numFailures += 1
if numFailures > numFailuresAllowed:
break
finally:
if useTrace:
traceHandle.__exit__()
traceFileOut.close()
if numFailures < numFailuresAllowed:
numObjects = currentNumObjects
accuracy = float(currentNumObjects - numFailures) / currentNumObjects
allLocationsAreUnique = currentLocsUnique
else:
foundUpperBound = True
if foundUpperBound:
increment /= 2
goalResolution = capacityResolution
if capacityPercentageResolution > 0:
goalResolution = min(goalResolution,
capacityPercentageResolution*numObjects)
if increment < goalResolution:
break
result = {
"numObjects": numObjects,
"accuracy": accuracy,
"allLocationsAreUnique": allLocationsAreUnique,
}
print result
return result |
def get_relname_and_parent(self, treepos):
"""Return the (relation name, parent ID) tuple that a node is in.
Return None if this node is not in a relation.
"""
node = self.dgtree[treepos]
node_type = get_node_type(node)
assert node_type in (TreeNodeTypes.relation_node, TreeNodeTypes.leaf_node)
parent_pos = self.get_parent_treepos(treepos)
if parent_pos is None: # a root node has no upward relation
return None, None
else:
parent_label = self.get_parent_label(treepos)
grandparent_pos = self.get_parent_treepos(parent_pos)
if grandparent_pos is None:
# a tree with only one EDU/leaf and a 'N' parent but no relation
return None, None
else:
grandparent_id = self.get_node_id(grandparent_pos)
grandparent_label = self.get_parent_label(parent_pos)
reltype = self.get_reltype(grandparent_label)
if reltype == 'rst':
if parent_label == 'N':
return 'span', grandparent_id
elif parent_label == 'S':
cousins_pos = self.get_cousins_treepos(treepos)
assert len(cousins_pos) == 1
cousin_id = self.get_node_id(cousins_pos[0])
return grandparent_label, cousin_id
elif reltype == 'multinuc':
return grandparent_label, grandparent_id | Return the (relation name, parent ID) tuple that a node is in.
Return None if this node is not in a relation. | Below is the the instruction that describes the task:
### Input:
Return the (relation name, parent ID) tuple that a node is in.
Return None if this node is not in a relation.
### Response:
def get_relname_and_parent(self, treepos):
"""Return the (relation name, parent ID) tuple that a node is in.
Return None if this node is not in a relation.
"""
node = self.dgtree[treepos]
node_type = get_node_type(node)
assert node_type in (TreeNodeTypes.relation_node, TreeNodeTypes.leaf_node)
parent_pos = self.get_parent_treepos(treepos)
if parent_pos is None: # a root node has no upward relation
return None, None
else:
parent_label = self.get_parent_label(treepos)
grandparent_pos = self.get_parent_treepos(parent_pos)
if grandparent_pos is None:
# a tree with only one EDU/leaf and a 'N' parent but no relation
return None, None
else:
grandparent_id = self.get_node_id(grandparent_pos)
grandparent_label = self.get_parent_label(parent_pos)
reltype = self.get_reltype(grandparent_label)
if reltype == 'rst':
if parent_label == 'N':
return 'span', grandparent_id
elif parent_label == 'S':
cousins_pos = self.get_cousins_treepos(treepos)
assert len(cousins_pos) == 1
cousin_id = self.get_node_id(cousins_pos[0])
return grandparent_label, cousin_id
elif reltype == 'multinuc':
return grandparent_label, grandparent_id |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.