code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def rename(self, arr, new_name=True):
name_in_me = arr.psy.arr_name in self.arr_names
if not name_in_me:
return arr, False
elif name_in_me and not self._contains_array(arr):
if new_name is False:
raise ValueError(
"Array name %s is already in use! Set the `new_name` "
"parameter to None for renaming!" % arr.psy.arr_name)
elif new_name is True:
new_name = new_name if isstring(new_name) else 'arr{0}'
arr.psy.arr_name = self.next_available_name(new_name)
return arr, True
return arr, None | Rename an array to find a name that isn't already in the list
Parameters
----------
arr: InteractiveBase
A :class:`InteractiveArray` or :class:`InteractiveList` instance
whose name shall be checked
new_name: bool or str
If False, and the ``arr_name`` attribute of the new array is
already in the list, a ValueError is raised.
If True and the ``arr_name`` attribute of the new array is not
already in the list, the name is not changed. Otherwise, if the
array name is already in use, `new_name` is set to 'arr{0}'.
        If a string, it will be used for renaming (whether or not the
        array name of `arr` is in use). ``'{0}'`` is replaced by a counter
Returns
-------
InteractiveBase
`arr` with changed ``arr_name`` attribute
bool or None
True, if the array has been renamed, False if not and None if the
array is already in the list
Raises
------
ValueError
If it was impossible to find a name that isn't already in the list
ValueError
If `new_name` is False and the array is already in the list |
def job(func_or_queue=None, connection=None, *args, **kwargs):
if callable(func_or_queue):
func = func_or_queue
queue = 'default'
else:
func = None
queue = func_or_queue or 'default'
if not isinstance(queue, basestring):
queue = unicode(queue)
try:
queue = get_queue(queue)
if connection is None:
connection = queue.connection
except KeyError:
pass
decorator = _job(queue, connection=connection, *args, **kwargs)
if func:
return decorator(func)
    return decorator | The same as RQ's job decorator, but it automatically works out
the ``connection`` argument from RQ_QUEUES.
And also, it allows simplified ``@job`` syntax to put job into
default queue. |
def iter_commit_activity(self, number=-1, etag=None):
url = self._build_url('stats', 'commit_activity', base_url=self._api)
return self._iter(int(number), url, dict, etag=etag) | Iterate over last year of commit activity by week.
See: http://developer.github.com/v3/repos/statistics/
:param int number: (optional), number of weeks to return. Default -1
will return all of the weeks.
:param str etag: (optional), ETag from a previous request to the same
endpoint
:returns: generator of dictionaries
.. note:: All statistics methods may return a 202. On those occasions,
you will not receive any objects. You should store your
iterator and check the new ``last_status`` attribute. If it
is a 202 you should wait before re-requesting.
.. versionadded:: 0.7 |
def coltype_as_typeengine(coltype: Union[VisitableType,
TypeEngine]) -> TypeEngine:
if isinstance(coltype, TypeEngine):
return coltype
return coltype() | Instances of SQLAlchemy column types are subclasses of ``TypeEngine``.
It's possible to specify column types either as such instances, or as the
class type. This function ensures that such classes are converted to
instances.
To explain: you can specify columns like
.. code-block:: python
a = Column("a", Integer)
b = Column("b", Integer())
c = Column("c", String(length=50))
isinstance(Integer, TypeEngine) # False
isinstance(Integer(), TypeEngine) # True
isinstance(String(length=50), TypeEngine) # True
type(Integer) # <class 'sqlalchemy.sql.visitors.VisitableType'>
type(Integer()) # <class 'sqlalchemy.sql.sqltypes.Integer'>
type(String) # <class 'sqlalchemy.sql.visitors.VisitableType'>
type(String(length=50)) # <class 'sqlalchemy.sql.sqltypes.String'>
This function coerces things to a :class:`TypeEngine`. |
def extract(pattern, string, *, assert_equal=False, one=False,
condense=False, default=None, default_if_multiple=True,
default_if_none=True):
if isinstance(pattern, str):
output = get_content(pattern, string)
else:
output = []
for p in pattern:
output += get_content(p, string)
output = process_output(output, one=one, condense=condense,
default=default,
default_if_multiple=default_if_multiple,
default_if_none=default_if_none)
if assert_equal:
assert_output(output, assert_equal)
else:
return output | Used to extract a given regex pattern from a string, given several options |
def write_dag(self):
if not self.__nodes_finalized:
for node in self.__nodes:
node.finalize()
self.write_concrete_dag()
self.write_abstract_dag() | Write either a dag or a dax. |
def deserialize_durable_record_to_current_model(record, current_model):
if record.get(EVENT_TOO_BIG_FLAG):
return get_full_current_object(record['dynamodb']['Keys']['arn']['S'], current_model)
new_image = remove_durable_specific_fields(record['dynamodb']['NewImage'])
data = {}
for item, value in new_image.items():
data[item] = DESER.deserialize(value)
return current_model(**data) | Utility function that will take a Durable Dynamo event record and turn it into the proper Current Dynamo object.
This will properly deserialize the ugly Dynamo datatypes away.
:param record:
:param current_model:
:return: |
def namedb_account_transaction_save(cur, address, token_type, new_credit_value, new_debit_value, block_id, vtxindex, txid, existing_account):
if existing_account is None:
existing_account = {}
accounts_insert = {
'address': address,
'type': token_type,
'credit_value': '{}'.format(new_credit_value),
'debit_value': '{}'.format(new_debit_value),
'lock_transfer_block_id': existing_account.get('lock_transfer_block_id', 0),
'receive_whitelisted': existing_account.get('receive_whitelisted', True),
'metadata': existing_account.get('metadata', None),
'block_id': block_id,
'txid': txid,
'vtxindex': vtxindex
}
try:
query, values = namedb_insert_prepare(cur, accounts_insert, 'accounts')
except Exception as e:
log.exception(e)
log.fatal('FATAL: failed to append account history record for {} at ({},{})'.format(address, block_id, vtxindex))
os.abort()
namedb_query_execute(cur, query, values)
return True | Insert the new state of an account at a particular point in time.
The data must be for a never-before-seen (txid,block_id,vtxindex) set in the accounts table, but must
correspond to an entry in the history table.
If existing_account is not None, then copy all other remaining fields from it.
Return True on success
Raise an Exception on error |
def last_modified(self) -> Optional[datetime.datetime]:
httpdate = self._headers.get(hdrs.LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None | The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object. |
def build_body(self):
_increase_indent()
body_array = [x.build() for x in self.iterable]
nl = '\n' if self.append_extra_newline else ''
if len(self.iterable) >= 1:
body = self.join_body_on.join(body_array) + nl
else:
body = ''
_decrease_indent()
return body | Builds the body of a syslog-ng configuration object. |
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE):
names, factory = self.extra_criteria()
factory = _CriteriaFactory(model, factory)
results = batch_eval_multi_worker(sess, factory, [advx, y],
batch_size=batch_size, devices=devices)
names = ['correctness', 'confidence'] + names
out = dict(safe_zip(names, results))
return out | Returns a dictionary mapping the name of each criterion to a NumPy
array containing the value of that criterion for each adversarial
example.
Subclasses can add extra criteria by implementing the `extra_criteria`
method.
:param sess: tf.session.Session
:param model: cleverhans.model.Model
    :param advx: numpy array containing the adversarial examples made so far
    by earlier work in the bundling process
:param y: numpy array containing true labels
:param batch_size: int, batch size |
def date_tuple(ovls):
day = month = year = 0
for o in ovls:
if 'day' in o.props:
day = o.value
if 'month' in o.props:
month = o.value
if 'year' in o.props:
year = o.value
if 'date' in o.props:
day, month, year = [(o or n) for o, n in zip((day, month,
year), o.value)]
    return (day, month, year) | We should have a list of overlays from which to extract day, month and
year. |
def get_source(self, key, name_spaces=None, default_prefix=''):
source = self.source or key
prefix = default_prefix
if name_spaces and self.name_space and self.name_space in name_spaces:
prefix = ''.join([name_spaces[self.name_space], ':'])
return ''.join([prefix, source]) | Generates the dictionary key for the serialized representation
based on the instance variable source and a provided key.
:param str key: name of the field in model
:returns: self.source or key |
def wrap_xblock(self, block, view, frag, context):
if hasattr(self, 'wrap_child'):
log.warning("wrap_child is deprecated in favor of wrap_xblock and wrap_aside %s", self.__class__)
return self.wrap_child(block, view, frag, context)
extra_data = {'name': block.name} if block.name else {}
return self._wrap_ele(block, view, frag, extra_data) | Creates a div which identifies the xblock and writes out the json_init_args into a script tag.
If there's a `wrap_child` method, it calls that with a deprecation warning.
The default implementation creates a frag to wraps frag w/ a div identifying the xblock. If you have
javascript, you'll need to override this impl |
def asscipy(self):
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
if not spsp:
raise ImportError("scipy is not available. \
Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype) | Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format> |
def rotate(self, rad):
s, c = [f(rad) for f in (math.sin, math.cos)]
x, y = (c * self.x - s * self.y, s * self.x + c * self.y)
return Point(x, y) | Rotate counter-clockwise by rad radians.
Positive y goes *up,* as in traditional mathematics.
Interestingly, you can use this in y-down computer graphics, if
you just remember that it turns clockwise, rather than
counter-clockwise.
The new position is returned as a new Point. |
def _set_id(self, Id, is_added, index):
if is_added:
self.selected_ids.add(Id)
else:
self.selected_ids.remove(Id)
self.dataChanged.emit(index, index) | Update selected_ids and emit dataChanged |
def get_service_display_name(name):
with win32.OpenSCManager(
dwDesiredAccess = win32.SC_MANAGER_ENUMERATE_SERVICE
) as hSCManager:
return win32.GetServiceDisplayName(hSCManager, name) | Get the service display name for the given service name.
@see: L{get_service}
@type name: str
@param name: Service unique name. You can get this value from the
C{ServiceName} member of the service descriptors returned by
L{get_services} or L{get_active_services}.
@rtype: str
@return: Service display name. |
async def post_data(self, path, data=None, headers=None, timeout=None):
url = self.base_url + path
_LOGGER.debug('POST URL: %s', url)
self._log_data(data, False)
resp = None
try:
resp = await self._session.post(
url, headers=headers, data=data,
timeout=DEFAULT_TIMEOUT if timeout is None else timeout)
if resp.content_length is not None:
resp_data = await resp.read()
else:
resp_data = None
self._log_data(resp_data, True)
return resp_data, resp.status
except Exception as ex:
if resp is not None:
resp.close()
raise ex
finally:
if resp is not None:
await resp.release() | Perform a POST request. |
def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None,
old_style=False, for_msgpack=False, should_redact=False):
serializer = StoneToPythonPrimitiveSerializer(
caller_permissions, alias_validators, for_msgpack, old_style, should_redact)
return serializer.encode(data_type, obj) | Encodes an object into a JSON-compatible dict based on its type.
Args:
data_type (Validator): Validator for obj.
obj (object): Object to be serialized.
caller_permissions (list): The list of raw-string caller permissions
with which to serialize.
Returns:
An object that when passed to json.dumps() will produce a string
giving the JSON-encoded object.
See json_encode() for additional information about validation. |
def snapshot_folder():
logger.info("Snapshot folder")
try:
stdout = subprocess.check_output(["git", "show", "-s", "--format=%cI", "HEAD"])
except subprocess.CalledProcessError as e:
logger.error("Error: {}".format(e.output.decode('ascii', 'ignore').strip()))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Error: {}".format(e))
sys.exit(2)
ds = stdout.decode('ascii', 'ignore').strip()
dt = datetime.fromisoformat(ds)
utc = dt - dt.utcoffset()
return utc.strftime("%Y%m%d_%H%M%S") | Use the commit date in UTC as folder name |
def to_df_CSV(self, tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
return self.to_df(method='CSV', tempfile=tempfile, tempkeep=tempkeep, **kwargs) | Export this SAS Data Set to a Pandas Data Frame via CSV file
    :param tempfile: [optional] an OS path for a file to use for the local CSV file; default is a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
:param kwargs:
:return: Pandas data frame
:rtype: 'pd.DataFrame' |
def GetTransPosition(df,field,dic,refCol="transcript_id"):
try:
gen=str(int(df[field]))
transid=df[refCol]
bases=dic.get(transid).split(",")
bases=bases.index(str(gen))+1
except:
bases=np.nan
    return bases | Maps a genome position to transcript position
:param df: a Pandas dataframe
:param field: the head of the column containing the genomic position
:param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'}
:param refCol: header of the reference column with IDs, eg. 'transcript_id'
:returns: position on transcript |
def set_modifier_mapping(self, keycodes):
r = request.SetModifierMapping(display = self.display,
keycodes = keycodes)
return r.status | Set the keycodes for the eight modifiers X.Shift, X.Lock,
X.Control, X.Mod1, X.Mod2, X.Mod3, X.Mod4 and X.Mod5. keycodes
should be a eight-element list where each entry is a list of the
keycodes that should be bound to that modifier.
If any changed
key is logically in the down state, X.MappingBusy is returned and
the mapping is not changed. If the mapping violates some server
restriction, X.MappingFailed is returned. Otherwise the mapping
is changed and X.MappingSuccess is returned. |
def _badlink(info, base):
tip = _resolved(os.path.join(base, os.path.dirname(info.name)))
return _badpath(info.linkname, base=tip) | Links are interpreted relative to the directory containing the link |
def default(cls):
with enamlnative.imports():
for impl in [
TornadoEventLoop,
TwistedEventLoop,
BuiltinEventLoop,
]:
if impl.available():
print("Using {} event loop!".format(impl))
return impl()
raise RuntimeError("No event loop implementation is available. "
"Install tornado or twisted.") | Get the first available event loop implementation
based on which packages are installed. |
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.host, self.port))
except socket.error as (err, msg):
self.connected = False
raise ScratchError("[Errno %d] %s" % (err, msg))
self.connected = True | Connects to Scratch. |
def recognize_array(self, byte_array):
if type(byte_array) != bytes:
raise TypeError("Expected a byte array (string in Python 2, bytes in Python 3)")
pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))
ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
json_data = _convert_from_charp(json_data)
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj | This causes OpenALPR to attempt to recognize an image passed in as a byte array.
:param byte_array: This should be a string (Python 2) or a bytes object (Python 3)
:return: An OpenALPR analysis in the form of a response dictionary |
async def _register(self):
if self.registered:
self.logger.debug("skipping cap registration, already registered!")
return
await self.rawmsg('CAP', 'LS', '302')
await super()._register() | Hijack registration to send a CAP LS first. |
def _update_lock_icon(self):
icon = ima.icon('lock') if self.locked else ima.icon('lock_open')
self.locked_button.setIcon(icon)
tip = _("Unlock") if self.locked else _("Lock")
self.locked_button.setToolTip(tip) | Update locked state icon |
def _remove_watch_block(self, wb):
if (self._wbslock == None):
self._wbslock = Lock()
self._wbslock.acquire()
self._wbs.remove(wb)
if len(self._wbs) == 0:
self._stop_enqueue_thread()
self._stop_dequeue_thread()
self._wbslock.release() | Internal method to remove a watch block for stopping event monitoring. |
def _startMqtt(self):
LOGGER.info('Connecting to MQTT... {}:{}'.format(self._server, self._port))
try:
self._mqttc.connect_async('{}'.format(self._server), int(self._port), 10)
self._mqttc.loop_forever()
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
LOGGER.error("MQTT Connection error: {}".format(message), exc_info=True) | The client start method. Starts the thread for the MQTT Client
and publishes the connected message. |
def _docstring(self):
s = '
' + "\n"
return s | Generate a docstring for the generated source file.
:return: new docstring
:rtype: str |
def load(self, data):
branches = defaultdict(list)
for row in data:
branches[row[-4]].append(row)
childbranch = self.db._childbranch
branch2do = deque(['trunk'])
store = self._store
while branch2do:
branch = branch2do.popleft()
for row in branches[branch]:
store(*row, planning=False, loading=True)
if branch in childbranch:
branch2do.extend(childbranch[branch]) | Add a bunch of data. Must be in chronological order.
But it doesn't all need to be from the same branch, as long as
each branch is chronological of itself. |
def on_mode_button(self, my_button, state):
if state:
self.controller.set_mode(my_button.get_label()) | Notify the controller of a new mode setting. |
def highlight(text: str, color_code: int, bold: bool=False) -> str:
return '{}\033[{}m{}\033[0m'.format(
'\033[1m' if bold else '',
color_code,
text,) | Wraps the given string with terminal color codes.
Args:
text: The content to highlight.
color_code: The color to highlight with, e.g. 'shelltools.RED'.
bold: Whether to bold the content in addition to coloring.
Returns:
The highlighted string. |
def motion_sensor(self, enabled):
if enabled is True:
value = CONST.SETTINGS_MOTION_POLICY_ON
elif enabled is False:
value = CONST.SETTINGS_MOTION_POLICY_OFF
else:
raise SkybellException(ERROR.INVALID_SETTING_VALUE,
(CONST.SETTINGS_MOTION_POLICY, enabled))
self._set_setting({CONST.SETTINGS_MOTION_POLICY: value}) | Set the motion sensor state. |
def _check_sensor_platform_consistency(self, sensor):
ref_sensor = SENSORS.get(self.platform, None)
if ref_sensor and not sensor == ref_sensor:
logger.error('Sensor-Platform mismatch: {} is not a payload '
'of {}. Did you choose the correct reader?'
.format(sensor, self.platform)) | Make sure sensor and platform are consistent
Args:
sensor (str) : Sensor name from YAML dataset definition
        Note:
            Logs an error (does not raise) if they don't match |
def appendData(self, xdata, ydata, color='b', legendstr=None):
item = self.plot(xdata, ydata, pen=color)
if legendstr is not None:
self.legend.addItem(item, legendstr)
return item | Adds the data to the plot
:param xdata: index values for data, plotted on x-axis
:type xdata: numpy.ndarray
:param ydata: value data to plot, dimension must match xdata
:type ydata: numpy.ndarray |
def run(env):
os.putenv('CIPR_PACKAGES', env.package_dir)
os.putenv('CIPR_PROJECT', env.project_directory)
cmd = AND(
clom.cd(path.dirname(env.project_directory)),
clom[CORONA_SIMULATOR_PATH](path.basename(env.project_directory))
)
try:
cmd.shell.execute()
except KeyboardInterrupt:
pass | Run current project in the Corona Simulator |
def get_stripped_file_lines(filename):
try:
lines = open(filename).readlines()
except FileNotFoundError:
fatal("Could not open file: {!r}".format(filename))
return [line.strip() for line in lines] | Return lines of a file with whitespace removed |
def many_from_config(config):
result = []
credentials = config.get_section("credentials")
if len(credentials) > 0:
sections_names = credentials.get_section_names()
for section in sections_names:
credential = credentials.get_section(section)
result.append(CredentialParams(credential))
else:
credential = config.get_section("credential")
result.append(CredentialParams(credential))
return result | Retrieves all CredentialParams from configuration parameters
from "credentials" section. If "credential" section is present instead,
    then it returns a list with only one CredentialParams.
:param config: a configuration parameters to retrieve credentials
:return: a list of retrieved CredentialParams |
def can_delete_assets(self):
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['assetHints']['canDelete'] | Tests if this user can delete ``Assets``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an ``Asset``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer delete
operations to an unauthorized user.
:return: ``false`` if ``Asset`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.* |
def circle_right(self, radius_m, velocity=VELOCITY, angle_degrees=360.0):
distance = 2 * radius_m * math.pi * angle_degrees / 360.0
flight_time = distance / velocity
self.start_circle_right(radius_m, velocity)
time.sleep(flight_time)
self.stop() | Go in circle, clock wise
:param radius_m: The radius of the circle (meters)
:param velocity: The velocity along the circle (meters/second)
:param angle_degrees: How far to go in the circle (degrees)
:return: |
def fix_jp2_image(image, bit_depth):
if bit_depth in [8, 16]:
return image
if bit_depth == 15:
try:
return image >> 1
except TypeError:
raise IOError('Failed to read JPEG 2000 image correctly. Most likely reason is that Pillow did not '
'install OpenJPEG library correctly. Try reinstalling Pillow from a wheel')
raise ValueError('Bit depth {} of jp2 image is currently not supported. '
                     'Please raise an issue on package Github page'.format(bit_depth)) | Because the Pillow library incorrectly reads JPEG 2000 images with 15-bit encoding, this function corrects the
values in image.
:param image: image read by opencv library
:type image: numpy array
:param bit_depth: bit depth of jp2 image encoding
:type bit_depth: int
:return: corrected image
:rtype: numpy array |
def update_policy(self):
self.demonstration_buffer.update_buffer.shuffle()
batch_losses = []
num_batches = min(len(self.demonstration_buffer.update_buffer['actions']) //
self.n_sequences, self.batches_per_epoch)
for i in range(num_batches):
update_buffer = self.demonstration_buffer.update_buffer
start = i * self.n_sequences
end = (i + 1) * self.n_sequences
mini_batch = update_buffer.make_mini_batch(start, end)
run_out = self.policy.update(mini_batch, self.n_sequences)
loss = run_out['policy_loss']
batch_losses.append(loss)
if len(batch_losses) > 0:
self.stats['Losses/Cloning Loss'].append(np.mean(batch_losses))
else:
self.stats['Losses/Cloning Loss'].append(0) | Updates the policy. |
def getattribute(model, item):
elements = item.split('.')
element = elements.pop(0)
try:
attr = getattr(model, element, None)
except:
return
if attr is None:
return
if callable(attr):
try:
attr = attr()
except:
return
if elements:
return getattribute(attr, '.'.join(elements))
return attr | Chained lookup of item on model
If item has dots (eg: 'foo.bar.baz'), recursively call getattribute():
e = getattr(model, 'foo')
e = getattr(e, 'bar')
e = getattr(e, 'baz')
At each step, check if e is a callable, and if so, use e() |
def from_int(i):
point = ECPointAffine.from_int(bitcoin_curve, i)
return PublicKey.from_point(point) | Generates a public key object from an integer.
Note:
This assumes that the upper 32 bytes of the integer
are the x component of the public key point and the
lower 32 bytes are the y component.
Args:
i (Bignum): A 512-bit integer representing the public
key point on the secp256k1 curve.
Returns:
PublicKey: A PublicKey object. |
def get_request_token(self, request):
if self.oauth == 'oauth1':
oauth = OAuth1Session(self.consumer_key, client_secret=self.consumer_secret)
request_token = oauth.fetch_request_token(self.REQ_TOKEN)
request.session['oauth_token'] = request_token['oauth_token']
request.session['oauth_token_secret'] = request_token['oauth_token_secret']
return request_token
else:
callback_url = self.callback_url(request)
oauth = OAuth2Session(client_id=self.consumer_key, redirect_uri=callback_url, scope=self.scope)
authorization_url, state = oauth.authorization_url(self.AUTH_URL)
return authorization_url | request the token to the external service |
def fw_update(self, data, fw_name=None):
LOG.debug("FW Update %s", data)
self._fw_update(fw_name, data) | Top level FW update function. |
def bind(self, sock):
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn | Wrap and return the given socket. |
def lock(self, lease_time=-1):
return self._encode_invoke(lock_lock_codec, invocation_timeout=MAX_SIZE, lease_time=to_millis(lease_time),
thread_id=thread_id(), reference_id=self.reference_id_generator.get_and_increment()) | Acquires the lock. If a lease time is specified, lock will be released after this lease time.
If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies
dormant until the lock has been acquired.
:param lease_time: (long), time to wait before releasing the lock (optional). |
def matchSubset(**kwargs):
ret = []
for m in self.matches:
allMatched = True
for k,v in iteritems(kwargs):
mVal = getattr(m, k)
try:
if v == mVal or v in mVal: continue
except Exception: pass
allMatched = False
break
if allMatched: ret.append(m)
return ret | extract matches from player's entire match history given matching criteria kwargs |
def get_environment(default=DEVELOPMENT, detectors=None, detectors_opts=None, use_envfiles=True):
detectors_opts = detectors_opts or {}
if detectors is None:
detectors = DETECTORS.keys()
env = None
for detector in detectors:
opts = detectors_opts.get(detector, {})
detector = get_detector(detector)
detector = detector(**opts)
env_name = detector.probe()
if env_name:
env = get_type(env_name)
break
if env is None and default is not None:
env = get_type(default)
if env is not None:
env = env()
use_envfiles and env.update_from_envfiles()
return env | Returns current environment type object.
:param str|Environment|None default: Default environment type or alias.
:param list[Detector] detectors: List of environment detectors to be used in chain.
If not set, default builtin chain is used.
:param dict detectors_opts: Detectors options dictionary.
Where keys are detector names and values are keyword arguments dicts.
:param bool use_envfiles: Whether to set environment variables (if not already set)
using data from .env files.
:rtype: Environment|None |
def _file_name(fname, base_dir):
fname = fname.replace("\\", "\\\\")
if base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(base_dir, fname)
return fname | Convert a relative filename if we have a base directory. |
def check_variable_features(self, ds):
ret_val = []
feature_list = ['point', 'timeSeries', 'trajectory', 'profile', 'timeSeriesProfile', 'trajectoryProfile']
feature_type = getattr(ds, 'featureType', None)
if feature_type not in feature_list:
return []
feature_type_map = {
'point': [
'point'
],
'timeSeries': [
'timeseries',
'multi-timeseries-orthogonal',
'multi-timeseries-incomplete',
],
'trajectory': [
'cf-trajectory',
'single-trajectory',
],
'profile': [
'profile-orthogonal',
'profile-incomplete'
],
'timeSeriesProfile': [
'timeseries-profile-single-station',
'timeseries-profile-multi-station',
'timeseries-profile-single-ortho-time',
'timeseries-profile-multi-ortho-time',
'timeseries-profile-ortho-depth',
'timeseries-profile-incomplete'
],
'trajectoryProfile': [
'trajectory-profile-orthogonal',
'trajectory-profile-incomplete'
]
}
for name in self._find_geophysical_vars(ds):
variable_feature = cfutil.guess_feature_type(ds, name)
if variable_feature is None:
continue
matching_feature = TestCtx(BaseCheck.MEDIUM,
self.section_titles['9.1'])
matching_feature.assert_true(variable_feature in feature_type_map[feature_type],
'{} is not a {}, it is detected as a {}'
''.format(name, feature_type, variable_feature))
ret_val.append(matching_feature.to_result())
return ret_val | Checks the variable feature types match the dataset featureType attribute
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results |
def pypi_link(pkg_filename):
root = 'https://files.pythonhosted.org/packages/source'
name, sep, rest = pkg_filename.partition('-')
parts = root, name[0], name, pkg_filename
return '/'.join(parts) | Given the filename, including md5 fragment, construct the
dependency link for PyPI. |
def show_active_only(self, state):
query = self._copy()
query.active_only = state
return query | Set active only to true or false on a copy of this query |
def id_unique(dict_id, name, lineno):
if dict_id in name_dict:
global error_occurred
error_occurred = True
print(
"ERROR - {0:s} definition {1:s} at line {2:d} conflicts with {3:s}"
.format(name, dict_id, lineno, name_dict[dict_id]))
return False
else:
return True | Returns True if dict_id not already used. Otherwise, invokes error |
def find_by_title(self, title):
for entry in self.entries:
if entry.title == title:
return entry
raise EntryNotFoundError("Entry not found for title: %s" % title) | Find an entry by exact title.
:raise: EntryNotFoundError |
def join_dicts(dicts, delimiter=None, keep_all=False):
if not dicts:
return {}
if keep_all:
all_keys = set(chain(*(d.keys() for d in dicts)))
else:
all_keys = set(dicts[0])
for d in dicts[1:]:
all_keys.intersection_update(d)
ret = {}
for key in all_keys:
vals = {hashable(d.get(key, None)) for d in dicts} - {None}
if len(vals) == 1:
ret[key] = next(iter(vals))
elif delimiter is None:
ret[key] = vals
else:
ret[key] = delimiter.join(map(str, vals))
return ret | Join multiple dictionaries into one
Parameters
----------
dicts: list of dict
A list of dictionaries
delimiter: str
The string that shall be used as the delimiter in case that there
are multiple values for one attribute in the arrays. If None, they
will be returned as sets
keep_all: bool
If True, all formatoptions are kept. Otherwise only the intersection
Returns
-------
dict
The combined dictionary |
def solve_sparse(self, B):
B = B.tocsc()
cols = list()
for j in xrange(B.shape[1]):
col = self.solve(B[:,j])
cols.append(csc_matrix(col))
return hstack(cols) | Solve linear equation of the form A X = B. Where B and X are sparse matrices.
Parameters
----------
B : any scipy.sparse matrix
Right-hand side of the matrix equation.
Note: it will be converted to csc_matrix via `.tocsc()`.
Returns
-------
X : csc_matrix
Solution to the matrix equation as a csc_matrix |
def clear_deadline(self):
    """Clears the deadline, resetting it to the default value.

    raise: NoAccess - ``Metadata.isRequired()`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.get_deadline_metadata().is_read_only():
        raise errors.NoAccess()
    if self.get_deadline_metadata().is_required():
        raise errors.NoAccess()
    self._my_map['deadline'] = self._deadline_default
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def add_association_to_graph(self):
    """Decorate the reified disease-phenotype association with onset and
    frequency provenance triples, when those attributes are set.

    This makes the assumption that both the disease and phenotype
    are classes.
    """
    Assoc.add_association_to_graph(self)
    decorations = (('onset', self.onset), ('frequency', self.frequency))
    for label, value in decorations:
        if value is not None and value != '':
            self.graph.addTriple(self.assoc_id, self.globaltt[label], value)
    return
with some provenance information.
This makes the assumption that both the disease and phenotype
are classes.
:param g:
:return: |
def combine_elem(ind1, ind2):
    """Create a function that combines two product-specific blocks into a
    single int.

    The returned callable reads seq[ind1] and seq[ind2], reinterprets
    negative entries as unsigned 16-bit values, and packs them as
    (seq[ind1] << 16) | seq[ind2]. The input sequence is not modified.
    """
    def inner(seq):
        shift = 2 ** 16
        # Fix: read the values into locals instead of mutating the
        # caller's sequence in place.
        high = seq[ind1]
        low = seq[ind2]
        if high < 0:
            high += shift
        if low < 0:
            low += shift
        return (high << 16) | low
    return inner
def soundexCode(self, char):
    """Return the soundex code for the given character.

    :param char:
        Character whose soundex code is needed
    :return:
        Returns soundex code if character is found in charmap,
        else returns 0
    """
    lang = get_language(char)
    try:
        if lang == "en_US":
            return _soundex_map["soundex_en"][charmap[lang].index(char)]
        return _soundex_map["soundex"][charmap[lang].index(char)]
    except (KeyError, ValueError, IndexError):
        # Narrowed from a bare except: unknown language / missing map
        # (KeyError), char not in the charmap (ValueError), or a soundex
        # table shorter than the charmap (IndexError) all mean "no code".
        pass
    return 0
:param char:
Character whose soundex code is needed
:return:
Returns soundex code if character is found in charmap
else returns 0 |
def delete(self, url):
    """Make a HTTP DELETE request to the Readability API.

    :param url: The url to which to send a DELETE request.
    """
    logger.debug('Making DELETE request to %s', url)
    response = self.oauth_session.delete(url)
    return response
:param url: The url to which to send a DELETE request. |
def mutagen_call(action, path, func, *args, **kwargs):
    """Call a Mutagen function with appropriate error handling.

    `action` is a string describing what the function is trying to do,
    and `path` is the relevant filename. The rest of the arguments
    describe the callable to invoke. Mutagen-specific errors are
    reraised as UnreadableFileError; any other exception raised inside
    Mutagen (i.e. a bug) is reraised as MutagenError.
    """
    try:
        result = func(*args, **kwargs)
    except mutagen.MutagenError as err:
        log.debug(u'%s failed: %s', action, six.text_type(err))
        raise UnreadableFileError(path, six.text_type(err))
    except Exception as err:
        log.debug(u'%s', traceback.format_exc())
        log.error(u'uncaught Mutagen exception in %s: %s', action, err)
        raise MutagenError(path, err)
    return result
`action` is a string describing what the function is trying to do,
and `path` is the relevant filename. The rest of the arguments
describe the callable to invoke.
We require at least Mutagen 1.33, where `IOError` is *never* used,
neither for internal parsing errors *nor* for ordinary IO error
conditions such as a bad filename. Mutagen-specific parsing errors and IO
errors are reraised as `UnreadableFileError`. Other exceptions
raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`. |
def get_ccle_cna(gene_list, cell_lines):
    """Return a dict of CNAs in given genes and cell lines from CCLE.

    CNA values correspond to the following alterations:
    -2 homozygous deletion, -1 hemizygous deletion, 0 neutral,
    1 gain, 2 high level amplification.

    Parameters
    ----------
    gene_list : list[str]
        A list of HGNC gene symbols to get mutations in
    cell_lines : list[str]
        A list of CCLE cell line names to get mutations for.

    Returns
    -------
    profile_data : dict[dict[int]]
        A dict keyed to cases containing a dict keyed to genes
        containing int
    """
    profile_data = get_profile_data(ccle_study, gene_list,
                                    'COPY_NUMBER_ALTERATION', 'all')
    # Dict comprehension instead of dict(generator) (flake8 C402).
    return {case: values for case, values in profile_data.items()
            if case in cell_lines}
CNA values correspond to the following alterations
-2 = homozygous deletion
-1 = hemizygous deletion
0 = neutral / no change
1 = gain
2 = high level amplification
Parameters
----------
gene_list : list[str]
A list of HGNC gene symbols to get mutations in
cell_lines : list[str]
A list of CCLE cell line names to get mutations for.
Returns
-------
profile_data : dict[dict[int]]
A dict keyed to cases containing a dict keyed to genes
containing int |
def html(self):
    """Return html for the entry."""
    tooltip = 'tooltip="{}"'.format(self.description) if self.description else ''
    return entry_html(
        title=self.title,
        thumbnail=self.thumbnail,
        link=self.html_link,
        tooltip=tooltip)
def get_upstream(self, type_):
    """Return self, or an upstream, that is an instance of the given
    class type; None when no upstream in the chain matches.
    """
    if isinstance(self, type_):
        return self
    if not self.upstream:
        return None
    # Delegate to the upstream; it returns itself if it matches.
    return self.upstream.get_upstream(type_)
This is typically used to find upstreams that implement the RemoteInterface |
def date_decoder(dic):
    """JSON object hook adding python date/datetime decoding.
    See JsonEncoder for the matching encoding side.
    """
    if '__date__' in dic:
        marker, factory, message = ('__date__', datetime.date,
                                    "Corrupted date format !")
    elif '__datetime__' in dic:
        marker, factory, message = ('__datetime__', datetime.datetime,
                                    "Corrupted datetime format !")
    else:
        return dic
    fields = {key: value for key, value in dic.items() if key != marker}
    try:
        return factory(**fields)
    except (TypeError, ValueError):
        raise json.JSONDecodeError(message, str(dic), 1)
def unit(self):
    """Returns the unit attribute of the underlying ncdf variable.

    If the unit has a length (e.g. is a list) with precisely one element
    per field, the unit for this field is returned.
    """
    raw_unit = ncVarUnit(self._ncVar)
    field_names = self._ncVar.dtype.names
    per_field = hasattr(raw_unit, '__len__') and len(raw_unit) == len(field_names)
    if per_field:
        return raw_unit[field_names.index(self.nodeName)]
    return raw_unit
If the units has a length (e.g is a list) and has precisely one element per field,
the unit for this field is returned. |
def extract_journal_reference(line, override_kbs_files=None):
    """Extract the journal reference from string.

    Parses the reference line and returns the first parsed element of
    type 'JOURNAL'; implicitly returns None when none is found.
    """
    kbs = get_kbs(custom_kbs_files=override_kbs_files)
    references, _, _, _ = parse_reference_line(line, kbs)
    for reference in references:
        for element in reference:
            if element['type'] == 'JOURNAL':
                return element
Extracts the journal reference from string and parses for specific
journal information. |
def get_prime(bits, k=64):
    """Return a random prime of the given bit length.

    Entropy comes from os.urandom (cryptographically secure).

    :param bits: desired size in bits; must be positive and divisible by 8
    :param k: number of primality-test rounds passed to is_prime
    :raises ValueError: if bits is not a positive multiple of 8
    """
    # Fix: the old check (`bits % 8 != 0 or bits == 0`) let negative
    # multiples of 8 through (-8 % 8 == 0) and the message claimed ">= 0".
    if bits <= 0 or bits % 8 != 0:
        raise ValueError("bits must be > 0 and divisible by 8")
    while True:
        candidate = int.from_bytes(os.urandom(bits // 8), "big")
        if is_prime(candidate, k):
            return candidate
return n | Return a random prime up to a certain length.
This function uses random.SystemRandom. |
def parse_response(service, response, search_type):
    """Parse the response to a music service query and return a SearchResult.

    Args:
        service (MusicService): The music service that produced the response
        response (OrderedDict): The response from the soap client call
        search_type (str): A string that indicates the search type that the
            response is from

    Returns:
        SearchResult: A SearchResult object

    Raises:
        ValueError: if the response has neither a 'searchResult' nor a
            'getMetadataResult' key
    """
    _LOG.debug('Parse response "%s" from service "%s" of type "%s"', response,
               service, search_type)
    if 'searchResult' in response:
        payload = response['searchResult']
    elif 'getMetadataResult' in response:
        payload = response['getMetadataResult']
    else:
        raise ValueError('"response" should contain either the key '
                         '"searchResult" or "getMetadataResult"')
    metadata = {
        'number_returned': payload['count'],
        'total_matches': None,
        'search_type': search_type,
        'update_id': None,
    }
    parsed_items = []
    for result_type in ('mediaCollection', 'mediaMetadata'):
        raw_items = payload.get(result_type, [])
        # A single result comes back as an OrderedDict rather than a list.
        if isinstance(raw_items, OrderedDict):
            raw_items = [raw_items]
        prefix = result_type[0].upper() + result_type[1:]
        for raw_item in raw_items:
            cls = get_class(prefix + raw_item['itemType'].title())
            parsed_items.append(cls.from_music_service(service, raw_item))
    return SearchResult(parsed_items, **metadata)
Args:
service (MusicService): The music service that produced the response
response (OrderedDict): The response from the soap client call
search_type (str): A string that indicates the search type that the
response is from
Returns:
SearchResult: A SearchResult object |
def poke(self, session, address, width, data):
    """Write an 8, 16, 32, or 64-bit value to the specified address.

    Corresponds to viPoke* functions of the VISA library.

    :param session: Unique logical identifier to a session.
    :param address: Destination address for the value.
    :param width: Number of bits to write (8, 16, 32 or 64).
    :param data: Data to be written to the bus.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    :raises ValueError: if width is not one of 8, 16, 32 or 64
    """
    writers = {8: self.poke_8, 16: self.poke_16,
               32: self.poke_32, 64: self.poke_64}
    if width not in writers:
        raise ValueError('%s is not a valid size. Valid values are 8, 16, 32, or 64' % width)
    return writers[width](session, address, data)
Corresponds to viPoke* functions of the VISA library.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param width: Number of bits to write.
:param data: Data to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode` |
def close_window(self):
    """Close the QMainWindow instance that contains this plugin."""
    window = self.undocked_window
    if window is not None:
        window.close()
        self.undocked_window = None
    self.undock_action.setDisabled(False)
    self.close_plugin_action.setDisabled(False)
def linkify_s_by_module(self, modules):
    """Link modules to items.

    Replaces each item's list of module names with the resolved Module
    objects; unresolvable names are reported via ``add_error``.

    :param modules: Modules object (list of all the modules found in the
        configuration)
    :type modules: alignak.objects.module.Modules
    :return: None
    """
    for i in self:
        # De-duplicated, stripped module names configured on this item.
        links_list = strip_and_uniq(i.modules)
        new = []
        for name in [e for e in links_list if e]:  # skip empty names
            module = modules.find_by_name(name)
            # NOTE(review): `module.uuid not in new` compares a uuid against
            # Module objects held in `new`, so the duplicate guard looks
            # ineffective -- and a true duplicate would be reported as
            # "unknown". Confirm intent before changing.
            if module is not None and module.uuid not in new:
                new.append(module)
            else:
                i.add_error("Error: the module %s is unknown for %s" % (name, i.get_name()))
        i.modules = new
:param modules: Modules object (list of all the modules found in the configuration)
:type modules: alignak.objects.module.Modules
:return: None |
def remove_modifier(self, index, m_type=XenaModifierType.standard):
    """Remove modifier.

    Snapshots the current modifiers, resets the modifier count to zero
    (dropping them all on the device), then re-adds every modifier
    except the one at `index`.

    :param m_type: modifier type - standard or extended.
    :param index: index of modifier to remove.
    """
    if m_type == XenaModifierType.standard:
        # Snapshot first -- the reset below destroys the live objects.
        current_modifiers = OrderedDict(self.modifiers)
        del current_modifiers[index]
        self.set_attributes(ps_modifiercount=0)
        self.del_objects_by_type('modifier')
    else:
        # Extended modifiers use separate attribute/object names.
        current_modifiers = OrderedDict(self.xmodifiers)
        del current_modifiers[index]
        self.set_attributes(ps_modifierextcount=0)
        self.del_objects_by_type('xmodifier')
    # Re-create all surviving modifiers with their original settings.
    for modifier in current_modifiers.values():
        self.add_modifier(m_type,
                          mask=modifier.mask, action=modifier.action, repeat=modifier.repeat,
                          min_val=modifier.min_val, step=modifier.step, max_val=modifier.max_val)
:param m_type: modifier type - standard or extended.
:param index: index of modifier to remove. |
def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf"):
    """Create a CAG with mapped indicators.

    Loads a pickled CAG from `input`, attaches the indicator mappings
    below, and pickles the result to `output`. (`filename` is accepted
    but never used in this function.)
    """
    with open(input, "rb") as infile:
        G = pickle.load(infile)
    G.map_concepts_to_indicators(min_temporal_res="month")
    indicator_specs = (
        ("UN/events/weather/precipitation",
         "Historical Average Total Daily Rainfall (Maize)", "DSSAT"),
        ("UN/events/human/agriculture/food_production",
         "Historical Production (Maize)", "DSSAT"),
        ("UN/entities/human/food/food_security",
         "IPC Phase Classification", "FEWSNET"),
        ("UN/entities/food_availability",
         "Production, Meat indigenous, total", "FAO"),
        ("UN/entities/human/financial/economic/market",
         "Inflation Rate", "ieconomics.com"),
        ("UN/events/human/death", "Battle-related deaths", "WDI"),
    )
    for concept, indicator, source in indicator_specs:
        G.set_indicator(concept, indicator, source)
    with open(output, "wb") as outfile:
        pickle.dump(G, outfile)
def merge(self, other):
    """Add requirements from 'other' metadata.rb into this one.

    Appends a depends statement for every cookbook that `other` depends
    on and this file does not yet, then returns the updated dict form.

    :param other: another MetadataRb instance to merge from
    :raises TypeError: if `other` is not a MetadataRb
    """
    if not isinstance(other, MetadataRb):
        # Fix: the old code passed type(other) as a second TypeError
        # argument (logging-style), so the %s was never interpolated.
        raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
                        "instance, not %s." % type(other))
    current = self.to_dict()
    new = other.to_dict()
    meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
                       for cbn, meta in new.get('depends', {}).items()
                       if cbn not in current.get('depends', {})]
    self.write_statements(meta_writelines)
    return self.to_dict()
def destroy_label(self, label, callback=dummy_progress_cb):
    """Remove the label 'label' from all the documents. Takes care of
    updating the index.

    :param label: the label to remove everywhere
    :param callback: progress callback invoked as
        callback(current, total, step, doc) once per processed document
    """
    current = 0
    # Document count, used only for progress reporting.
    total = self.index.get_nb_docs()
    self.index.start_destroy_label(label)
    while True:
        # The index processes one document per call until it reports 'end'.
        (op, doc) = self.index.continue_destroy_label()
        if op == 'end':
            break
        callback(current, total, self.LABEL_STEP_DESTROYING, doc)
        current += 1
    self.index.end_destroy_label()
the index. |
def type_inherits_of_type(inheriting_type, base_type):
    """Check whether inheriting_type inherits (possibly indirectly) from
    base_type. A type is considered to inherit from itself.

    :param inheriting_type: candidate subclass
    :param base_type: candidate base class
    :return: True if base_type is a base of (or equal to) inheriting_type
    """
    assert isinstance(inheriting_type, type) or isclass(inheriting_type)
    assert isinstance(base_type, type) or isclass(base_type)
    if inheriting_type == base_type:
        return True
    # Fix: walk every base, not just a single one -- the previous version
    # returned False for any class using multiple inheritance.
    return any(type_inherits_of_type(base, base_type)
               for base in inheriting_type.__bases__)
:param str inheriting_type:
:param str base_type:
:return: True is base_type is base of inheriting_type |
def add(self, host_value):
    """Add the given value to the collection, unless an equivalent host
    is already present.

    :param host_value: an ip address or a hostname
    :raises InvalidHostError: raised when the given value
        is not a valid ip address nor a hostname
    """
    candidate = self._host_factory(host_value)
    already_present = self._get_match(candidate) is not None
    if not already_present:
        self._add_new(candidate)
:param host_value: an ip address or a hostname
:raises InvalidHostError: raised when the given value
is not a valid ip address nor a hostname |
def add_item(self, item, field_name=None):
    """Add this section to the given item via its related manager.

    Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL.
    Behavior on other items is undefined.
    """
    resolved_field = self._choose_field_name(field_name)
    manager = getattr(item, resolved_field)
    manager.add(self)
Intended for use with items of settings.ARMSTRONG_SECTION_ITEM_MODEL.
Behavior on other items is undefined. |
def fetchGroupInfo(self, *group_ids):
    """Get groups' info from IDs, unordered.

    :param group_ids: One or more group ID(s) to query
    :return: :class:`models.Group` objects, labeled by their ID
    :rtype: dict
    :raises: FBchatException if request failed
    """
    threads = self.fetchThreadInfo(*group_ids)
    groups = {}
    for thread_id, thread in threads.items():
        if thread.type != ThreadType.GROUP:
            raise FBchatUserError("Thread {} was not a group".format(thread))
        groups[thread_id] = thread
    return groups
return groups | Get groups' info from IDs, unordered
:param group_ids: One or more group ID(s) to query
:return: :class:`models.Group` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed |
def setting(self, opt, val):
    """Change an arbitrary synth setting, dispatching on the value's type.

    Strings go to fluid_settings_setstr, ints to fluid_settings_setint,
    floats to fluid_settings_setnum.

    NOTE(review): values of any other type are silently ignored, and
    `basestring` is Python-2-only -- confirm both are intended.
    """
    # The underlying C API expects a bytes option name.
    opt = opt.encode()
    if isinstance(val, basestring):
        fluid_settings_setstr(self.settings, opt, val)
    elif isinstance(val, int):
        fluid_settings_setint(self.settings, opt, val)
    elif isinstance(val, float):
        fluid_settings_setnum(self.settings, opt, val)
def _encode_batched_op_msg(
        operation, command, docs, check_keys, ack, opts, ctx):
    """Encode the next batched insert, update, or delete operation
    as OP_MSG.

    Returns a (payload bytes, docs actually sent) tuple.
    """
    buffer = StringIO()
    to_send, _ = _batched_op_msg_impl(
        operation, command, docs, check_keys, ack, opts, ctx, buffer)
    return buffer.getvalue(), to_send
as OP_MSG. |
def ifaces(cls, name):
    """Get vlan attached ifaces.

    :param name: vlan identifier accepted by cls.usable_id
    :return: list of iface info records, one per attached iface
    """
    vlan_ifaces = Iface.list({'vlan_id': cls.usable_id(name)})
    # List comprehension instead of a manual append loop (PERF401).
    return [Iface.info(iface['id']) for iface in vlan_ifaces]
def use_comparative_catalog_view(self):
    """Pass through to provider CatalogLookupSession.use_comparative_catalog_view."""
    self._catalog_view = COMPARATIVE
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_catalog_view()
        except AttributeError:
            # Not every provider session supports catalog views.
            pass
def tags(self, resource_id=None):
    """Tag endpoint for this resource with optional tag name.

    Sets the resource endpoint for working with Tags. With no
    resource_id, GET returns all tags applied to this resource; with a
    resource_id (tag name), GET verifies that single tag, POST applies
    it, and DELETE removes it.

    Args:
        resource_id (Optional [string]): The resource id (tag name).
    """
    resource = self.copy()
    resource._request_entity = 'tag'
    tags_uri = '{}/tags'.format(resource._request_uri)
    if resource_id is None:
        resource._request_uri = tags_uri
    else:
        resource._request_uri = '{}/{}'.format(tags_uri, self.tcex.safetag(resource_id))
    return resource
This method will set the resource endpoint for working with Tags. The
HTTP GET method will return all tags applied to this resource or if a
resource id (tag name) is provided it will return the provided tag if
it has been applied, which could be useful to verify a tag is applied.
The provided resource_id (tag) can be applied to this resource using
the HTTP POST method. The HTTP DELETE method will remove the provided
tag from this resource.
**Example Endpoints URI's**
+--------------+------------------------------------------------------------+
| HTTP Method | API Endpoint URI's |
+==============+============================================================+
| GET | /v2/groups/{resourceType}/{uniqueId}/tags |
+--------------+------------------------------------------------------------+
| GET | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
| GET | /v2/indicators/{resourceType}/{uniqueId}/tags |
+--------------+------------------------------------------------------------+
| GET | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
| DELETE | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
| DELETE | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
| POST | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
| POST | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} |
+--------------+------------------------------------------------------------+
Args:
resource_id (Optional [string]): The resource id (tag name). |
def folding_model_gradient(rvec, rcut):
    r"""Compute the potential's gradient at point rvec.

    Returns the zero vector at the origin; otherwise, with
    r = \|rvec\| - rcut and u = rvec / \|rvec\|, returns
    -5 r u when r < 0 and (1.5 r - 2) u when r >= 0.
    """
    distance = np.linalg.norm(rvec)
    if distance == 0.0:
        return np.zeros(rvec.shape)
    direction = rvec / distance
    excess = distance - rcut
    if excess < 0.0:
        return -5.0 * excess * direction
    return (1.5 * excess - 2.0) * direction
def get_name_DID_info(self, name):
    """Get a name's DID info.

    Returns an {'error': ..., 'http_status': 404} dict when the name
    does not exist.
    """
    db = get_db_state(self.working_dir)
    did_info = db.get_name_DID_info(name)
    if did_info is not None:
        return did_info
    return {'error': 'No such name', 'http_status': 404}
Returns a dict with an 'error' key and http_status 404 if not found |
def astype(self, dtype):
    """Cast & clone an ANTsImage to a given numpy datatype.

    Map:
        uint8 : unsigned char
        uint32 : unsigned int
        float32 : float
        float64 : double
    """
    if dtype in _supported_dtypes:
        return self.clone(_npy_to_itk_map[dtype])
    raise ValueError('Datatype %s not supported. Supported types are %s' % (dtype, _supported_dtypes))
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double |
def execute_java_for_coverage(self, targets, *args, **kwargs):
    """Execute java for targets directly and don't use the test mixin.

    This execution won't be wrapped with timeouts and other test mixin
    code common across test targets. Used for coverage instrumentation.
    """
    jvm_distribution = self.preferred_jvm_distribution_for_targets(targets)
    subprocess_executor = SubprocessExecutor(jvm_distribution)
    return jvm_distribution.execute_java(*args, executor=subprocess_executor, **kwargs)
This execution won't be wrapped with timeouts and other test mixin code common
across test targets. Used for coverage instrumentation. |
def _readSentence(self):
    """Read one sentence and parse words.

    :returns: Reply word, dict with attribute words.
    """
    reply_word, raw_words = self.protocol.readSentence()
    attribute_words = dict(parseWord(word) for word in raw_words)
    return reply_word, attribute_words
:returns: Reply word, dict with attribute words. |
def _sendMsg(self, type, msg):
if self.ALERT_STATUS and type in self.ALERT_TYPES:
self._configMailer()
self._MAILER.send(self.MAILER_FROM, self.ALERT_EMAIL, self.ALERT_SUBJECT, msg) | Send Alert Message To Emails |
def listen(self):
    """Blocking key-event loop for widgets.

    Polls the terminal every 0.2s and dispatches keys to on_enter,
    on_key_arrow, on_exit or on_key until self._listen goes false.
    """
    while self._listen:
        key = u''
        # Short timeout so the loop can notice _listen being cleared.
        key = self.term.inkey(timeout=0.2)
        try:
            if key.code == KEY_ENTER:
                self.on_enter(key=key)
            elif key.code in (KEY_DOWN, KEY_UP):
                self.on_key_arrow(key=key)
            elif key.code == KEY_ESCAPE or key == chr(3):
                # chr(3) is Ctrl-C delivered as a plain character.
                self.on_exit(key=key)
            elif key != '':
                self.on_key(key=key)
        except KeyboardInterrupt:
            self.on_exit(key=key)
def every_match(self, callback, **kwargs):
    """Invoke callback every time a matching message is received.

    The callback is invoked directly inside process_message, so it is
    guaranteed to have been called by the time process_message returns.
    It can be removed later by passing the returned handle to
    remove_waiter().

    Args:
        callback (callable): called as callback(message) whenever a
            matching message is received.

    Returns:
        object: An opaque handle that can be passed to remove_waiter().

    Raises:
        ArgumentError: if no message field to wait on was specified.
    """
    if not kwargs:
        raise ArgumentError("You must specify at least one message field to wait on")
    spec = MessageSpec(**kwargs)
    return (spec, self._add_waiter(spec, callback))
The callback will be invoked directly inside process_message so that
you can guarantee that it has been called by the time process_message
has returned.
The callback can be removed by a call to remove_waiter(), passing the
handle object returned by this call to identify it.
Args:
callback (callable): A callable function that will be called as
callback(message) whenever a matching message is received.
Returns:
object: An opaque handle that can be passed to remove_waiter().
This handle is the only way to remove this callback if you no
longer want it to be called. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.