code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def event(self, coro):
    """Decorator that registers an event listener on this client.

    The handler must be a coroutine function (|corourl|_); otherwise
    :exc:`TypeError` is raised.  Returns the coroutine unchanged so the
    decorator is transparent.

    Example
    -------
    .. code-block:: python3

        @client.event
        async def on_ready():
            print('Ready!')

    Raises
    ------
    TypeError
        The coroutine passed is not actually a coroutine.
    """
    if asyncio.iscoroutinefunction(coro):
        setattr(self, coro.__name__, coro)
        log.debug('%s has successfully been registered as an event', coro.__name__)
        return coro
    raise TypeError('event registered must be a coroutine function')
def get_texts_and_labels(sentence_chunk):
    """Extract parallel lists of texts and labels from a sentence chunk.

    Each non-empty line of the chunk is tab-separated; the first field is
    the token text and the last field is its label.
    """
    texts, labels = [], []
    for line in sentence_chunk.split('\n'):
        line = line.strip()
        if not line:
            continue
        fields = line.split('\t')
        texts.append(fields[0].strip())
        labels.append(fields[-1].strip())
    return texts, labels
def _initalize_tree(self, position, momentum, slice_var, stepsize):
    """Initialize the root node of the NUTS tree, i.e. depth = 0.

    NOTE(review): the name keeps the original 'initalize' typo for
    compatibility with existing callers.
    """
    # One leapfrog step of the simulated Hamiltonian dynamics.
    position_bar, momentum_bar, _ = self.simulate_dynamics(self.model, position, momentum, stepsize,
                                                           self.grad_log_pdf).get_proposed_values()
    _, logp_bar = self.grad_log_pdf(position_bar, self.model).get_gradient_log_pdf()
    # Hamiltonian = log-probability minus kinetic energy.
    hamiltonian = logp_bar - 0.5 * np.dot(momentum_bar, momentum_bar)
    # Slice-sampling membership test: u < exp(H).
    candidate_set_size = slice_var < np.exp(hamiltonian)
    # Divergence check against the fixed threshold (Delta_max = 10000).
    accept_set_bool = hamiltonian > np.log(slice_var) - 10000
    return position_bar, momentum_bar, candidate_set_size, accept_set_bool
def get_existing_pipelines(self):
    """Get existing pipeline configs for this specific application.

    Returns:
        Parsed pipeline-config JSON from the Gate API.

    Raises:
        AssertionError: if the lookup request fails.
    """
    url = "{0}/applications/{1}/pipelineConfigs".format(API_URL, self.app_name)
    resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # exception would be safer for this validation.
    assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text)
    return resp.json()
def _catch_exceptions(self, exctype, value, tb):
    """Log an uncaught exception and print a formatted traceback."""
    exc_info = (exctype, value, tb)
    self.error('Uncaught exception', exc_info=exc_info)
    print_exception_formatted(exctype, value, tb)
def create_translation_field(translated_field, language):
    """Return a translation Field instance for the given field and language.

    Raises:
        ImproperlyConfigured: if the field type is not in SUPPORTED_FIELDS.
    """
    cls_name = translated_field.__class__.__name__
    if not isinstance(translated_field, tuple(SUPPORTED_FIELDS.keys())):
        raise ImproperlyConfigured("%s is not supported by Linguist." % cls_name)
    # Build the per-type translation class and its constructor kwargs.
    translation_class = field_factory(translated_field.__class__)
    kwargs = get_translation_class_kwargs(translated_field.__class__)
    return translation_class(
        translated_field=translated_field, language=language, **kwargs
    )
def upgradeCatalog1to2(oldCatalog):
    """Upgrade a tag_catalog item from schema version 1 to version 2.

    Creates the _TagName instances which version 2 of Catalog creates
    automatically (for the tagNames result) but version 1 did not.
    """
    newCatalog = oldCatalog.upgradeVersion('tag_catalog', 1, 2,
                                           tagCount=oldCatalog.tagCount)
    tags = newCatalog.store.query(Tag, Tag.catalog == newCatalog)
    # One _TagName per distinct tag name already present in the store.
    tagNames = tags.getColumn("name").distinct()
    for t in tagNames:
        _TagName(store=newCatalog.store, catalog=newCatalog, name=t)
    return newCatalog
def has_open_file(self, file_object):
    """Return True if the given file object is in the list of open files.

    Args:
        file_object: The FakeFile object to be checked.

    Returns:
        `True` if the file is open.
    """
    # Short-circuit on the first match instead of materializing a list of
    # every open object's get_object() result.
    for wrappers in self.open_files:
        if wrappers and file_object == wrappers[0].get_object():
            return True
    return False
def setImagePlotAutoRangeOn(self, axisNumber):
    """Set the image plot's auto-range on for the axis with `axisNumber`.

    :param axisNumber: 0 (X-axis), 1 (Y-axis), 2 (both X and Y axes).
    """
    setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.yAxisRangeCti, axisNumber)
def amax(data, axis=None, mapper=None, blen=None, storage=None,
         create='array', **kwargs):
    """Compute the maximum value, block-wise over `axis`.

    Delegates to `reduce_axis`, using np.amax within each block and
    np.maximum to combine results across blocks.
    """
    return reduce_axis(data, axis=axis, reducer=np.amax,
                       block_reducer=np.maximum, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
def send_audio(self, url, name, **audioinfo):
    """Send a pre-uploaded audio to the room.

    See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-audio
    for audioinfo.

    Args:
        url (str): The mxc url of the audio.
        name (str): The filename of the audio.
        audioinfo: Extra information about the audio.
    """
    return self.client.api.send_content(self.room_id, url, name, "m.audio",
                                        extra_information=audioinfo)
def load_data(self, data):
    """Yield dated rows for MultiStnData results.

    MultiStnData results are arrays without explicit dates; the time
    series is inferred from self.start_date / self.end_date.
    """
    dates = fill_date_range(self.start_date, self.end_date)
    for row, date in zip(data, dates):
        # NOTE(review): rebinding `data` shadows the argument being
        # iterated; it still works because zip() captured the original
        # iterable, but a distinct local name would be clearer.
        data = {'date': date}
        if self.add:
            for elem, vals in zip(self.parameter, row):
                # NOTE(review): stores under the literal key 'elem',
                # overwritten every iteration -- possibly meant to be a
                # keyed assignment; confirm against consumers.
                data['elem'] = elem
                for add, val in zip(['value'] + self.add, vals):
                    data[add] = val
                # NOTE(review): the same dict object is yielded repeatedly
                # and mutated in place; consumers keeping references will
                # observe later mutations.
                yield data
        else:
            for elem, val in zip(self.parameter, row):
                if elem.isdigit():
                    # Numeric element codes are prefixed so keys are names.
                    elem = "e%s" % elem
                data[elem] = val
            yield data
def _get_bottom_line_coordinates(self):
rect_x, rect_y, rect_width, rect_height = self.rect
start_point = rect_x, rect_y + rect_height
end_point = rect_x + rect_width, rect_y + rect_height
return start_point, end_point | Returns start and stop coordinates of bottom line |
def get_api_user_key(self, api_dev_key, username=None, password=None):
    """Get an api_user_key so posts can be made from a user account.

    Credentials fall back to the [pastebin] config section.  Returns ""
    when no credentials are available, which makes posts "guest" posts.

    NOTE(review): Python 2 code (urllib.urlencode / urllib2).
    """
    username = username or get_config('pastebin', 'api_user_name')
    password = password or get_config('pastebin', 'api_user_password')
    if username and password:
        data = {
            'api_user_name': username,
            'api_user_password': password,
            'api_dev_key': api_dev_key,
        }
        urlencoded_data = urllib.urlencode(data)
        req = urllib2.Request('http://pastebin.com/api/api_login.php',
                              urlencoded_data)
        response = urllib2.urlopen(req)
        user_key = response.read()
        logging.debug("User key: %s" % user_key)
        return user_key
    else:
        logging.info("Pastebin: not using any user key")
        return ""
def adjust_datetime_to_timezone(value, from_tz, to_tz=None):
    """Adjust a ``datetime`` from the `from_tz` timezone into `to_tz`.

    `to_tz` defaults to settings.TIME_ZONE.  Naive datetimes are first
    localized to `from_tz`, which may be a timezone name string or a
    tzinfo object providing `localize` (pytz-style).
    """
    if to_tz is None:
        to_tz = settings.TIME_ZONE
    if value.tzinfo is None:
        if not hasattr(from_tz, "localize"):
            from_tz = pytz.timezone(smart_str(from_tz))
        value = from_tz.localize(value)
    return value.astimezone(pytz.timezone(smart_str(to_tz)))
def get(self):
    u'''Get next event from the console input queue (Python 2 / ctypes).

    Pumps the registered input hook (if any) while polling, then blocks
    in ReadConsoleInputW until a single INPUT_RECORD has been read.
    '''
    inputHookFunc = c_void_p.from_address(self.inputHookPtr).value
    Cevent = INPUT_RECORD()
    count = DWORD(0)
    while 1:
        if inputHookFunc:
            call_function(inputHookFunc, ())
        status = self.ReadConsoleInputW(self.hin,
                                        byref(Cevent), 1, byref(count))
        if status and count.value == 1:
            e = event(self, Cevent)
            return e
def serve_forever(self, poll_interval=0.5):
    """Handle requests in a loop, sleeping `poll_interval` seconds between
    polls, until `is_alive` becomes false."""
    while True:
        if not self.is_alive:
            break
        self.handle_request()
        time.sleep(poll_interval)
def request_get_user(self, user_ids) -> dict:
    """Fetch users by ID via the `users.get` method (no authorization needed)."""
    params = {'user_ids': user_ids}
    result = self.session.send_method_request('users.get', params)
    self.check_for_errors('users.get', params, result)
    return result
def render_html(input_text, **context):
    """Render `input_text` as HTML using a default bbcode parser.

    The module-level parser is created lazily on first call and reused.
    """
    global g_parser
    if g_parser is None:
        g_parser = Parser()
    return g_parser.format(input_text, **context)
def _smooth_best_span_estimates(self):
    """Apply a MID_SPAN smooth to the best span estimates at each observation."""
    self._smoothed_best_spans = smoother.perform_smooth(self.x,
                                                        self._best_span_at_each_point,
                                                        MID_SPAN)
def find_by_id(self, user, params=None, **options):
    """Return the full user record for the single user with the provided ID.

    Parameters
    ----------
    user : str
        An identifier for the user: an email address, the globally unique
        identifier for the user, or the keyword `me` for the current user.
    params : dict, optional
        Parameters for the request.
    """
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if params is None:
        params = {}
    path = "/users/%s" % (user)
    return self.client.get(path, params, **options)
def build(self):
    """Build the index, returning an instance of `lunr.Index`.

    This completes the indexing process and should only be called once
    all documents have been added to the index.
    """
    self._calculate_average_field_lengths()
    self._create_field_vectors()
    self._create_token_set()
    return Index(
        inverted_index=self.inverted_index,
        field_vectors=self.field_vectors,
        token_set=self.token_set,
        fields=list(self._fields.keys()),
        pipeline=self.search_pipeline,
    )
def set_xlim(self, xlims, dx, xscale, reverse=False):
    """Set x-axis limits for this specific plot.

    Args:
        xlims (len-2 list of floats): The limits for the axis.
        dx (float): Amount to increment by between the limits.
        xscale (str): Scale of the axis: `log` or `lin`.
        reverse (bool, optional): If True, reverse the axis tick marks.
            Default is False.
    """
    self._set_axis_limits('x', xlims, dx, xscale, reverse)
def header_size(self):
    """Size of the file's header in bytes.

    Determined from `header`; returns 0 when that is not possible
    (i.e. before the header has been read).
    """
    if not self.header:
        return 0
    last = max(self.header.values(), key=lambda entry: entry['offset'])
    return last['offset'] + last['value'].nbytes
def make_fixed_temp_multi_apec(kTs, name_template='apec%d', norm=None):
    """Create a model summing multiple APEC components at fixed temperatures.

    *kTs*
      An iterable of temperatures for the components, in keV.
    *name_template* = 'apec%d'
      Template for component names; string-formatted with the 0-based
      component number.
    *norm* = None
      Initial normalization for every component, or None for the Sherpa
      default.

    Returns ``(total_model, sub_models)``: the Sherpa model summing the
    APEC components, and the list of individual components.  Each
    component's *kT* is set and frozen, so only the amplitudes remain
    free parameters.
    """
    total_model = None
    sub_models = []
    for i, kT in enumerate(kTs):
        component = ui.xsapec(name_template % i)
        component.kT = kT
        # Fix the temperature; the amplitude stays free.
        ui.freeze(component.kT)
        if norm is not None:
            component.norm = norm
        sub_models.append(component)
        if total_model is None:
            total_model = component
        else:
            total_model = total_model + component
    return total_model, sub_models
def makeSequenceAbsolute(relVSequence, minV, maxV):
    """Map relative values (fractions of the range) onto [minV, maxV]."""
    span = maxV - minV
    return [v * span + minV for v in relVSequence]
def _soap_client_call(method_name, *args):
    """Call `method_name` on a freshly built SoapClient with converted args.

    Handles the pysimplesoap 1.16.2 API change where methods no longer
    take the client itself as the first argument.
    """
    soap_client = _build_soap_client()
    soap_args = _convert_soap_method_args(*args)
    if PYSIMPLESOAP_1_16_2:
        return getattr(soap_client, method_name)(*soap_args)
    else:
        return getattr(soap_client, method_name)(soap_client, *soap_args)
def create_grupo_l3(self):
    """Get an instance of the grupo_l3 services facade."""
    return GrupoL3(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
def ensure_sequence_filter(data):
    """Jinja filter ensuring the parsed data is a sequence.

    Lists, tuples, sets and dicts pass through unchanged; any other value
    is wrapped in a one-element list.

    .. code-block:: jinja

        {% set my_string = "foo" %}
        {{ my_string|sequence|first }}   {# renders: foo #}
    """
    if isinstance(data, (list, tuple, set, dict)):
        return data
    return [data]
def add_state(self, state, storage_load=False):
    """Overwrite the parent class add_state method.

    Adds automatic transition generation for the decider state: every
    outcome of the new state (except the reserved ids -1 and -2) gets a
    transition to the unique decider state, unless this is a storage
    load, initialization is still running, or the state being added is
    the decider state itself.

    :param state: The state to be added
    :return: the id of the added state
    """
    state_id = super(BarrierConcurrencyState, self).add_state(state)
    if not storage_load and not self.__init_running and not state.state_id == UNIQUE_DECIDER_STATE_ID:
        for o_id, o in list(state.outcomes.items()):
            # -1 / -2 are reserved outcome ids and get no auto-transition.
            if not o_id == -1 and not o_id == -2:
                self.add_transition(state.state_id, o_id, self.states[UNIQUE_DECIDER_STATE_ID].state_id, None)
    return state_id
def put(self, artifact):
    """Add the given coordinate to the set, using its version to pin it.

    If this set already contains an artifact with the same coordinates
    other than the version, it is replaced by the new artifact.

    :param M2Coordinate artifact: the artifact coordinate.
    :raises MissingVersion: if the artifact's version is None.
    """
    artifact = M2Coordinate.create(artifact)
    if artifact.rev is None:
        raise self.MissingVersion('Cannot pin an artifact to version "None"! {}'.format(artifact))
    key = self._key(artifact)
    previous = self._artifacts_to_versions.get(key)
    self._artifacts_to_versions[key] = artifact
    if previous != artifact:
        # Contents changed: invalidate the cached id.
        self._id = None
def get_total_supply(self) -> int:
    """Call the TotalSupply method of the OEP4 contract.

    :return: the total supply of the oep4 token (0 if the result cannot
        be parsed as an int).
    """
    func = InvokeFunction('totalSupply')
    response = self.__sdk.get_network().send_neo_vm_transaction_pre_exec(self.__hex_contract_address, None, func)
    try:
        total_supply = ContractDataParser.to_int(response['Result'])
    except SDKException:
        # Unparseable result: report an empty supply rather than failing.
        total_supply = 0
    return total_supply
def update(self, a, b, c, d):
    """Overwrite the 2x2 contingency table in place and refresh the total N."""
    flat = self.table.ravel()
    flat[:] = [a, b, c, d]
    self.N = self.table.sum()
def _create_interface_specification(schema_graph, graphql_types, hidden_classes, cls_name):
    """Return a function that specifies the interfaces implemented by the type.

    The returned closure is evaluated lazily: it walks the sorted
    inheritance set of `cls_name`, keeping only abstract, non-hidden
    superclasses, and maps them to their GraphQL types.
    """
    def interface_spec():
        abstract_inheritance_set = (
            superclass_name
            for superclass_name in sorted(list(schema_graph.get_inheritance_set(cls_name)))
            if (superclass_name not in hidden_classes and
                schema_graph.get_element_by_class_name(superclass_name).abstract)
        )
        return [
            graphql_types[x]
            for x in abstract_inheritance_set
            if x not in hidden_classes
        ]
    return interface_spec
def get(self, cls, rid):
    """Return the record of type `cls` with key `rid`.

    Raises:
        ValueError: if `cls` is not a supported record type.
        KeyError: if no record of that type has id `rid`.
    """
    self.validate_record_type(cls)
    rows = self.db.select(cls, where={ID: rid}, limit=1)
    if rows:
        return rows[0]
    raise KeyError('No {} record with id {}'.format(cls, rid))
async def read_reply(self):
    """Read a (possibly multiline) reply from the server.

    Returns:
        (int, str): a 2-tuple of the server response code and the full
        response string (multiline responses joined with newlines).

    Raises:
        ConnectionResetError: if the server replies without a proper
        return code (connection considered lost).
    """
    code = 500
    messages = []
    go_on = True
    while go_on:
        try:
            line = await self.readline()
        except ValueError as e:
            # Incomplete/overlong line: stop reading, keep the 500 code.
            code = 500
            go_on = False
        else:
            try:
                code = int(line[:3])
            except ValueError as e:
                raise ConnectionResetError("Connection lost.") from e
            else:
                # A '-' after the code marks a continuation line.
                go_on = line[3:4] == b"-"
                message = line[4:].strip(b" \t\r\n").decode("ascii")
                messages.append(message)
    full_message = "\n".join(messages)
    return code, full_message
def named_objs(objlist, namesdict=None):
    """Return an ordered mapping from display name to object.

    Name resolution order for each object: an entry in `namesdict` (which
    overrides everything else), then the object's `name` attribute, then
    `__name__`, finally its unicode representation.
    """
    objs = OrderedDict()
    if namesdict is not None:
        # Reverse mapping: object (in hashable form) -> preferred name.
        objtoname = {hashable(v): k for k, v in namesdict.items()}
    for obj in objlist:
        if namesdict is not None and hashable(obj) in objtoname:
            k = objtoname[hashable(obj)]
        elif hasattr(obj, "name"):
            k = obj.name
        elif hasattr(obj, '__name__'):
            k = obj.__name__
        else:
            k = as_unicode(obj)
        objs[k] = obj
    return objs
def block_events(self):
    """Special version of block_events that loops over all tree elements.

    Blocks the base object's events, then blocks signals on every
    top-level tree item's param.  Returns self for chaining.
    """
    BaseObject.block_events(self)
    for i in range(self._widget.topLevelItemCount()):
        self._widget.topLevelItem(i).param.blockSignals(True)
    return self
def realign(self, cut_off, chains_to_skip=None):
    """Alter the cut-off and run the alignment again.

    Much quicker than creating a new PDBUniParcSequenceAligner object, as
    the UniParcEntry creation etc. in the constructor is not repeated.

    :param cut_off: the new alignment cut-off; no-op if unchanged.
    :param chains_to_skip: optional set of chains that were already
        matched, skipped to speed up the alignment further.
    """
    # None sentinel instead of a mutable default argument.
    if chains_to_skip is None:
        chains_to_skip = set()
    if cut_off != self.cut_off:
        self.cut_off = cut_off
        # Invalidate cached results for all chains being realigned.
        for c in self.chains:
            if c not in chains_to_skip:
                self.clustal_matches[c] = None
                self.substring_matches[c] = None
                if self.alignment.get(c):
                    del self.alignment[c]
                if self.seqres_to_uniparc_sequence_maps.get(c):
                    del self.seqres_to_uniparc_sequence_maps[c]
        self._align_with_clustal(chains_to_skip=chains_to_skip)
        self._align_with_substrings(chains_to_skip=chains_to_skip)
        self._check_alignments(chains_to_skip=chains_to_skip)
        self._get_residue_mapping(chains_to_skip=chains_to_skip)
def GetCampaignFeeds(client, feed, placeholder_type):
    """Get the enabled CampaignFeeds using a given Feed and placeholder type.

    Args:
        client: an AdWordsClient instance.
        feed: a Campaign Feed.
        placeholder_type: the Placeholder Type.

    Returns:
        A list of CampaignFeed entries (paged through PAGE_SIZE at a time).
    """
    campaign_feed_service = client.GetService('CampaignFeedService', 'v201809')
    campaign_feeds = []
    more_pages = True
    selector = {
        'fields': ['CampaignId', 'MatchingFunction', 'PlaceholderTypes'],
        'predicates': [
            {
                'field': 'Status',
                'operator': 'EQUALS',
                'values': ['ENABLED']
            },
            {
                'field': 'FeedId',
                'operator': 'EQUALS',
                'values': [feed['id']]
            },
            {
                'field': 'PlaceholderTypes',
                'operator': 'CONTAINS_ANY',
                'values': [placeholder_type]
            }
        ],
        'paging': {
            'startIndex': 0,
            'numberResults': PAGE_SIZE
        }
    }
    while more_pages:
        page = campaign_feed_service.get(selector)
        if 'entries' in page:
            campaign_feeds.extend(page['entries'])
        # Advance paging and stop once all entries have been fetched.
        selector['paging']['startIndex'] += PAGE_SIZE
        more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])
    return campaign_feeds
def _pfp__notify_update(self, child=None):
    """Handle a union child with an updated value.

    Rebuilds the updated child's bytes and re-parses every sibling from
    them so all union members stay consistent, then notifies the parent.
    """
    if getattr(self, "_pfp__union_update_other_children", True):
        # Guard against recursive updates while siblings are re-parsed.
        self._pfp__union_update_other_children = False
        new_data = child._pfp__build()
        new_stream = bitwrap.BitwrappedStream(six.BytesIO(new_data))
        for other_child in self._pfp__children:
            if other_child is child:
                continue
            if isinstance(other_child, Array) and other_child.is_stringable():
                other_child._pfp__set_value(new_data)
            else:
                other_child._pfp__parse(new_stream)
            new_stream.seek(0)
        # NOTE(review): this sets `_pfp__no_update_other_children`, but the
        # guard above reads `_pfp__union_update_other_children`, which is
        # never reset to True -- looks like a flag-name mismatch; confirm.
        self._pfp__no_update_other_children = True
    super(Union, self)._pfp__notify_update(child=child)
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
    """Initialize a BlockManager: passed a manager and an axes dict.

    Axes entries that are not None are reindexed onto the manager; the
    manager is then optionally copied and cast to `dtype`.
    """
    for a, axe in axes.items():
        if axe is not None:
            mgr = mgr.reindex_axis(axe,
                                   axis=self._get_block_manager_axis(a),
                                   copy=False)
    if copy:
        mgr = mgr.copy()
    if dtype is not None:
        # Avoid an unnecessary astype when the manager is already a single
        # block of the requested dtype.
        if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
            mgr = mgr.astype(dtype=dtype)
    return mgr
def getActors(self):
    """Unpack the list of pickable ``vtkActor`` objects from a ``vtkAssembly``."""
    cl = vtk.vtkPropCollection()
    self.GetActors(cl)
    self.actors = []
    cl.InitTraversal()
    for i in range(self.GetNumberOfPaths()):
        act = vtk.vtkActor.SafeDownCast(cl.GetNextProp())
        # SafeDownCast returns None for props that are not vtkActor;
        # guard so non-actor props don't raise AttributeError.
        if act is not None and act.GetPickable():
            self.actors.append(act)
    return self.actors
def MGMT_COMM_GET(self, Addr='ff02::1', TLVs=[]):
    """Send a MGMT_COMM_GET command (Python 2 code).

    Returns:
        True: successfully sent MGMT_COMM_GET
        False/None: failed to send MGMT_COMM_GET

    NOTE(review): `TLVs=[]` is a mutable default argument; it is only
    read here, but a None default would be safer.
    """
    print '%s call MGMT_COMM_GET' % self.port
    try:
        cmd = 'commissioner mgmtget'
        if len(TLVs) != 0:
            # Encode each TLV as a two-digit hex byte.
            tlvs = "".join(hex(tlv).lstrip("0x").zfill(2) for tlv in TLVs)
            cmd += ' binary '
            cmd += tlvs
        print cmd
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("MGMT_COMM_GET() Error: " + str(e))
def _get_session(self):
if self._session is None:
session = self._session = self._database.session()
session.create()
return self._session | Create session as needed.
.. note::
Caller is responsible for cleaning up the session after
all partitions have been processed. |
def run_clients():
    """Force-create drivers for each client in the `clients` form field.

    `clients` is a comma-separated list of client ids; unknown clients get
    a driver and timer initialized.  Returns JSON mapping each client id
    to its info, or an error object when no clients were provided.
    """
    clients = request.form.get('clients')
    if not clients:
        return jsonify({'Error': 'no clients provided'})
    result = {}
    for client_id in clients.split(','):
        if client_id not in drivers:
            init_client(client_id)
            init_timer(client_id)
        result[client_id] = get_client_info(client_id)
    return jsonify(result)
def get_conf_update(self):
    """Get updated config from the URL, falling back to the local file.

    Raises:
        ValueError: when the downloaded config has no 'version' field.
    """
    dyn_conf = self.get_collection_rules()
    if not dyn_conf:
        # Download failed or empty: use the local config file instead.
        return self.get_conf_file()
    version = dyn_conf.get('version', None)
    if version is None:
        raise ValueError("ERROR: Could not find version in json")
    dyn_conf['file'] = self.collection_rules_file
    logger.debug("Success reading config")
    config_hash = hashlib.sha1(json.dumps(dyn_conf).encode('utf-8')).hexdigest()
    logger.debug('sha1 of config: %s', config_hash)
    return dyn_conf
def mkdir(dir_path,
          user=None,
          group=None,
          mode=None):
    """Ensure that a directory is available, creating it (with the given
    ownership and mode) when missing.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mkdir /opt/jetty/context
    """
    dir_path = os.path.expanduser(dir_path)
    directory = os.path.normpath(dir_path)
    if not os.path.isdir(directory):
        # The directory does not exist: make directories with the given perms.
        makedirs_perms(directory, user, group, mode)
    return True
def offset(self, offset_value):
    """Return a copy of self, shifted by a constant offset.

    Parameters
    ----------
    offset_value : float
        Number of pixels to shift the CCDLine.
    """
    shifted = deepcopy(self)
    shifted.poly_funct.coef[0] += offset_value
    return shifted
def close(self: Any) -> None:
    """Close any file linked to this object and drop the reference."""
    file_obj = self._file_obj
    if file_obj is not None:
        file_obj.close()
        self._file_obj = None
def is_inside_bounds(value, params):
    """Return ``True`` if ``value`` is contained in ``params``.

    Supports broadcasting: for ``params.ndim >= 2``, multiple input
    values are broadcast against each other before the containment test.

    Parameters
    ----------
    value : `array-like`
        Value(s) to be checked.  For several inputs, the final bool
        tells whether all inputs pass the check.
    params : `IntervalProd`
        Set in which the value(s) are supposed to lie.

    Returns
    -------
    is_inside_bounds : bool
        ``True`` if all values lie in ``params``, ``False`` otherwise.

    Examples
    --------
    >>> params = odl.IntervalProd([0, 0], [1, 2])
    >>> is_inside_bounds([0, 0], params)
    True
    >>> is_inside_bounds([0, -1], params)
    False
    """
    if value in params:
        return True
    else:
        if params.ndim == 1:
            return params.contains_all(np.ravel(value))
        else:
            # Broadcast per-axis inputs against each other, then flatten to
            # a (ndim, N) array of points for the containment check.
            bcast_value = np.broadcast_arrays(*value)
            stacked_value = np.vstack(bcast_value)
            flat_value = stacked_value.reshape(params.ndim, -1)
            return params.contains_all(flat_value)
def write(self, frames):
    """Write the frames to the target HDF5 file in ``pd.Panel.to_hdf`` format.

    Parameters
    ----------
    frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
        An iterable or other mapping of sid to the corresponding OHLCV
        pricing data.

    NOTE(review): relies on ``pd.Panel``, which is removed in pandas 1.0.
    """
    with HDFStore(self._path, 'w',
                  complevel=self._complevel, complib=self._complib) \
            as store:
        panel = pd.Panel.from_dict(dict(frames))
        panel.to_hdf(store, 'updates')
    with tables.open_file(self._path, mode='r+') as h5file:
        # Tag the file with a format version for readers.
        h5file.set_node_attr('/', 'version', 0)
def normalize_scheme(path, ext):
    """Normalize the scheme for HDFS-related paths.

    Adds `ext` to the path; paths that already carry a URL scheme are
    returned unchanged, otherwise an absolute ``file://`` URL is built.
    """
    path = addextension(path, ext)
    if urlparse(path).scheme:
        return path
    import os
    dirname, filename = os.path.split(path)
    if not os.path.isabs(dirname):
        dirname = os.path.abspath(dirname)
    return "file://" + os.path.join(dirname, filename)
def pack(name, root, path=None, pack_format='tar', compress='bzip2'):
    """Pack up a directory structure into a specific format.

    Only 'tar' is currently handled; other formats are silently ignored.

    CLI Examples:

    .. code-block:: bash

        salt myminion genesis.pack centos /root/centos
        salt myminion genesis.pack centos /root/centos pack_format='tar'
    """
    if pack_format == 'tar':
        _tar(name, root, path, compress)
def check_plate_compatibility(tool, source_plate, sink_plate):
    """Check whether the source and sink plate are compatible given the tool.

    :param tool: The tool
    :param source_plate: The source plate
    :param sink_plate: The sink plate
    :return: Either an error message, or None when compatible
    :type tool: Tool
    :type source_plate: Plate
    :type sink_plate: Plate
    :rtype: None | str
    """
    if sink_plate == source_plate.parent:
        return None
    # Same meta data: the sink must be a sub-plate (simplification).
    if sink_plate.meta_data_id == source_plate.meta_data_id:
        if sink_plate.is_sub_plate(source_plate):
            return None
        return "Sink plate {} is not a simplification of source plate {}".format(
            sink_plate.plate_id, source_plate.plate_id)
    # Otherwise an aggregation: exactly one meta-data level may differ,
    # and it must match the tool's aggregation meta data.
    meta_data_diff = set(source_plate.ancestor_meta_data_ids) - set(sink_plate.ancestor_meta_data_ids)
    if len(meta_data_diff) == 1:
        if tool.aggregation_meta_data not in meta_data_diff:
            return "Aggregate tool meta data ({}) " \
                   "does not match the diff between source and sink plates ({})".format(
                tool.aggregation_meta_data, list(meta_data_diff)[0])
    else:
        return "{} not in source's parent plates".format(sink_plate.plate_id)
def eval(self, packet):
    """Evaluate this DNToEUConversion in the context of the given Packet.

    Returns the equation's result, or None when a `when` guard is present
    and evaluates false.
    """
    # (removed unused local `terms`)
    result = None
    if self._when is None or self._when.eval(packet):
        result = self._equation.eval(packet)
    return result
def beta_array(C, HIGHSCALE, *args, **kwargs):
    """Return the beta functions of all SM parameters and SMEFT Wilson
    coefficients, flattened into a single 1D numpy array."""
    beta_odict = beta(C, HIGHSCALE, *args, **kwargs)
    return np.hstack([np.asarray(b).ravel() for b in beta_odict.values()])
def list_tables(self, dataset):
    """Yield the ids of the tables in a given dataset, following pages.

    :param dataset:
    :type dataset: BQDataset
    """
    request = self.client.tables().list(projectId=dataset.project_id,
                                        datasetId=dataset.dataset_id,
                                        maxResults=1000)
    response = request.execute()
    while response is not None:
        for t in response.get('tables', []):
            yield t['tableReference']['tableId']
        # list_next returns None once all pages have been consumed.
        request = self.client.tables().list_next(request, response)
        if request is None:
            break
        response = request.execute()
def maybe_timeout_options(self):
    """Implements the NailgunProtocol.TimeoutProvider interface.

    Returns TimeoutOptions when an exit-timeout start time is set,
    otherwise None.
    """
    if not self._exit_timeout_start_time:
        return None
    return NailgunProtocol.TimeoutOptions(self._exit_timeout_start_time, self._exit_timeout)
def register_predictor(cls, name):
    """Return a class decorator registering a predictor subclass under `name`.

    The name is lower-cased both for the registry key and the subclass's
    `name` attribute.
    """
    key = name.lower()

    def decorator(subclass):
        cls._predictors[key] = subclass
        subclass.name = key
        return subclass

    return decorator
def _default_service_formatter(
service_url,
width,
height,
background,
foreground,
options
):
image_tmp = '{service_url}/{width}x{height}/{background}/{foreground}/'
image_url = image_tmp.format(
service_url=service_url,
width=width,
height=height,
background=background,
foreground=foreground
)
if options:
image_url += '?' + urlencode(options)
return image_url | Generate an image URL for a service |
def exec_command(self, command):
    """Execute a command on the server.

    If the server allows it, the channel is then directly connected to
    the stdin, stdout, and stderr of the command being executed.  When
    the command finishes, the channel is closed and can't be reused; open
    a new channel to execute another command.

    :param str command: a shell command to execute.
    :raises:
        `.SSHException` -- if the request was rejected or the channel was
        closed
    """
    m = Message()
    m.add_byte(cMSG_CHANNEL_REQUEST)
    m.add_int(self.remote_chanid)
    m.add_string("exec")
    m.add_boolean(True)  # want_reply: we block below until the server answers
    m.add_string(command)
    self._event_pending()
    self.transport._send_user_message(m)
    self._wait_for_event()
def get_mesos_task(task_name):
    """Return the mesos task whose name equals `task_name`, or None."""
    tasks = get_mesos_tasks()
    if tasks is None:
        return None
    return next((task for task in tasks if task['name'] == task_name), None)
def op_count(cls, crawler, stage=None):
    """Total operations performed for this crawler (optionally per stage)."""
    if stage:
        total_ops = conn.get(make_key(crawler, stage))
    else:
        total_ops = conn.get(make_key(crawler, "total_ops"))
    return unpack_int(total_ops)
def scan(self):
    """Trigger the wifi interface to scan."""
    self._logger.info("iface '%s' scans", self.name())
    self._wifi_ctrl.scan(self._raw_obj)
def _get_process_cwd(pid):
    """Return the working directory for the provided process identifier.

    `pid`
        System process identifier.

    Returns string or ``None``.

    Note this is used as a workaround, since `psutil` isn't consistent on
    being able to provide this path in all cases, especially MacOS X.
    """
    cmd = 'lsof -a -p {0} -d cwd -Fn'.format(pid)
    data = common.shell_process(cmd)
    # Idiomatic `is not None` instead of `not data is None`.
    if data is not None:
        lines = str(data).split('\n')
        if len(lines) > 1:
            # lsof -Fn prefixes the path line with 'n'; strip it.
            return lines[1][1:] or None
    return None
def key(self, *args, _prefix=None, **kwargs):
    """Get the cache key for the given function args.

    Kwargs:
        _prefix: A constant to prefix to the key.

    Raises:
        NotImplementedError: if keyword cache keys are supplied.
    """
    if kwargs:
        raise NotImplementedError(
            'kwarg cache keys not implemented')
    return (_prefix, *args)
def ScriptHash(self):
    """Get the script hash, computing and caching it on first access.

    Returns:
        UInt160:
    """
    if self._scriptHash is None:
        self._scriptHash = Crypto.ToScriptHash(self.Script, unhex=False)
    return self._scriptHash
def is_executable(path):
    """Return whether `path` names an existing, executable regular file."""
    return os.access(path, os.X_OK) and os.path.isfile(path)
def get_variable_for_feature(self, feature_key, variable_key):
    """Get the variable with `variable_key` for the feature `feature_key`.

    Args:
        feature_key: The key of the feature for which we are getting the variable.
        variable_key: The key of the variable we are getting.

    Returns:
        The Variable, or None (with an error logged) when either key is
        not found in the datafile.
    """
    feature = self.feature_key_map.get(feature_key)
    if not feature:
        self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key)
        return None
    if variable_key not in feature.variables:
        self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key)
        return None
    return feature.variables.get(variable_key)
def _is_potential_multi_index(columns):
return (len(columns) and not isinstance(columns, MultiIndex) and
all(isinstance(c, tuple) for c in columns)) | Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
Returns
-------
boolean : Whether or not columns could become a MultiIndex |
def pivot(self, index, **kwargs):
    """Pivot the dataframe; report failures via self.err.

    NOTE(review): the result of `self._pivot` is assigned to `df` but
    never used, and `index=kwargs["index"]` raises a duplicate-keyword
    TypeError whenever 'index' is in kwargs while the positional `index`
    argument is ignored -- this looks buggy; confirm intended behavior.
    """
    try:
        df = self._pivot(index, **kwargs)
        return pd.pivot_table(self.df, index=kwargs["index"], **kwargs)
    except Exception as e:
        self.err(e, "Can not pivot dataframe")
def _rotate(degrees:uniform):
    "Rotate image by `degrees`: return the 3x3 affine rotation matrix."
    # NOTE(review): assumes `cos`/`sin` are imported bare (e.g. from math)
    # at module level while `pi` is accessed via `math.` -- confirm imports.
    angle = degrees * math.pi / 180
    return [[cos(angle), -sin(angle), 0.],
            [sin(angle), cos(angle), 0.],
            [0. , 0. , 1.]]
def parse_profile_from_hcard(hcard: str, handle: str):
    """Parse all the fields we can from a hCard document to get a Profile.

    :arg hcard: HTML hcard document (str)
    :arg handle: User handle in username@domain.tld format
    :returns: ``federation.entities.diaspora.entities.DiasporaProfile`` instance
    """
    # Imported lazily to avoid a circular import at module load time.
    from federation.entities.diaspora.entities import DiasporaProfile
    doc = html.fromstring(hcard)
    profile = DiasporaProfile(
        name=_get_element_text_or_none(doc, ".fn"),
        image_urls={
            "small": _get_element_attr_or_none(doc, ".entity_photo_small .photo", "src"),
            "medium": _get_element_attr_or_none(doc, ".entity_photo_medium .photo", "src"),
            "large": _get_element_attr_or_none(doc, ".entity_photo .photo", "src"),
        },
        public=True if _get_element_text_or_none(doc, ".searchable") == "true" else False,
        id=handle,
        handle=handle,
        guid=_get_element_text_or_none(doc, ".uid"),
        public_key=_get_element_text_or_none(doc, ".key"),
    )
    return profile
def get_title(self, obj):
    """Set search entry title for object.

    Uses the per-model ``search_title`` template when configured,
    otherwise falls back to the parent implementation.
    """
    template = self.get_model_config_value(obj, 'search_title')
    if template:
        # Template placeholders are filled from the object's attributes.
        return template.format(**obj.__dict__)
    return super().get_title(obj)
def access_token(self):
    """WeChat access token.

    Returns the cached token from the session when it is still valid
    (no expiry recorded, or more than 60 seconds of validity left);
    otherwise fetches a fresh one first.
    """
    token = self.session.get(self.access_token_key)
    if token:
        if not self.expires_at:
            # No expiry recorded — treat the cached token as valid.
            return token
        if self.expires_at - time.time() > 60:
            # Still valid for over a minute; reuse it.
            return token
    self.fetch_access_token()
    return self.session.get(self.access_token_key)
def descendants(self, unroll=False, skip_not_present=True, in_post_order=False):
    """Iterate over all descendant nodes of this component.

    Parameters
    ----------
    unroll : bool
        If True, any children that are arrays are unrolled.
    skip_not_present : bool
        If True, skips children whose 'ispresent' property is False.
    in_post_order : bool
        If True, walk children-first (post-order) rather than the
        default parents-first (pre-order) traversal.

    Yields
    ------
    :class:`~Node`
        All descendant nodes of this component.
    """
    for child in self.children(unroll, skip_not_present):
        if in_post_order:
            # Post-order: the child's subtree is emitted before the child.
            yield from child.descendants(unroll, skip_not_present, in_post_order)
            yield child
        else:
            # Pre-order: the child is emitted before its subtree.
            yield child
            yield from child.descendants(unroll, skip_not_present, in_post_order)
def get_env(self):
    """Return the environment dict to use for the Spawner.

    See also: jupyterhub.Spawner.get_env
    """
    env = super(KubeSpawner, self).get_env()
    # Expose the image spec under both names for compatibility.
    env.update(JUPYTER_IMAGE_SPEC=self.image, JUPYTER_IMAGE=self.image)
    return env
def error_wrapper(fn, error_class):
    """Wrap ``fn`` so any exception it raises is re-raised as ``error_class``.

    The original traceback is preserved on the converted exception.

    Args:
        fn (function): function to be wrapped.
        error_class (Exception): exception class to re-raise as.

    Returns:
        function: ``fn`` wrapped in a try/except that converts errors.
    """
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            # Native Python 3 equivalent of six.reraise: raise the new
            # exception carrying the original traceback.
            raise error_class(e).with_traceback(sys.exc_info()[2])
    return wrapper
def collect_analysis(using):
    """Generate the analysis settings from Python land.

    Merges the ``_collect_analysis`` output of every index registered
    for the given connection and runs the result through ``stringer``.
    """
    merged = defaultdict(dict)
    for index in registry.indexes_for_connection(using):
        merged.update(index._doc_type.mapping._collect_analysis())
    return stringer(merged)
def _mock_request(self, **kwargs):
    """A mocked-out make_request that bypasses all network calls and
    simply returns any mocked responses defined."""
    op_model = kwargs.get('model')
    service_name = op_model.service_model.endpoint_prefix
    op_name = op_model.name
    LOG.debug('_make_request: %s.%s', service_name, op_name)
    return self.load_response(service_name, op_name)
def _deep_different(left, right, entry):
    """Check whether ``entry`` differs between ZipFile instances ``left``
    and ``right``.

    Compares the entry chunk-by-chunk; ``zip_longest`` ensures a length
    mismatch also registers as a difference.
    """
    left_chunks = chunk_zip_entry(left, entry)
    right_chunks = chunk_zip_entry(right, entry)
    return any(a != b for a, b in zip_longest(left_chunks, right_chunks))
def _read_data_type_2(self, length):
    """Read IPv6-Route Type 2 data [RFC 6275].

    The 20-byte payload is a 4-byte reserved field followed by a
    16-byte home address.

    Returns:
        dict: ``{'ip': ipaddress}`` holding the home address.

    Raises:
        ProtocolError: if the payload is not exactly 20 bytes.
    """
    if length != 20:
        raise ProtocolError(f'{self.alias}: [Typeno 2] invalid format')
    # Read order matters: 4 reserved bytes precede the home address.
    self._read_fileng(4)
    home_addr = self._read_fileng(16)
    return dict(ip=ipaddress.ip_address(home_addr))
def add_login_attempt_to_db(request, login_valid,
                            get_username=get_username_from_request,
                            username=None):
    """Create a record for the login attempt.

    When celery is enabled, dispatch the work to the celery task;
    otherwise store the attempt synchronously. Does nothing when
    ``STORE_ACCESS_ATTEMPTS`` is off.
    """
    if not config.STORE_ACCESS_ATTEMPTS:
        return
    resolved_username = username or get_username(request)
    meta = request.META
    # Positional order matches the task/store signature:
    # (user_agent, ip_address, username, http_accept, path_info, login_valid)
    attempt = (
        meta.get('HTTP_USER_AGENT', '<unknown>')[:255],
        get_ip(request),
        resolved_username,
        meta.get('HTTP_ACCEPT', '<unknown>'),
        meta.get('PATH_INFO', '<unknown>'),
        login_valid,
    )
    if config.USE_CELERY:
        from .tasks import add_login_attempt_task
        add_login_attempt_task.delay(*attempt)
    else:
        store_login_attempt(*attempt)
def destroy(self, id):
    """Destroy a group.

    :param str id: a group ID
    :return: ``True`` if successful
    :rtype: bool
    """
    endpoint = utils.urljoin(self.url, '{}/destroy'.format(id))
    response = self.session.post(endpoint)
    return response.ok
def _idx_table_by_num(tables):
    """Switch tables to index-by-number.

    :param dict tables: Metadata
    :return list: Metadata tables indexed by number
    """
    logger_jsons.info("enter idx_table_by_num")
    converted = []
    for name, table in tables.items():
        try:
            converted.append(_idx_col_by_num(table))
        except Exception as e:
            # Log and skip tables that fail conversion; keep the rest.
            logger_jsons.error("idx_table_by_num: {}".format(e))
    logger_jsons.info("exit idx_table_by_num")
    return converted
def formatBodyNode(root, path):
    """Format the root node for use as the body node.

    Mutates ``root`` in place and returns it.
    """
    root.name = "body"
    root.weight = calcFnWeight(root)
    root.path = path
    root.pclass = None
    return root
def _update_seek(self, offset, whence):
    """Update seek value.

    Args:
        offset (int): Offset.
        whence (int): One of SEEK_SET, SEEK_CUR, SEEK_END.

    Returns:
        int: New seek position.

    Raises:
        ValueError: If whence is unsupported.
    """
    with self._seek_lock:
        if whence == SEEK_SET:
            new_pos = offset
        elif whence == SEEK_CUR:
            new_pos = self._seek + offset
        elif whence == SEEK_END:
            new_pos = self._size + offset
        else:
            raise ValueError('whence value %s unsupported' % whence)
        self._seek = new_pos
        return new_pos
def tags_with_text(xml, tags=None):
    """Recursively collect the elements of an XML tree that contain text.

    Args:
        xml: iterable XML element (children are visited in order).
        tags: accumulator list; a fresh list is created when omitted.

    Returns:
        list: elements whose ``text`` is not None, in document order.

    Raises:
        ValueError: if an element has neither text nor children.
    """
    if tags is None:
        tags = []
    for child in xml:
        if child.text is not None:
            tags.append(child)
        elif len(child):
            # Childless-text check failed but there are sub-elements:
            # descend into them, sharing the same accumulator.
            tags_with_text(child, tags)
        else:
            raise ValueError('Unknown XML structure: {}'.format(child))
    return tags
XML tree. |
def get_protein_substitution_language() -> ParserElement:
    """Build a protein substitution parser."""
    # reference amino acid, integer position, variant amino acid
    substitution = nest(
        amino_acid(PSUB_REFERENCE),
        ppc.integer(PSUB_POSITION),
        amino_acid(PSUB_VARIANT),
    )
    language = psub_tag + substitution
    language.setParseAction(_handle_psub)
    return language
def title(self):
    """Get title of this node.

    If an entry for this course is found in the configuration namemap it
    is used, otherwise the default value (``self._title``) from stud.ip
    is used. The result is passed through ``secure_filename``.
    """
    # Look up once — the original called c.namemap_lookup(self.id) twice
    # per access.
    mapped = c.namemap_lookup(self.id)
    return secure_filename(self._title if mapped is None else mapped)
value from stud.ip is used. |
def get_my_credits(self, access_token=None, user_id=None):
    """Get the credits by user to use in the QX Platform.

    Optionally refreshes the stored token / user id first.

    Returns:
        dict: the user's credit info (internal bookkeeping fields
        removed), or ``{}`` when no credit data is present.

    Raises:
        CredentialsError: if the stored credentials are invalid.
    """
    credential = self.req.credential
    if access_token:
        credential.set_token(access_token)
    if user_id:
        credential.set_user_id(user_id)
    if not self.check_credentials():
        raise CredentialsError('credentials invalid')
    user_data = self.req.get('/users/' + credential.get_user_id())
    if "credit" not in user_data:
        return {}
    credit = user_data["credit"]
    # Strip internal bookkeeping fields before returning.
    if "promotionalCodesUsed" in credit:
        del credit["promotionalCodesUsed"]
    if "lastRefill" in credit:
        del credit["lastRefill"]
    return credit
def residuals(self, pars, x, y, order):
    """Residual of Fourier Series.

    Parameters
    ----------
    pars : array_like
        Fourier series parameters.
    x : array_like
        An array of dates.
    y : array_like
        An array of true values to fit.
    order : int
        An order of Fourier Series.

    Returns
    -------
    array_like
        ``y`` minus the Fourier-series model evaluated at ``x``.
    """
    return y - self.fourier_series(pars, x, order)
def token(cls: Type[CLTVType], timestamp: int) -> CLTVType:
    """Return CLTV instance from timestamp.

    :param timestamp: Timestamp (stored on the instance as a string)
    :return: new CLTV instance
    """
    instance = cls()
    instance.timestamp = str(timestamp)
    return instance
def list_templates(self) -> List[str]:
    """Return a list of all available templates in the environment.

    This considers the loaders on the :attr:`app` and blueprints;
    duplicates across loaders are collapsed.
    """
    unique_names = {
        str(template)
        for loader in self._loaders()
        for template in loader.list_templates()
    }
    return list(unique_names)
def read_bytes(self, start_position: int, size: int) -> bytes:
    """Read ``size`` bytes from memory starting at ``start_position``.

    Returns a fresh ``bytes`` instance (a copy, not a view into the
    underlying buffer).
    """
    return bytes(self._bytes[start_position:start_position + size])
def fillna(self, value):
    """Returns Series with missing values replaced with value.

    Parameters
    ----------
    value : {int, float, bytes, bool}
        Scalar value to replace missing values with.

    Returns
    -------
    Series
        With missing values replaced.

    Raises
    ------
    TypeError
        If ``value`` is not a scalar.
    """
    if not is_scalar(value):
        raise TypeError('Value to replace with is not a valid scalar')
    # Replace the type-specific missing-data sentinel with `value` in
    # the lazy weld expression.
    replaced_expr = weld_replace(
        self.weld_expr,
        self.weld_type,
        default_missing_data_literal(self.weld_type),
        value,
    )
    return Series(replaced_expr, self.index, self.dtype, self.name)
def point_dist2(p1, p2):
    """Compute the square of the euclidian distance between two 3D points.

    Args:
        p1, p2: indexable objects with indices 0, 1, 2 corresponding
            to 3D cartesian coordinates.

    Returns:
        The square of the euclidian distance between the points.
    """
    diff = vector(p1, p2)
    # |diff|^2 via the dot product of the difference vector with itself.
    return np.dot(diff, diff)
async def delete_pattern(self, pattern, count=None):
    """Delete cache keys matching ``pattern`` from redis.

    Iterates the keyspace with SCAN (``count`` is a hint for keys
    examined per round) and deletes whatever each round returns.

    Returns:
        int: total number of keys deleted.
    """
    cursor = '0'
    count_deleted = 0
    while cursor != 0:
        cursor, identities = await self.client.scan(
            cursor=cursor, match=pattern, count=count
        )
        # A SCAN round may return no keys; DEL with zero arguments is a
        # redis error, so skip empty rounds (original crashed here).
        if identities:
            count_deleted += await self.client.delete(*identities)
    return count_deleted
def do_phonefy(self, query, **kwargs):
    """Verifying a phonefy query in this platform.

    This might be redefined in any class inheriting from Platform.

    Args:
    -----
        query: The element to be searched.

    Return:
    -------
        A list of i3visio entity dicts to be appended.
    """
    results = []
    test = self.check_phonefy(query, kwargs)
    if test:
        r = {
            "type": "i3visio.phone",
            "value": self.platformName + " - " + query,
            "attributes": []
        }
        try:
            r["attributes"].append({
                "type": "i3visio.uri",
                "value": self.createURL(query, mode="phonefy"),
                "attributes": []
            })
        except Exception:
            # URL creation is best-effort; a failure should not abort
            # the lookup. (Narrowed from a bare `except:` that would
            # also have swallowed KeyboardInterrupt/SystemExit.)
            pass
        r["attributes"].append({
            "type": "i3visio.platform",
            "value": self.platformName,
            "attributes": []
        })
        r["attributes"] += self.process_phonefy(test)
        results.append(r)
    return results
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.