code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def evaluate_at(self, eval_at, testcases, mode=None):
self.eval_at = eval_at
self.log.eval_at = eval_at
if mode is None:
if self.context_mode is None or (self.context_mode.has_key('choose_m') and self.context_mode['choose_m']):
mode = 'inverse'
else:
... | Sets the evaluation interation indices.
:param list eval_at: iteration indices where an evaluation should be performed
:param numpy.array testcases: testcases used for evaluation |
def relpath(self, path, start=None):
if not path:
raise ValueError("no path specified")
path = make_string_path(path)
if start is not None:
start = make_string_path(start)
else:
start = self.filesystem.cwd
if self.filesystem.alternative_path_se... | We mostly rely on the native implementation and adapt the
path separator. |
def _parse_mods(mods):
    """Parse modules.

    A comma-separated string is split into a list of stripped, non-empty
    module names; any other value is returned unchanged.
    """
    if not isinstance(mods, six.string_types):
        return mods
    return [part.strip() for part in mods.split(',') if part.strip()]
def _handle_requests_params(self, kwargs):
requests_params = kwargs.pop('requests_params', {})
for param in requests_params:
if param in kwargs:
error_message = 'Requests Parameter %r collides with a load'\
' parameter of the same name.' % param
... | Validate parameters that will be passed to the requests verbs.
This method validates that there is no conflict in the names of the
requests_params passed to the function and the other kwargs. It also
ensures that the required request parameters for the object are
added to the request p... |
def record(self):
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Timestamp not initialized')
tmp = ((1 << 16) - 1) & self.tz
newtz = tmp & 0xff
newtimetype = ((tmp >> 8) & 0x0f) | (self.timetype << 4)
return struct.pack(self.FMT, newtz, new... | A method to generate the string representing this UDF Timestamp.
Parameters:
None.
Returns:
A string representing this UDF Timestamp. |
def _remove_overlaps(in_file, out_dir, data):
out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file)))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
with open(in_file) as in_handle:
... | Remove regions that overlap with next region, these result in issues with PureCN. |
def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Compare the value monitored to its best and maybe reduce lr."
current = self.get_monitor_value()
if current is None: return
if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0
else:
... | Compare the value monitored to its best and maybe reduce lr. |
def parse_dsn(dsn):
parsed_dsn = urlparse(dsn)
parsed_path = parse_path(parsed_dsn.path)
return {
'scheme': parsed_dsn.scheme,
'sender': parsed_dsn.username,
'token': parsed_dsn.password,
'domain': parsed_dsn.hostname,
'port': parsed_dsn.port or 80,
'version':... | Parse dsn string. |
def _key_to_address(key):
key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
key_parts.extend([''] * (_MAX_KEY_PARTS - len(key_parts)))
return SETTINGS_NAMESPACE + ''.join(_short_hash(x) for x in key_parts) | Creates the state address for a given setting key. |
def flightmode_colours():
from MAVProxy.modules.lib.grapher import flightmode_colours
mapping = {}
idx = 0
for (mode,t0,t1) in flightmodes:
if not mode in mapping:
mapping[mode] = flightmode_colours[idx]
idx += 1
if idx >= len(flightmode_colours):
... | return mapping of flight mode to colours |
def import_task(self, img, cont, img_format=None, img_name=None):
    """Create a task to import ``img`` from the swift container ``cont``.

    :param img: the image to import
    :param cont: name of the swift container holding the object
    :param img_format: image format; None lets the service pick its default
    :param img_name: name for the new image; None presumably keeps the
        container object's name — confirm against the tasks API
    :return: the created task object from the tasks manager
    """
    return self._tasks_manager.create("import", img=img, cont=cont,
                                      img_format=img_format, img_name=img_name)
named in the 'cont' parameter. The new image will be named the same as
the object in the container unless you specify a value for the
'img_name' parameter.
By default it is assumed that the image is in 'vhd' format; i... |
def find_user(self, username=None, email=None):
if username:
return (
self.get_session.query(self.user_model)
.filter(func.lower(self.user_model.username) == func.lower(username))
.first()
)
elif email:
return (
... | Finds user by username or email |
def get(self, section, option, default=None):
    """Return the stored value for ``option`` in ``section``.

    :param section: section name.
    :param option: option name within the section.
    :param default: value to return when the option is missing; when left
        as ``None``, a missing option raises ``NoOptionError`` instead.
        NOTE(review): ``None`` doubles as the "no default" sentinel, so an
        explicit ``default=None`` cannot be distinguished from omitting it.
    :return: the option entry's ``'value'`` field (``None`` if unset).
    :raises NoSectionError: if the section does not exist.
    :raises NoOptionError: if the option is missing and no default given.
    """
    # Guard clause first: a missing section is always an error.
    if not self.has_section(section):
        raise NoSectionError(section)
    try:
        return self.config[section][option].get('value', None)
    except KeyError:
        # Fix: the original used `default == None`, which invokes __eq__
        # on arbitrary default objects; identity test is correct here.
        if default is None:
            raise NoOptionError(option)
        return default
def adapt_files(solver):
print("adapting {0}'s files".format(solver))
root = os.path.join('solvers', solver)
for arch in to_extract[solver]:
arch = os.path.join(root, arch)
extract_archive(arch, solver, put_inside=True)
for fnames in to_move[solver]:
old = os.path.join(root, fnam... | Rename and remove files whenever necessary. |
def fillna(self, value=None, method=None, limit=None):
if ((method is None and value is None) or
(method is not None and value is not None)):
raise ValueError("Must specify one of 'method' or 'value'.")
elif method is not None:
msg = "fillna with 'method' requires... | Fill missing values with `value`.
Parameters
----------
value : scalar, optional
method : str, optional
.. warning::
Using 'method' will result in high memory use,
as all `fill_value` methods will be converted to
an in-memory nd... |
def _get_lrs(self, indices):
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_... | Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices. |
def huffman_encode(cls, s):
i = 0
ibl = 0
for c in s:
val, bl = cls._huffman_encode_char(c)
i = (i << bl) + val
ibl += bl
padlen = 8 - (ibl % 8)
if padlen != 8:
val, bl = cls._huffman_encode_char(EOS())
i = (i << padlen)... | huffman_encode returns the bitstring and the bitlength of the
bitstring representing the string provided as a parameter
@param str s: the string to encode
@return (int, int): the bitstring of s and its bitlength
@raise AssertionError |
def _call(self, x, out=None):
if out is None:
out = x[self.index].copy()
else:
out.assign(x[self.index])
return out | Project ``x`` onto the subspace. |
def setup_prj_page(self, ):
self.prj_seq_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.prj_atype_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
self.prj_dep_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)... | Create and set the model on the project page
:returns: None
:rtype: None
:raises: None |
def to_dict(self):
    """Return a dict representation of KnwKB.

    For dynamic knowledge bases (``kbtype == 'd'``) the definition's own
    dict representation is merged into the result.
    """
    result = {
        'id': self.id,
        'name': self.name,
        'description': self.description,
        'kbtype': self.kbtype,
    }
    if self.kbtype == 'd':
        extra = self.kbdefs.to_dict() if self.kbdefs else {}
        result.update(extra or {})
    return result
def delete_insight(self, project_key, insight_id):
projectOwner, projectId = parse_dataset_key(project_key)
try:
self._insights_api.delete_insight(projectOwner,
projectId,
insight_id)
except _... | Delete an existing insight.
:params project_key: Project identifier, in the form of
projectOwner/projectId
:type project_key: str
:params insight_id: Insight unique id
:type insight_id: str
:raises RestApiException: If a server error occurs
Examples
----... |
def dump(obj, fp):
    """Serialize an object representing an ARFF document to a file-like object.

    Rows are newline-separated with no trailing newline after the last row.

    :param obj: a dictionary describing the ARFF document.
    :param fp: a file-like object opened for text writing.
    :return: ``fp``.
    """
    encoder = ArffEncoder()
    # Fix: the original primed the loop with next(generator), which raised
    # StopIteration for an empty encoding; write separators lazily instead.
    first = True
    for row in encoder.iter_encode(obj):
        if not first:
            fp.write(u'\n')
        fp.write(row)
        first = False
    return fp
object.
:param obj: a dictionary.
:param fp: a file-like object. |
def power(base, exp):
    """Element-wise ``base ** exp`` with broadcasting.

    Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.

    :param base: first operand (NDArray or scalar)
    :param exp: second operand (NDArray or scalar)
    :return: result of the broadcast power
    """
    # _ufunc_helper presumably selects among the four callables based on
    # whether each operand is an NDArray or a scalar — confirm against its
    # definition.
    return _ufunc_helper(
        base,
        exp,
        op.broadcast_power,
        operator.pow,
        _internal._power_scalar,
        _internal._rpower_scalar)
with broadcasting.
Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the ar... |
def gradient(poly):
    """Gradient of a polynomial.

    Args:
        poly (Poly): polynomial to take the gradient of.

    Returns:
        (Poly): the resulting gradient.
    """
    # Differentiate against the first-order basis in poly.dim variables.
    return differential(poly, chaospy.poly.collection.basis(1, 1, poly.dim))
Args:
poly (Poly) : polynomial to take gradient of.
Returns:
(Poly) : The resulting gradient.
Examples:
>>> q0, q1, q2 = chaospy.variable(3)
>>> poly = 2*q0 + q1*q2
>>> print(chaospy.gradient(poly))
[2, q2, q1] |
async def get_alarms():
    """Get alarms and timers from GH.

    Opens a throwaway aiohttp session, queries the device at the
    module-level IPADDRESS using the module-level LOOP, and prints the
    result to stdout.
    """
    async with aiohttp.ClientSession() as session:
        ghlocalapi = Alarms(LOOP, session, IPADDRESS)
        await ghlocalapi.get_alarms()
        print("Alarms:", ghlocalapi.alarms)
def winddir_text(pts):
"Convert wind direction from 0..15 to compass point text"
global _winddir_text_array
if pts is None:
return None
if not isinstance(pts, int):
pts = int(pts + 0.5) % 16
if not _winddir_text_array:
_ = pywws.localisation.translation.ugettext
_wind... | Convert wind direction from 0..15 to compass point text |
def _format_metric_name(self, m_name, cfunc):
try:
aggr = CFUNC_TO_AGGR[cfunc]
except KeyError:
aggr = cfunc.lower()
try:
m_name = CACTI_TO_DD[m_name]
if aggr != 'avg':
m_name += '.{}'.format(aggr)
return m_name
... | Format a cacti metric name into a Datadog-friendly name |
def fit_gaussian(x, y, yerr, p0):
    """Fit a Gaussian to the data.

    :param x: abscissa values
    :param y: ordinate values
    :param yerr: uncertainties on ``y``, passed as ``sigma`` to curve_fit
    :param p0: initial parameter guess
    :return: ``(popt, pcov)`` from curve_fit, or the sentinel
        ``([0], [0])`` when the fit fails to converge
    """
    try:
        popt, pcov = curve_fit(gaussian, x, y, sigma=yerr, p0=p0, absolute_sigma=True)
    except RuntimeError:
        # curve_fit raises RuntimeError on non-convergence; callers get
        # sentinel lists instead of an exception.
        return [0],[0]
    return popt, pcov
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]:
f1_scores = []
for gold_index, gold_item in enumerate(gold):
max_f1 = 0.0
max_index = None
best_alignment: Tuple[Set[str], Set[str]] = (set(), set())
if predicted:
for pred_index, pred_it... | Takes gold and predicted answer sets and first finds a greedy 1-1 alignment
between them and gets maximum metric values over all the answers |
def _on_report(_loop, adapter, conn_id, report):
conn_string = None
if conn_id is not None:
conn_string = adapter._get_property(conn_id, 'connection_string')
if isinstance(report, BroadcastReport):
adapter.notify_event_nowait(conn_string, 'broadcast', report)
elif conn_string is not None... | Callback when a report is received. |
def calc_progress(self, completed_count, total_count):
self.logger.debug(
"calc_progress(%s, %s)",
completed_count,
total_count,
)
current_time = time.time()
time_spent = current_time - self.start_time
self.logger.debug("Progress time spent: %s... | Calculate the percentage progress and estimated remaining time based on
the current number of items completed of the total.
Returns a tuple of ``(percentage_complete, seconds_remaining)``. |
def createService(self, createServiceParameter,
description=None,
tags="Feature Service",
snippet=None):
url = "%s/createService" % self.location
val = createServiceParameter.value
params = {
"f" : "json",
... | The Create Service operation allows users to create a hosted
feature service. You can use the API to create an empty hosted
feaure service from feature service metadata JSON.
Inputs:
createServiceParameter - create service object |
def _get_best_prediction(self, record, train=True):
if not self.trees:
return
best = (+1e999999, None)
for tree in self.trees:
best = min(best, (tree.mae.mean, tree))
_, best_tree = best
prediction, tree_mae = best_tree.predict(record, train=train)
... | Gets the prediction from the tree with the lowest mean absolute error. |
def check_in(choices, **params):
    """Check that every named parameter value is one of *choices*.

    Parameters
    ----------
    choices : array-like, accepted values
    params : named arguments to be validated

    Raises
    ------
    ValueError : if any parameter value is not in *choices*
    """
    for name in params:
        if params[name] not in choices:
            raise ValueError(
                "{} value {} not recognized. Choose from {}".format(
                    name, params[name], choices))
Parameters
----------
choices : array-like, accepted values
params : object
Named arguments, parameters to be checked
Raises
------
ValueError : unacceptable choice of parameters |
def write_frames(self, frames_out):
    """Write multiple pamqp frames from the current channel.

    :param list frames_out: list of pamqp frames to write.
    :return: None
    """
    # Surface any previously recorded channel errors before writing.
    self.check_for_errors()
    self._connection.write_frames(self.channel_id, frames_out)
:param list frames_out: A list of pamqp frames.
:return: |
def connections(self):
    """Return the names of all loaded connections as a list.

    Section names are stringified with any 'connection:' marker removed.
    """
    return [str(name).replace('connection:', '') for name in self.sections()]
def circuit_to_pyquil(circuit: Circuit) -> pyquil.Program:
prog = pyquil.Program()
for elem in circuit.elements:
if isinstance(elem, Gate) and elem.name in QUIL_GATES:
params = list(elem.params.values()) if elem.params else []
prog.gate(elem.name, params, elem.qubits)
eli... | Convert a QuantumFlow circuit to a pyQuil program |
def getBounds(self, tzinfo=None):
if self.resolution >= datetime.timedelta(days=1) \
and tzinfo is not None:
time = self._time.replace(tzinfo=tzinfo)
else:
time = self._time
return (
min(self.fromDatetime(time), self.fromDatetime(self._time)),
... | Return a pair describing the bounds of self.
This returns a pair (min, max) of Time instances. It is not quite the
same as (self, self + self.resolution). This is because timezones are
insignificant for instances with a resolution greater or equal to 1
day.
To illustrate the pr... |
def Analyze(self, hashes):
    """Look up hashes in Viper using the Viper HTTP API.

    Args:
      hashes (list[str]): hashes to look up.

    Returns:
      list[HashAnalysis]: one analysis per digest, in input order.
    """
    return [
        interface.HashAnalysis(digest, self._QueryHash(digest))
        for digest in hashes]
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: hash analysis.
Raises:
RuntimeError: If no host has been set for Viper. |
def check_url (aggregate):
    """Helper function waiting for URL queue.

    Repeatedly joins the aggregate's URL queue with a 30-second timeout;
    on each timeout, reaps stopped checker threads and gives up once no
    checker threads remain (the queue can then never drain).
    """
    while True:
        try:
            aggregate.urlqueue.join(timeout=30)
            break
        except urlqueue.Timeout:
            aggregate.remove_stopped_threads()
            if not any(aggregate.get_check_threads()):
                break
def save(self, t, base=0, heap=False):
c, k = _keytuple(t)
if k and k not in _typedefs:
_typedefs[k] = self
if c and c not in _typedefs:
if t.__module__ in _builtin_modules:
k = _kind_ignored
else:
k = self.k... | Save this typedef plus its class typedef. |
def cleanup_virtualenv(bare=True):
if not bare:
click.echo(crayons.red("Environment creation aborted."))
try:
vistir.path.rmtree(project.virtualenv_location)
except OSError as e:
click.echo(
"{0} An error occurred while removing {1}!".format(
crayons.red("... | Removes the virtualenv directory from the system. |
def calendar_dates(self, val):
    """Store ``val`` and refresh ``self._calendar_dates_g``.

    A non-empty frame is grouped by ('service_id', 'date'); otherwise the
    grouping cache is cleared.
    """
    self._calendar_dates = val
    if val is None or val.empty:
        self._calendar_dates_g = None
    else:
        self._calendar_dates_g = val.groupby(["service_id", "date"])
if ``self.calendar_dates`` changes. |
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, six.string_types):
... | Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().
Example config:
install_sources: |
- "ppa... |
def _check_certificate(self):
if (self.file_name.startswith("jdk-") and self.repo == "sbo" and
self.downder == "wget"):
certificate = (' --no-check-certificate --header="Cookie: '
'oraclelicense=accept-securebackup-cookie"')
self.msg.template(78... | Check for certificates options for wget |
def _filter_data(self, pattern):
removed = []
filtered = []
for param in self.data:
if not param[0].startswith(pattern):
filtered.append(param)
else:
removed.append(param)
self.data = filtered
return removed | Removes parameters which match the pattern from the config data |
def context(self, name):
data = self._context(name)
context = data.get("context")
if context:
return context
assert self.load_path
context_path = os.path.join(self.load_path, "contexts", "%s.rxt" % name)
context = ResolvedContext.load(context_path)
dat... | Get a context.
Args:
name (str): Name to store the context under.
Returns:
`ResolvedContext` object. |
def get_following(self, auth_secret):
result = {pytwis_constants.ERROR_KEY: None}
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
following_zset_key... | Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
... |
def get_copy_folder_location():
copy_settings_path = 'Library/Application Support/Copy Agent/config.db'
copy_home = None
copy_settings = os.path.join(os.environ['HOME'], copy_settings_path)
if os.path.isfile(copy_settings):
database = sqlite3.connect(copy_settings)
if database:
... | Try to locate the Copy folder.
Returns:
(str) Full path to the current Copy folder |
def iq_handler(type_, payload_cls, *, with_send_reply=False):
if (not hasattr(payload_cls, "TAG") or
(aioxmpp.IQ.CHILD_MAP.get(payload_cls.TAG) is not
aioxmpp.IQ.payload.xq_descriptor) or
payload_cls not in aioxmpp.IQ.payload._classes):
raise ValueError(
"{!r... | Register the decorated function or coroutine function as IQ request
handler.
:param type_: IQ type to listen for
:type type_: :class:`~.IQType`
:param payload_cls: Payload XSO class to listen for
:type payload_cls: :class:`~.XSO` subclass
:param with_send_reply: Whether to pass a function to se... |
def get_splitext_basename(path):
    """Gets the basename of a path without its extension.

    Usage::

        >>> get_splitext_basename("/Users/JohnDoe/Documents/Test.txt")
        u'Test'

    :param path: Path to extract the basename without extension.
    :type path: unicode
    :return: Splitext basename.
    :rtype: unicode
    """
    base = os.path.basename(os.path.normpath(path))
    basename = foundations.common.get_first_item(os.path.splitext(base))
    LOGGER.debug("> Splitext basename: '{0}'.".format(basename))
    return basename
Usage::
>>> get_splitext_basename("/Users/JohnDoe/Documents/Test.txt")
u'Test'
:param path: Path to extract the basename without extension.
:type path: unicode
:return: Splitext basename.
:rtype: unicode |
def as_statements(lines: Iterator[str]) -> Iterator[str]:
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield ' '.join(parts)
p... | Create an iterator that transforms lines into sql statements.
Statements within the lines must end with ";"
The last statement will be included even if it does not end in ';'
>>> list(as_statements(['select * from', '-- comments are filtered', 't;']))
['select * from t']
>>> list(as_statements(['... |
def retrieve(customer_id):
    """Retrieve a customer from its id.

    :param customer_id: The customer id
    :type customer_id: string
    :return: The customer resource
    :rtype: resources.Customer
    """
    http_client = HttpClient()
    # The second tuple element (raw response metadata) is ignored here.
    response, __ = http_client.get(routes.url(routes.CUSTOMER_RESOURCE, resource_id=customer_id))
    return resources.Customer(**response)
:param customer_id: The customer id
:type customer_id: string
:return: The customer resource
:rtype: resources.Customer |
def read(self, size):
    """Read raw bytes from the instrument.

    Keeps reading RECV_CHUNK-sized pieces until at least ``size`` bytes
    have arrived, so the result may be longer than ``size``.

    :param size: minimum amount of bytes to read
    :type size: integer
    :return: received bytes
    :return type: bytes
    """
    raw_read = super(USBRawDevice, self).read
    received = bytearray()
    while not len(received) >= size:
        resp = raw_read(self.RECV_CHUNK)
        received.extend(resp)
    return bytes(received)
:param size: amount of bytes to be sent to the instrument
:type size: integer
:return: received bytes
:return type: bytes |
def _start_thread(self):
    """Start an enqueueing thread.

    Creates a fresh stop Event (so callers can signal shutdown) and a
    Thread running ``self._enqueue_batches`` with that event.
    """
    self._stopping_event = Event()
    self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,))
    self._enqueueing_thread.start()
def _createEmptyJobGraphForJob(self, jobStore, command=None, predecessorNumber=0):
    """Create an empty job for the job.

    Adopts the job store's config onto this job, then creates a job node
    for it in the store.

    :param jobStore: job store whose config is adopted and where the node
        is created
    :param command: optional command for the node
    :param predecessorNumber: number of predecessors of the new node
    :return: the created job node
    """
    self._config = jobStore.config
    return jobStore.create(JobNode.fromJob(self, command=command,
                                           predecessorNumber=predecessorNumber))
def docsfor(self, rel):
    """Obtains the documentation for a link relation. Opens in a webbrowser
    window.

    CURIE-style relations ('prefix:rel') with a known prefix are expanded
    via the registered URI template; anything else is opened as-is.
    """
    prefix, _rel = rel.split(':')
    if prefix in self.curies:
        doc_url = uritemplate.expand(self.curies[prefix], {'rel': _rel})
    else:
        doc_url = rel
    print('opening', doc_url)
    webbrowser.open(doc_url)
window |
def search_suggestion(self, query):
    """Get search query suggestions for query.

    Parameters:
        query (str): Search text.

    Returns:
        list: Suggested query strings.
    """
    response = self._call(mc_calls.QuerySuggestion, query)
    suggestions = response.body.get('suggested_queries', [])
    return [item['suggestion_string'] for item in suggestions]
Parameters:
query (str): Search text.
Returns:
list: Suggested query strings. |
def remove(self, oid):
    """Remove a faked resource from this manager.

    Parameters:

      oid (string):
        The object ID of the resource (e.g. value of the 'object-uri'
        property).
    """
    resource = self._resources.pop(oid)
    del self._hmc.all_resources[resource.uri]
Parameters:
oid (string):
The object ID of the resource (e.g. value of the 'object-uri'
property). |
def _handle_error(self, data, params):
    """Raise SabnzbdApiException for an error response from the SABnzbd API."""
    message = data.get('error', 'API call failed')
    raise SabnzbdApiException(message, mode=params.get('mode'))
def _setup_xauth(self):
handle, filename = tempfile.mkstemp(prefix='PyVirtualDisplay.',
suffix='.Xauthority')
self._xauth_filename = filename
os.close(handle)
self._old_xauth = {}
self._old_xauth['AUTHFILE'] = os.getenv('AUTHFILE')
... | Set up the Xauthority file and the XAUTHORITY environment variable. |
def get_qemu_info(path, backing_chain=False, fail_on_error=True):
cmd = ['qemu-img', 'info', '--output=json', path]
if backing_chain:
cmd.insert(-1, '--backing-chain')
result = run_command_with_validation(
cmd, fail_on_error, msg='Failed to get info for {}'.format(path)
)
return json... | Get info on a given qemu disk
Args:
path(str): Path to the required disk
backing_chain(boo): if true, include also info about
the image predecessors.
Return:
object: if backing_chain == True then a list of dicts else a dict |
def copy(self):
if self._global_condition is not None:
raise SimStateError("global condition was not cleared before state.copy().")
c_plugins = self._copy_plugins()
state = SimState(project=self.project, arch=self.arch, plugins=c_plugins, options=self.options.copy(),
... | Returns a copy of the state. |
def _match_exists(self, searchable):
    """Make sure the searchable description doesn't already exist.

    Returns the position of an existing match, or False when absent.
    NOTE(review): a match at a falsy position (e.g. 0) would be
    indistinguishable from "not found" — confirm positions start at 1.
    Uses dict.iteritems(), so this is Python 2 code.
    """
    position_searchable = self.get_position_searchable()
    for pos,val in position_searchable.iteritems():
        if val == searchable:
            return pos
    return False
def extract_objects(self, fname, type_filter=None):
objects = []
if fname in self.object_cache:
objects = self.object_cache[fname]
else:
with io.open(fname, 'rt', encoding='utf-8') as fh:
text = fh.read()
objects = parse_verilog(text)
self.object_cache[fname] = objects
... | Extract objects from a source file
Args:
fname(str): Name of file to read from
type_filter (class, optional): Object class to filter results
Returns:
List of objects extracted from the file. |
def get_synth_input_fn(height, width, num_channels, num_classes):
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
images = tf.zeros((batch_size, height, width, num_channels), tf.float32)
labels = tf.zeros((batch_size, num_classes), tf.int32)
return tf.data.Dataset.from_tensors((images, l... | Returns an input function that returns a dataset with zeroes.
This is useful in debugging input pipeline performance, as it removes all
elements of file reading and image preprocessing.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used t... |
def bootstrap(score_objs, n_boot=1000):
    """Bootstrap-resample score objects and aggregate each resample.

    Args:
        score_objs: list of objects supporting addition (summed per sample).
        n_boot (int): number of bootstrap resamplings.

    Returns:
        numpy array of n_boot aggregated samples.
    """
    samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
    return np.sum(samples, axis=1)
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
n_boot (int): ... |
def _get_calling_module(self):
    """Get the last module in the call stack that is not this module, or
    ``None`` if the call originated from this module.

    Walks ``inspect.stack()`` outward and returns the first resolvable
    module whose name differs from this module's ``__name__``.
    """
    for frame in inspect.stack():
        mod = inspect.getmodule(frame[0])
        logger.debug(f'calling module: {mod}')
        if mod is not None:
            mod_name = mod.__name__
            if mod_name != __name__:
                return mod
the call originated from this module. |
def lookup(self, req, parent, name):
    """Look up a directory entry by name and get its attributes.

    Valid replies: reply_entry, reply_err.

    This default implementation always answers ENOENT (entry not found);
    subclasses are expected to override it.
    """
    self.reply_err(req, errno.ENOENT)
Valid replies:
reply_entry
reply_err |
def get_comment_create_data(self):
user_model = get_user_model()
return dict(
content_type=ContentType.objects.get_for_model(self.target_object),
object_pk=force_text(self.target_object._get_pk_val()),
text=self.cleaned_data["text"],
user=user_model.object... | Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model. |
def _process_state_change_events():
sdp_state = SDPState()
service_states = get_service_state_list()
state_events = sdp_state.get_event_queue(subscriber=__service_name__)
state_is_off = sdp_state.current_state == 'off'
counter = 0
while True:
time.sleep(0.1)
if not state_is_off:
... | Process events relating to the overall state of SDP.
This function starts and event loop which continually checks for
and responds to SDP state change events. |
def get_object(self, identifier, include_inactive=False):
query = {'_id': identifier}
if not include_inactive:
query['active'] = True
cursor = self.collection.find(query)
if cursor.count() > 0:
return self.from_dict(cursor.next())
else:
return ... | Retrieve object with given identifier from the database.
Parameters
----------
identifier : string
Unique object identifier
include_inactive : Boolean
Flag indicating whether inactive (i.e., deleted) object should be
included in the search (i.e., retu... |
def require(self, lock, guard_func, *guard_args, **guard_kw):
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
if self.granted(lock):
self.msg('Granted {}'.format(lock))
return f(*args, **kw)
if guard_func(*guar... | Decorate a function to be run only when a lock is acquired.
The lock is requested if the guard function returns True.
The decorated function is called if the lock has been granted. |
def get_resourcegroupitems(group_id, scenario_id, **kwargs):
    """Get all the items in a group, in a scenario. If group_id is None,
    return all items across all groups in the scenario.
    """
    rgi_qry = db.DBSession.query(ResourceGroupItem).\
        filter(ResourceGroupItem.scenario_id==scenario_id)
    # Only narrow to a single group when one was requested.
    if group_id is not None:
        rgi_qry = rgi_qry.filter(ResourceGroupItem.group_id==group_id)
    rgi = rgi_qry.all()
    return rgi
all items across all groups in the scenario. |
def publish_topology_opened(self, topology_id):
    """Publish a TopologyOpenedEvent to all topology listeners.

    :Parameters:
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyOpenedEvent(topology_id)
    for subscriber in self.__topology_listeners:
        try:
            subscriber.opened(event)
        except Exception:
            # One misbehaving listener must not prevent the others from
            # being notified.
            _handle_exception()
:Parameters:
- `topology_id`: A unique identifier for the topology this server
is a part of. |
def uncompress_file(inputfile, filename):
    """Uncompress this file using gzip and change its name.

    :param inputfile: File to uncompress
    :type inputfile: ``file`` like object
    :param filename: File's name
    :type filename: ``str``
    :returns: Tuple with file and new file's name
    :rtype: :class:`tempfile.SpooledTemporaryFile`, ``str``
    """
    zipfile = gzip.GzipFile(fileobj=inputfile, mode="rb")
    try:
        outputfile = create_spooled_temporary_file(fileobj=zipfile)
    finally:
        zipfile.close()
    new_basename = os.path.basename(filename)
    # Fix: str.replace('.gz', '') also dropped '.gz' occurring in the
    # middle of a name (e.g. 'a.gz.tar' -> 'a.tar'); strip only a
    # trailing suffix.
    if new_basename.endswith('.gz'):
        new_basename = new_basename[:-3]
    return outputfile, new_basename
:param inputfile: File to compress
:type inputfile: ``file`` like object
:param filename: File's name
:type filename: ``str``
:returns: Tuple with file and new file's name
:rtype: :class:`tempfile.SpooledTemporaryFile`, ``str`` |
def load_api_folder(api_folder_path):
api_definition_mapping = {}
api_items_mapping = load_folder_content(api_folder_path)
for api_file_path, api_items in api_items_mapping.items():
if isinstance(api_items, list):
for api_item in api_items:
key, api_dict = api_item.popite... | load api definitions from api folder.
Args:
api_folder_path (str): api files folder.
api file should be in the following format:
[
{
"api": {
"def": "api_login",
"request": {},
... |
def to_html(self, codebase):
body = ''
for section in ('params', 'options', 'exceptions'):
val = getattr(self, section)
if val:
body += '<h5>%s</h5>\n<dl class = "%s">%s</dl>' % (
printable(section), section,
'\n'.j... | Convert this `FunctionDoc` to HTML. |
def get_relations_cnt(self):
    """Return a Counter of relation occurrences across all extractions."""
    relations = (e.relation for es in self.exts for e in es)
    return cx.Counter(relations)
def timdef(action, item, lenout, value=None):
action = stypes.stringToCharP(action)
item = stypes.stringToCharP(item)
lenout = ctypes.c_int(lenout)
if value is None:
value = stypes.stringToCharP(lenout)
else:
value = stypes.stringToCharP(value)
libspice.timdef_c(action, item, len... | Set and retrieve the defaults associated with calendar input strings.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/timdef_c.html
:param action: the kind of action to take "SET" or "GET".
:type action: str
:param item: the default item of interest.
:type item: str
:param lenout: the ... |
def is_obsoleted_by_pid(pid):
    """Return True if ``pid`` is referenced in the obsoletedBy field of any object.

    This will return True even if the PID is referenced by an object that
    does not exist on the local MN, such as a replica that is in an
    incomplete chain.
    """
    return d1_gmn.app.models.ScienceObject.objects.filter(
        obsoleted_by__did=pid
    ).exists()
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain. |
def remove_prefix(self, prefix):
    """Remove network prefix.

    :param prefix: the prefix to remove
    """
    self._req('prefix remove %s' % prefix)
    # Give the device a moment to apply the change before re-registering
    # the network data.
    time.sleep(1)
    self._req('netdataregister')
def register_computer_view(request):
if request.method == "POST":
form = ComputerRegistrationForm(request.POST)
logger.debug(form)
if form.is_valid():
obj = form.save()
obj.user = request.user
obj.save()
messages.success(request, "Successfully ... | Register a computer. |
def random_possible_hands(self):
missing = self.missing_values()
other_dominoes = [d for p, h in enumerate(self.hands) for d in h if p != self.turn]
while True:
shuffled_dominoes = (d for d in random.sample(other_dominoes, len(other_dominoes)))
hands = []
for ... | Returns random possible hands for all players, given the information
known by the player whose turn it is. This information includes the
current player's hand, the sizes of the other players' hands, and the
moves played by every player, including the passes.
:return: a list of possible ... |
def number_of_interactions(self, u=None, v=None, t=None):
if t is None:
if u is None:
return int(self.size())
elif u is not None and v is not None:
if v in self._succ[u]:
return 1
else:
return 0
... | Return the number of interaction between two nodes at time t.
Parameters
----------
u, v : nodes, optional (default=all interaction)
If u and v are specified, return the number of interaction between
u and v. Otherwise return the total number of all interaction.
... |
def prepend_status(func):
    """Prepends the output of `func` with the status.

    Decorator for methods returning a string: when ``self.status`` is not
    StepResult.UNSET, the result is prefixed with "[STATUS_NAME]".
    """
    @ft.wraps(func)
    def wrapper(self, *args, **kwargs):
        res = func(self, *args, **kwargs)
        if self.status is not StepResult.UNSET:
            res = "[{status}]".format(status=self.status.name) + res
        return res
    return wrapper
def get_nodes():
cfg_file = "/etc/nago/nago.ini"
config = ConfigParser.ConfigParser()
config.read(cfg_file)
result = {}
for section in config.sections():
if section in ['main']:
continue
token = section
node = Node(token)
for key, value in config.items(tok... | Returns all nodes in a list of dicts format |
def get_time_remaining_estimate(self):
if IOPSGetTimeRemainingEstimate is not None:
estimate = float(IOPSGetTimeRemainingEstimate())
if estimate == -1.0:
return common.TIME_REMAINING_UNKNOWN
elif estimate == -2.0:
return common.TIME_REMAINING_U... | In Mac OS X 10.7+
Uses IOPSGetTimeRemainingEstimate to get time remaining estimate.
In Mac OS X 10.6
IOPSGetTimeRemainingEstimate is not available.
If providing power source type is AC, returns TIME_REMAINING_UNLIMITED.
Otherwise looks through all power sources returned by IOPSG... |
def get_buffer( self ):
last_byte = self.current_bits if (self.bits_remaining < 8) else None
result = self.output
if last_byte is not None:
result = bytearray( result )
result.append( last_byte )
if self.bytes_reverse:
return bytes( reversed( result ) ... | Return a byte string containing the target as currently written. |
def _init_count_terms(self, annots):
gonotindag = set()
gocnts = self.gocnts
go2obj = self.go2obj
for terms in annots.values():
allterms = set()
for go_id in terms:
goobj = go2obj.get(go_id, None)
if goobj is not None:
... | Fills in the counts and overall aspect counts. |
def set(self, name, value=True):
"set a feature value"
setattr(self, name.lower(), value) | set a feature value |
def posargs_limiter(func, *args):
posargs = inspect.getargspec(func)[0]
length = len(posargs)
if inspect.ismethod(func):
length -= 1
if length == 0:
return func()
return func(*args[0:length]) | takes a function a positional arguments and sends only the number of
positional arguments the function is expecting |
def ssh_known_hosts_lines(application_name, user=None):
known_hosts_list = []
with open(known_hosts(application_name, user)) as hosts:
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list) | Return contents of known_hosts file for given application.
:param application_name: Name of application eg nova-compute-something
:type application_name: str
:param user: The user that the ssh asserts are for.
:type user: str |
def multi_split(text, regexes):
def make_regex(s):
return re.compile(s) if isinstance(s, basestring) else s
regexes = [make_regex(r) for r in regexes]
piece_list = [text]
finished_pieces = set()
def apply_re(regex, piece_list):
for piece in piece_list:
if piece in finishe... | Split the text by the given regexes, in priority order.
Make sure that the regex is parenthesized so that matches are returned in
re.split().
Splitting on a single regex works like normal split.
>>> '|'.join(multi_split('one two three', [r'\w+']))
'one| |two| |three'
Splitting on digits first... |
def normalize(path_name, override=None):
identity = identify(path_name, override=override)
new_path_name = os.path.normpath(os.path.expanduser(path_name))
return new_path_name, identity | Prepares a path name to be worked with. Path name must not be empty. This
function will return the 'normpath'ed path and the identity of the path.
This function takes an optional overriding argument for the identity.
ONLY PROVIDE OVERRIDE IF:
1) YOU AREWORKING WITH A FOLDER THAT HAS AN EXTENSION IN... |
def insert(self, resourcetype, source, insert_date=None):
caller = inspect.stack()[1][3]
if caller == 'transaction':
hhclass = 'Layer'
source = resourcetype
resourcetype = resourcetype.csw_schema
else:
hhclass = 'Service'
if resourcetyp... | Insert a record into the repository |
def fetch(self):
if self.data.type == self._manager.FOLDER_TYPE:
raise YagocdException("Can't fetch folder <{}>, only file!".format(self._path))
response = self._session.get(self.data.url)
return response.content | Method for getting artifact's content.
Could only be applicable for file type.
:return: content of the artifact. |
def get_table(ports):
table = PrettyTable(["Name", "Port", "Protocol", "Description"])
table.align["Name"] = "l"
table.align["Description"] = "l"
table.padding_width = 1
for port in ports:
table.add_row(port)
return table | This function returns a pretty table used to display the port results.
:param ports: list of found ports
:return: the table to display |
def login():
cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY']
redirect_url = create_cas_login_url(
current_app.config['CAS_SERVER'],
current_app.config['CAS_LOGIN_ROUTE'],
flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True))... | This route has two purposes. First, it is used by the user
to login. Second, it is used by the CAS to respond with the
`ticket` after the user logs in successfully.
When the user accesses this url, they are redirected to the CAS
to login. If the login was successful, the CAS will respond to this
ro... |
def change_default_radii(def_map):
s = current_system()
rep = current_representation()
rep.radii_state.default = [def_map[t] for t in s.type_array]
rep.radii_state.reset() | Change the default radii |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.