code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def referenced_vertices(self):
if len(self.entities) == 0:
return np.array([], dtype=np.int64)
referenced = np.concatenate([e.points for e in self.entities])
referenced = np.unique(referenced.astype(np.int64))
return referenced | Which vertices are referenced by an entity.
Returns
-----------
referenced_vertices: (n,) int, indexes of self.vertices |
def drop(self, labels, dim=None):
if utils.is_scalar(labels):
labels = [labels]
if dim is None:
return self._drop_vars(labels)
else:
try:
index = self.indexes[dim]
except KeyError:
raise ValueError(
... | Drop variables or index labels from this dataset.
Parameters
----------
labels : scalar or list of scalars
Name(s) of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is No... |
def _slice_bam(in_bam, region, tmp_dir, config):
name_file = os.path.splitext(os.path.basename(in_bam))[0]
out_file = os.path.join(tmp_dir, os.path.join(tmp_dir, name_file + _to_str(region) + ".bam"))
sambamba = config_utils.get_program("sambamba", config)
region = _to_sambamba(region)
with file_tra... | Use sambamba to slice a bam region |
def trace_walker(module):
for name, function in inspect.getmembers(module, inspect.isfunction):
yield None, function
for name, cls in inspect.getmembers(module, inspect.isclass):
yield cls, None
for name, method in inspect.getmembers(cls, inspect.ismethod):
yield cls, method
... | Defines a generator used to walk into modules.
:param module: Module to walk.
:type module: ModuleType
:return: Class / Function / Method.
:rtype: object or object |
def resource_url(path):
url = QtCore.QUrl.fromLocalFile(path)
return str(url.toString()) | Get the a local filesystem url to a given resource.
.. versionadded:: 3.0
Note that in version 3.0 we removed the use of Qt Resource files in
favour of directly accessing on-disk resources.
:param path: Path to resource e.g. /home/timlinux/foo/bar.png
:type path: str
:return: A valid file ur... |
def _initialize_context(self, trace_header):
sampled = None
if not global_sdk_config.sdk_enabled():
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
... | Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments. |
def lst_avg(lst):
salt.utils.versions.warn_until(
'Neon',
'This results of this function are currently being rounded.'
'Beginning in the Salt Neon release, results will no longer be '
'rounded and this warning will be removed.',
stacklevel=3
)
if not isinstance(lst, c... | Returns the average value of a list.
.. code-block:: jinja
{% my_list = [1,2,3,4] -%}
{{ set my_list | avg }}
will be rendered as:
.. code-block:: yaml
2.5 |
def send_command(self, *args, **kwargs):
if len(args) >= 2:
expect_string = args[1]
else:
expect_string = kwargs.get("expect_string")
if expect_string is None:
expect_string = r"(OK|ERROR|Command not recognized\.)"
expect_string = self.... | Send command to network device retrieve output until router_prompt or expect_string
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined automatically.
command_string = command to execute
... |
def author_list(self):
author_list = [self.submitter] + \
[author for author in self.authors.all().exclude(pk=self.submitter.pk)]
return ",\n".join([author.get_full_name() for author in author_list]) | The list of authors als text, for admin submission list overview. |
def create_ellipse(width,height,angle):
angle = angle / 180.0 * np.pi
thetas = np.linspace(0,2*np.pi,200)
a = width / 2.0
b = height / 2.0
x = a*np.cos(thetas)*np.cos(angle) - b*np.sin(thetas)*np.sin(angle)
y = a*np.cos(thetas)*np.sin(angle) + b*np.sin(thetas)*np.cos(angle)
z = np.zeros(thet... | Create parametric ellipse from 200 points. |
def find_by_name(self, item_name, items_list, name_list=None):
if not name_list:
names = [item.name for item in items_list if item]
else:
names = name_list
if item_name in names:
ind = names.index(item_name)
return items_list[ind]
return Fa... | Return item from items_list with name item_name. |
def error(self, message, rofi_args=None, **kwargs):
rofi_args = rofi_args or []
args = ['rofi', '-e', message]
args.extend(self._common_args(allow_fullscreen=False, **kwargs))
args.extend(rofi_args)
self._run_blocking(args) | Show an error window.
This method blocks until the user presses a key.
Fullscreen mode is not supported for error windows, and if specified
will be ignored.
Parameters
----------
message: string
Error message to show. |
def close(self):
with self._lock:
for server in self._servers.values():
server.close()
self._description = self._description.reset()
self._update_servers()
self._opened = False
if self._publish_tp:
self._events.put((self._listen... | Clear pools and terminate monitors. Topology reopens on demand. |
def real_time_sequencing(self, availability, oauth, event, target_calendars=()):
args = {
'oauth': oauth,
'event': event,
'target_calendars': target_calendars
}
if availability:
options = {}
options['sequence'] = self.map_availability_s... | Generates an real time sequencing link to start the OAuth process with
an event to be automatically upserted
:param dict availability: - A dict describing the availability details for the event:
:sequence: An Array of dics representing sequences to find availability for
... |
def pause(msg="Press Enter to Continue..."):
print('\n' + Fore.YELLOW + msg + Fore.RESET, end='')
input() | press to continue |
def atlas_init(blockstack_opts, db, recover=False, port=None):
if port is None:
port = blockstack_opts['rpc_port']
atlas_state = None
if is_atlas_enabled(blockstack_opts):
atlas_seed_peers = filter( lambda x: len(x) > 0, blockstack_opts['atlas_seeds'].split(","))
atlas_blacklist = fi... | Start up atlas functionality |
def values(self):
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers] | Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list. |
def loadtxt_str(path:PathOrStr)->np.ndarray:
"Return `ndarray` of `str` of lines of text from `path`."
with open(path, 'r') as f: lines = f.readlines()
return np.array([l.strip() for l in lines]) | Return `ndarray` of `str` of lines of text from `path`. |
def open_download_stream(self, file_id):
gout = GridOut(self._collection, file_id)
gout._ensure_file()
return gout | Opens a Stream from which the application can read the contents of
the stored file specified by file_id.
For example::
my_db = MongoClient().test
fs = GridFSBucket(my_db)
# get _id of file to read.
file_id = fs.upload_from_stream("test_file", "data I want to sto... |
def from_pdb(cls, path, forcefield=None, loader=PDBFile, strict=True, **kwargs):
pdb = loader(path)
box = kwargs.pop('box', pdb.topology.getPeriodicBoxVectors())
positions = kwargs.pop('positions', pdb.positions)
velocities = kwargs.pop('velocities', getattr(pdb, 'velocities', None))
... | Loads topology, positions and, potentially, velocities and vectors,
from a PDB or PDBx file
Parameters
----------
path : str
Path to PDB/PDBx file
forcefields : list of str
Paths to FFXML and/or FRCMOD forcefields. REQUIRED.
Returns
-----... |
def serve(application, host='127.0.0.1', port=8080, threads=4, **kw):
serve_(application, host=host, port=int(port), threads=int(threads), **kw) | The recommended development HTTP server.
Note that this server performs additional buffering and will not honour chunked encoding breaks. |
def check_errors(self):
errors = ERROR_PATTTERN.findall(self.out)
if errors:
self.log.error('! Errors occurred:')
self.log.error('\n'.join(
[error.replace('\r', '').strip() for error
in chain(*errors) if error.strip()]
))
s... | Check if errors occured during a latex run by
scanning the output. |
def format_name(self, name, indent_size=4):
name_block = ''
if self.short_desc is None:
name_block += name + '\n'
else:
name_block += name + ': ' + self.short_desc + '\n'
if self.long_desc is not None:
name_block += self.wrap_lines(self.long_desc, 1, i... | Format the name of this verifier
The name will be formatted as:
<name>: <short description>
long description if one is given followed by \n
otherwise no long description
Args:
name (string): A name for this validator
indent_size (int)... |
def add_package_dependency(self, package_name, version):
if not PEP440_VERSION_PATTERN.match(version):
raise ValueError('Invalid Version: "{}"'.format(version))
self.dependencies.add(PackageDependency(package_name, version)) | Add a package to the list of dependencies.
:param package_name: The name of the package dependency
:type package_name: str
:param version: The (minimum) version of the package
:type version: str |
def get_info(self, symbol, as_of=None):
version = self._read_metadata(symbol, as_of=as_of, read_preference=None)
handler = self._read_handler(version, symbol)
if handler and hasattr(handler, 'get_info'):
return handler.get_info(version)
return {} | Reads and returns information about the data stored for symbol
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number... |
def add(self, name, value):
normalized_name = normalize_name(name, self._normalize_overrides)
self._map[normalized_name].append(value) | Append the name-value pair to the record. |
def make_pattern(self, pattern, listsep=','):
if self is Cardinality.one:
return pattern
elif self is Cardinality.zero_or_one:
return self.schema % pattern
else:
return self.schema % (pattern, listsep, pattern) | Make pattern for a data type with the specified cardinality.
.. code-block:: python
yes_no_pattern = r"yes|no"
many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern)
:param pattern: Regular expression for type (as string).
:param listsep: List separator f... |
def create(cls, name, dead_interval=40, hello_interval=10,
hello_interval_type='normal', dead_multiplier=1,
mtu_mismatch_detection=True, retransmit_interval=5,
router_priority=1, transmit_delay=1,
authentication_type=None, password=None,
key_cha... | Create custom OSPF interface settings profile
:param str name: name of interface settings
:param int dead_interval: in seconds
:param str hello_interval: in seconds
:param str hello_interval_type: \|normal\|fast_hello
:param int dead_multipler: fast hello packet multipler
... |
def _setup_source_and_destination(self):
super(FetchTransformSaveWithSeparateNewCrashSourceApp, self) \
._setup_source_and_destination()
if self.config.new_crash_source.new_crash_source_class:
self.new_crash_source = \
self.config.new_crash_source.new_crash_source... | use the base class to setup the source and destinations but add to
that setup the instantiation of the "new_crash_source" |
def unique_otuids(groups):
uniques = {key: set() for key in groups}
for i, group in enumerate(groups):
to_combine = groups.values()[:i]+groups.values()[i+1:]
combined = combine_sets(*to_combine)
uniques[group] = groups[group].difference(combined)
return uniques | Get unique OTUIDs of each category.
:type groups: Dict
:param groups: {Category name: OTUIDs in category}
:return type: dict
:return: Dict keyed on category name and unique OTUIDs as values. |
def _print_app(self, app, models):
self._print(self._app_start % app)
self._print_models(models)
self._print(self._app_end) | Print the models of app, showing them in a package. |
def load_json_from_file(file_path):
try:
with open(file_path) as f:
json_data = json.load(f)
except ValueError as e:
raise ValueError('Given file {} is not a valid JSON file: {}'.format(file_path, e))
else:
return json_data | Load schema from a JSON file |
def add_graph(self, run_key, device_name, graph_def, debug=False):
graph_dict = (self._run_key_to_debug_graphs if debug else
self._run_key_to_original_graphs)
if not run_key in graph_dict:
graph_dict[run_key] = dict()
graph_dict[run_key][tf.compat.as_str(device_name)] = (
deb... | Add a GraphDef.
Args:
run_key: A key for the run, containing information about the feeds,
fetches, and targets.
device_name: The name of the device that the `GraphDef` is for.
graph_def: An instance of the `GraphDef` proto.
debug: Whether `graph_def` consists of the debug ops. |
def set_fun_prop(f, k, v):
if not hasattr(f, _FUN_PROPS):
setattr(f, _FUN_PROPS, {})
if not isinstance(getattr(f, _FUN_PROPS), dict):
raise InternalError("Invalid properties dictionary for %s" % str(f))
getattr(f, _FUN_PROPS)[k] = v | Set the value of property `k` to be `v` in function `f`.
We define properties as annotations added to a function throughout
the process of defining a function for verification, e.g. the
argument types. This sets function `f`'s property named `k` to be
value `v`.
Users should never access this fun... |
def execute(self, *args, **kwargs):
try:
return self.client.execute(*args, **kwargs)
except requests.exceptions.HTTPError as err:
res = err.response
logger.error("%s response executing GraphQL." % res.status_code)
logger.error(res.text)
self.di... | Wrapper around execute that logs in cases of failure. |
def do_we_have_enough_cookies(cj, class_name):
domain = 'class.coursera.org'
path = "/" + class_name
return cj.get('csrf_token', domain=domain, path=path) is not None | Check whether we have all the required cookies
to authenticate on class.coursera.org. |
def load_cml(cml_filename):
parser = make_parser()
parser.setFeature(feature_namespaces, 0)
dh = CMLMoleculeLoader()
parser.setContentHandler(dh)
parser.parse(cml_filename)
return dh.molecules | Load the molecules from a CML file
Argument:
| ``cml_filename`` -- The filename of a CML file.
Returns a list of molecule objects with optional molecular graph
attribute and extra attributes. |
def generate_id(self, obj):
object_type = type(obj).__name__.lower()
return '{}_{}'.format(object_type, self.get_object_id(obj)) | Generate unique document id for ElasticSearch. |
def get(self, block=True, timeout=None):
return self._queue.get(block, timeout) | Get item from underlying queue. |
def get_cached_data(
step: 'projects.ProjectStep'
) -> typing.Union[None, STEP_DATA]:
cache_path = step.report.results_cache_path
if not os.path.exists(cache_path):
return None
out = create_data(step)
try:
with open(cache_path, 'r') as f:
cached_data = json.load(f)
... | Attempts to load and return the cached step data for the specified step. If
not cached data exists, or the cached data is corrupt, a None value is
returned instead.
:param step:
The step for which the cached data should be loaded
:return:
Either a step data structure containing the cac... |
def check(degree, knot_vector, num_ctrlpts):
try:
if knot_vector is None or len(knot_vector) == 0:
raise ValueError("Input knot vector cannot be empty")
except TypeError as e:
print("An error occurred: {}".format(e.args[-1]))
raise TypeError("Knot vector must be a list or tup... | Checks the validity of the input knot vector.
Please refer to The NURBS Book (2nd Edition), p.50 for details.
:param degree: degree of the curve or the surface
:type degree: int
:param knot_vector: knot vector to be checked
:type knot_vector: list, tuple
:param num_ctrlpts: number of control p... |
def _get_elements(mol, label):
elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
return elements | The the elements of the atoms in the specified order
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers. |
def kth_to_last(head, k):
if not (head or k > -1):
return False
p1 = head
p2 = head
for i in range(1, k+1):
if p1 is None:
raise IndexError
p1 = p1.next
while p1:
p1 = p1.next
p2 = p2.next
return p2 | This is an optimal method using iteration.
We move p1 k steps ahead into the list.
Then we move p1 and p2 together until p1 hits the end. |
def get_bool(self, key: str) -> Optional[bool]:
v = self.get(key)
if v is None:
return None
if v in ['true', 'True']:
return True
if v in ['false', 'False']:
return False
raise ConfigTypeError(self.full_key(key), v, 'bool') | Returns an optional configuration value, as a bool, by its key, or None if it doesn't exist.
If the configuration value isn't a legal boolean, this function will throw an error.
:param str key: The requested configuration key.
:return: The configuration key's value, or None if one does not exis... |
def applyReferrerVouchersTemporarily(sender,**kwargs):
if not getConstant('referrals__enableReferralProgram'):
return
logger.debug('Signal fired to temporarily apply referrer vouchers.')
reg = kwargs.pop('registration')
try:
c = Customer.objects.get(user__email=reg.email)
... | Unlike voucher codes which have to be manually supplied, referrer discounts are
automatically applied here, assuming that the referral program is enabled. |
def folder2ver(folder):
ver = folder.split('EnergyPlus')[-1]
ver = ver[1:]
splitapp = ver.split('-')
ver = '.'.join(splitapp)
return ver | get the version number from the E+ install folder |
def __connect(host, port, username, password, private_key):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if private_key is not None and password is not None:
private_key = paramiko.RSAKey.from_private_key_file(private_key, password)
eli... | Establish remote connection
:param host: Hostname or IP address to connect to
:param port: Port number to use for SSH
:param username: Username credentials for SSH access
:param password: Password credentials for SSH access (or private key passphrase)
:param private_key: Private... |
def get_file(self, index, doc_type, id=None):
data = self.get(index, doc_type, id)
return data['_name'], base64.standard_b64decode(data['content']) | Return the filename and memory data stream |
def save(self, specfiles=None, compress=True, path=None):
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error whil... | Writes the specified specfiles to ``siic`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``siic`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a... |
def _firmware_update(firmwarefile='', host='',
directory=''):
dest = os.path.join(directory, firmwarefile[7:])
__salt__['cp.get_file'](firmwarefile, dest)
username = __pillar__['proxy']['admin_user']
password = __pillar__['proxy']['admin_password']
__salt__['dracr.update_firmwar... | Update firmware for a single host |
def table(self):
if hasattr(self.data, 'table_on') and self.data.table_on:
assert_index_sane(self.data.table, len(self.song.tables))
return self.song.tables[self.data.table] | a ```pylsdj.Table``` referencing the instrument's table, or None
if the instrument doesn't have a table |
def die(self):
if self.process:
_log(self.logging,
'Stopping {0} server with PID: {1} running at {2}.'
.format(self.__class__.__name__, self.process.pid,
self.check_url))
self._kill() | Stops the server if it is running. |
def get_healthcheck(self, service_id, version_number, name):
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, name))
return FastlyHealthCheck(self, content) | Get the healthcheck for a particular service and version. |
def load_wikiqa():
dataset_path = _load('wikiqa')
data = _load_csv(dataset_path, 'data', set_index=True)
questions = _load_csv(dataset_path, 'questions', set_index=True)
sentences = _load_csv(dataset_path, 'sentences', set_index=True)
vocabulary = _load_csv(dataset_path, 'vocabulary', set_index=True... | A Challenge Dataset for Open-Domain Question Answering.
WikiQA dataset is a publicly available set of question and sentence (QS) pairs,
collected and annotated for research on open-domain question answering.
source: "Microsoft"
sourceURI: "https://www.microsoft.com/en-us/research/publication/wikiqa-a-... |
def loo_compare(psisloo1, psisloo2):
loores = psisloo1.pointwise.join(
psisloo2.pointwise,
lsuffix = '_m1',
rsuffix = '_m2')
loores['pw_diff'] = loores.pointwise_elpd_m2 - loores.pointwise_elpd_m1
sum_elpd_diff = loores.apply(numpy.sum).pw_diff
sd_elpd_diff = loores.apply(numpy.s... | Compares two models using pointwise approximate leave-one-out cross validation.
For the method to be valid, the two models should have been fit on the same input data.
Parameters
-------------------
psisloo1 : Psisloo object for model1
psisloo2 : Psisloo object for model2
Returns
---... |
def get_user(self, username=None):
username = username or self.username or ''
url = self.url('GET_USER', username=username)
response = self.dispatch('GET', url)
try:
return (response[0], response[1]['user'])
except TypeError:
pass
return response | Returns user informations.
If username is not defined, tries to return own informations. |
def _inner_default(x1, x2):
order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C'
if is_real_dtype(x1.dtype):
if x1.size > THRESHOLD_MEDIUM:
return np.tensordot(x1, x2, [range(x1.ndim)] * 2)
else:
return np.dot(x1.data.ravel(order),
... | Default Euclidean inner product implementation. |
def _make_default_header(self):
td_max = 0
for idx, tr in enumerate(self._tr_nodes):
td_count = len(tr.contents.filter_tags(matches=ftag('td')))
if td_count > td_max:
td_max = td_count
self._log('creating default header (%d columns)' % td_max)
retu... | Return a generic placeholder header based on the tables column count |
def evaluate_feature_performance(project, force=False):
if not force and not project.on_pr():
raise SkippedValidationTest('Not on PR')
out = project.build()
X_df, y, features = out['X_df'], out['y'], out['features']
proposed_feature = get_proposed_feature(project)
accepted_features = get_acc... | Evaluate feature performance |
def plot_string_match(sf,regex,field,**kwargs):
index,shape_records = string_match(sf,regex,field)
plot(shape_records,**kwargs) | Plot the geometry of a shapefile whose fields match a regular expression given
:param sf: shapefile
:type sf: shapefile object
:regex: regular expression to match
:type regex: string
:field: field number to be matched with the regex
:type field: integer |
def load_names(self):
self.all_male_first_names = load_csv_data('male-first-names.csv')
self.all_female_first_names = load_csv_data('female-first-names.csv')
self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv') | Loads a name database from package data
Uses data files sourced from
http://www.quietaffiliate.com/free-first-name-and-last-name-databases-csv-and-sql/ |
def cancel(self):
if (not self.cancelled) and (self._fn is not None):
self._cancelled = True
self._drop_fn() | Cancel the scheduled task. |
def Copy(self):
result = QueueManager(store=self.data_store, token=self.token)
result.prev_frozen_timestamps = self.prev_frozen_timestamps
result.frozen_timestamp = self.frozen_timestamp
return result | Return a copy of the queue manager.
Returns:
Copy of the QueueManager object.
NOTE: pending writes/deletions are not copied. On the other hand, if the
original object has a frozen timestamp, a copy will have it as well. |
def discard(sample, embedding):
unembeded = {}
for v, chain in iteritems(embedding):
vals = [sample[u] for u in chain]
if _all_equal(vals):
unembeded[v] = vals.pop()
else:
return
yield unembeded | Discards the sample if broken.
Args:
sample (dict): A sample of the form {v: val, ...} where v is
a variable in the target graph and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the targ... |
def create_connection(self, alias='default', **kwargs):
kwargs.setdefault('serializer', serializer)
conn = self._conns[alias] = Elasticsearch(**kwargs)
return conn | Construct an instance of ``elasticsearch.Elasticsearch`` and register
it under given alias. |
def sql_key(self, generation, sql, params, order, result_type,
using='default'):
suffix = self.keygen.gen_key(sql, params, order, result_type)
using = settings.DB_CACHE_KEYS[using]
return '%s_%s_query_%s.%s' % (self.prefix, using, generation, suffix) | Return the specific cache key for the sql query described by the
pieces of the query and the generation key. |
def get_single_outfile (directory, archive, extension=""):
outfile = os.path.join(directory, stripext(archive))
if os.path.exists(outfile + extension):
i = 1
newfile = "%s%d" % (outfile, i)
while os.path.exists(newfile + extension):
newfile = "%s%d" % (outfile, i)
... | Get output filename if archive is in a single file format like gzip. |
def relation_set(relation_id=None, relation_settings=None, **kwargs):
try:
if relation_id in relation_ids('cluster'):
return leader_set(settings=relation_settings, **kwargs)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_set(relation_... | Attempt to use leader-set if supported in the current version of Juju,
otherwise falls back on relation-set.
Note that we only attempt to use leader-set if the provided relation_id is
a peer relation id or no relation id is provided (in which case we assume
we are within the peer relation context). |
def seek_in_frame(self, pos, *args, **kwargs):
super().seek(self._total_offset + pos, *args, **kwargs) | Seeks relative to the total offset of the current contextual frames. |
def iter_parents(self, paths='', **kwargs):
skip = kwargs.get("skip", 1)
if skip == 0:
skip = 1
kwargs['skip'] = skip
return self.iter_items(self.repo, self, paths, **kwargs) | Iterate _all_ parents of this commit.
:param paths:
Optional path or list of paths limiting the Commits to those that
contain at least one of the paths
:param kwargs: All arguments allowed by git-rev-list
:return: Iterator yielding Commit objects which are parents of sel... |
def set_state(self, state):
for k, v in state.items():
setattr(self, k, v) | Set the view state.
The passed object is the persisted `self.state` bunch.
May be overriden. |
def separators(self, reordered = True):
if reordered:
return [list(self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k+1]]) for k in range(self.Nsn)]
else:
return [list(self.__p[self.snrowidx[self.sncolptr[k]+self.snptr[k+1]-self.snptr[k]:self.sncolptr[k... | Returns a list of separator sets |
def get_avatar_url(self, size=2):
hashbytes = self.get_ps('avatar_hash')
if hashbytes != "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000":
ahash = hexlify(hashbytes).decode('ascii')
else:
ahash = 'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'... | Get URL to avatar picture
:param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
:type size: :class:`int`
:return: url to avatar
:rtype: :class:`str` |
def _generate_input(options):
if options.input:
fp = open(options.input) if options.input != "-" else sys.stdin
for string in fp.readlines():
yield string
if options.strings:
for string in options.strings:
yield string | First send strings from any given file, one string per line, sends
any strings provided on the command line.
:param options: ArgumentParser or equivalent to provide
options.input and options.strings.
:return: string |
def store(self, thing):
to_store = {'field1': thing.field1,
'date_field': thing.date_field,
}
to_store['stuff'] = Binary(cPickle.dumps(thing.stuff))
self._arctic_lib.check_quota()
self._collection.insert_one(to_store) | Simple persistence method |
async def list(self, **params) -> Mapping:
response = await self.docker._query_json("images/json", "GET", params=params)
return response | List of images |
def list_sessions(self) -> List[Session]:
data = self._client.get("/sessions")
return [Session.from_json(item) for item in data["sessions"]] | List all the active sessions in Livy. |
def to_java_rdd(jsc, features, labels, batch_size):
data_sets = java_classes.ArrayList()
num_batches = int(len(features) / batch_size)
for i in range(num_batches):
xi = ndarray(features[:batch_size].copy())
yi = ndarray(labels[:batch_size].copy())
data_set = java_classes.DataSet(xi.a... | Convert numpy features and labels into a JavaRDD of
DL4J DataSet type.
:param jsc: JavaSparkContext from pyjnius
:param features: numpy array with features
:param labels: numpy array with labels:
:return: JavaRDD<DataSet> |
def template_delete(call=None, kwargs=None):
if call != 'function':
raise SaltCloudSystemExit(
'The template_delete function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
template_id = kwargs.get('template_id',... | Deletes the given template from OpenNebula. Either a name or a template_id must
be supplied.
.. versionadded:: 2016.3.0
name
The name of the template to delete. Can be used instead of ``template_id``.
template_id
The ID of the template to delete. Can be used instead of ``name``.
... |
def _node_is_match(qualified_name, package_names, fqn):
if len(qualified_name) == 1 and fqn[-1] == qualified_name[0]:
return True
if qualified_name[0] in package_names:
if is_selected_node(fqn, qualified_name):
return True
for package_name in package_names:
local_qualifie... | Determine if a qualfied name matches an fqn, given the set of package
names in the graph.
:param List[str] qualified_name: The components of the selector or node
name, split on '.'.
:param Set[str] package_names: The set of pacakge names in the graph.
:param List[str] fqn: The node's fully qual... |
def create_with_secret(self, name, secret, encryption):
try:
encryption = encryption or DEFAULT_ENCRYPTION
enc = ENCRYPTION_MAP[encryption]
except KeyError:
raise TypeError('encryption must be one of "cleartext", "md5"'
' or "sha512"')
... | Creates a new user on the local node
Args:
name (str): The name of the user to craete
secret (str): The secret (password) to assign to this user
encryption (str): Specifies how the secret is encoded. Valid
values are "cleartext", "md5", "sha512". The defa... |
def get_knowledge_category_metadata(self):
metadata = dict(self._mdata['knowledge_category'])
metadata.update({'existing_id_values': self._my_map['knowledgeCategoryId']})
return Metadata(**metadata) | Gets the metadata for a knowledge category.
return: (osid.Metadata) - metadata for the knowledge category
*compliance: mandatory -- This method must be implemented.* |
def instruction(self, val):
self._instruction = val
if isinstance(val, tuple):
if len(val) is 2:
self._action, self.command = val
else:
self._action, self.command, self.extra = val
else:
split = val.split(" ", 1)
if ... | Set the action and command from an instruction |
def set_basic(self, realm='authentication required'):
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self) | Clear the auth info and enable basic auth. |
def __do_filter_sub(self, scanline, result):
ai = 0
for i in range(self.fu, len(result)):
x = scanline[i]
a = scanline[ai]
result[i] = (x - a) & 0xff
ai += 1 | Sub filter. |
def prepare_sparse_params(self, param_rowids):
if not self._kvstore:
return
assert(isinstance(param_rowids, dict))
for param_name, rowids in param_rowids.items():
if isinstance(rowids, (tuple, list)):
rowids_1d = []
for r in rowids:
... | Prepares the module for processing a data batch by pulling row_sparse
parameters from kvstore to all devices based on rowids.
Parameters
----------
param_rowids : dict of str to NDArray of list of NDArrays |
def set_value(self, value):
if self.__is_value_array:
if len(value) == self.__report_count:
for index, item in enumerate(value):
self.__setitem__(index, item)
else:
raise ValueError("Value size should match report item size "\
... | Set usage value within report |
def _scheduled_check_for_summaries(self):
if self._analysis_process is None:
return
timed_out = time.time() - self._analyze_start_time > self.time_limit
if timed_out:
self._handle_results('Analysis timed out but managed\n'
' to get lower t... | Present the results if they have become available or timed out. |
def round_(values, decimals=None, width=0,
lfill=None, rfill=None, **kwargs):
if decimals is None:
decimals = hydpy.pub.options.reprdigits
with hydpy.pub.options.reprdigits(decimals):
if isinstance(values, abctools.IterableNonStringABC):
string = repr_values(values)
... | Prints values with a maximum number of digits in doctests.
See the documentation on function |repr| for more details. And
note thate the option keyword arguments are passed to the print function.
Usually one would apply function |round_| on a single or a vector
of numbers:
>>> from hydpy import ... |
def append(self, newconfig):
for attr_name in (
'title', 'body', 'author', 'date',
'strip', 'strip_id_or_class', 'strip_image_src',
'single_page_link', 'single_page_link_in_feed',
'next_page_link', 'http_header'
):
current_set = getattr(self, a... | Append another site config to current instance.
All ``newconfig`` attributes are appended one by one to ours.
Order matters, eg. current instance values will come first when
merging.
Thus, if you plan to use some sort of global site config with
more generic directives, append i... |
def stats(self, request):
    """Live stats for the server.

    Try sending lots of requests
    """
    page = HtmlDocument(title='Live server stats', media_path='/assets/')
    return page.http_response(request)
Try sending lots of requests |
def visit_tuple(self, node, parent):
    """Visit a Tuple node by returning a fresh instance of it."""
    tuple_node = nodes.Tuple(
        ctx=self._get_context(node),
        lineno=node.lineno,
        col_offset=node.col_offset,
        parent=parent,
    )
    # Children are visited with the new tuple as their parent.
    tuple_node.postinit([self.visit(elt, tuple_node) for elt in node.elts])
    return tuple_node
def execute(self, fetchcommand, sql, params=None):
cur = self.conn.cursor()
if params:
if not type(params).__name__ == 'tuple':
raise ValueError('the params argument needs to be a tuple')
return None
cur.execute(sql, params)
else:
cur.execute(sql)
self.conn.commit()
if not fetchcommand or fet... | where 'fetchcommand' is either 'fetchone' or 'fetchall' |
def rbridge_id(self, **kwargs):
is_get_config = kwargs.pop('get', False)
if not is_get_config:
rbridge_id = kwargs.pop('rbridge_id')
else:
rbridge_id = ''
callback = kwargs.pop('callback', self._callback)
rid_args = dict(rbridge_id=rbridge_id)
rid ... | Configures device's rbridge ID. Setting this property will need
a switch reboot
Args:
rbridge_id (str): The rbridge ID of the device on which BGP will be
configured in a VCS fabric.
get (bool): Get config instead of editing config. (True, False)
callb... |
def emit(self, record):
try:
msg = self.format(record)
if isinstance(msg,unicode):
if hasattr(self.stream, "encoding") and self.stream.encoding:
self.stream.write(msg.encode(self.stream.encoding))
else:
self.stream.w... | Emit a record. Unless record.terminator is set, a trailing
newline will be written to the output stream. |
def add_noise_to_dict_values(dictionary: Dict[A, float], noise_param: float) -> Dict[A, float]:
    """Return a copy of ``dictionary`` with uniform noise added to every value.

    Each value ``v`` is replaced by ``v + u``, where ``u`` is drawn uniformly
    from ``[-v * noise_param, v * noise_param]``.  The input mapping is not
    modified.
    """
    return {
        key: value + random.uniform(-value * noise_param, value * noise_param)
        for key, value in dictionary.items()
    }
uniformly distributed within ``noise_param`` percent of the value for every value in the
dictionary. |
def _create_warm_start_tuner(self, additional_parents, warm_start_type, estimator=None):
all_parents = {self.latest_tuning_job.name}
if additional_parents:
all_parents = all_parents.union(additional_parents)
return HyperparameterTuner(estimator=estimator if estimator else self.estima... | Creates a new ``HyperparameterTuner`` with ``WarmStartConfig``, where type will be equal to
``warm_start_type`` and ``parents`` would be equal to the union of ``additional_parents`` and self.
Args:
additional_parents (set{str}): Additional parents along with self, to be used for warm starting.
... |
def _find_usage_parameter_groups(self):
num_groups = 0
paginator = self.conn.get_paginator('describe_cache_parameter_groups')
for page in paginator.paginate():
for group in page['CacheParameterGroups']:
num_groups += 1
self.limits['Parameter Groups']._add_curr... | find usage for elasticache parameter groups |
def set_step(self, value, block_events=False):
    """Sets the step of the number box.

    Setting block_events=True will temporarily block the widget from
    sending any signals when setting the value.
    """
    if not block_events:
        self._widget.setSingleStep(value)
        return
    self.block_events()
    self._widget.setSingleStep(value)
    self.unblock_events()
Setting block_events=True will temporarily block the widget from
sending any signals when setting the value. |
def attach(self, payload):
    """Add the given payload to the current payload.

    The current payload will always be a list of objects after this method
    is called. If you want to set the payload to a scalar object, use
    set_payload() instead.
    """
    existing = self._payload
    if existing is None:
        self._payload = [payload]
    else:
        existing.append(payload)
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.