code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _check_environ(variable, value):
    """Check if a variable is present in the environmental variables.

    Returns the explicit ``value`` when it is not None, otherwise falls
    back to ``os.environ``; calls ``stop()`` when neither is available.
    """
    if is_not_none(value):
        return value
    else:
        value = os.environ.get(variable)
        if is_none(value):
            # NOTE(review): the message passed to stop() is just the bare
            # variable name — looks truncated; confirm intended wording.
            stop(''.join([variable,
                          ]))
        else:
            return value | check if a variable is present in the environmental variables |
def reset(self):
    """Stop timer and execute ``on_reset`` if overflow occured."""
    if self.state is TimerState.stopped:
        return
    # Fire the reset callback only when the timer actually overflowed.
    should_fire = self.on_reset and self.state is TimerState.overflow
    if should_fire:
        if callable(self.on_reset):
            self.on_reset()
        else:
            execute(self.on_reset)
    self.state = TimerState.stopped
def set_snapshots(self,snapshots):
    """Set the snapshots and reindex all time-dependent data; NaNs are
    filled with each quantity's default value."""
    self.snapshots = pd.Index(snapshots)
    # New snapshots get a weighting of 1 by default.
    self.snapshot_weightings = self.snapshot_weightings.reindex(self.snapshots,fill_value=1.)
    # Workaround for a DatetimeIndex reindexing quirk in pandas < 0.18.
    if isinstance(snapshots, pd.DatetimeIndex) and _pd_version < '0.18.0':
        snapshots = pd.Index(snapshots.values)
    for component in self.all_components:
        pnl = self.pnl(component)
        attrs = self.components[component]["attrs"]
        # NOTE(review): Series.iteritems was removed in modern pandas —
        # confirm the supported pandas range before upgrading this call.
        for k,default in attrs.default[attrs.varying].iteritems():
            pnl[k] = pnl[k].reindex(self.snapshots).fillna(default) | Set the snapshots and reindex all time-dependent data.
This will reindex all pandas.Panels of time-dependent data; NaNs are filled
with the default value for that quantity.
Parameters
----------
snapshots : list or pandas.Index
All time steps.
Returns
-------
None |
def to_description_dict(self):
    """Build the AWS-style certificate description dictionary.

    You might need keys below in some situation:
      - caCertificateId
      - previousOwnedBy
    """
    attrs = (
        ('certificateArn', self.arn),
        ('certificateId', self.certificate_id),
        ('status', self.status),
        ('certificatePem', self.certificate_pem),
        ('ownedBy', self.owner),
        ('creationDate', self.creation_date),
        ('lastModifiedDate', self.last_modified_date),
        ('transferData', self.transfer_data),
    )
    return dict(attrs)
def runlist_list(**kwargs):
ctx = Context(**kwargs)
ctx.execute_action('runlist:list', **{
'storage': ctx.repo.create_secure_service('storage'),
}) | Show uploaded runlists. |
def stop_instance(self, instance):
params = {'state': 'stopped'}
url = '/instances/{}'.format(instance)
self.patch_proto(url, params=params) | Stops a single instance.
:param str instance: A Yamcs instance name. |
def copy_scubadir_file(self, name, source):
    """Copies source into the scubadir.

    Returns the container-path of the copied file.
    """
    host_dest = os.path.join(self.__scubadir_hostpath, name)
    # Refuse to silently overwrite an already-staged file.
    assert not os.path.exists(host_dest)
    shutil.copy2(source, host_dest)
    return os.path.join(self.__scubadir_contpath, name)
def setWindowSize(self, winsz):
    """Sets the size of the scroll window on both sub-plots."""
    # Forward the new window size to trace and stimulus plots alike.
    for subplot in (self.tracePlot, self.stimPlot):
        subplot.setWindowSize(winsz)
def sort_url_qsl(cls, raw_url, **kwargs):
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
return cls._join_url(parsed_url, sorted(qsl, **kwargs)) | Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``. |
def query_all_collisions(collision_object):
global collidable_objects
colliding = []
for obj in collidable_objects:
if obj is not collision_object:
if collision_object.is_colliding(obj):
colliding.append(obj)
return colliding | Check for and return the full list of objects colliding with collision_object |
def _attach_to_instance(self, instance):
self._instance = instance
self.lockable = self.lockable and instance.lockable | Attach the current field to an instance of a model. Can be overriden to
do something when an instance is set |
def do_sqlite_connect(dbapi_connection, connection_record):
    """Ensure SQLite checks foreign key constraints.

    For further details see "Foreign key support" sections on
    https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support
    """
    # SQLite ships with foreign-key enforcement off; turn it on for
    # every new connection handed out by the pool.
    fk_cursor = dbapi_connection.cursor()
    fk_cursor.execute('PRAGMA foreign_keys=ON')
    fk_cursor.close()
def get_tag_value(self, i):
    """Get the resource's tag value specifying its schedule.

    Performs a case-insensitive key match against ``self.tag_key``.
    Returns ``False`` when the tag is absent, otherwise the lower-cased
    tag value with surrounding single/double quotes stripped.
    """
    found = False
    for t in i.get('Tags', ()):
        if t['Key'].lower() == self.tag_key:
            found = t['Value']
            break
    if found is False:
        return False
    # The original utf8 encode().decode() round-trip was a no-op on
    # Python 3 text and has been dropped.
    value = found.lower()
    value = value.strip("'").strip('"')
    return value
def on_pause(self):
    """Sync the database with the current state of the game."""
    # Flush pending DB work first, then persist the string/function
    # tables and finally the configuration file.
    self.engine.commit()
    for persistable in (self.strings, self.funcs):
        persistable.save()
    self.config.write()
def _get_encoding(encoding_or_label):
if hasattr(encoding_or_label, 'codec_info'):
return encoding_or_label
encoding = lookup(encoding_or_label)
if encoding is None:
raise LookupError('Unknown encoding label: %r' % encoding_or_label)
return encoding | Accept either an encoding object or label.
:param encoding: An :class:`Encoding` object or a label string.
:returns: An :class:`Encoding` object.
:raises: :exc:`~exceptions.LookupError` for an unknown label. |
def get_random(self, n, l=None):
    """Return n random sequences from this Fasta object.

    When ``l`` is given, each returned sequence is a random slice of
    length ``l`` from a distinct source sequence.
    """
    random_f = Fasta()
    if l:
        # Shuffle a copy of the ids so pop() draws in random order.
        ids = self.ids[:]
        random.shuffle(ids)
        i = 0
        while (i < n) and (len(ids) > 0):
            seq_id = ids.pop()
            # Only sequences long enough to host an l-length slice qualify.
            if (len(self[seq_id]) >= l):
                start = random.randint(0, len(self[seq_id]) - l)
                random_f["random%s" % (i + 1)] = self[seq_id][start:start+l]
                i += 1
        if len(random_f) != n:
            # NOTE(review): message lacks a trailing newline, and the
            # function implicitly returns None here — confirm callers cope.
            sys.stderr.write("Not enough sequences of required length")
            return
        else:
            return random_f
    else:
        # Without a length, simply sample n whole sequences.
        choice = random.sample(self.ids, n)
        for i in range(n):
            random_f[choice[i]] = self[choice[i]]
        return random_f | Return n random sequences from this Fasta object |
def delete_metadata(self, container, prefix=None):
if prefix is None:
prefix = CONTAINER_META_PREFIX
new_meta = {}
curr_meta = self.get_metadata(container, prefix=prefix)
for ckey in curr_meta:
new_meta[ckey] = ""
uri = "/%s" % utils.get_name(container)
resp, resp_body = self.api.method_post(uri, headers=new_meta)
return 200 <= resp.status_code <= 299 | Removes all of the container's metadata.
By default, all metadata beginning with the standard container metadata
prefix ('X-Container-Meta-') is removed. If you wish to remove all
metadata beginning with a different prefix, you must specify that
prefix. |
def shrink(self, fraction=0.85):
poly = self.polydata(True)
shrink = vtk.vtkShrinkPolyData()
shrink.SetInputData(poly)
shrink.SetShrinkFactor(fraction)
shrink.Update()
return self.updateMesh(shrink.GetOutput()) | Shrink the triangle polydata in the representation of the input mesh.
Example:
.. code-block:: python
from vtkplotter import *
pot = load(datadir + 'shapes/teapot.vtk').shrink(0.75)
s = Sphere(r=0.2).pos(0,0,-0.5)
show(pot, s)
|shrink| |shrink.py|_ |
def plot_bbox(sf,bbox,inside_only=True):
index,shape_records = bbox_match(sf,bbox,inside_only)
A,B,C,D = bbox
plot(shape_records,xlims=[bbox[0],bbox[2]],ylims=[bbox[1],bbox[3]]) | Plot the geometry of a shapefile within a bbox
:param sf: shapefile
:type sf: shapefile object
:param bbox: bounding box
:type bbox: list of floats [x_min,y_min,x_max,y_max]
:inside_only: True if the objects returned are those that lie within the bbox and False if the objects returned are any that intersect the bbox
:type inside_only: Boolean |
def Set(self, value, fields=None):
self._metric_values[_FieldsToKey(fields)] = self._value_type(value) | Sets the metric's current value. |
def get_profane_words(self):
    """Returns all profane words currently in use.

    The custom censor list (when set) replaces the default list; extra
    words are always appended, plural forms are added, and the result
    is deduplicated and ordered longest-first.
    """
    base_list = self._custom_censor_list if self._custom_censor_list else self._censor_list
    words = [w for w in base_list]
    words.extend(self._extra_censor_list)
    # Snapshot via list-comp before extend so we don't iterate while growing.
    words.extend([inflection.pluralize(w) for w in words])
    unique_words = list(set(words))
    unique_words.sort(key=len)
    unique_words.reverse()
    return unique_words
def _parallel_predict(estimators, estimators_features, X, n_classes, combination, estimators_weight):
n_samples = X.shape[0]
pred = np.zeros((n_samples, n_classes))
n_estimators = len(estimators)
for estimator, features, weight in zip(estimators, estimators_features, estimators_weight):
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
if combination == 'weighted_voting':
pred[i, int(predictions[i])] += 1 * weight
else:
pred[i, int(predictions[i])] += 1
return pred | Private function used to compute predictions within a job. |
def view_graph(graph_str, parent=None, prune_to=None):
from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog
from rez.config import config
h = hash((graph_str, prune_to))
filepath = graph_file_lookup.get(h)
if filepath and not os.path.exists(filepath):
filepath = None
if filepath is None:
suffix = ".%s" % config.dot_image_format
fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-")
os.close(fd)
dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to)
if not dlg.write_graph():
return
graph_file_lookup[h] = filepath
dlg = ImageViewerDialog(filepath, parent)
dlg.exec_() | View a graph. |
def num_model_per_iteration(self):
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value | Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration. |
def is_valid_device_id(device_id):
    """Check if device identifier is valid.

    A valid device identifier contains only ascii word characters or dashes.

    :param device_id: Device identifier
    :returns: True if valid, False otherwise.
    """
    valid = valid_device_id.match(device_id)
    if not valid:
        logging.error("A valid device identifier contains "
                      "only ascii word characters or dashes. "
                      "Device '%s' not added.", device_id)
    # Return a plain bool rather than the Match object / None so the
    # result matches the documented "valid or not" contract; truthiness
    # for existing callers is unchanged.
    return bool(valid)
def _tighten_triplet(self, max_iterations, later_iter, max_triplets, prolong):
    """Find all eligible triplets and add them iteratively, at most
    ``max_triplets`` per tightening round, re-running MPLP each round."""
    # Enumerate candidate triangle clusters and score each one.
    triangles = self.find_triangles()
    triplet_scores = self._get_triplet_scores(triangles)
    # Ascending by score, so .pop() below takes the best remaining triplet.
    sorted_scores = sorted(triplet_scores, key=triplet_scores.get)
    for niter in range(max_iterations):
        if self._is_converged(integrality_gap_threshold=self.integrality_gap_threshold):
            break
        # Take up to max_triplets of the highest-scoring triplets this round.
        add_triplets = []
        for triplet_number in (range(len(sorted_scores))):
            if triplet_number >= max_triplets:
                break
            add_triplets.append(sorted_scores.pop())
        # Candidates exhausted: stop unless the caller asked to prolong.
        if not add_triplets and prolong is False:
            break
        self._update_triangles(add_triplets)
        self._run_mplp(later_iter) | This method finds all the triplets that are eligible and adds them iteratively in the bunch of max_triplets
Parameters
----------
max_iterations: integer
Maximum number of times we tighten the relaxation
later_iter: integer
Number of maximum iterations that we want MPLP to run. This is lesser than the initial number
of iterations.
max_triplets: integer
Maximum number of triplets that can be added atmost in one iteration.
prolong: bool
It sets the continuation of tightening after all the triplets are exhausted |
def weighted_accuracy(comparisons, weights):
    """Compute the weighted accuracy of a list of chord comparisons.

    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
    ...     est_intervals, est_labels, ref_intervals.min(),
    ...     ref_intervals.max(), mir_eval.chord.NO_CHORD,
    ...     mir_eval.chord.NO_CHORD)
    >>> (intervals,
    ...  ref_labels,
    ...  est_labels) = mir_eval.util.merge_labeled_intervals(
    ...     ref_intervals, ref_labels, est_intervals, est_labels)
    >>> durations = mir_eval.util.intervals_to_durations(intervals)
    >>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
    >>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)

    Parameters
    ----------
    comparisons : np.ndarray
        List of chord comparison scores, in [0, 1] or -1
    weights : np.ndarray
        Weights (not necessarily normalized) for each comparison.
        This can be a list of interval durations

    Returns
    -------
    score : float
        Weighted accuracy
    """
    n_comparisons = len(comparisons)
    if weights.shape[0] != n_comparisons:
        raise ValueError('weights and comparisons should be of the same'
                         ' length. len(weights) = {} but len(comparisons)'
                         ' = {}'.format(weights.shape[0], n_comparisons))
    if (weights < 0).any():
        raise ValueError('Weights should all be positive.')
    if np.sum(weights) == 0:
        warnings.warn('No nonzero weights, returning 0')
        return 0
    # A comparison of -1 marks chords with no comparable reference.
    valid_idx = (comparisons >= 0)
    if valid_idx.sum() == 0:
        warnings.warn("No reference chords were comparable "
                      "to estimated chords, returning 0.")
        return 0
    kept_scores = comparisons[valid_idx]
    kept_weights = np.asarray(weights[valid_idx], dtype=float)
    # Normalize the surviving weights so they sum to one.
    kept_weights = kept_weights / float(np.sum(kept_weights))
    return np.sum(kept_scores * kept_weights)
def _value_validate(self, value, rnge, identifier="Given"):
if value is not None and (value < rnge[0] or value > rnge[1]):
raise ValueError('%s value must be between %d and %d.'
% (identifier, rnge[0], rnge[1])) | Make sure a value is within a given range |
def run(self):
    """Thread main: attempt to stop and join the attached thread."""
    # Redirect the target thread's stderr so tracebacks produced while
    # killing it go to our capture stream instead of the real stderr.
    self.otherThread._Thread__stderr = self._stderr
    # Python 2 private Thread API; absent on Python 3, hence the guard.
    if hasattr(self.otherThread, '_Thread__stop'):
        self.otherThread._Thread__stop()
    # Repeatedly raise the async exception in the target thread until it
    # dies — a single raise can be swallowed while C code is running.
    while self.otherThread.isAlive():
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(self.otherThread.ident), ctypes.py_object(self.exception))
        self.otherThread.join(self.repeatEvery)
    try:
        self._stderr.close()
    except:
        # Best-effort cleanup; the stream may already be closed.
        pass | run - The thread main. Will attempt to stop and join the attached thread. |
def first_name_capture(records):
logging.info('Applying _first_name_capture generator: '
'making sure ID only contains the first whitespace-delimited '
'word.')
whitespace = re.compile(r'\s+')
for record in records:
if whitespace.search(record.description):
yield SeqRecord(record.seq, id=record.id,
description="")
else:
yield record | Take only the first whitespace-delimited word as the name of the sequence.
Essentially removes any extra text from the sequence's description. |
def visit_loop(self, node, cond=None):
    """Handle incremented variables in loop body via interval widening."""
    # First pass: establish ranges after one execution of the body.
    for stmt in node.body:
        self.visit(stmt)
    old_range = self.result.copy()
    # Second pass: widen each expression's interval against the previous
    # pass so repeatedly-incremented variables converge to +/-inf bounds.
    for stmt in node.body:
        self.visit(stmt)
    for expr, range_ in old_range.items():
        self.result[expr] = self.result[expr].widen(range_)
    # Re-visit the loop condition (if any) under the widened ranges.
    cond and self.visit(cond)
    # Final pass over the body, then the else-clause.
    for stmt in node.body:
        self.visit(stmt)
    for stmt in node.orelse:
        self.visit(stmt) | Handle incremented variables in loop body.
>>> import gast as ast
>>> from pythran import passmanager, backend
>>> node = ast.parse('''
... def foo():
... a = b = c = 2
... while a > 0:
... a -= 1
... b += 1''')
>>> pm = passmanager.PassManager("test")
>>> res = pm.gather(RangeValues, node)
>>> res['a']
Interval(low=-inf, high=2)
>>> res['b']
Interval(low=2, high=inf)
>>> res['c']
Interval(low=2, high=2) |
def disable(self):
    """Disable this container and, recursively, all widgets inside it."""
    self._enabled = False
    # Propagate only to children that are themselves containers/widgets.
    disableables = (c for c in self.children if isinstance(c, (Container, Widget)))
    for item in disableables:
        item.disable()
def _significant_pathways_dataframe(pvalue_information,
side_information,
alpha):
significant_pathways = pd.concat(
[pvalue_information, side_information], axis=1)
below_alpha, qvalues, _, _ = multipletests(
significant_pathways["p-value"], alpha=alpha, method="fdr_bh")
below_alpha = pd.Series(
below_alpha, index=pvalue_information.index, name="pass")
qvalues = pd.Series(
qvalues, index=pvalue_information.index, name="q-value")
significant_pathways = pd.concat(
[significant_pathways, below_alpha, qvalues], axis=1)
significant_pathways = significant_pathways[significant_pathways["pass"]]
significant_pathways.drop("pass", axis=1, inplace=True)
significant_pathways.loc[:, "pathway"] = significant_pathways.index
return significant_pathways | Create the significant pathways pandas.DataFrame.
Given the p-values corresponding to each pathway in a feature,
apply the FDR correction for multiple testing and remove those that
do not have a q-value of less than `alpha`. |
def start_logging(self, region, name):
ct = self.session.client('cloudtrail', region_name=region)
ct.start_logging(Name=name)
auditlog(
event='cloudtrail.start_logging',
actor=self.ns,
data={
'account': self.account.account_name,
'region': region
}
)
self.log.info('Enabled logging for {} ({})'.format(name, region)) | Turn on logging for a CloudTrail Trail
Args:
region (`str`): Name of the AWS region
name (`str`): Name of the CloudTrail Trail
Returns:
`None` |
def init_ui(self):
board_width = self.ms_game.board_width
board_height = self.ms_game.board_height
self.create_grid(board_width, board_height)
self.time = 0
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.timing_game)
self.timer.start(1000) | Init game interface. |
def dump_in_memory_result(self, result, output_path):
    """Recursively dumps the result of our processing into files within the
    given output path.

    Args:
        result: The in-memory result of our processing; dict values become
            sub-directories, everything else is written as file contents.
        output_path: Full path to the folder into which to dump the files.

    Returns:
        The number of files generated (integer).
    """
    file_count = 0
    logger.debug("Dumping in-memory processing results to output folder: %s", output_path)
    # Plain .items(): this body already requires Python 3 (open() with
    # an encoding kwarg), so the six-style iteritems() was redundant.
    for k, v in result.items():
        cur_output_path = os.path.join(output_path, k)
        if isinstance(v, dict):
            file_count += self.dump_in_memory_result(v, cur_output_path)
        else:
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(output_path, exist_ok=True)
            filename = os.path.join(output_path, k)
            logger.debug("Writing output file: %s", filename)
            with open(filename, 'wt', encoding=self.config.encoding) as f:
                f.write(v)
            file_count += 1
    return file_count
def get_nodes_by_tag(self, graph, tag_name):
    """Yields nodes from graph whose parsed node carries the given tag."""
    for node, parsed in self.parsed_nodes(graph):
        if tag_name in parsed.tags:
            yield node
def get_next_environment(env):
    """Given an environment, return the next environment in the
    promotion hierarchy.

    Raises JuicerConfigError when *env* is unknown or has no
    ``promotes_to`` entry.
    """
    config = _config_file()
    juicer.utils.Log.log_debug("Finding next environment...")
    if env not in config.sections():
        # BUG FIX: the %s placeholder was never interpolated — the
        # exception previously received ("...%s...", env) as two args.
        raise JuicerConfigError("%s is not a server configured in juicer.conf" % env)
    section = dict(config.items(env))
    if 'promotes_to' not in section:
        err = "Environment `%s` has no entry for `promotes_to`\nCheck man 5 juicer.conf." % env
        raise JuicerConfigError(err)
    return section['promotes_to']
def current_timestamp(self) -> datetime:
timestamp = DB.get_hash_value(self._key, 'current_timestamp')
return datetime_from_isoformat(timestamp) | Get the current state timestamp. |
def feed_fetch_force(request, id, redirect_to):
    """Forcibly fetch tweets for the feed."""
    feed = Feed.objects.get(id=id)
    feed.fetch(force=True)
    # BUG FIX (i18n): interpolate *after* the translation lookup;
    # interpolating first produces a unique string that can never match
    # an entry in the translation catalog.
    msg = _("Fetched tweets for %s") % feed.name
    messages.success(request, msg, fail_silently=True)
    return HttpResponseRedirect(redirect_to)
def load_werkzeug(path):
    """Load werkzeug from *path*; return (version, hg_tag) or exit(2)."""
    sys.path[0] = path
    # Purge previously imported werkzeug modules so the checkout under
    # *path* is actually (re)imported.
    wz.__dict__.clear()
    # BUG FIX: iterate over a snapshot — mutating sys.modules while
    # iterating its live keys() view raises RuntimeError on Python 3.
    for key in list(sys.modules):
        if key.startswith("werkzeug.") or key == "werkzeug":
            sys.modules.pop(key, None)
    import werkzeug
    for key in werkzeug.__all__:
        setattr(wz, key, getattr(werkzeug, key))
    hg_tag = find_hg_tag(path)
    try:
        f = open(os.path.join(path, "setup.py"))
    except IOError:
        pass
    else:
        try:
            # Scrape the version= line out of setup.py.
            for line in f:
                line = line.strip()
                if line.startswith("version="):
                    return line[8:].strip(" \t,")[1:-1], hg_tag
        finally:
            f.close()
    print("Unknown werkzeug version loaded", file=sys.stderr)
    sys.exit(2)
def warp_object(self, tileMapObj):
    """Warp the tile map object from one warp to another."""
    # print() form works on both Python 2 and 3 (the original used the
    # py2-only print statement).
    print("Collision")
    if tileMapObj.can_warp:
        if self.map_association != self.exitWarp.map_association:
            # BUG FIX: `exitWarp` was referenced as a bare name
            # (NameError); it is an attribute of this warp object.
            TileMapManager.load(self.exitWarp.map_association)
        tileMapObj.parent.coords = self.exitWarp.coords
def writeline(self, data):
try:
if self.ch_mode:
data += "\n"
parts = split_by_n(data, self.ch_mode_chunk_size)
for split_str in parts:
self.port.write(split_str.encode())
time.sleep(self.ch_mode_ch_delay)
else:
self.port.write((data + "\n").encode())
except SerialException as err:
self.logger.exception("SerialError occured while trying to write data {}.".format(data))
raise RuntimeError(str(err)) | Writes data to serial port.
:param data: Data to write
:return: Nothing
:raises: IOError if SerialException occurs. |
def delete(self, option):
    """Deletes *option* from the config mapping if it exists."""
    cfg = self.config
    if cfg is not None and option in cfg:
        del cfg[option]
def user_can_delete_attachments(self):
context = self.context
user = api.get_current_user()
if not self.is_ar_editable():
return False
return (self.user_can_add_attachments() and
not user.allowed(context, ["Client"])) or \
self.user_can_update_attachments() | Checks if the current logged in user is allowed to delete attachments |
def _collect_cpu_info(run_info):
cpu_info = {}
cpu_info["num_cores"] = multiprocessing.cpu_count()
import cpuinfo
info = cpuinfo.get_cpu_info()
cpu_info["cpu_info"] = info["brand"]
cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6
run_info["machine_config"]["cpu_info"] = cpu_info | Collect the CPU information for the local environment. |
def legend(self, values):
    """Set the legend labels.

    Args:
        values (list): list of labels.

    Raises:
        TypeError: if *values* is not a list of labels.
    """
    if isinstance(values, list):
        self.options["legend"] = values
    else:
        raise TypeError("legend must be a list of labels")
def store_meta_data(self, copy_path=None):
if copy_path:
meta_file_path_json = os.path.join(copy_path, self.state.get_storage_path(), storage.FILE_NAME_META_DATA)
else:
if self.state.file_system_path is None:
logger.error("Meta data of {0} can be stored temporary arbitrary but by default first after the "
"respective state was stored and a file system path is set.".format(self))
return
meta_file_path_json = os.path.join(self.state.file_system_path, storage.FILE_NAME_META_DATA)
meta_data = deepcopy(self.meta)
self._generate_element_meta_data(meta_data)
storage_utils.write_dict_to_json(meta_data, meta_file_path_json) | Save meta data of state model to the file system
This method generates a dictionary of the meta data of the state together with the meta data of all state
elements (data ports, outcomes, etc.) and stores it on the filesystem.
Secure that the store meta data method is called after storing the core data otherwise the last_stored_path is
maybe wrong or None.
The copy path is considered to be a state machine file system path but not the current one but e.g.
of a as copy saved state machine. The meta data will be stored in respective relative state folder in the state
machine hierarchy. This folder has to exist.
Dues the core elements of the state machine has to be stored first.
:param str copy_path: Optional copy path if meta data is not stored to the file system path of state machine |
def reset_status(self):
for row in range(self.table.rowCount()):
status_item = self.table.item(row, 1)
status_item.setText(self.tr('')) | Set all scenarios' status to empty in the table. |
def _asdict(self):
with self._cond:
if self._prompt is None:
return
return {'id': self._prompt.id,
'message': self._prompt.message,
'text-input': self._prompt.text_input} | Return a dictionary representation of the current prompt. |
def parse(self, debug=False):
    """Returns parsed text, computing and caching it on first call."""
    if self._parsed is not None:
        return self._parsed
    try:
        if self._mode == "html":
            parsed = self.html(self._content, self._show_everything, self._translation)
        else:
            parsed = self.rst(self._content, self._show_everything, self._translation, debug=debug)
    except Exception as e:
        if debug:
            raise BaseException("Parsing failed") from e
        # Fall back to an escaped dump of the raw content.
        parsed = self._translation.gettext("<b>Parsing failed</b>: <pre>{}</pre>").format(html.escape(self._content))
    self._parsed = parsed
    return self._parsed
def start_stream(self):
    """Starts a stream with the current tracking terms."""
    tracking_terms = self.term_checker.tracking_terms()
    if len(tracking_terms) > 0 or self.unfiltered:
        self.stream = tweepy.Stream(self.auth, self.listener,
                                    stall_warnings=True,
                                    timeout=90,
                                    retry_count=self.retry_count)
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so
        # `async=True` is a SyntaxError; tweepy >= 3.7 renamed the
        # parameter to `is_async`.  NOTE(review): confirm the pinned
        # tweepy version supports `is_async`.
        if len(tracking_terms) > 0:
            logger.info("Starting new twitter stream with %s terms:", len(tracking_terms))
            logger.info(" %s", repr(tracking_terms))
            self.stream.filter(track=tracking_terms, is_async=True, languages=self.languages)
        else:
            logger.info("Starting new unfiltered stream")
            self.stream.sample(is_async=True, languages=self.languages)
def _remove_duplicate_files(xs):
seen = set([])
out = []
for x in xs:
if x["path"] not in seen:
out.append(x)
seen.add(x["path"])
return out | Remove files specified multiple times in a list. |
def equals(self, junc):
    """Test equality with another junction.

    BUG FIX: the original returned False when the left (or right) side
    *did* match, inverting the comparison; per the documented contract,
    junctions are equal only when both endpoints are equal.
    """
    if not self.left.equals(junc.left):
        return False
    if not self.right.equals(junc.right):
        return False
    return True
def l2_regularizer(weight=1.0, scope=None):
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer | Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function. |
def _set_directories(self):
    """Initialize variables based on evidence about the directories."""
    # Idiom fix: compare against None with `is`, not `==`.
    if self._dirs['initial'] is None:
        self._dirs['base'] = discover_base_dir(self._dirs['run'])
    else:
        # An explicitly supplied initial directory wins over the run dir.
        self._dirs['base'] = discover_base_dir(self._dirs['initial'])
    self._update_dirs_on_base()
    self._tree_ready = verify_dir_structure(self._dirs['base'])
    if self._tree_ready:
        self._read_site_config()
def download(self, filename=None):
    """Download the dataset to a local file.

    Parameters
    ----------
    filename : str, optional
        The full path to which the dataset will be saved; defaults to
        the dataset's own name.
    """
    target = self.name if filename is None else filename
    with self.remote_open() as remote:
        payload = remote.read()
    with open(target, 'wb') as local:
        local.write(payload)
def containment_angle_bin(self, egy_bins, fraction=0.68, scale_fn=None):
vals = self.interp_bin(egy_bins, self.dtheta, scale_fn=scale_fn)
dtheta = np.radians(self.dtheta[:, np.newaxis] * np.ones(vals.shape))
return self._calc_containment(dtheta, vals, fraction) | Evaluate the PSF containment angle averaged over energy bins. |
def smove(self, src, dst, value):
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True | Emulate smove. |
def get_nodes(self, request):
nodes = []
nodes.append(NavigationNode(_('Tags'), reverse('zinnia:tag_list'),
'tags'))
for tag in tags_published():
nodes.append(NavigationNode(tag.name,
reverse('zinnia:tag_detail',
args=[tag.name]),
tag.pk, 'tags'))
return nodes | Return menu's node for tags |
def deactivate_mfa_device(self, user_name, serial_number):
user = self.get_user(user_name)
if serial_number not in user.mfa_devices:
raise IAMNotFoundException(
"Device {0} not found".format(serial_number)
)
user.deactivate_mfa_device(serial_number) | Deactivate and detach MFA Device from user if device exists. |
def _HandleHelp(self, request):
help_path = request.path.split("/", 2)[-1]
if not help_path:
raise werkzeug_exceptions.Forbidden("Error: Invalid help path.")
return self._RedirectToRemoteHelp(help_path) | Handles help requests. |
def install_monitor(self, monitor_pattern: str, monitor_stat_func_name: str):
self._monitor = mx.monitor.Monitor(interval=C.MEASURE_SPEED_EVERY,
stat_func=C.MONITOR_STAT_FUNCS.get(monitor_stat_func_name),
pattern=monitor_pattern,
sort=True)
self.module.install_monitor(self._monitor)
logger.info("Installed MXNet monitor; pattern='%s'; statistics_func='%s'",
monitor_pattern, monitor_stat_func_name) | Installs an MXNet monitor onto the underlying module.
:param monitor_pattern: Pattern string.
:param monitor_stat_func_name: Name of monitor statistics function. |
def setup_ui(self, ):
self.main_vbox = QtGui.QVBoxLayout(self)
self.import_all_references_cb = QtGui.QCheckBox("Import references")
self.main_vbox.addWidget(self.import_all_references_cb) | Create all ui elements and layouts
:returns: None
:rtype: None
:raises: None |
def save_coeffs(coeffs, out_dir=''):
    """Save calibration coefficients to HDF5 files, one file per platform."""
    for platform in coeffs.keys():
        fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform)
        # Context manager ensures the file is closed even if a write fails
        # (the original leaked the handle on error).
        with h5py.File(fname, 'w') as fid:
            for chan in coeffs[platform].keys():
                fid.create_group(chan)
                fid[chan]['datetime'] = coeffs[platform][chan]['datetime']
                fid[chan]['slope1'] = coeffs[platform][chan]['slope1']
                fid[chan]['intercept1'] = coeffs[platform][chan]['intercept1']
                fid[chan]['slope2'] = coeffs[platform][chan]['slope2']
                fid[chan]['intercept2'] = coeffs[platform][chan]['intercept2']
        # BUG FIX: the original used a Python 2 print statement, which is
        # a SyntaxError on Python 3.
        print("Calibration coefficients saved for %s" % platform)
def ad_hoc_magic_from_file(filename, **kwargs):
    """Ad-hoc emulation of magic.from_file from python-magic.

    Recognizes ELF and DOS/PE executables by their magic bytes; any
    other content raises NotImplementedError.
    """
    with open(filename, 'rb') as stream:
        head = stream.read(16)
    if head.startswith(b'\x7fELF'):
        return b'application/x-executable'
    if head.startswith(b'MZ'):
        return b'application/x-dosexec'
    raise NotImplementedError()
def get_data_files(dname, ignore=None, parent=None):
parent = parent or "."
ignore = ignore or []
result = []
for directory, subdirectories, filenames in os.walk(dname):
resultfiles = []
for exname in EXCLUDE_NAMES:
if exname in subdirectories:
subdirectories.remove(exname)
for ig in ignore:
if ig in subdirectories:
subdirectories.remove(ig)
for filename in _filter_names(filenames):
resultfiles.append(filename)
if resultfiles:
for filename in resultfiles:
file_path = os.path.join(directory, filename)
if parent:
file_path = file_path.replace(parent + os.sep, '')
result.append(file_path)
return result | Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory. |
def get_fieldsets(self, request, obj=None):
fieldsets = list(super(CreateUpdateAdmin, self).get_fieldsets(
request=request, obj=obj))
fields = set()
to_add = set()
for fs in fieldsets:
fields = fields.union(fs[1]['fields'])
for k, v in self.ownership_info['fields'].items():
if (hasattr(self.model, k)
and k not in fields
and (not self.exclude
or (self.exclude and k not in self.exclude))):
if ('readonly' in v and not v['readonly']) or obj:
to_add.add(k)
if len(to_add) > 0:
fieldsets.append((self.ownership_info['label'],
{'fields': tuple(to_add)}))
return tuple(fieldsets) | Add ownership info fields in fieldset with proper separation.
Author: Himanshu Shankar (https://himanshus.com) |
def auth(self, transport, account_name, password):
auth_token = AuthToken()
auth_token.account_name = account_name
attrs = {sconstant.A_BY: sconstant.V_NAME}
account = SOAPpy.Types.stringType(data=account_name, attrs=attrs)
params = {sconstant.E_ACCOUNT: account,
sconstant.E_PASSWORD: password}
self.log.debug('Authenticating account %s' % account_name)
try:
res = transport.invoke(zconstant.NS_ZIMBRA_ACC_URL,
sconstant.AuthRequest,
params,
auth_token)
except SoapException as exc:
raise AuthException(unicode(exc), exc)
auth_token.token = res.authToken
if hasattr(res, 'sessionId'):
auth_token.session_id = res.sessionId
self.log.info('Authenticated account %s, session id %s'
% (account_name, auth_token.session_id))
return auth_token | Authenticates using username and password. |
def collect_info(self):
    """Collect info about the connection and fill the info dictionary.

    Returns True on success; on any failure ``self.info`` is reset to {}
    and False is returned (deliberately best-effort probing).
    """
    try:
        info = {}
        res = self._send_request('GET', "/")
        info['server'] = {}
        info['server']['name'] = res['name']
        info['server']['version'] = res['version']
        info['allinfo'] = res
        info['status'] = self.cluster.status()
        info['aliases'] = self.indices.aliases()
        self.info = info
        return True
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the probe remains best-effort for real errors.
        self.info = {}
        return False
def update_DOM(self):
response = self.fetch()
self._DOM = html.fromstring(response.text)
return self | Makes a request and updates `self._DOM`.
Worth using only if you manually change `self.base_url` or `self.path`.
:return: self
:rtype: Url |
def reset():
    """Reset the worker state associated with any signals received so far.

    If the worker calls receive() on a source next, it will get all the
    signals generated by that source starting with index = 1.
    """
    worker = ray.worker.global_worker
    if hasattr(worker, "signal_counters"):
        # b"0" is the initial counter value for every source.
        worker.signal_counters = defaultdict(lambda: b"0")
has received so far.
If the worker calls receive() on a source next, it will get all the
signals generated by that source starting with index = 1. |
def onex(self):
    """Delete all X columns except the first one.

    Columns whose type code is 3 are X columns; they are removed from
    right to left so earlier indices remain valid during deletion.
    """
    x_columns = [col for col in range(self.nCols) if self.colTypes[col] == 3]
    for col in reversed(x_columns[1:]):
        self.colDelete(col)
def set_next_page_params(self):
    """Set the params so that the next page is fetched.

    Does nothing when the current page of items is empty.
    """
    if not self.items:
        return
    last_index = self.get_last_item_index()
    self.params[self.mode] = self.get_next_page_param(self.items[last_index])
def runlist_add_app(name, app, profile, force, **kwargs):
    """Add the specified application with profile to the specified runlist.

    Existence of the application or profile is not checked.
    """
    ctx = Context(**kwargs)
    action_kwargs = {
        'storage': ctx.repo.create_secure_service('storage'),
        'name': name,
        'app': app,
        'profile': profile,
        'force': force,
    }
    ctx.execute_action('runlist:add-app', **action_kwargs)
Existence of application or profile is not checked. |
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
    """Tests if a particular fractional coord is within a fractional coord_list,
    taking periodic boundary conditions into account.

    Args:
        fcoord_list: List of fractional coords to test
        fcoord: A specific fractional coord to test.
        atol: Absolute tolerance. Defaults to 1e-8.

    Returns:
        True if coord is in the coord list.
    """
    matches = find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)
    return len(matches) > 0
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list. |
def update_state_active(self):
    """Update the state of the model run to active.

    Raises an exception if the update fails or the resource is unknown.

    Returns
    -------
    ModelRunHandle
        Refreshed run handle.
    """
    target_url = self.links[REF_UPDATE_STATE_ACTIVE]
    self.update_state(target_url, {'type': RUN_ACTIVE})
    return self.refresh()
Raises an exception if update fails or resource is unknown.
Returns
-------
ModelRunHandle
Refreshed run handle. |
def is_this_year(self):
    """Return whether the block occurs after September 1st of this school year."""
    # Combine with midnight to get a full datetime for the comparison helper.
    midnight = datetime.datetime.combine(self.date, datetime.time())
    return is_current_year(midnight)
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries):
    """Automatically detect the pattern file format.

    The file is considered TSV when every line contains exactly one tab.
    Word-boundary matching for the Aho-Corasick matcher is enabled either
    by the caller or when any pattern contains a ``\\b`` escape.

    Arguments:
    - `pattern_filename`:
    - `encoding`:
    - `on_word_boundaries`:
    """
    is_tsv = True
    use_boundaries = on_word_boundaries
    with open_file(pattern_filename) as pattern_file:
        for raw_line in pattern_file:
            decoded = raw_line.decode(encoding)
            if decoded.count('\t') != 1:
                is_tsv = False
            if '\\b' in decoded:
                use_boundaries = True
            # Both answers are settled; no need to scan further.
            if use_boundaries and not is_tsv:
                break
    return is_tsv, use_boundaries
whether the Aho-Corasick string matching should pay attention to
word boundaries or not.
Arguments:
- `pattern_filename`:
- `encoding`:
- `on_word_boundaries`: |
def serialize(material_description):
    """Serialize a material description dictionary into a DynamoDB attribute.

    :param dict material_description: Material description dictionary
    :returns: Serialized material description as a DynamoDB binary attribute value
    :rtype: dict
    :raises InvalidMaterialDescriptionError: if an invalid name or value is found
    """
    serialized = bytearray(_MATERIAL_DESCRIPTION_VERSION)
    # Sort by key so the serialized form is deterministic.
    for name, value in sorted(material_description.items(), key=lambda item: item[0]):
        try:
            serialized.extend(encode_value(to_bytes(name)))
            serialized.extend(encode_value(to_bytes(value)))
        except (TypeError, struct.error):
            raise InvalidMaterialDescriptionError(
                'Invalid name or value in material description: "{name}"="{value}"'.format(name=name, value=value)
            )
    return {Tag.BINARY.dynamodb_tag: bytes(serialized)}
:param dict material_description: Material description dictionary
:returns: Serialized material description as a DynamoDB binary attribute value
:rtype: dict
:raises InvalidMaterialDescriptionError: if invalid name or value found in material description |
def parse_name(name):
    """Split a query name into field name, operator and whether it is inverted.

    The operator suffix (if any) is stripped from the name and a leading
    '!' marks the query as inverted; the default operator is OP_EQ.
    """
    op = OP_EQ
    inverted = False
    if name is not None:
        for candidate in (OP_NIN, OP_IN, OP_NOT, OP_LIKE):
            if name.endswith(candidate):
                op = candidate
                name = name[:len(name) - len(candidate)]
                break
        if name.startswith('!'):
            inverted = True
            name = name[1:]
    return name, inverted, op
inverted. |
def Gamma(cls,
          shape: 'TensorFluent',
          scale: 'TensorFluent',
          batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    """Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.

    Args:
        shape: The shape parameter of the Gamma distribution.
        scale: The scale parameter of the Gamma distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Gamma distribution and a TensorFluent sample drawn from the distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    """
    if shape.scope != scale.scope:
        raise ValueError('Gamma distribution: parameters must have same scope!')
    # tf.distributions.Gamma is parameterized by rate = 1 / scale.
    gamma = tf.distributions.Gamma(shape.tensor, 1 / scale.tensor)
    is_batch = shape.batch or scale.batch
    if batch_size is not None and not is_batch:
        sample = gamma.sample(batch_size)
        is_batch = True
    else:
        sample = gamma.sample()
    return (gamma, TensorFluent(sample, shape.scope.as_list(), batch=is_batch))
Args:
shape: The shape parameter of the Gamma distribution.
scale: The scale parameter of the Gamma distribution.
batch_size: The size of the batch (optional).
Returns:
The Gamma distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope. |
def fitness_vs(self):
    """Median fitness of the models in the validation set."""
    scores = [model.fitness_vs for model in self.models]
    return np.median(scores)
def cache(self, refreshing=None, next_action=None, data_blob=None, json_last_refresh=None, rollback_point=False):
    """Push this component into the cache.

    :param refreshing: the new refreshing value
    :param next_action: the new next action value
    :param data_blob: the new data blob value
    :param json_last_refresh: the new json last refresh value - if None the date of this call
    :param rollback_point: define the rollback point with the provided values
    :return: result of the cache actor save call
    """
    LOGGER.debug("InjectorComponentSkeleton.cache")
    if json_last_refresh is None:
        json_last_refresh = datetime.datetime.now()
    if rollback_point:
        # Fix: rollback_point_refreshing was assigned twice; the redundant
        # duplicate assignment has been removed.
        self.rollback_point_refreshing = refreshing
        self.rollback_point_next_action = next_action
        self.rollback_point_data_blob = data_blob
        # NOTE(review): json_last_refresh is not recorded in the rollback
        # point even though the docstring implies it should be - confirm.
    return self.component_cache_actor.save(refreshing=refreshing, next_action=next_action,
                                           json_last_refresh=json_last_refresh, data_blob=data_blob).get()
:param refreshing: the new refreshing value
:param next_action: the new next action value
:param data_blob: the new data blob value
:param json_last_refresh: the new json last refresh value - if None the date of this call
:param rollback_point: define the rollback point with provided values (refreshing, next_action, data_blob and
json_last_refresh)
:return: |
def to_params(self):
    """Generate a dict of property values for the current object.

    Values are coerced for transport: datetimes are formatted via
    ``format_time``, lists are comma-joined and booleans are lowercased.
    Properties whose value is None are omitted.
    """
    params = {}
    for name in self.PROPERTIES:
        attr = '_{0}'.format(name)
        # Prefer the private attribute and fall back to the public one only
        # when the private attribute is absent or None. The previous
        # ``or``-based fallback silently dropped falsy values such as
        # False or 0, which the bool branch below clearly expects to keep.
        value = getattr(self, attr, None)
        if value is None:
            value = getattr(self, name, None)
        if value is None:
            continue
        if isinstance(value, datetime):
            params[name] = format_time(value)
        elif isinstance(value, list):
            params[name] = ','.join(map(str, value))
        elif isinstance(value, bool):
            params[name] = str(value).lower()
        else:
            params[name] = value
    return params
handles all necessary type coercions as it generates its output. |
def ListGrrUsers(self):
    """Lists all registered GRR users."""
    request = user_management_pb2.ApiListGrrUsersArgs()
    raw_users = self._context.SendIteratorRequest("ListGrrUsers", request)

    def wrap(data):
        return GrrUser(data=data, context=self._context)

    return utils.MapItemsIterator(wrap, raw_users)
def stage_subset(self, *files_to_add: str):
    """Stage a subset of files in the git index.

    :param files_to_add: files to stage
    :type files_to_add: str
    """
    LOGGER.info('staging files: %s', files_to_add)
    self.repo.git.add(*files_to_add, A=True)
:param files_to_add: files to stage
:type files_to_add: str |
def setCurrentIndex(self, index):
    """Set the current item to the item at the given index.

    Emits ``currentIndexChanged``, enables the menu only for the selected
    item and repaints; does nothing when the index is unchanged.

    :param index | <int>
    """
    if index == self._currentIndex:
        return
    self._currentIndex = index
    self.currentIndexChanged.emit(index)
    for position, item in enumerate(self.items()):
        item.setMenuEnabled(position == index)
    self.repaint()
:param index | <int> |
def import_package(rel_path_to_package, package_name):
    """Import a python package into the current namespace.

    Parameters
    ----------
    rel_path_to_package : str
        Path to the directory containing the package, relative to this
        script's directory.
    package_name : str
        The name of the package to be imported.

    Returns
    ---------
    package : The imported package object.
    """
    try:
        base_dir = os.path.dirname(os.path.realpath(__file__))
    except NameError:
        # __file__ is undefined (e.g. interactive session); fall back to cwd.
        base_dir = os.path.dirname(os.path.realpath(os.getcwd()))
    package_path = os.path.join(base_dir, rel_path_to_package)
    if package_path not in sys.path:
        sys.path = [package_path] + sys.path
    return __import__(package_name)
Parameters
----------
rel_path_to_package : str
Path to the directory containing the package, relative to this
script's directory.
package_name : str
The name of the package to be imported.
Returns
---------
package : The imported package object. |
def profiles(self):
    """A list of all profiles on this web property. You may
    select a specific profile using its name, its id
    or an index.

    ```python
    property.profiles[0]
    property.profiles['9234823']
    property.profiles['marketing profile']
    ```
    """
    response = self.account.service.management().profiles().list(
        accountId=self.account.id,
        webPropertyId=self.id).execute()
    wrapped = [Profile(raw, self) for raw in response['items']]
    return addressable.List(wrapped, indices=['id', 'name'], insensitive=True)
select a specific profile using its name, its id
or an index.
```python
property.profiles[0]
property.profiles['9234823']
property.profiles['marketing profile']
``` |
def get_max_events_in_both_arrays(events_one, events_two):
    """Calculates the maximum count of events that exist in both arrays."""
    # The C helper requires contiguous arrays and an output buffer large
    # enough to hold every event from both inputs.
    first = np.ascontiguousarray(events_one)
    second = np.ascontiguousarray(events_two)
    result_buffer = np.empty(shape=(first.shape[0] + second.shape[0],), dtype=first.dtype)
    n_found = analysis_functions.get_max_events_in_both_arrays(first, second, result_buffer)
    return result_buffer[:n_found]
def merge_all_cells(cells):
    """Loop through list of cells and piece them together one by one.

    Parameters
    ----------
    cells : list of dashtable.data2rst.Cell

    Returns
    -------
    grid_table : str
        The final grid table
    """
    current = 0
    # Keep merging until a single cell remains.
    while len(cells) > 1:
        count = 0
        while count < len(cells):
            cell1 = cells[current]
            cell2 = cells[count]
            merge_direction = get_merge_direction(cell1, cell2)
            if not merge_direction == "NONE":
                merge_cells(cell1, cell2, merge_direction)
                # cell2 was absorbed into cell1; popping it shifts later
                # indices left, so pull `current` back when it sits past
                # the removed position.
                if current > count:
                    current -= 1
                cells.pop(count)
            else:
                count += 1
        # Advance to the next anchor cell, wrapping around to the start.
        current += 1
        if current >= len(cells):
            current = 0
    return cells[0].text
Parameters
----------
cells : list of dashtable.data2rst.Cell
Returns
-------
grid_table : str
The final grid table |
def Disks(self):
    """Return disks object associated with server (built lazily on first use).

    >>> clc.v2.Server("WA1BTDIX01").Disks()
    <clc.APIv2.disk.Disks object at 0x10feea190>
    """
    if not self.disks:
        self.disks = clc.v2.Disks(server=self,
                                  disks_lst=self.data['details']['disks'],
                                  session=self.session)
    return self.disks
>>> clc.v2.Server("WA1BTDIX01").Disks()
<clc.APIv2.disk.Disks object at 0x10feea190> |
def handle_initialize(self, data):
    """Initialize Tuner, including creating Bayesian optimization-based parametric models
    and search space formations.

    Parameters
    ----------
    data: search space
        search space of this experiment

    Raises
    ------
    ValueError
        Error: Search space is None
    """
    logger.info('start to handle_initialize')
    self.handle_update_search_space(data)
    # Guard clause: the config generator cannot be built without a search space.
    if not self.search_space:
        raise ValueError('Error: Search space is None')
    self.cg = CG_BOHB(configspace=self.search_space,
                      min_points_in_model=self.min_points_in_model,
                      top_n_percent=self.top_n_percent,
                      num_samples=self.num_samples,
                      random_fraction=self.random_fraction,
                      bandwidth_factor=self.bandwidth_factor,
                      min_bandwidth=self.min_bandwidth)
    self.generate_new_bracket()
    send(CommandType.Initialized, '')
and search space formations
Parameters
----------
data: search space
search space of this experiment
Raises
------
ValueError
Error: Search space is None |
def publish_proto_in_ipfs(self):
    """Publish proto files from ``self.args.protodir`` in IPFS and print the base58 hash."""
    ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir)
    self._printout(ipfs_hash_base58)
def trt_pmf(matrices):
    """Fold full disaggregation matrix to tectonic region type PMF.

    For each tectonic region type, the probability that at least one bin
    contributes is 1 - prod(1 - p) over all bins of its submatrix.

    :param matrices:
        a matrix with T submatrices
    :returns:
        an array of T probabilities one per each tectonic region type
    """
    # Vectorized with numpy.prod over each whole submatrix instead of a
    # 5-level nested Python comprehension; numerically identical and the
    # per-bin work happens in C.
    return numpy.array([1. - numpy.prod(1. - mat) for mat in matrices])
:param matrices:
a matrix with T submatrices
:returns:
an array of T probabilities one per each tectonic region type |
def from_json(cls, json):
    """Reverse of to_json: rebuild an instance from its JSON dict."""
    instance = cls(key_range.KeyRange.from_json(json["key_range"]),
                   model.QuerySpec.from_json(json["query_spec"]))
    raw_cursor = json["cursor"]
    # Only deserialize into a Cursor object when the payload says the
    # stored value was a cursor object; otherwise keep it verbatim.
    if raw_cursor and json["cursor_object"]:
        instance._cursor = datastore_query.Cursor.from_websafe_string(raw_cursor)
    else:
        instance._cursor = raw_cursor
    return instance
def allReadGroups(self):
    """Return an iterator over all read groups in the data repo."""
    group_sets = (rgs for ds in self.getDatasets()
                  for rgs in ds.getReadGroupSets())
    for rgs in group_sets:
        for read_group in rgs.getReadGroups():
            yield read_group
def summary_pairwise_indices(self):
    """ndarray containing tuples of pairwise indices for the column summary."""
    n_columns = self.values[0].t_stats.shape[1]
    # Object dtype so each slot can hold an arbitrary per-signal value.
    result = np.empty(n_columns, dtype=object)
    result[:] = [sig.summary_pairwise_indices for sig in self.values]
    return result
def check_gcdt_update():
    """Check whether a newer gcdt is available and output a warning."""
    try:
        installed, latest = get_package_versions('gcdt')
        if installed < latest:
            log.warn('Please consider an update to gcdt version: %s' % latest)
    except GracefulExit:
        # Deliberate shutdown requests must propagate untouched.
        raise
    except Exception:
        log.warn("PyPi appears to be down - we currently can't check for newer gcdt versions")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.