code stringlengths 51 2.34k | docstring stringlengths 11 171 |
|---|---|
def extract(filename_url_filelike_or_htmlstring):
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist) | An "improved" algorithm over the original eatiht algorithm |
def _check_required_settings(batches):
required_settings = [
'sawtooth.consensus.algorithm.name',
'sawtooth.consensus.algorithm.version']
for batch in batches:
for txn in batch.transactions:
txn_header = TransactionHeader()
txn_header.ParseFromString(txn.header)
if txn_header.family_name == 'sawtooth_settings':
settings_payload = SettingsPayload()
settings_payload.ParseFromString(txn.payload)
if settings_payload.action == SettingsPayload.PROPOSE:
proposal = SettingProposal()
proposal.ParseFromString(settings_payload.data)
if proposal.setting in required_settings:
required_settings.remove(proposal.setting)
if required_settings:
raise CliException(
'The following setting(s) are required at genesis, but were not '
'included in the genesis batches: {}'.format(required_settings)) | Ensure that all settings required at genesis are set. |
def _build_offset(offset, kwargs, default):
if offset is None:
if not kwargs:
return default
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") | Builds the offset argument for event rules. |
def action_update(self):
order = []
form = self.request.form
attachments = form.get("attachments", [])
for attachment in attachments:
values = dict(attachment)
uid = values.pop("UID")
obj = api.get_object_by_uid(uid)
if values.pop("delete", False):
self.delete_attachment(obj)
continue
order.append(uid)
obj.update(**values)
obj.reindexObject()
self.set_attachments_order(order)
    return self.request.response.redirect(self.context.absolute_url()) | Form action endpoint to update the attachments |
def _find_observable_paths(extra_files=None):
rv = set(
os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
for x in sys.path
)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, "__file__", None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv) | Finds all paths that should be observed. |
def align(doc):
validate_doc(doc)
def evaluator(indent, column, page_width, ribbon_width):
return Nest(column - indent, doc)
return contextual(evaluator) | Aligns each new line in ``doc`` with the first new line. |
def clean_ufo(path):
    """Remove stale UFO data at ``path``, as it may contain deleted glyphs."""
    is_ufo_dir = path.endswith(".ufo") and os.path.exists(path)
    if is_ufo_dir:
        shutil.rmtree(path)
def hmget(self, key, field, *fields, encoding=_NOTSET):
return self.execute(b'HMGET', key, field, *fields, encoding=encoding) | Get the values of all the given fields. |
def removeApplicationManifest(self, pchApplicationManifestFullPath):
fn = self.function_table.removeApplicationManifest
result = fn(pchApplicationManifestFullPath)
return result | Removes an application manifest from the list to load when building the list of installed applications. |
def learn(self, numEpochs, batchsize):
for epoch in range(numEpochs):
print('epoch %d' % epoch)
indexes = np.random.permutation(self.trainsize)
for i in range(0, self.trainsize, batchsize):
x = Variable(self.x_train[indexes[i: i + batchsize]])
t = Variable(self.y_train[indexes[i: i + batchsize]])
self.optimizer.update(self.model, x, t) | Train the classifier for a given number of epochs, with a given batchsize |
def _get_csv_cells_gen(self, line):
digest_types = self.digest_types
for j, value in enumerate(line):
if self.first_line:
digest_key = None
digest = lambda x: x.decode(self.encoding)
else:
try:
digest_key = digest_types[j]
except IndexError:
digest_key = digest_types[0]
digest = Digest(acceptable_types=[digest_key],
encoding=self.encoding)
try:
digest_res = digest(value)
if digest_res == "\b":
digest_res = None
elif digest_key is not types.CodeType:
digest_res = repr(digest_res)
except Exception:
digest_res = ""
yield digest_res | Generator of values in a csv line |
def from_json(self, value):
if value is None:
return None
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
if isinstance(value, six.text_type):
if value == "":
return None
try:
value = dateutil.parser.parse(value)
except (TypeError, ValueError):
raise ValueError("Could not parse {} as a date".format(value))
if not isinstance(value, datetime.datetime):
raise TypeError(
"Value should be loaded from a string, a datetime object or None, not {}".format(type(value))
)
if value.tzinfo is not None:
return value.astimezone(pytz.utc)
else:
return value.replace(tzinfo=pytz.utc) | Parse the date from an ISO-formatted date string, or None. |
def handle_starttag(self, tag, attrs):
if tag == 'a' and ( ('class', 'download-pdf') in attrs or ('id', 'download-pdf') in attrs ):
for attr in attrs:
if attr[0] == 'href':
self.download_link = 'http://www.nature.com' + attr[1] | PDF link handler; never gets explicitly called by user |
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0' | 1' if Daylight Savings Time, '0' otherwise. |
def _get_registerd_func(name_or_func):
if callable(name_or_func):
func = register_array_xcorr(name_or_func)
else:
func = XCOR_FUNCS[name_or_func or 'default']
assert callable(func), 'func is not callable'
if not hasattr(func, 'registered'):
func = register_array_xcorr(func)
return func | get a xcorr function from a str or callable. |
def stn(s, length, encoding, errors):
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL | Convert a string to a null-terminated bytes object. |
def getdrawings():
infos = Info.query.all()
sketches = [json.loads(info.contents) for info in infos]
return jsonify(drawings=sketches) | Get all the drawings. |
def make_prefix(api_version, manipulator, auth_type):
    """Build the configuration prefix string.

    The auth type is appended only when it is set and not ``'none'``.
    """
    prefix = "{}_{}".format(api_version, manipulator)
    if auth_type and auth_type != 'none':
        prefix = prefix + '_' + auth_type
    return prefix
def expandf(m, format):
_assert_expandable(format, True)
return _apply_replace_backrefs(m, format, flags=FORMAT) | Expand the string using the format replace pattern or function. |
def call_output(cmd, stdin=None, encoding_errors="replace", **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
stdout, stderr, retcode = [], [], None
while retcode is None:
if stdin is not None:
logger.log_prefix("<0 ", stdin.rstrip())
raw_out, raw_err = p.communicate(stdin)
stdin = None
out = raw_out.decode(get_encoding(sys.stdout), encoding_errors) if raw_out else ""
if out:
logger.log_prefix("1> ", out.rstrip())
stdout.append(out)
err = raw_err.decode(get_encoding(sys.stderr), encoding_errors) if raw_err else ""
if err:
logger.log_prefix("2> ", err.rstrip())
stderr.append(err)
retcode = p.poll()
return stdout, stderr, retcode | Run command and read output. |
def maybe_start_recording(tokens, index):
if tokens[index].type == TokenType.BeginInlineRST:
return _InlineRSTRecorder(index) | Return a new _InlineRSTRecorder when its time to record. |
def recent_articles(limit=10, exclude=None):
queryset = Article.objects.filter(published=True).order_by('-modified')
if exclude:
if hasattr(exclude, '__iter__'):
queryset = queryset.exclude(pk__in=exclude)
else:
queryset = queryset.exclude(pk=exclude)
return queryset | Returns list of latest article |
async def shutdown(self):
"Force stop the output stream, if there are more data to download, shutdown the connection"
if self.stream:
if not self.stream.dataeof and not self.stream.dataerror:
self.stream.close(self.scheduler)
await self.connection.shutdown()
else:
self.stream.close(self.scheduler)
self.stream = None | Force stop the output stream, if there are more data to download, shutdown the connection |
def _get_cmd(command, arguments):
if arguments is None:
arguments = []
if command.endswith(".py") or command.endswith(".pyw"):
return [sys.executable, command] + list(arguments)
else:
return [command] + list(arguments) | Merge command with arguments. |
def have_graph(name):
    """Return True if a graph with the given name exists."""
    return any(graph.name == name for graph in mestate.graphs)
def split_timesteps(data, consistent_abmn=False):
if has_multiple_timesteps(data):
grouped = data.groupby("timestep")
return [group[1] for group in grouped]
else:
return data | Split data into multiple timesteps. |
def make_stmt(stmt_cls, tf_agent, target_agent, pmid):
ev = Evidence(source_api='trrust', pmid=pmid)
return stmt_cls(deepcopy(tf_agent), deepcopy(target_agent),
evidence=[ev]) | Return a Statement based on its type, agents, and PMID. |
def make_module_reload_func(module_name=None, module_prefix='[???]', module=None):
module = _get_module(module_name, module, register=False)
if module_name is None:
module_name = str(module.__name__)
def rrr(verbose=True):
if not __RELOAD_OK__:
raise Exception('Reloading has been forced off')
try:
import imp
if verbose and not QUIET:
builtins.print('RELOAD: ' + str(module_prefix) + ' __name__=' + module_name)
imp.reload(module)
except Exception as ex:
print(ex)
print('%s Failed to reload' % module_prefix)
raise
return rrr | Injects dynamic module reloading |
def triangle(self, verts=True, lines=True):
tf = vtk.vtkTriangleFilter()
tf.SetPassLines(lines)
tf.SetPassVerts(verts)
tf.SetInputData(self.poly)
tf.Update()
return self.updateMesh(tf.GetOutput()) | Converts actor polygons and strips to triangles. |
def regxy(pattern, response, supress_regex, custom):
    """Extract strings from ``response`` matching a user-supplied regex.

    Each match is logged via ``verb`` and added to the ``custom`` set.
    NOTE(review): rebinding ``supress_regex`` only changes the local name;
    callers never observe it — confirm whether a return value was intended.
    """
    try:
        for match in re.findall(r'%s' % pattern, response):
            verb('Custom regex', match)
            custom.add(match)
    except re.error:
        # Fixed: was a bare `except:`, which also swallowed unrelated errors
        # (even KeyboardInterrupt). Only invalid user patterns belong here.
        supress_regex = True
def tricu(P, k=0):
tri = numpy.sum(numpy.mgrid[[slice(0,_,1) for _ in P.shape]], 0)
tri = tri<len(tri) + k
if isinstance(P, Poly):
A = P.A.copy()
B = {}
for key in P.keys:
B[key] = A[key]*tri
return Poly(B, shape=P.shape, dim=P.dim, dtype=P.dtype)
out = P*tri
return out | Cross-diagonal upper triangle. |
def filespecs(self):
filespecs = {'globs': self._file_globs}
exclude_filespecs = self._exclude_filespecs
if exclude_filespecs:
filespecs['exclude'] = exclude_filespecs
return filespecs | Return a filespecs dict representing both globs and excludes. |
def export_project(self):
output = copy.deepcopy(self.generated_project)
data_for_make = self.workspace.copy()
self.exporter.process_data_for_makefile(data_for_make)
output['path'], output['files']['makefile'] = self.gen_file_jinja('makefile_gcc.tmpl', data_for_make, 'Makefile', data_for_make['output_dir']['path'])
expanded_dic = self.workspace.copy()
expanded_dic['rel_path'] = data_for_make['output_dir']['rel_path']
groups = self._get_groups(expanded_dic)
expanded_dic['groups'] = {}
for group in groups:
expanded_dic['groups'][group] = []
self._iterate(self.workspace, expanded_dic)
project_path, output['files']['cproj'] = self.gen_file_jinja(
'eclipse_makefile.cproject.tmpl', expanded_dic, '.cproject', data_for_make['output_dir']['path'])
project_path, output['files']['proj_file'] = self.gen_file_jinja(
'eclipse.project.tmpl', expanded_dic, '.project', data_for_make['output_dir']['path'])
return output | Processes groups and misc options specific for eclipse, and run generator |
def word_count(ctx, text, by_spaces=False):
text = conversions.to_string(text, ctx)
by_spaces = conversions.to_boolean(by_spaces, ctx)
return len(__get_words(text, by_spaces)) | Returns the number of words in the given text string |
def heating_values(self):
heating_dict = {
'level': self.heating_level,
'target': self.target_heating_level,
'active': self.now_heating,
'remaining': self.heating_remaining,
'last_seen': self.last_seen,
}
return heating_dict | Return a dict of all the current heating values. |
def _transform_snapshot(
raw_snapshot: str,
storage: SQLiteStorage,
cache: BlockHashCache,
) -> str:
snapshot = json.loads(raw_snapshot)
block_number = int(snapshot['block_number'])
snapshot['block_hash'] = cache.get(block_number)
pending_transactions = snapshot['pending_transactions']
new_pending_transactions = []
for transaction_data in pending_transactions:
if 'raiden.transfer.events.ContractSend' not in transaction_data['_type']:
raise InvalidDBData(
"Error during v18 -> v19 upgrade. Chain state's pending transactions "
"should only contain ContractSend transactions",
)
event_record = storage.get_latest_event_by_data_field(
filters=transaction_data,
)
if not event_record.data:
raise InvalidDBData(
'Error during v18 -> v19 upgrade. Could not find a database event '
'table entry for a pending transaction.',
)
event_record_data = json.loads(event_record.data)
transaction_data['triggered_by_block_hash'] = event_record_data['triggered_by_block_hash']
new_pending_transactions.append(transaction_data)
snapshot['pending_transactions'] = new_pending_transactions
return json.dumps(snapshot) | Upgrades a single snapshot by adding the blockhash to it and to any pending transactions |
def _score_macro_average(self, n_classes):
all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)]))
avg_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
avg_tpr += interp(all_fpr, self.fpr[i], self.tpr[i])
avg_tpr /= n_classes
self.fpr[MACRO] = all_fpr
self.tpr[MACRO] = avg_tpr
self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO]) | Compute the macro average scores for the ROCAUC curves. |
def main(argv=None):
arguments = cli_common(__doc__, argv=argv)
es_export = ESExporter(arguments['CAMPAIGN-DIR'], arguments['--es'])
es_export.export()
if argv is not None:
return es_export | ben-elastic entry point |
def errored_tasks(self):
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks) | List of errored tasks. |
def _validate_configuration(self):
if not self.access_token:
raise ConfigurationException(
'You will need to initialize a client with an Access Token'
)
if not self.api_url:
raise ConfigurationException(
'The client configuration needs to contain an API URL'
)
if not self.default_locale:
raise ConfigurationException(
'The client configuration needs to contain a Default Locale'
)
if not self.api_version or self.api_version < 1:
raise ConfigurationException(
'The API Version must be a positive number'
) | Validates that required parameters are present. |
def memory_usage_psutil():
process = psutil.Process(os.getpid())
mem = process.memory_info()[0] / float(2 ** 20)
mem_vms = process.memory_info()[1] / float(2 ** 20)
return mem, mem_vms | Return the current process memory usage in MB. |
def del_key(self, k):
    """Delete key ``k`` from the proxy, plus any configuration stored for it.

    Raises:
        KeyError: if ``k`` is not present in the mirror.
    """
    if k not in self.mirror:
        raise KeyError
    del self.proxy[k]
    if '_config' not in self.proxy:
        return
    if k in self.proxy['_config']:
        del self.proxy['_config'][k]
def append_text(self, txt):
    """Append ``txt`` to the end of the file at ``self.fullname``."""
    with open(self.fullname, "a") as out_file:
        out_file.write(txt)
def main(argv=None):
arguments = cli_common(__doc__, argv=argv)
driver = CampaignDriver(arguments['CAMPAIGN-DIR'], expandcampvars=False)
driver(no_exec=True)
if argv is not None:
return driver | ben-umb entry point |
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
logger.debug("Input file: %s", filename)
with open(filename, "r") as stream:
structure = yaml.safe_load(stream)
return structure | Read and parse yaml file. |
def convert_all(self):
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
self.convert_by_record(url_record) | Convert all links in URL table. |
def process_view(self, request, view_func, view_args, view_kwargs):
try:
if ignore_path(request.path):
TrackedRequest.instance().tag("ignore_transaction", True)
view_name = request.resolver_match._func_path
span = TrackedRequest.instance().current_span()
if span is not None:
span.operation = "Controller/" + view_name
Context.add("path", request.path)
Context.add("user_ip", RemoteIp.lookup_from_headers(request.META))
if getattr(request, "user", None) is not None:
Context.add("username", request.user.get_username())
except Exception:
pass | Capture details about the view_func that is about to execute |
def stop(self):
if not self.running.wait(0.2):
return
self._logger.debug('runner disabled: %s', self)
with self._lock:
self.running.clear()
self._stopped.wait() | Stop execution of all current and future payloads |
def add(self, sensor):
if isinstance(sensor, (list, tuple)):
for sss in sensor:
self.add(sss)
return
if not isinstance(sensor, Sensor):
raise TypeError("pysma.Sensor expected")
if sensor.name in self:
old = self[sensor.name]
self.__s.remove(old)
_LOGGER.warning("Replacing sensor %s with %s", old, sensor)
if sensor.key in self:
_LOGGER.warning("Duplicate SMA sensor key %s", sensor.key)
self.__s.append(sensor) | Add a sensor, warning if it exists. |
def execute_locally(self):
self.make_script()
with open(self.kwargs['out_file'], 'w') as handle:
sh.python(self.script_path, _out=handle, _err=handle) | Runs the equivalent command locally in a blocking way. |
def load_image(name, n, m=None, gpu=None, square=None):
if m is None:
m = n
if gpu is None:
gpu = 0
if square is None:
square = 0
command = ('Shearlab.load_image("{}", {}, {}, {}, {})'.format(name,
n, m, gpu, square))
return j.eval(command) | Function to load images with certain size. |
def _is_device_active(device):
cmd = ['dmsetup', 'info', device]
dmsetup_info = util.subp(cmd)
for dm_line in dmsetup_info.stdout.split("\n"):
line = dm_line.split(':')
if ('State' in line[0].strip()) and ('ACTIVE' in line[1].strip()):
return True
return False | Checks dmsetup to see if a device is already active |
def token_meta(opt):
    """Generate metadata for a token.

    Base metadata (source, operation, hostname, unix user) cannot be
    overridden by the user-supplied ``opt.metadata`` string, which is a
    comma-separated list of ``key=value`` pairs.
    """
    meta = {
        'via': 'aomi',
        'operation': opt.operation,
        'hostname': socket.gethostname()
    }
    if 'USER' in os.environ:
        meta['unix_user'] = os.environ['USER']
    if opt.metadata:
        for meta_bit in opt.metadata.split(','):
            # Fixed: split on the first '=' only, so values may themselves
            # contain '=' (e.g. base64 payloads) without raising ValueError.
            key, value = meta_bit.split('=', 1)
            if key not in meta:
                meta[key] = value
    for key, value in meta.items():
        LOG.debug("Token metadata %s %s", key, value)
    return meta
def monthly_clear_sky_conditions(self):
    """A list of 12 monthly clear sky conditions used on the design days.

    Falls back to the original clear sky model when monthly tau values
    are unavailable; otherwise builds revised conditions from the beam
    and diffuse tau series.
    """
    # Fixed: `x is []` is an identity test against a fresh list and is
    # always False, so the fallback branch was unreachable. Use emptiness
    # checks so missing tau data actually triggers the fallback.
    if not self._monthly_tau_diffuse or not self._monthly_tau_beam:
        return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
    return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in zip(
        list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)]
def _interface_exists(self, interface):
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
itfcs_raw = parse.find_lines("^interface " + interface)
return len(itfcs_raw) > 0 | Check whether interface exists. |
def maybe_download_and_extract(dest_directory, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
cifar_foldername = 'cifar-10-batches-py'
else:
cifar_foldername = 'cifar-100-python'
if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
return
else:
DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
filename = DATA_URL[0].split('/')[-1]
filepath = os.path.join(dest_directory, filename)
download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])
tarfile.open(filepath, 'r:gz').extractall(dest_directory) | Download and extract the tarball from Alex's website. Copied from tensorflow example |
def _get_or_create_service_key(self):
keys = self.service._get_service_keys(self.name)
for key in keys['resources']:
if key['entity']['name'] == self.service_name:
return self.service.get_service_key(self.name,
self.service_name)
self.service.create_service_key(self.name, self.service_name)
return self.service.get_service_key(self.name, self.service_name) | Get a service key or create one if needed. |
def main():
global args
args = parse_args()
if not args:
return 1
state = MyState(args)
for path in args.paths:
if os.path.isdir(path):
walk_dir(path, args, state)
else:
safe_process_files(os.path.dirname(path), [os.path.basename(path)], args, state)
if state.should_quit():
break
if state.failed_files:
sys.stderr.write("error: %i/%i AEADs failed\n" % (len(state.failed_files), state.file_count))
return 1
if args.debug:
sys.stderr.write("Successfully processed %i AEADs\n" % (state.file_count)) | Main function when running as a program. |
def try_fix_dataset(dataset):
if isinstance(dataset, numpy.ndarray):
if len(dataset.shape) == 3:
if dataset.shape[-1] == 3:
return dataset.transpose((2, 0, 1))
elif len(dataset.shape) == 4:
if dataset.shape[-1] == 3:
return dataset.transpose((0, 3, 1, 2))
return dataset
for i, d in enumerate(dataset):
if not isinstance(d, numpy.ndarray):
return dataset
if not (len(d.shape) == 3 and d.shape[-1] == 3):
return dataset
dataset[i] = d.transpose()
return dataset | Transpose the image data if it's in PIL format. |
def _print_napps(cls, napp_list):
mgr = NAppsManager()
enabled = mgr.get_enabled()
installed = mgr.get_installed()
napps = []
for napp, desc in sorted(napp_list):
status = 'i' if napp in installed else '-'
status += 'e' if napp in enabled else '-'
status = '[{}]'.format(status)
name = '{}/{}'.format(*napp)
napps.append((status, name, desc))
cls.print_napps(napps) | Format the NApp list to be printed. |
def lineage(self):
    """Return all nodes from the root down to (and including) this node."""
    if self.parent:
        chain = self.parent.lineage()
        chain.append(self)
        return chain
    return [self]
def _load_info(self):
if self._info is None:
try:
self._info = self._api.tables_get(self._name_parts)
except Exception as e:
raise e | Loads metadata about this table. |
def standard_exc_info(self):
tb = self.frames[0]
if type(tb) is not TracebackType:
tb = tb.tb
return self.exc_type, self.exc_value, tb | Standard python exc_info for re-raising |
def list_team_codes():
cleanlist = sorted(TEAM_DATA, key=lambda k: (k["league"]["name"], k["code"]))
leaguenames = sorted(list(set([team["league"]["name"] for team in cleanlist])))
for league in leaguenames:
teams = [team for team in cleanlist if team["league"]["name"] == league]
click.secho(league, fg="green", bold=True)
for team in teams:
if team["code"] != "null":
click.secho(u"{0}: {1}".format(team["code"], team["name"]), fg="yellow")
click.secho("") | List team names in alphabetical order of team ID, per league. |
def _encode_personality(self, personality):
if personality is None:
personality = []
else:
personality = utils.coerce_to_list(personality)
for pfile in personality:
if "contents" in pfile:
pfile["contents"] = base64.b64encode(pfile["contents"])
return personality | Personality files must be base64-encoded before transmitting. |
def GetReportData(self, get_report_args, token):
report = rdf_report_plugins.ApiReportData(
representation_type=rdf_report_plugins.ApiReportData.RepresentationType
.PIE_CHART)
graph_series = client_report_utils.FetchMostRecentGraphSeries(
get_report_args.client_label,
rdf_stats.ClientGraphSeries.ReportType.OS_TYPE,
token=token)
if graph_series is not None:
for graph in graph_series.graphs:
if "%s day" % self.__class__.ACTIVE_DAYS in graph.title:
for sample in graph:
report.pie_chart.data.Append(
rdf_report_plugins.ApiReportDataPoint1D(
label=sample.label, x=sample.y_value))
break
report.pie_chart.data = sorted(
report.pie_chart.data, key=lambda point: point.label)
return report | Extract only the operating system type from the active histogram. |
def findfirst(f, coll):
    """Return the first element of ``coll`` for which ``f`` is falsy, else None.

    NOTE(review): despite the historical wording ("first occurrence matching
    f"), ``dropwhile`` skips elements *while* ``f`` holds — confirm callers
    expect the first non-matching element.
    """
    remaining = list(dropwhile(f, coll))
    if remaining:
        return remaining[0]
    return None
def getSubOrder(existing):
    """Alpha sort ids by the full chain of parent labels.

    ``existing`` maps id -> {'rec': {'label': ...}, 'sc': parent_id}.
    Each id's sort key is the list of labels from the root down to it;
    the ids are returned sorted by that chain.
    """
    # Fixed: removed the unused `alpha` computation (which also crashed with
    # IndexError on an empty mapping) and a leftover debug print().
    depths = {}

    def label_chain(id_):
        # Memoized chain of labels from the root down to id_.
        if id_ in depths:
            return depths[id_]
        if id_ not in existing:
            # Unknown parents (roots) contribute an empty label.
            return ['']
        depths[id_] = (label_chain(existing[id_]['sc'])
                       + [existing[id_]['rec']['label']])
        return depths[id_]

    for id_ in existing:
        label_chain(id_)
    return sorted(depths, key=depths.get)
def reading_order(e1, e2):
b1 = e1.bbox
b2 = e2.bbox
if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]):
return float_cmp(b1[x0], b2[x0])
return float_cmp(b1[y0], b2[y0]) | A comparator to sort bboxes from top to bottom, left to right |
def indicator_constraints(self,x):
x = np.atleast_2d(x)
I_x = np.ones((x.shape[0],1))
if self.constraints is not None:
for d in self.constraints:
try:
exec('constraint = lambda x:' + d['constraint'], globals())
ind_x = (constraint(x) <= 0) * 1
I_x *= ind_x.reshape(x.shape[0],1)
except:
print('Fail to compile the constraint: ' + str(d))
raise
return I_x | Returns array of ones and zeros indicating if x is within the constraints |
def submit_row(context):
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
} | Displays the row of buttons for delete and save. |
def populate_from_settings(self):
settings_aliases = settings.THUMBNAIL_ALIASES
if settings_aliases:
for target, aliases in settings_aliases.items():
target_aliases = self._aliases.setdefault(target, {})
target_aliases.update(aliases) | Populate the aliases from the ``THUMBNAIL_ALIASES`` setting. |
def decipher_block (self, state):
if len(state) != 16:
Log.error(u"Expecting block of 16")
self._add_round_key(state, self._Nr)
for i in range(self._Nr - 1, 0, -1):
self._i_shift_rows(state)
self._i_sub_bytes(state)
self._add_round_key(state, i)
self._mix_columns(state, True)
self._i_shift_rows(state)
self._i_sub_bytes(state)
self._add_round_key(state, 0)
return state | Perform AES block decipher on input |
def dist_to(self, p2):
    """Return the Euclidean distance from this point to ``p2``."""
    dx = self.x - p2.x
    dy = self.y - p2.y
    return math.hypot(dx, dy)
def serialize_on_parent(
self,
parent,
value,
state
):
if value is None and self.required:
state.raise_error(MissingValue, self._missing_value_message(parent))
if not value and self.omit_empty:
return
element = _element_get_or_add_from_parent(parent, self.element_path)
self._serialize(element, value, state) | Serialize the value and add it to the parent element. |
async def _remote_close(self, exc=None):
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
log.info("close session: %s", self.id)
self.state = STATE_CLOSING
if exc is not None:
self.exception = exc
self.interrupted = True
try:
await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
except Exception:
log.exception("Exception in close handler.") | close session from remote. |
def add_str(self,oid,value,label=None):
self.add_oid_entry(oid,'STRING',value,label=label) | Short helper to add a string value to the MIB subtree. |
def recruit(self):
if self.networks(full=False):
self.recruiter.recruit(n=1)
else:
self.recruiter.close_recruitment() | Recruit one participant at a time until all networks are full. |
def DeleteRequest(self, request):
self.requests_to_delete.append(request)
if request and request.HasField("request"):
self.DeQueueClientRequest(request.request)
data_store.DB.DeleteRequest(request) | Deletes the request and all its responses from the flow state queue. |
def AddChild(self, path_info):
if self._path_type != path_info.path_type:
message = "Incompatible path types: `%s` and `%s`"
raise ValueError(message % (self._path_type, path_info.path_type))
if self._components != path_info.components[:-1]:
message = "Incompatible path components, expected `%s` but got `%s`"
raise ValueError(message % (self._components, path_info.components[:-1]))
self._children.add(path_info.GetPathID()) | Makes the path aware of some child. |
def _get_destination(script_parts):
for part in script_parts:
if part not in {'ln', '-s', '--symbolic'} and os.path.exists(part):
return part | When arguments order is wrong first argument will be destination. |
def _fetch_app_role_token(vault_url, role_id, secret_id):
url = _url_joiner(vault_url, 'v1/auth/approle/login')
resp = requests.post(url, data={'role_id': role_id, 'secret_id': secret_id})
resp.raise_for_status()
data = resp.json()
if data.get('errors'):
raise VaultException(u'Error fetching Vault token: {}'.format(data['errors']))
return data['auth']['client_token'] | Get a Vault token, using the RoleID and SecretID |
def put_abs (self, r, c, ch):
r = constrain (r, 1, self.rows)
c = constrain (c, 1, self.cols)
if isinstance(ch, bytes):
ch = self._decode(ch)[0]
else:
ch = ch[0]
self.w[r-1][c-1] = ch | Screen array starts at 1 index. |
def simple_filter(self, key, value):
"Search keys whose values match with the searched values"
searched = {key: value}
return set([k for k, v in self.data.items() if
intersect(searched, v) == searched]) | Search keys whose values match with the searched values |
def _bypass_non_decrypted_field_exception(self):
if getattr(settings, 'PGPFIELDS_BYPASS_NON_DECRYPTED_FIELD_EXCEPTION', False):
return True
if getattr(settings, 'PGPFIELDS_BYPASS_FIELD_EXCEPTION_IN_MIGRATIONS', False):
if {'manage.py', 'migrate'}.issubset(sys.argv):
return True
return False | Bypass exception if some field was not decrypted. |
def upload_files_in_folder(self, dirname, fnames):
if utils.match_pattern(dirname, self.ignore):
return False
good_names = (nm for nm in fnames
if not utils.match_pattern(nm, self.ignore))
for fname in good_names:
if self.client._should_abort_folder_upload(self.upload_key):
return
full_path = os.path.join(dirname, fname)
obj_name = os.path.relpath(full_path, self.root_folder)
obj_size = os.stat(full_path).st_size
self.client.upload_file(self.container, full_path,
obj_name=obj_name, return_none=True, ttl=self.ttl)
self.client._update_progress(self.upload_key, obj_size) | Handles the iteration across files within a folder. |
def min_order_amount(self) -> Money:
return self._fetch('minimum order amount', self.market.code)(self._min_order_amount)() | Minimum amount to place an order. |
def nocache(func):
def new_func(*args, **kwargs):
resp = make_response(func(*args, **kwargs))
resp.cache_control.no_cache = True
return resp
return update_wrapper(new_func, func) | Stop caching for pages wrapped in nocache decorator. |
def schedule_hourly():
if not config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
logging.info('Scheduled email reports not enabled in config')
return
resolution = config.get('EMAIL_REPORTS_CRON_RESOLUTION', 0) * 60
start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)
schedule_window(ScheduleType.dashboard.value, start_at, stop_at, resolution)
schedule_window(ScheduleType.slice.value, start_at, stop_at, resolution) | Celery beat job meant to be invoked hourly |
def change_speed(body, speed=1):
    """Change the voice speed of the wave body.

    Resamples by repeating (speed > 1) or skipping (speed < 1) samples;
    returns ``body`` unchanged when speed == 1, else a new bytearray.
    """
    if speed == 1:
        return body
    out_len = int(len(body) * speed)
    out = bytearray(out_len)
    cursor = 0
    for sample in body:
        write_at = int(cursor)
        stop = int(cursor + speed)
        while write_at < stop and write_at < out_len:
            out[write_at] = sample
            write_at += 1
        cursor += speed
    return out
def convert_to_node(instance, xml_node: XmlNode, node_globals: InheritedDict = None)\
-> InstanceNode:
return InstanceNode(instance, xml_node, node_globals) | Wraps passed instance with InstanceNode |
def check_valid_varname(varname,
custom_units,
custom_structs,
constants,
pos,
error_prefix="Variable name invalid.",
exc=None):
exc = VariableDeclarationException if exc is None else exc
valid_varname, msg = is_varname_valid(varname, custom_units, custom_structs, constants)
if not valid_varname:
raise exc(error_prefix + msg, pos)
return True | Handle invalid variable names |
def _read_python_source(self, filename):
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding | Do our best to decode a Python source file correctly. |
def toBCD(n):
    """Convert the non-negative integer ``n`` into Binary Coded Decimal.

    Each decimal digit of ``n`` occupies one 4-bit nibble of the result,
    e.g. 12 -> 0x12.
    """
    bcd = 0
    shift = 0
    while True:
        n, digit = divmod(n, 10)
        bcd |= digit << shift
        # Fixed: was `if n is 0` — identity comparison on an int is
        # implementation-defined (and a SyntaxWarning on Python 3.8+).
        if n == 0:
            break
        shift += 4
    return bcd
def types(self):
r = requests.get(self.evaluator_url + 'types')
r.raise_for_status()
return r.json() | List of the known event types |
def chain_user_names(users, exclude_user, truncate=35):
if not users or not isinstance(exclude_user, get_user_model()):
return ''
return truncatechars(
', '.join(u'{}'.format(u) for u in users.exclude(pk=exclude_user.pk)),
truncate) | Tag to return a truncated chain of user names. |
def threshold(np, acc, stream_raster, threshold=100., workingdir=None,
mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
fname = TauDEM.func_name('threshold')
return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
{'-ssa': acc}, workingdir,
{'-thresh': threshold},
{'-src': stream_raster},
{'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
{'logfile': log_file, 'runtimefile': runtime_file}) | Run threshold for stream raster |
def update_field_forward_refs(field: 'Field', globalns: Any, localns: Any) -> None:
if type(field.type_) == ForwardRef:
field.type_ = field.type_._evaluate(globalns, localns or None)
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns) | Try to update ForwardRefs on fields based on this Field, globalns and localns. |
def _get_vcpu_field_and_address(self, field_name, x, y, p):
vcpu_struct = self.structs[b"vcpu"]
field = vcpu_struct[six.b(field_name)]
address = (self.read_struct_field("sv", "vcpu_base", x, y) +
vcpu_struct.size * p) + field.offset
pack_chars = b"<" + field.pack_chars
return field, address, pack_chars | Get the field and address for a VCPU struct field. |
def extract_version_from_filename(filename):
    """Extract the version number from an sdist filename.

    Strips the directory, the outer extension, and a trailing ``.tar``
    (for ``.tar.gz``-style names), then returns everything after the
    first ``-`` in what remains.
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    if stem.endswith('.tar'):
        stem = os.path.splitext(stem)[0]
    return stem.partition('-')[2]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.