text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def _forwardImplementation(self, inbuf, outbuf):
    """Choose an action with probability proportional to its action value.

    The module's action values are normalised into a probability
    distribution, an action index is drawn from it, and the result is
    written into ``outbuf``. ``inbuf`` is unused here.
    """
    assert self.module
    values = self.module.getActionValues(0)
    # Normalise the propensities so they form a probability distribution.
    distribution = values / sum(values)
    chosen = eventGenerator(distribution)
    # chosen = drawIndex(distribution)
    outbuf[:] = scipy.array([chosen])
"def",
"_forwardImplementation",
"(",
"self",
",",
"inbuf",
",",
"outbuf",
")",
":",
"assert",
"self",
".",
"module",
"propensities",
"=",
"self",
".",
"module",
".",
"getActionValues",
"(",
"0",
")",
"summedProps",
"=",
"sum",
"(",
"propensities",
")",
"p... | 28.571429 | 15.357143 |
def metadata(self):
    """Retrieves metadata about the bucket.

    Returns:
      A BucketMetadata instance with information about this bucket, or
      None if no information could be retrieved.
    Raises:
      Exception if there was an error requesting the bucket's metadata.
    """
    if self._info is None:
        # Fetch lazily and cache. The previous try/except merely re-raised
        # the caught exception (resetting its traceback origin), so let any
        # error from the API call propagate naturally instead.
        self._info = self._api.buckets_get(self._name)
    return BucketMetadata(self._info) if self._info else None
"def",
"metadata",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info",
"is",
"None",
":",
"try",
":",
"self",
".",
"_info",
"=",
"self",
".",
"_api",
".",
"buckets_get",
"(",
"self",
".",
"_name",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",... | 28.266667 | 22.133333 |
def get_election(self, row, race):
    """
    Gets the Election object for the given row of election results.
    Depends on knowing the Race object.
    If this is the presidential election, this will determine the
    Division attached to the election based on the row's statename.
    This function depends on knowing the Race object from `get_race`.
    """
    day = election.ElectionDay.objects.get(date=row["electiondate"])
    # Map the AP race type id onto a party, when it implies one.
    race_type_id = row["racetypeid"]
    if race_type_id in ["D", "E"]:
        party = government.Party.objects.get(ap_code="Dem")
    elif race_type_id in ["R", "S"]:
        party = government.Party.objects.get(ap_code="GOP")
    else:
        party = None
    if row["racetype"] == "Runoff" and party:
        # Primary runoffs get (or lazily create) their own election type.
        runoff_type, _ = election.ElectionType.objects.get_or_create(
            slug=election.ElectionType.PRIMARY_RUNOFF,
            label="Primary Runoff",
            number_of_winners=1,
        )
        return election.Election.objects.get_or_create(
            election_type=runoff_type,
            election_day=day,
            division=race.office.division,
            race=race,
            party=party,
        )[0]
    try:
        return election.Election.objects.get(
            election_day=day,
            division=race.office.division,
            race=race,
            party=party,
        )
    except ObjectDoesNotExist:
        print(
            "Could not find election for {0} {1} {2}".format(
                race, row["party"], row["last"]
            )
        )
        return None
"def",
"get_election",
"(",
"self",
",",
"row",
",",
"race",
")",
":",
"election_day",
"=",
"election",
".",
"ElectionDay",
".",
"objects",
".",
"get",
"(",
"date",
"=",
"row",
"[",
"\"electiondate\"",
"]",
")",
"if",
"row",
"[",
"\"racetypeid\"",
"]",
... | 34.06 | 18.38 |
def count_words(pattern):
    """
    Count the number of words in a pattern as well as the total length of those words
    :param pattern: The pattern to parse
    :type pattern: str
    :return: The word count first, then the total length of all words
    :rtype : tuple of (int, int)
    """
    # A "word" is a \w+ run whose boundaries are not adjacent to ()[]| chars.
    matcher = re.compile(r'(\b(?<![\(\)\[\]\|])\w\w*\b(?![\(\)\[\]\|]))', re.IGNORECASE)
    found = matcher.findall(pattern)
    return len(found), sum(map(len, found))
"def",
"count_words",
"(",
"pattern",
")",
":",
"word_pattern",
"=",
"re",
".",
"compile",
"(",
"r'(\\b(?<![\\(\\)\\[\\]\\|])\\w\\w*\\b(?![\\(\\)\\[\\]\\|]))'",
",",
"re",
".",
"IGNORECASE",
")",
"words",
"=",
"word_pattern",
".",
"findall",
"(",
"pattern",
")",
"... | 35.9375 | 20.3125 |
def set_col_min_width(self, x: int, min_width: int):
    """Register ``min_width`` as the minimum block width for column ``x``.

    Raises:
        IndexError: if the column coordinate ``x`` is negative.
    """
    if x < 0:
        raise IndexError('x < 0')
    self._min_widths[x] = min_width
"def",
"set_col_min_width",
"(",
"self",
",",
"x",
":",
"int",
",",
"min_width",
":",
"int",
")",
":",
"if",
"x",
"<",
"0",
":",
"raise",
"IndexError",
"(",
"'x < 0'",
")",
"self",
".",
"_min_widths",
"[",
"x",
"]",
"=",
"min_width"
] | 44.6 | 7.8 |
def mappability(args):
    """
    %prog mappability reference.fasta
    Generate 50mer mappability for reference genome. Commands are based on gem
    mapper. See instructions:
    <https://github.com/xuefzhao/Reference.Mappability>
    """
    p = OptionParser(mappability.__doc__)
    p.add_option("--mer", default=50, type="int", help="User mer size")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    ref, = args
    K = opts.mer
    pf = ref.rsplit(".", 1)[0]
    # Derive every intermediate file name up front.
    gem = pf + ".gem"
    mer = pf + ".{}mer".format(K)
    mapb = mer + ".mappability"
    wig = mer + ".wig"
    bw = mer + ".bw"
    bg = mer + ".bedGraph"
    merged = mer + ".filtered-1.merge.bed"
    # Each pipeline step is (input, output, command); they are wired into
    # the make pipeline in dependency order.
    steps = [
        (ref, gem, "gem-indexer -i {} -o {}".format(ref, pf)),
        (gem, mapb,
         "gem-mappability -I {} -l {} -o {} -T {}".format(gem, K, mer, opts.cpus)),
        (mapb, wig, "gem-2-wig -I {} -i {} -o {}".format(gem, mapb, mer)),
        (wig, bw, "wigToBigWig {} {}.sizes {}".format(wig, mer, bw)),
        (bw, bg, "bigWigToBedGraph {} {}".format(bw, bg)),
        (bg, merged, "python -m jcvi.formats.bed filterbedgraph {} 1".format(bg)),
    ]
    mm = MakeManager()
    for infile, outfile, cmd in steps:
        mm.add(infile, outfile, cmd)
    mm.write()
"def",
"mappability",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"mappability",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--mer\"",
",",
"default",
"=",
"50",
",",
"type",
"=",
"\"int\"",
",",
"help",
"=",
"\"User mer size\"",
")",
... | 26.833333 | 19.958333 |
def tpot_driver(args):
    """Perform a TPOT run."""
    if args.VERBOSITY >= 2:
        _print_args(args)
    dataset = _read_data_file(args)
    features = dataset.drop(args.TARGET_NAME, axis=1)
    training_features, testing_features, training_target, testing_target = \
        train_test_split(features, dataset[args.TARGET_NAME], random_state=args.RANDOM_STATE)
    # Classification and regression share the same driver; only the
    # estimator class differs.
    estimator_class = TPOTClassifier if args.TPOT_MODE == 'classification' else TPOTRegressor
    scoring_func = load_scoring_function(args.SCORING_FN)
    # Map every CLI option onto the matching TPOT constructor argument.
    estimator = estimator_class(
        generations=args.GENERATIONS,
        population_size=args.POPULATION_SIZE,
        offspring_size=args.OFFSPRING_SIZE,
        mutation_rate=args.MUTATION_RATE,
        crossover_rate=args.CROSSOVER_RATE,
        cv=args.NUM_CV_FOLDS,
        subsample=args.SUBSAMPLE,
        n_jobs=args.NUM_JOBS,
        scoring=scoring_func,
        max_time_mins=args.MAX_TIME_MINS,
        max_eval_time_mins=args.MAX_EVAL_MINS,
        random_state=args.RANDOM_STATE,
        config_dict=args.CONFIG_FILE,
        template=args.TEMPLATE,
        memory=args.MEMORY,
        periodic_checkpoint_folder=args.CHECKPOINT_FOLDER,
        early_stop=args.EARLY_STOP,
        verbosity=args.VERBOSITY,
        disable_update_check=args.DISABLE_UPDATE_CHECK
    )
    estimator.fit(training_features, training_target)
    if args.VERBOSITY in [1, 2] and estimator._optimized_pipeline:
        training_score = max(x.wvalues[1] for x in estimator._pareto_front.keys)
        print('\nTraining score: {}'.format(training_score))
        print('Holdout score: {}'.format(estimator.score(testing_features, testing_target)))
    elif args.VERBOSITY >= 3 and estimator._pareto_front:
        print('Final Pareto front testing scores:')
        pipelines = zip(estimator._pareto_front.items, reversed(estimator._pareto_front.keys))
        for pipeline, pipeline_scores in pipelines:
            estimator._fitted_pipeline = estimator.pareto_front_fitted_pipelines_[str(pipeline)]
            print('{TRAIN_SCORE}\t{TEST_SCORE}\t{PIPELINE}'.format(
                TRAIN_SCORE=int(pipeline_scores.wvalues[0]),
                TEST_SCORE=estimator.score(testing_features, testing_target),
                PIPELINE=pipeline
            )
            )
    if args.OUTPUT_FILE:
        estimator.export(args.OUTPUT_FILE)
"def",
"tpot_driver",
"(",
"args",
")",
":",
"if",
"args",
".",
"VERBOSITY",
">=",
"2",
":",
"_print_args",
"(",
"args",
")",
"input_data",
"=",
"_read_data_file",
"(",
"args",
")",
"features",
"=",
"input_data",
".",
"drop",
"(",
"args",
".",
"TARGET_NA... | 39.135593 | 21.288136 |
def get_nodes(self, coord, coords):
    """Get the variables containing the definition of the nodes
    Parameters
    ----------
    coord: xarray.Coordinate
        The mesh variable
    coords: dict
        The coordinates to use to get node coordinates"""
    # The mesh variable names its node coordinates in an attribute;
    # only the first two entries are used.
    names = coord.attrs.get('node_coordinates', '').split()[:2]
    # Prefer the caller-supplied mapping, falling back to dataset coords.
    return [coords.get(name, self.ds.coords.get(name)) for name in names]
"def",
"get_nodes",
"(",
"self",
",",
"coord",
",",
"coords",
")",
":",
"def",
"get_coord",
"(",
"coord",
")",
":",
"return",
"coords",
".",
"get",
"(",
"coord",
",",
"self",
".",
"ds",
".",
"coords",
".",
"get",
"(",
"coord",
")",
")",
"return",
... | 37.153846 | 15.769231 |
def init(ctx):
    """Initialize the project for use with EasyCI. This installs the necessary
    git hooks (pre-commit + pre-push) and add a config file if one does not
    already exists.
    """
    repo = ctx.obj['vcs']
    click.echo("Installing hooks...", nl=False)
    # Drop obsolete hooks left behind by older versions.
    for obsolete in ['commit-msg']:
        hook_path = os.path.join(repo.path, '.git/hooks', obsolete)
        if os.path.exists(hook_path):
            os.remove(hook_path)
    for hook_name in ['pre-commit', 'pre-push']:
        repo.install_hook(hook_name, hooks_manager.get_hook(hook_name))
    click.echo("Done.")
    # Drop in a minimal config file when the project has none yet.
    config_path = os.path.join(repo.path, 'eci.yaml')
    if not os.path.exists(config_path):
        click.echo("Placing a trivial config file in your project...", nl=False)
        with open(config_path, 'w') as f:
            f.write(yaml.safe_dump(
                {'tests': ['echo please modify to run your tests', 'true']}))
        click.echo("Done.")
    # Initialize the lock, then record the installed version.
    locking.init(repo)
    click.echo("Updating installed version...", nl=False)
    set_installed_version(repo, easyci.__version__)
    click.echo("Done.")
"def",
"init",
"(",
"ctx",
")",
":",
"# install hooks",
"git",
"=",
"ctx",
".",
"obj",
"[",
"'vcs'",
"]",
"click",
".",
"echo",
"(",
"\"Installing hooks...\"",
",",
"nl",
"=",
"False",
")",
"for",
"old",
"in",
"[",
"'commit-msg'",
"]",
":",
"path",
"... | 35.90625 | 16.75 |
def invalidate_cache(self):
    """
    Invalidate httpBL cache
    """
    if not self._use_cache:
        return
    # Bumping the version number makes every previously cached entry stale.
    self._cache_version += 1
    self._cache.increment('cached_httpbl_{0}_version'.format(self._api_key))
"def",
"invalidate_cache",
"(",
"self",
")",
":",
"if",
"self",
".",
"_use_cache",
":",
"self",
".",
"_cache_version",
"+=",
"1",
"self",
".",
"_cache",
".",
"increment",
"(",
"'cached_httpbl_{0}_version'",
".",
"format",
"(",
"self",
".",
"_api_key",
")",
... | 28.375 | 15.375 |
def pages(request, path=None, instance=None):
    """
    View that renders a static text page.
    :param request: the HTTP request
    :param path: the page address
    :param instance: the page object to render
    :return: the rendered page response
    """
    # Only active pages are served; anything else is a 404.
    if not (instance and instance.active):
        raise Http404()
    page = instance
    return render(request, page.tpl, get_page_tpl_ctx(page, request))
"def",
"pages",
"(",
"request",
",",
"path",
"=",
"None",
",",
"instance",
"=",
"None",
")",
":",
"if",
"instance",
"and",
"instance",
".",
"active",
":",
"p",
"=",
"instance",
"else",
":",
"raise",
"Http404",
"(",
")",
"return",
"render",
"(",
"requ... | 23.333333 | 17.466667 |
async def _read(self, path, *,
raw=None,
recurse=None,
dc=None,
separator=None,
keys=None,
watch=None,
consistency=None):
"""Returns the specified key
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
"""
response = await self._api.get(
"/v1/kv", path,
params={
"raw": raw,
"dc": dc,
"recurse": recurse,
"separator": separator,
"keys": keys
},
watch=watch,
consistency=consistency)
return response | [
"async",
"def",
"_read",
"(",
"self",
",",
"path",
",",
"*",
",",
"raw",
"=",
"None",
",",
"recurse",
"=",
"None",
",",
"dc",
"=",
"None",
",",
"separator",
"=",
"None",
",",
"keys",
"=",
"None",
",",
"watch",
"=",
"None",
",",
"consistency",
"="... | 31.571429 | 12.035714 |
def coreBurkAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
    """
    Compute the deflection angle components.

    :param R: radius at which the deflection is evaluated
    :param Rs: scale radius
    :param rho0: density normalisation
    :param r_core: core radius
    :param ax_x: x-component of the direction
    :param ax_y: y-component of the direction
    :return: tuple with the x- and y-components of the deflection
    """
    # Radius in units of the scale radius, and the scale/core ratio.
    scaled_r = R * Rs ** -1
    core_ratio = Rs * r_core ** -1
    gx = self._G(scaled_r, core_ratio)
    amplitude = 2 * rho0 * Rs ** 2 * gx / scaled_r
    return amplitude * ax_x / R, amplitude * ax_y / R
"def",
"coreBurkAlpha",
"(",
"self",
",",
"R",
",",
"Rs",
",",
"rho0",
",",
"r_core",
",",
"ax_x",
",",
"ax_y",
")",
":",
"x",
"=",
"R",
"*",
"Rs",
"**",
"-",
"1",
"p",
"=",
"Rs",
"*",
"r_core",
"**",
"-",
"1",
"gx",
"=",
"self",
".",
"_G",... | 19.75 | 19.15 |
async def lookup(client: Client, search: str) -> dict:
    """
    GET UID/Public key data
    :param client: Client to connect to the api
    :param search: UID or public key
    :return: the lookup result document
    """
    endpoint = MODULE + '/lookup/%s' % search
    return await client.get(endpoint, schema=LOOKUP_SCHEMA)
"async",
"def",
"lookup",
"(",
"client",
":",
"Client",
",",
"search",
":",
"str",
")",
"->",
"dict",
":",
"return",
"await",
"client",
".",
"get",
"(",
"MODULE",
"+",
"'/lookup/%s'",
"%",
"search",
",",
"schema",
"=",
"LOOKUP_SCHEMA",
")"
] | 30.111111 | 16.333333 |
def delete_course(self, courseid):
    """ Erase all course data """
    # Drop the course database content.
    self.wipe_course(courseid)
    # Remove the entire course folder via the factory.
    self.course_factory.delete_course(courseid)
    # Finally delete any zipped backups kept for this course.
    backup_path = os.path.join(self.backup_dir, courseid)
    if os.path.exists(os.path.dirname(backup_path)):
        for archive in glob.glob(os.path.join(backup_path, '*.zip')):
            os.remove(archive)
    self._logger.info("Course %s files erased.", courseid)
"def",
"delete_course",
"(",
"self",
",",
"courseid",
")",
":",
"# Wipes the course (delete database)",
"self",
".",
"wipe_course",
"(",
"courseid",
")",
"# Deletes the course from the factory (entire folder)",
"self",
".",
"course_factory",
".",
"delete_course",
"(",
"co... | 37.333333 | 18.2 |
def parse_document(graph: BELGraph,
                   enumerated_lines: Iterable[Tuple[int, str]],
                   metadata_parser: MetadataParser,
                   ) -> None:
    """Parse the lines in the document section of a BEL script."""
    started_at = time.time()
    for line_number, line in enumerated_lines:
        try:
            metadata_parser.parseString(line, line_number=line_number)
        except VersionFormatWarning as exc:
            # Version problems are recorded as warnings, not fatal.
            _log_parse_exception(graph, exc)
            graph.add_warning(exc)
        except Exception as e:
            # Anything else means the metadata line itself is malformed.
            exc = MalformedMetadataException(line_number, line, 0)
            _log_parse_exception(graph, exc)
            raise exc from e
    # Check that every required metadata entry was actually provided.
    for required in REQUIRED_METADATA:
        if metadata_parser.document_metadata.get(required) is not None:
            continue
        required_key = INVERSE_DOCUMENT_KEYS[required]
        exc = MissingMetadataException.make(required_key)
        # This has to be insert since it needs to go on the front!
        graph.warnings.insert(0, (None, exc, {}))
        _log_parse_exception(graph, exc)
    graph.document.update(metadata_parser.document_metadata)
    log.info('Finished parsing document section in %.02f seconds', time.time() - started_at)
"def",
"parse_document",
"(",
"graph",
":",
"BELGraph",
",",
"enumerated_lines",
":",
"Iterable",
"[",
"Tuple",
"[",
"int",
",",
"str",
"]",
"]",
",",
"metadata_parser",
":",
"MetadataParser",
",",
")",
"->",
"None",
":",
"parse_document_start_time",
"=",
"t... | 41.65625 | 19.21875 |
def preprovision_rbridge_id_wwn(self, **kwargs):
    """Auto Generated Code
    """
    # Build the <config><preprovision><rbridge-id>... XML payload.
    config = ET.Element("config")
    preprovision = ET.SubElement(
        config, "preprovision",
        xmlns="urn:brocade.com:mgmt:brocade-preprovision")
    rbridge_id = ET.SubElement(preprovision, "rbridge-id")
    ET.SubElement(rbridge_id, "rbridge-id").text = kwargs.pop('rbridge_id')
    ET.SubElement(rbridge_id, "wwn").text = kwargs.pop('wwn')
    # Callers may override the configured callback per invocation.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"def",
"preprovision_rbridge_id_wwn",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"preprovision",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"preprovision\"",
",",
"xmlns",
"=",
"\"u... | 45.153846 | 16.769231 |
def mcmc(x1, x2, x1err=[], x2err=[], po=(1,1,0.5), logify=True,
         nsteps=5000, nwalkers=100, nburn=500, output='full'):
    """
    Use emcee to find the best-fit linear relation or power law
    accounting for measurement uncertainties and intrinsic scatter
    Parameters
    ----------
    x1 : array of floats
        Independent variable, or observable
    x2 : array of floats
        Dependent variable
    x1err : array of floats (optional)
        Uncertainties on the independent variable
    x2err : array of floats (optional)
        Uncertainties on the dependent variable
    po : tuple of 3 floats (optional)
        Initial guesses for zero point, slope, and intrinsic
        scatter. Results are not very sensitive to these values
        so they shouldn't matter a lot.
    logify : bool (default True)
        Whether to take the log of the measurements in order to
        estimate the best-fit power law instead of linear relation
    nsteps : int (default 5000)
        Number of steps each walker should take in the MCMC
    nwalkers : int (default 100)
        Number of MCMC walkers
    nburn : int (default 500)
        Number of samples to discard to give the MCMC enough time
        to converge.
    output : list of ints or 'full' (default 'full')
        If 'full', then return the full samples (except for burn-in
        section) for each parameter. Otherwise, each float
        corresponds to a percentile that will be returned for
        each parameter.
    Returns
    -------
    See *output* argument above for return options.
    """
    import emcee
    # Missing uncertainties default to unity (the lists are never mutated,
    # so the mutable default arguments are harmless but kept for interface
    # compatibility).
    if len(x1err) == 0:
        x1err = numpy.ones(len(x1))
    if len(x2err) == 0:
        x2err = numpy.ones(len(x1))
    def lnlike(theta, x, y, xerr, yerr):
        # Gaussian log-likelihood of a straight line with intrinsic scatter s.
        a, b, s = theta
        model = a + b*x
        # NOTE(review): 'yerr*2' looks like a typo for 'yerr**2' -- kept
        # as-is to preserve the existing numerical behavior; confirm with
        # the original author before changing.
        sigma = numpy.sqrt((b*xerr)**2 + yerr*2 + s**2)
        lglk = 2 * sum(numpy.log(sigma)) + \
               sum(((y-model) / sigma) ** 2) + \
               numpy.log(len(x)) * numpy.sqrt(2*numpy.pi) / 2
        return -lglk
    def lnprior(theta):
        # Flat prior: only requires a non-negative intrinsic scatter.
        a, b, s = theta
        if s >= 0:
            return 0
        return -numpy.inf
    def lnprob(theta, x, y, xerr, yerr):
        lp = lnprior(theta)
        return lp + lnlike(theta, x, y, xerr, yerr)
    if logify:
        x1, x2, x1err, x2err = to_log(x1, x2, x1err, x2err)
    start = numpy.array(po)
    ndim = len(start)
    # Scatter the walkers in a tiny ball around the initial guess.
    pos = [start + 1e-4*numpy.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=(x1,x2,x1err,x2err))
    sampler.run_mcmc(pos, nsteps)
    # range() replaces the Python 2-only xrange().
    samples = numpy.array([sampler.chain[:,nburn:,i].reshape(-1)
                           for i in range(ndim)])
    if logify:
        # Convert the scatter from log10 units to natural-log units.
        samples[2] *= numpy.log(10)
    if output == 'full':
        return samples
    else:
        try:
            values = [[numpy.percentile(s, o) for o in output]
                      for s in samples]
            return values
        except TypeError:
            # print() replaces the Python 2-only print statement.
            msg = 'ERROR: wrong value for argument output in mcmc().'
            msg += ' Must be "full" or list of ints.'
            print(msg)
            exit()
    return
"def",
"mcmc",
"(",
"x1",
",",
"x2",
",",
"x1err",
"=",
"[",
"]",
",",
"x2err",
"=",
"[",
"]",
",",
"po",
"=",
"(",
"1",
",",
"1",
",",
"0.5",
")",
",",
"logify",
"=",
"True",
",",
"nsteps",
"=",
"5000",
",",
"nwalkers",
"=",
"100",
",",
... | 38.574713 | 17.701149 |
def get_table_description(self, cursor, table_name):
    "Returns a description of the table, with the DB-API cursor.description interface."
    # pylint:disable=too-many-locals,unused-argument
    descriptions = []
    for field in self.table_description_cache(table_name)['fields']:
        params = OrderedDict()
        # Only keep a verbose_name when the label differs from what Django
        # would auto-generate from the API field name.
        auto_label = camel_case_to_spaces(re.sub('__c$', '', field['name'])).title()
        if field['label'] and field['label'] != auto_label:
            params['verbose_name'] = field['label']
        if not field['updateable'] or not field['createable']:
            # Fields that are result of a formula or system fields modified
            # by triggers or by other apex code
            sf_read_only = (0 if field['updateable'] else 1) | (0 if field['createable'] else 2)
            # use symbolic names NOT_UPDATEABLE, NON_CREATABLE, READ_ONLY instead of 1, 2, 3
            params['sf_read_only'] = reverse_models_names[sf_read_only]
        if field['defaultValue'] is not None:
            params['default'] = field['defaultValue']
        if field['inlineHelpText']:
            params['help_text'] = field['inlineHelpText']
        if field['picklistValues']:
            params['choices'] = [(entry['value'], entry['label'])
                                 for entry in field['picklistValues'] if entry['active']]
        if field['defaultedOnCreate'] and field['createable']:
            params['default'] = SymbolicModelsName('DEFAULTED_ON_CREATE')
        if field['type'] == 'reference' and not field['referenceTo']:
            params['ref_comment'] = 'No Reference table'
            field['type'] = 'string'
        if field['calculatedFormula']:
            # calculated formula field are without length in Salesforce 45 Spring '19,
            # but Django requires a length, though the field is read only and never written
            field['length'] = 1300
        # We prefer "length" over "byteLength" for "internal_size".
        # (because strings have usually: byteLength == 3 * length)
        descriptions.append(FieldInfo(
            field['name'],          # name
            field['type'],          # type_code
            field['length'],        # display_size
            field['length'],        # internal_size
            field['precision'],     # precision
            field['scale'],         # scale
            field['nillable'],      # null_ok
            params.get('default'),  # default
            params,
        ))
    return descriptions
"def",
"get_table_description",
"(",
"self",
",",
"cursor",
",",
"table_name",
")",
":",
"# pylint:disable=too-many-locals,unused-argument",
"result",
"=",
"[",
"]",
"for",
"field",
"in",
"self",
".",
"table_description_cache",
"(",
"table_name",
")",
"[",
"'fields'... | 58.627907 | 23.139535 |
def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None,
                   output_shape=None, guess=None,
                   outgrid=None):
    """Fit a 2d array to a 2d function
    USE ONLY MASKED VALUES
    * [down_scale_factor] map to speed up fitting procedure, set value smaller than 1
    * [output_shape] shape of the output array
    * [guess] must be scaled using [scale_factor]
    Returns:
        Fitted map, fitting params (scaled), error
    """
    if mask is None:
        mask = np.ones(shape=arr.shape, dtype=bool)
    if down_scale_factor is None:
        # Large masks are sub-sampled to keep the fit fast.
        down_scale_factor = 0.3 if mask.sum() > 1000 else 1
    if down_scale_factor != 1:
        # Shrink both data and mask to reduce the number of fit points.
        scaled_arr = zoom(arr, down_scale_factor)
        mask = zoom(mask, down_scale_factor, output=bool)
    else:
        scaled_arr = arr
    # Fit only the valid (masked) points.
    coord_a, coord_b = np.where(mask)
    values = scaled_arr[mask]
    parameters, cov_matrix = curve_fit(fn, (coord_a, coord_b), values, p0=guess)
    # One-sigma parameter errors from the covariance diagonal.
    perr = np.sqrt(np.diag(cov_matrix))
    if outgrid is not None:
        yy, xx = outgrid
        rebuilt = fn((yy, xx), *parameters)
    else:
        if output_shape is None:
            output_shape = arr.shape
        # Scale factors map output coordinates back to fit coordinates.
        fx = scaled_arr.shape[0] / output_shape[0]
        fy = scaled_arr.shape[1] / output_shape[1]
        rebuilt = np.fromfunction(lambda a, b: fn((a * fx, b * fy),
                                                  *parameters), output_shape)
    return rebuilt, parameters, perr
"def",
"fit2dArrayToFn",
"(",
"arr",
",",
"fn",
",",
"mask",
"=",
"None",
",",
"down_scale_factor",
"=",
"None",
",",
"output_shape",
"=",
"None",
",",
"guess",
"=",
"None",
",",
"outgrid",
"=",
"None",
")",
":",
"if",
"mask",
"is",
"None",
":",
"#as... | 29.980769 | 18.615385 |
async def worker_start(self, worker, exc=None):
    '''Start the worker by invoking the :meth:`create_server` method.
    '''
    # Skip when startup already failed or this server is already bound.
    if exc or self.name in worker.servers:
        return
    servers = await self.binds(worker, worker.sockets)
    for server in servers.values():
        # Stopping any server stops the whole worker.
        server.event('stop').bind(lambda _, **kw: worker.stop())
"async",
"def",
"worker_start",
"(",
"self",
",",
"worker",
",",
"exc",
"=",
"None",
")",
":",
"if",
"not",
"exc",
"and",
"self",
".",
"name",
"not",
"in",
"worker",
".",
"servers",
":",
"servers",
"=",
"await",
"self",
".",
"binds",
"(",
"worker",
... | 51.857143 | 20.142857 |
def copy(self, dest, src):
    """Copy element from sequence, member from mapping.
    :param dest: the destination
    :type dest: Pointer
    :param src: the source
    :type src: Pointer
    :return: resolved document
    :rtype: Target
    """
    # Work on a deep copy so the stored document is never mutated.
    doc = deepcopy(self.document)
    fragment = doc
    # Walk down the source pointer to locate the fragment to copy.
    for token in Pointer(src):
        fragment = token.extract(fragment, bypass_ref=True)
    return Target(doc).add(dest, fragment)
"def",
"copy",
"(",
"self",
",",
"dest",
",",
"src",
")",
":",
"doc",
"=",
"fragment",
"=",
"deepcopy",
"(",
"self",
".",
"document",
")",
"for",
"token",
"in",
"Pointer",
"(",
"src",
")",
":",
"fragment",
"=",
"token",
".",
"extract",
"(",
"fragme... | 30.8 | 13.466667 |
def get_notables(self, id_num):
    """Return the notables of the activity with the given id.
    """
    return self._json(self._build_url('my', 'activities', id_num, 'notables'))
"def",
"get_notables",
"(",
"self",
",",
"id_num",
")",
":",
"url",
"=",
"self",
".",
"_build_url",
"(",
"'my'",
",",
"'activities'",
",",
"id_num",
",",
"'notables'",
")",
"return",
"self",
".",
"_json",
"(",
"url",
")"
] | 41.2 | 9.6 |
def dispatch(self, block = False, timeout = None):
    """Get the next event from the queue and pass it to
    the appropriate handlers.
    :Parameters:
        - `block`: wait for event if the queue is empty
        - `timeout`: maximum time, in seconds, to wait if `block` is `True`
    :Types:
        - `block`: `bool`
        - `timeout`: `float`
    :Return: the event handled (may be `QUIT`) or `None`
    """
    logger.debug(" dispatching...")
    try:
        event = self.queue.get(block, timeout)
    except Queue.Empty:
        logger.debug(" queue empty")
        return None
    try:
        logger.debug(" event: {0!r}".format(event))
        if event is QUIT:
            return QUIT
        # Start with the catch-all handlers, then add class-specific ones.
        matching = list(self._handler_map[None])
        event_class = event.__class__
        if event_class in self._handler_map:
            matching += self._handler_map[event_class]
        logger.debug(" handlers: {0!r}".format(matching))
        # to restore the original order of handler objects
        matching.sort(key = lambda entry: entry[0])
        for _priority, handler in matching:
            logger.debug(u" passing the event to: {0!r}".format(handler))
            outcome = handler(event)
            if isinstance(outcome, Event):
                # A handler may emit a follow-up event.
                self.queue.put(outcome)
            elif outcome and event is not QUIT:
                # A truthy result stops further dispatching.
                return event
        return event
    finally:
        # Always acknowledge the queue item, whatever the handlers did.
        self.queue.task_done()
"def",
"dispatch",
"(",
"self",
",",
"block",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"\" dispatching...\"",
")",
"try",
":",
"event",
"=",
"self",
".",
"queue",
".",
"get",
"(",
"block",
",",
"timeout",
")",... | 38.1 | 14.45 |
def _upgradeTableOid(store, table, createTable, postCreate=lambda: None):
    """
    Upgrade a table to have an explicit oid.
    Must be called in a transaction to avoid corrupting the database.
    """
    if _hasExplicitOid(store, table):
        # Nothing to do: the table already carries an explicit oid.
        return
    # Move the old table aside, recreate it, copy all rows back (including
    # the implicit oid), then drop the temporary copy.
    rename_sql = 'ALTER TABLE *DATABASE*.{0} RENAME TO {0}_temp'.format(table)
    copy_sql = ('INSERT INTO *DATABASE*.{0} '
                'SELECT oid, * FROM *DATABASE*.{0}_temp'.format(table))
    drop_sql = 'DROP TABLE *DATABASE*.{0}_temp'.format(table)
    store.executeSchemaSQL(rename_sql)
    createTable()
    store.executeSchemaSQL(copy_sql)
    store.executeSchemaSQL(drop_sql)
    postCreate()
"def",
"_upgradeTableOid",
"(",
"store",
",",
"table",
",",
"createTable",
",",
"postCreate",
"=",
"lambda",
":",
"None",
")",
":",
"if",
"_hasExplicitOid",
"(",
"store",
",",
"table",
")",
":",
"return",
"store",
".",
"executeSchemaSQL",
"(",
"'ALTER TABLE ... | 36.375 | 18.625 |
def chart(symbol, timeframe='1m', date=None, token='', version=''):
    '''Historical price/volume data, daily and intraday
    https://iexcloud.io/docs/api/#historical-prices
    Data Schedule
    1d: -9:30-4pm ET Mon-Fri on regular market trading days
        -9:30-1pm ET on early close trading days
    All others:
        -Prior trading day available after 4am ET Tue-Sat
    Args:
        symbol (string); Ticker to request
        timeframe (string); Timeframe to request e.g. 1m
        date (datetime): date, if requesting intraday
        token (string); Access token
        version (string); API version
    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    base = 'stock/' + symbol + '/chart'
    # Any timeframe other than the daily special-case must be whitelisted.
    if timeframe is not None and timeframe != '1d':
        if timeframe not in _TIMEFRAME_CHART:
            raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART))
        return _getJson(base + '/' + timeframe, token, version)
    # Intraday request for a specific date.
    if date:
        return _getJson(base + '/date/' + _strOrDate(date), token, version)
    return _getJson(base, token, version)
"def",
"chart",
"(",
"symbol",
",",
"timeframe",
"=",
"'1m'",
",",
"date",
"=",
"None",
",",
"token",
"=",
"''",
",",
"version",
"=",
"''",
")",
":",
"_raiseIfNotStr",
"(",
"symbol",
")",
"if",
"timeframe",
"is",
"not",
"None",
"and",
"timeframe",
"!... | 38.655172 | 21.965517 |
def _maketicks(self, ax, ylabel='Energy (eV)'):
    """Utility method to add tick marks to a band structure."""
    # y-axis: fixed number of major ticks plus minor subdivisions.
    ax.yaxis.set_major_locator(MaxNLocator(6))
    ax.yaxis.set_minor_locator(AutoMinorLocator(2))
    # x-axis: only plot the unique tick labels.
    ticks = self.get_ticks()
    positions = []
    labels = []
    if ticks['distance']:
        tick_pairs = list(zip(ticks['distance'], ticks['label']))
        positions.append(tick_pairs[0][0])
        labels.append(tick_pairs[0][1])
        for idx in range(1, len(tick_pairs)):
            # Labels marked with @ are hidden.
            if '@' in tick_pairs[idx][1]:
                # A branch connection: inspect every part of the label.
                if r'$\mid$' in tick_pairs[idx][1]:
                    parts = tick_pairs[idx][1].split(r'$\mid$')
                    visible = [part for part in parts if part[0] != '@']
                    if len(visible) == 0:
                        continue
                    else:
                        tick_pairs[idx] = (tick_pairs[idx][0],
                                           r'$\mid$'.join(visible))
                # A single label: check its first character.
                elif tick_pairs[idx][1][0] == '@':
                    continue
            # Append the label only if it differs from its predecessor.
            if labels[-1] != tick_pairs[idx][1]:
                positions.append(tick_pairs[idx][0])
                labels.append(tick_pairs[idx][1])
    logging.info('Label positions:')
    for dist, label in list(zip(positions, labels)):
        logging.info('\t{:.4f}: {}'.format(dist, label))
    ax.set_xticks(positions)
    ax.set_xticklabels(labels)
    ax.xaxis.grid(True, ls='-')
    ax.set_ylabel(ylabel)
    # Vertical lines at every labelled position, spanning the full axis
    # height in axes coordinates while pinned to data x-coordinates.
    trans_xdata_yaxes = blended_transform_factory(ax.transData,
                                                  ax.transAxes)
    ax.vlines(positions, 0, 1,
              transform=trans_xdata_yaxes,
              colors=rcParams['grid.color'],
              linewidth=rcParams['grid.linewidth'],
              zorder=3)
"def",
"_maketicks",
"(",
"self",
",",
"ax",
",",
"ylabel",
"=",
"'Energy (eV)'",
")",
":",
"# set y-ticks",
"ax",
".",
"yaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"6",
")",
")",
"ax",
".",
"yaxis",
".",
"set_minor_locator",
"(",
"AutoMinor... | 43.615385 | 16.480769 |
def list_of_installed(self, repo, name):
    """Return installed packages
    """
    installed = []
    for inst in find_package("", self.meta.pkg_path):
        if repo == "sbo" and inst.endswith("_SBo"):
            # NOTE: rebinding `name` here mirrors the original behavior --
            # after the first _SBo package, later iterations see its base
            # name in the `elif name:` test below.
            name = split_package(inst)[0]
            installed.append(name)
        elif name:
            installed.append(split_package(inst)[0])
        else:
            installed.append(inst)
    return installed
"def",
"list_of_installed",
"(",
"self",
",",
"repo",
",",
"name",
")",
":",
"all_installed_names",
"=",
"[",
"]",
"all_installed_packages",
"=",
"find_package",
"(",
"\"\"",
",",
"self",
".",
"meta",
".",
"pkg_path",
")",
"for",
"inst",
"in",
"all_installed... | 39.8 | 11.6 |
def download(url):
    """
    Download `url` and return it as utf-8 encoded text.
    Args:
        url (str): What should be downloaded?
    Returns:
        str: Content of the page.
    """
    headers = {"User-Agent": USER_AGENT}
    # NOTE(review): certificate verification is disabled (verify=False);
    # confirm this is intentional before shipping.
    resp = requests.get(
        url,
        timeout=REQUEST_TIMEOUT,
        headers=headers,
        allow_redirects=True,
        verify=False,
    )
    def decode(st, alt_encoding=None):
        # Try a fixed list of source encodings, optionally extended by the
        # encoding(s) reported by the server.
        encodings = ['ascii', 'utf-8', 'iso-8859-1', 'iso-8859-15']
        if alt_encoding:
            # `basestring` is Python 2 only -- this module targets Python 2.
            if isinstance(alt_encoding, basestring):
                encodings.append(alt_encoding)
            else:
                encodings.extend(alt_encoding)
        for encoding in encodings:
            try:
                return st.encode(encoding).decode("utf-8")
            # BUG FIX: the old `except UnicodeEncodeError, UnicodeDecodeError:`
            # (Python 2 syntax) bound the caught UnicodeEncodeError to the
            # name UnicodeDecodeError and never caught UnicodeDecodeError at
            # all; a tuple catches both exception types as intended.
            except (UnicodeEncodeError, UnicodeDecodeError):
                pass
        raise UnicodeError('Could not find encoding.')
    return decode(resp.text, resp.encoding)
"def",
"download",
"(",
"url",
")",
":",
"headers",
"=",
"{",
"\"User-Agent\"",
":",
"USER_AGENT",
"}",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"REQUEST_TIMEOUT",
",",
"headers",
"=",
"headers",
",",
"allow_redirects",
"=",
... | 25.567568 | 19.351351 |
def add_time_event(self, time_event):
    """Add a TimeEvent.
    :type time_event: :class: `~opencensus.trace.time_event.TimeEvent`
    :param time_event: A TimeEvent object.
    """
    # Reject anything that is not a TimeEvent before touching the list.
    if not isinstance(time_event, time_event_module.TimeEvent):
        raise TypeError("Type Error: received {}, but requires TimeEvent.".
                        format(type(time_event).__name__))
    self.time_events.append(time_event)
"def",
"add_time_event",
"(",
"self",
",",
"time_event",
")",
":",
"if",
"isinstance",
"(",
"time_event",
",",
"time_event_module",
".",
"TimeEvent",
")",
":",
"self",
".",
"time_events",
".",
"append",
"(",
"time_event",
")",
"else",
":",
"raise",
"TypeErro... | 41.727273 | 18.272727 |
def set(self, indexes=None, columns=None, values=None):
        """
        Set a sub-section of the DataFrame to the given values, dispatching
        to set_cell / set_row / set_column based on the argument types.
        Indexes or columns not already in the DataFrame cause new rows or
        columns to be added.

        :param indexes: indexes value, list of indexes values, or a list of booleans. If None then all indexes are used
        :param columns: columns name, if None then all columns are used. Currently can only handle a single column or\
        all columns
        :param values: value or list of values to set (index, column) to. If setting just a single row, then must be a\
        dict where the keys are the column names. If a list then must be the same length as the indexes parameter, if\
        indexes=None, then must be the same and length of DataFrame
        :return: nothing
        """
        if indexes is None and columns is None:
            raise ValueError('either or both of indexes or columns must be provided')
        if columns is None:
            # only indexes supplied: row-wise assignment
            self.set_row(indexes, values)
        elif indexes is None:
            # only columns supplied: whole-column assignment
            self.set_column(indexes, columns, values)
        elif isinstance(indexes, (list, blist)):
            # multiple indexes within one column
            self.set_column(indexes, columns, values)
        else:
            # single (index, column) cell
            self.set_cell(indexes, columns, values)
"def",
"set",
"(",
"self",
",",
"indexes",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"values",
"=",
"None",
")",
":",
"if",
"(",
"indexes",
"is",
"not",
"None",
")",
"and",
"(",
"columns",
"is",
"not",
"None",
")",
":",
"if",
"isinstance",
"... | 60.92 | 33.4 |
def get_version_name(version_id):
    """
    Look up the human-readable name of a protocol version.

    :param Integer version_id: Internal protocol version ID
    :return: Name of the version, or 'unknown' if the ID is not registered
    :rtype: String
    """
    info = registry.version_info.get(version_id)
    return info.name if info else 'unknown'
"def",
"get_version_name",
"(",
"version_id",
")",
":",
"ver",
"=",
"registry",
".",
"version_info",
".",
"get",
"(",
"version_id",
")",
"if",
"ver",
":",
"return",
"ver",
".",
"name",
"return",
"'unknown'"
] | 24.846154 | 18.076923 |
def moran_cultural(network):
    """Generalized cultural Moran process.

    At each time step an individual is chosen to receive information from
    another individual. Nobody dies, but perhaps their ideas do.
    """
    from operator import attrgetter
    if network.transmissions():
        replacer = random.choice(network.nodes(type=Agent))
    else:
        # first step: the replacer is a source, which must transmit first
        replacer = random.choice(network.nodes(type=Source))
        replacer.transmit()
    replaced = random.choice(
        replacer.neighbors(direction="to", type=Agent))
    # Pass along the replacer's newest piece of information.
    newest_info = max(replacer.infos(), key=attrgetter('creation_time'))
    replacer.transmit(what=newest_info, to_whom=replaced)
"def",
"moran_cultural",
"(",
"network",
")",
":",
"if",
"not",
"network",
".",
"transmissions",
"(",
")",
":",
"# first step, replacer is a source",
"replacer",
"=",
"random",
".",
"choice",
"(",
"network",
".",
"nodes",
"(",
"type",
"=",
"Source",
")",
")"... | 36.736842 | 20.368421 |
def get_status(self, hosts, services):
        """Return the textual status of this host.

        :return: "UP", "DOWN", "UNREACHABLE" or "n/a" based on host state_id or business_rule state
        :rtype: str
        """
        if not self.got_business_rule:
            return self.state
        state_id = self.business_rule.get_state(hosts, services)
        if state_id == 0:
            return "UP"
        if state_id == 1:
            return "DOWN"
        if state_id == 4:
            return "UNREACHABLE"
        return "n/a"
"def",
"get_status",
"(",
"self",
",",
"hosts",
",",
"services",
")",
":",
"if",
"self",
".",
"got_business_rule",
":",
"mapping",
"=",
"{",
"0",
":",
"\"UP\"",
",",
"1",
":",
"\"DOWN\"",
",",
"4",
":",
"\"UNREACHABLE\"",
",",
"}",
"return",
"mapping",... | 31.133333 | 20.533333 |
def respond(self):
        """Execute the WSGI application for the current request.

        From :pep:`333`:

            The start_response callable must not actually transmit
            the response headers. Instead, it must store them for the
            server or gateway to transmit only after the first
            iteration of the application return value that yields
            a NON-EMPTY string, or upon the application's first
            invocation of the write() callable.
        """
        response = self.req.server.wsgi_app(self.env, self.start_response)
        try:
            for chunk in response:
                # Skip empty chunks; headers go out only on real data.
                if not chunk:
                    continue
                if not isinstance(chunk, six.binary_type):
                    raise ValueError('WSGI Applications must yield bytes')
                self.write(chunk)
        finally:
            # Send headers if not already sent
            self.req.ensure_headers_sent()
            if hasattr(response, 'close'):
                response.close()
"def",
"respond",
"(",
"self",
")",
":",
"response",
"=",
"self",
".",
"req",
".",
"server",
".",
"wsgi_app",
"(",
"self",
".",
"env",
",",
"self",
".",
"start_response",
")",
"try",
":",
"for",
"chunk",
"in",
"filter",
"(",
"None",
",",
"response",
... | 40.913043 | 18.304348 |
def _scan_for_tokens(contents):
    """Scan a string for tokens and return immediate form tokens.

    Args:
        contents (str): Full text to tokenize.

    Returns:
        list: One Token per lexeme, annotated with line and column.

    Raises:
        RuntimeError: If a line contains text that no pattern matches.
    """
    # Regexes are in priority order. Changing the order may alter the
    # behavior of the lexer
    scanner = re.Scanner([
        # Things inside quotes
        (r"(?<![^\s\(])([\"\'])(?:(?=(\\?))\2.)*?\1(?![^\s\)])",
         lambda s, t: (TokenType.QuotedLiteral, t)),
        # Numbers on their own
        (r"(?<![^\s\(])-?[0-9]+(?![^\s\)\(])",
         lambda s, t: (TokenType.Number, t)),
        # Left Paren
        (r"\(", lambda s, t: (TokenType.LeftParen, t)),
        # Right Paren
        (r"\)", lambda s, t: (TokenType.RightParen, t)),
        # Either a valid function name or variable name.
        # BUG FIX: the original class [a-zA-z_] used the range A-z, which
        # also matches '[', '\', ']', '^' and '`'; [a-zA-Z_] is intended.
        (r"(?<![^\s\(])[a-zA-Z_][a-zA-Z0-9_]*(?![^\s\)\(])",
         lambda s, t: (TokenType.Word, t)),
        # Variable dereference (same [a-zA-z_] -> [a-zA-Z_] fix as above).
        (r"(?<![^\s\(])\${[a-zA-Z_][a-zA-Z0-9_]*}(?![^\s\)])",
         lambda s, t: (TokenType.Deref, t)),
        # Newline
        (r"\n", lambda s, t: (TokenType.Newline, t)),
        # Whitespace
        (r"\s+", lambda s, t: (TokenType.Whitespace, t)),
        # The beginning of a double-quoted string, terminating at end of line
        (r"(?<![^\s\(\\])[\"]([^\"]|\\[\"])*$",
         lambda s, t: (TokenType.BeginDoubleQuotedLiteral, t)),
        # The end of a double-quoted string
        (r"[^\s]*(?<!\\)[\"](?![^\s\)])",
         lambda s, t: (TokenType.EndDoubleQuotedLiteral, t)),
        # The beginning of a single-quoted string, terminating at end of line
        (r"(?<![^\s\(\\])[\']([^\']|\\[\'])*$",
         lambda s, t: (TokenType.BeginSingleQuotedLiteral, t)),
        # The end of a single-quoted string
        (r"[^\s]*(?<!\\)[\'](?![^\s\)])",
         lambda s, t: (TokenType.EndSingleQuotedLiteral, t)),
        # Begin-RST Comment Block
        (r"#.rst:$", lambda s, t: (TokenType.BeginRSTComment, t)),
        # Begin Inline RST
        (r"#\[=*\[.rst:$", lambda s, t: (TokenType.BeginInlineRST, t)),
        # End Inline RST
        (r"#\]=*\]$", lambda s, t: (TokenType.EndInlineRST, t)),
        # Comment
        (r"#", lambda s, t: (TokenType.Comment, t)),
        # Catch-all for literals which are compound statements.
        (r"([^\s\(\)]+|[^\s\(]*[^\)]|[^\(][^\s\)]*)",
         lambda s, t: (TokenType.UnquotedLiteral, t))
    ])
    tokens_return = []
    lines = contents.splitlines(True)
    lineno = 0
    # Scan line-by-line so that line/column positions can be tracked.
    for line in lines:
        lineno += 1
        col = 1
        tokens, remaining = scanner.scan(line)
        if remaining != "":
            msg = "Unknown tokens found on line {0}: {1}".format(lineno,
                                                                 remaining)
            raise RuntimeError(msg)
        for token_type, token_contents in tokens:
            tokens_return.append(Token(type=token_type,
                                       content=token_contents,
                                       line=lineno,
                                       col=col))
            col += len(token_contents)
    return tokens_return
"def",
"_scan_for_tokens",
"(",
"contents",
")",
":",
"# Regexes are in priority order. Changing the order may alter the",
"# behavior of the lexer",
"scanner",
"=",
"re",
".",
"Scanner",
"(",
"[",
"# Things inside quotes",
"(",
"r\"(?<![^\\s\\(])([\\\"\\'])(?:(?=(\\\\?))\\2.)*?\\1... | 41.616438 | 18.438356 |
def read_some(self):
        """Return at least one byte of cooked data, or '' on EOF.

        Blocks (by repeatedly filling and processing the raw queue) until
        some cooked data is available or EOF is reached, then drains and
        resets the cooked queue.
        """
        self.process_rawq()
        while not self.eof and self.cookedq.tell() == 0:
            self.fill_rawq()
            self.process_rawq()
        # Drain the cooked queue and reset it to empty.
        cooked = self.cookedq.getvalue()
        self.cookedq.seek(0)
        self.cookedq.truncate()
        return cooked
"def",
"read_some",
"(",
"self",
")",
":",
"self",
".",
"process_rawq",
"(",
")",
"while",
"self",
".",
"cookedq",
".",
"tell",
"(",
")",
"==",
"0",
"and",
"not",
"self",
".",
"eof",
":",
"self",
".",
"fill_rawq",
"(",
")",
"self",
".",
"process_ra... | 29.133333 | 16.2 |
def post(self, endpoint, json=None, params=None, **kwargs):
        """POST to DHIS2.

        :param endpoint: DHIS2 API endpoint
        :param json: HTTP payload
        :param params: HTTP parameters
        :return: requests.Response object
        """
        # A `data` keyword argument, if supplied, takes precedence over `json`.
        payload = kwargs.get('data', json)
        return self._make_request('post', endpoint, data=payload, params=params)
"def",
"post",
"(",
"self",
",",
"endpoint",
",",
"json",
"=",
"None",
",",
"params",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"json",
"=",
"kwargs",
"[",
"'data'",
"]",
"if",
"'data'",
"in",
"kwargs",
"else",
"json",
"return",
"self",
".",
... | 42.777778 | 9.777778 |
def _bundle_models(custom_models):
    """ Create a JavaScript bundle with selected `models`.

    Compiles every custom model (and, transitively, any relative-path
    modules its compiled code requires), rewrites ``require(...)`` calls to
    content-hash references, and renders all modules and exports into a
    single UMD plugin source string.

    NOTE(review): relies on module-level helpers/templates defined elsewhere
    in this file (``bokehjs_dir``, ``exts``, ``nodejs_compile``,
    ``_compile_models``, ``FromFile``, ``_plugin_*`` templates).
    """
    exports = []
    modules = []
    # Helper: load one of bokehjs' shipped module listings from disk.
    def read_json(name):
        with io.open(join(bokehjs_dir, "js", name + ".json"), encoding="utf-8") as f:
            return json.loads(f.read())
    # Modules already provided by the stock bundles never need re-bundling.
    bundles = ["bokeh", "bokeh-api", "bokeh-widgets", "bokeh-tables", "bokeh-gl"]
    known_modules = set(sum([ read_json(name) for name in bundles ], []))
    custom_impls = _compile_models(custom_models)
    extra_modules = {}
    # Compile relative-path requires; returns {module name -> content hash}.
    # Appends newly compiled modules to the enclosing `modules` list.
    def resolve_modules(to_resolve, root):
        resolved = {}
        for module in to_resolve:
            if module.startswith(("./", "../")):
                def mkpath(module, ext=""):
                    return abspath(join(root, *module.split("/")) + ext)
                # Locate the module file, trying each known extension when
                # the require did not spell one out.
                if module.endswith(exts):
                    path = mkpath(module)
                    if not exists(path):
                        raise RuntimeError("no such module: %s" % module)
                else:
                    for ext in exts:
                        path = mkpath(module, ext)
                        if exists(path):
                            break
                    else:
                        raise RuntimeError("no such module: %s" % module)
                impl = FromFile(path)
                compiled = nodejs_compile(impl.code, lang=impl.lang, file=impl.file)
                if "error" in compiled:
                    raise CompilationError(compiled.error)
                # Stylesheets become an injected-style shim with no deps.
                if impl.lang == "less":
                    code = _style_template % dict(css=json.dumps(compiled.code))
                    deps = []
                else:
                    code = compiled.code
                    deps = compiled.deps
                # Identify the module by a hash of its compiled source so
                # identical files are bundled only once.
                sig = hashlib.sha256(code.encode('utf-8')).hexdigest()
                resolved[module] = sig
                deps_map = resolve_deps(deps, dirname(path))
                if sig not in extra_modules:
                    extra_modules[sig] = True
                    modules.append((sig, code, deps_map))
            else:
                # Bare module names must resolve to a stock bokehjs module.
                index = module + ("" if module.endswith("/") else "/") + "index"
                if index not in known_modules:
                    raise RuntimeError("no such module: %s" % module)
        return resolved
    # Only resolve deps not satisfied by stock bundles or other custom models.
    def resolve_deps(deps, root):
        custom_modules = set(model.module for model in custom_models.values())
        missing = set(deps) - known_modules - custom_modules
        return resolve_modules(missing, root)
    for model in custom_models.values():
        compiled = custom_impls[model.full_name]
        deps_map = resolve_deps(compiled.deps, model.path)
        exports.append((model.name, model.module))
        modules.append((model.module, compiled.code, deps_map))
    # sort everything by module name
    exports = sorted(exports, key=lambda spec: spec[1])
    modules = sorted(modules, key=lambda spec: spec[0])
    # Rewrite require() calls (both quote styles) to the hashed names.
    for i, (module, code, deps) in enumerate(modules):
        for name, ref in deps.items():
            code = code.replace("""require("%s")""" % name, """require("%s")""" % ref)
            code = code.replace("""require('%s')""" % name, """require('%s')""" % ref)
        modules[i] = (module, code)
    sep = ",\n"
    exports = sep.join(_export_template % dict(name=name, module=module) for (name, module) in exports)
    modules = sep.join(_module_template % dict(module=module, source=code) for (module, code) in modules)
    content = _plugin_template % dict(prelude=_plugin_prelude, exports=exports, modules=modules)
    return _plugin_umd % dict(content=content)
"def",
"_bundle_models",
"(",
"custom_models",
")",
":",
"exports",
"=",
"[",
"]",
"modules",
"=",
"[",
"]",
"def",
"read_json",
"(",
"name",
")",
":",
"with",
"io",
".",
"open",
"(",
"join",
"(",
"bokehjs_dir",
",",
"\"js\"",
",",
"name",
"+",
"\".j... | 38.835165 | 21.318681 |
def rollaxis(vari, axis, start=0):
    """
    Roll the specified axis backwards, until it lies in a given position.

    Args:
        vari (chaospy.poly.base.Poly, numpy.ndarray):
            Input array or polynomial.
        axis (int):
            The axis to roll backwards. The positions of the other axes do
            not change relative to one another.
        start (int):
            The axis is rolled until it lies before this position.
    """
    if not isinstance(vari, Poly):
        return numpy.rollaxis(vari, axis, start)
    # Roll every coefficient array of the polynomial recursively.
    coefficients = vari.A.copy()
    rolled = {key: rollaxis(coefficients[key], axis, start)
              for key in vari.keys}
    return Poly(rolled, vari.dim, None, vari.dtype)
"def",
"rollaxis",
"(",
"vari",
",",
"axis",
",",
"start",
"=",
"0",
")",
":",
"if",
"isinstance",
"(",
"vari",
",",
"Poly",
")",
":",
"core_old",
"=",
"vari",
".",
"A",
".",
"copy",
"(",
")",
"core_new",
"=",
"{",
"}",
"for",
"key",
"in",
"var... | 34.47619 | 17.428571 |
def _return_metadata():
    """
    Build the JSON-formatted metadata payload for route attachment.

    Requires flask.current_app to be set, which means
    `with app.app_context()`
    """
    app = current_app
    payload = {"auth": app.config["AUTH"]["type"]}
    # Copy the remaining metadata fields straight from the app config.
    payload.update(
        (field, app.config[field.upper()])
        for field in ("name", "repository", "version", "description",
                      "api_version"))
    return jsonify(payload)
"def",
"_return_metadata",
"(",
")",
":",
"app",
"=",
"current_app",
"retdict",
"=",
"{",
"\"auth\"",
":",
"app",
".",
"config",
"[",
"\"AUTH\"",
"]",
"[",
"\"type\"",
"]",
"}",
"for",
"fld",
"in",
"[",
"\"name\"",
",",
"\"repository\"",
",",
"\"version\... | 34.416667 | 11.416667 |
def lz(inlist, score):
    """
    Return the z-score for a given input score, given that score and the
    list from which that score came. Not appropriate for population
    calculations.

    Usage: lz(inlist, score)
    """
    deviation = score - mean(inlist)
    return deviation / samplestdev(inlist)
"def",
"lz",
"(",
"inlist",
",",
"score",
")",
":",
"z",
"=",
"(",
"score",
"-",
"mean",
"(",
"inlist",
")",
")",
"/",
"samplestdev",
"(",
"inlist",
")",
"return",
"z"
] | 29.555556 | 19.333333 |
def fetch(self, remote='origin'):
        """Fetch refs from the given remote into this repository."""
        environment = self.env()
        git(self.gitdir, "fetch", remote, _env=environment)
"def",
"fetch",
"(",
"self",
",",
"remote",
"=",
"'origin'",
")",
":",
"git",
"(",
"self",
".",
"gitdir",
",",
"\"fetch\"",
",",
"remote",
",",
"_env",
"=",
"self",
".",
"env",
"(",
")",
")"
] | 41.333333 | 8.333333 |
def fitlin_clipped(xy,uv,verbose=False,mode='rscale',nclip=3,reject=3):
    """ Perform a clipped fit based on the number of iterations and rejection limit
        (in sigma) specified by the user. This will more closely replicate the results
        obtained by 'geomap' using 'maxiter' and 'reject' parameters.

        Parameters
        ----------
        xy, uv : ndarray
            (N, 2) arrays of input and reference positions.
        verbose : bool
            If True, print per-iteration clipping diagnostics.
        mode : str
            Fit type: 'rscale' or 'general' (selects the fitting function).
        nclip : int
            Maximum number of sigma-clipping iterations.
        reject : float
            Rejection limit in units of the residual standard deviation.

        Returns
        -------
        P, Q : coefficient arrays returned by the selected fitting function.
        fit_rms : [x_rms, y_rms] residual standard deviations of the final fit.
    """
    fitting_funcs = {'rscale':fitlin_rscale,'general':fitlin}
    # Get the fitting function to be used
    fit_func = fitting_funcs[mode.lower()]
    # Perform the initial fit
    P,Q = fit_func(xy,uv)
    xyc = apply_fitlin(xy,P,Q)
    # compute residuals from fit for input positions
    dx = uv[:,0] - xyc[0]
    dy = uv[:,1] - xyc[1]
    fit_rms = [dx.std(),dy.std()]
    if nclip > 0:
        data = xy.copy()
        outdata = uv.copy()
        numclipped = 0
        for i in range(nclip):
            iterclipped = 0
            xyc = apply_fitlin(data,P,Q)
            # compute residuals from fit for input positions
            dx = outdata[:,0] - xyc[0]
            dy = outdata[:,1] - xyc[1]
            # find indices of outliers in x and y
            xout = np.where(np.abs(dx - dx.mean()) > reject*dx.std())
            yout = np.where(np.abs(dy - dy.mean()) > reject*dy.std())
            # concatenate those indices and sort them
            outliers_indx = xout[0].tolist()+yout[0].tolist()
            outliers_indx.sort()
            # define the full range of indices for the data points left
            full_indx = list(range(data.shape[0]))
            # remove all unique indices specified in outliers from full range
            for o in outliers_indx:
                # only remove if it has not been removed already
                # accounts for the same point being an outlier in both x and y
                if full_indx.count(o) > 0:
                    full_indx.remove(o)
                    iterclipped += 1
            if iterclipped == 0:
                # nothing was rejected this pass; the fit has converged
                break
            numclipped += iterclipped
            if verbose:
                print('Removed a total of ',numclipped,' points through iteration ',i+1)
            # create clipped data
            data_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
            if verbose:
                print('Iter #',i+1,' data:',data.shape,data_iter.shape,len(full_indx))
            data_iter[:,0] = data[:,0][full_indx]
            data_iter[:,1] = data[:,1][full_indx]
            outdata_iter = np.zeros([len(full_indx),2],dtype=data.dtype)
            outdata_iter[:,0] = outdata[:,0][full_indx]
            outdata_iter[:,1] = outdata[:,1][full_indx]
            # perform the fit again with the clipped data and go to the next iteration
            data = data_iter
            outdata = outdata_iter
            P,Q = fit_func(data,outdata)
        # compute residuals from fit for input positions
        xyc = apply_fitlin(data,P,Q)
        dx = outdata[:,0] - xyc[0]
        dy = outdata[:,1] - xyc[1]
        fit_rms = [dx.std(),dy.std()]
        if verbose:
            print('Fit clipped ',numclipped,' points over ',nclip,' iterations.')
    return P,Q,fit_rms
"def",
"fitlin_clipped",
"(",
"xy",
",",
"uv",
",",
"verbose",
"=",
"False",
",",
"mode",
"=",
"'rscale'",
",",
"nclip",
"=",
"3",
",",
"reject",
"=",
"3",
")",
":",
"fitting_funcs",
"=",
"{",
"'rscale'",
":",
"fitlin_rscale",
",",
"'general'",
":",
... | 37.552632 | 19.026316 |
def assoc(m, *kvs):
    """Associate keys to values in associative data structure m. If m is
    None, returns a new Map with key-values kvs."""
    if m is None:
        return lmap.Map.empty().assoc(*kvs)
    if not isinstance(m, IAssociative):
        raise TypeError(
            f"Object of type {type(m)} does not implement Associative interface"
        )
    return m.assoc(*kvs)
"def",
"assoc",
"(",
"m",
",",
"*",
"kvs",
")",
":",
"if",
"m",
"is",
"None",
":",
"return",
"lmap",
".",
"Map",
".",
"empty",
"(",
")",
".",
"assoc",
"(",
"*",
"kvs",
")",
"if",
"isinstance",
"(",
"m",
",",
"IAssociative",
")",
":",
"return",
... | 36.6 | 15.5 |
def do_capacity(self, line):
        "capacity [tablename] {read_units} {write_units}"
        # Parse the target table and the requested throughput numbers.
        table, line = self.get_table_params(line)
        args = self.getargs(line)
        read_units = int(args[0])
        write_units = int(args[1])
        desc = table.describe()
        prov = desc['Table']['ProvisionedThroughput']
        current_read, current_write = prov['ReadCapacityUnits'], prov['WriteCapacityUnits']
        if read_units < current_read or write_units < current_write:
            # Decreasing capacity can be done in a single update call.
            print "%s: updating capacity to %d read units, %d write units" % (table.table_name, read_units, write_units)
            print ""
            if not table.update(throughput={'read': read_units, 'write': write_units}):
                print "update failed"
            else:
                self.do_refresh(table.table_name)
        else:
            print "%s: current capacity is %d read units, %d write units" % (table.table_name, current_read, current_write)
            # we can only double the current value at each call
            while current_read < read_units or current_write < write_units:
                # Step each unit count towards its target, at most doubling.
                if (read_units - current_read) > current_read:
                    current_read *= 2
                else:
                    current_read = read_units
                if (write_units - current_write) > current_write:
                    current_write *= 2
                else:
                    current_write = write_units
                print "%s: updating capacity to %d read units, %d write units" % (table.table_name, current_read, current_write)
                # NOTE(review): this call passes the throughput dict
                # positionally while the decrease path above uses
                # `throughput=` -- confirm both match table.update()'s API.
                if not table.update({'read': current_read, 'write': current_write}):
                    print ""
                    print "update failed"
                    print ""
                    break
                else:
                    print ""
                    self.do_refresh(table.table_name)
        print ""
"def",
"do_capacity",
"(",
"self",
",",
"line",
")",
":",
"table",
",",
"line",
"=",
"self",
".",
"get_table_params",
"(",
"line",
")",
"args",
"=",
"self",
".",
"getargs",
"(",
"line",
")",
"read_units",
"=",
"int",
"(",
"args",
"[",
"0",
"]",
")"... | 42.75 | 24.522727 |
def _do_scatter_var(v, parallel):
    """Return True when the CWL variable `v` should be scattered over.
    """
    if parallel.startswith("batch") and workflow.is_cwl_record(v):
        # For batches, scatter records only at the top level (double nested)
        return (tz.get_in(["type", "type"], v) == "array"
                and tz.get_in(["type", "type", "type"], v) == "array")
    # Otherwise, scatter arrays
    return tz.get_in(["type", "type"], v) == "array"
"def",
"_do_scatter_var",
"(",
"v",
",",
"parallel",
")",
":",
"# For batches, scatter records only at the top level (double nested)",
"if",
"parallel",
".",
"startswith",
"(",
"\"batch\"",
")",
"and",
"workflow",
".",
"is_cwl_record",
"(",
"v",
")",
":",
"return",
... | 44.2 | 17 |
def split(nodeShape, jobShape, wallTime):
    """
    Partition a node allocation into two to fit the job.

    Returns a pair: the node's shape shrunk by the job's resources for
    `wallTime`, and a NodeReservation covering the node's remaining time
    with its original resources.
    """
    shrunk = Shape(wallTime,
                   nodeShape.memory - jobShape.memory,
                   nodeShape.cores - jobShape.cores,
                   nodeShape.disk - jobShape.disk,
                   nodeShape.preemptable)
    leftover = Shape(nodeShape.wallTime - wallTime,
                     nodeShape.memory,
                     nodeShape.cores,
                     nodeShape.disk,
                     nodeShape.preemptable)
    return shrunk, NodeReservation(leftover)
"def",
"split",
"(",
"nodeShape",
",",
"jobShape",
",",
"wallTime",
")",
":",
"return",
"(",
"Shape",
"(",
"wallTime",
",",
"nodeShape",
".",
"memory",
"-",
"jobShape",
".",
"memory",
",",
"nodeShape",
".",
"cores",
"-",
"jobShape",
".",
"cores",
",",
... | 45.1875 | 10.9375 |
def get_relative_from_paths(self, filepath, paths):
        """
        Find the relative filepath from the most relevant of multiple paths.

        This is somewhat like ``os.path.relpath(path[, start])`` but where
        ``start`` is a list; the deepest matching entry of ``paths`` is used
        as the start of the relative transform.

        Args:
            filepath (str): Path to transform to relative.
            paths (list): List of absolute paths used to find and remove the
                start path from ``filepath``.

        Raises:
            boussole.exception.FinderException: If no ``filepath`` start
                could be found.

        Returns:
            str: Relative filepath with the matched start removed.
        """
        start = next((base for base in paths_by_depth(paths)
                      if filepath.startswith(base)), None)
        if start is None:
            raise FinderException("'Finder.get_relative_from_paths()' could not "
                                  "find filepath start from '{}'".format(filepath))
        return os.path.relpath(filepath, start)
"def",
"get_relative_from_paths",
"(",
"self",
",",
"filepath",
",",
"paths",
")",
":",
"for",
"systempath",
"in",
"paths_by_depth",
"(",
"paths",
")",
":",
"if",
"filepath",
".",
"startswith",
"(",
"systempath",
")",
":",
"return",
"os",
".",
"path",
".",... | 41.413793 | 26.034483 |
def request(self, session_id):
        """Force the termination of a NETCONF session (not the current one!)

        *session_id* is the session identifier of the NETCONF session to be terminated as a string
        """
        node = new_ele("kill-session")
        session_elem = sub_ele(node, "session-id")
        session_elem.text = session_id
        return self._request(node)
"def",
"request",
"(",
"self",
",",
"session_id",
")",
":",
"node",
"=",
"new_ele",
"(",
"\"kill-session\"",
")",
"sub_ele",
"(",
"node",
",",
"\"session-id\"",
")",
".",
"text",
"=",
"session_id",
"return",
"self",
".",
"_request",
"(",
"node",
")"
] | 42.5 | 16.125 |
def add_to_deleted_models(sender, instance=None, *args, **kwargs):
    """
    Record the ID of any deleted syncable model in a separate tracking
    model, so that serialization can later mark it as deleted in the store.
    """
    if not issubclass(sender, SyncableModel):
        return
    instance._update_deleted_models()
"def",
"add_to_deleted_models",
"(",
"sender",
",",
"instance",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"issubclass",
"(",
"sender",
",",
"SyncableModel",
")",
":",
"instance",
".",
"_update_deleted_models",
"(",
")"
] | 46.714286 | 16.714286 |
def file_attribs(location, mode=None, owner=None, group=None, sudo=False):
    """Updates the mode/owner/group for the remote file at the given
    location.

    Thin wrapper around ``dir_attribs`` with recursion disabled (the fifth
    positional argument), since a single file never needs recursive
    treatment."""
    return dir_attribs(location, mode, owner, group, False, sudo)
"def",
"file_attribs",
"(",
"location",
",",
"mode",
"=",
"None",
",",
"owner",
"=",
"None",
",",
"group",
"=",
"None",
",",
"sudo",
"=",
"False",
")",
":",
"return",
"dir_attribs",
"(",
"location",
",",
"mode",
",",
"owner",
",",
"group",
",",
"Fals... | 55.75 | 14.75 |
def _get_children_as_string(node):
"""Iterate through all the children of a node.
Returns one string containing the values from all the text-nodes
recursively.
"""
out = []
if node:
for child in node:
if child.nodeType == child.TEXT_NODE:
out.append(child.data)
else:
out.append(_get_children_as_string(child.childNodes))
return ''.join(out) | [
"def",
"_get_children_as_string",
"(",
"node",
")",
":",
"out",
"=",
"[",
"]",
"if",
"node",
":",
"for",
"child",
"in",
"node",
":",
"if",
"child",
".",
"nodeType",
"==",
"child",
".",
"TEXT_NODE",
":",
"out",
".",
"append",
"(",
"child",
".",
"data"... | 30.071429 | 17.714286 |
def verify_ethinca_metric_options(opts, parser):
    """
    Check that the necessary options are given for the ethinca metric
    calculation.

    Parameters
    ----------
    opts : argparse.Values instance
        Result of parsing the input options with OptionParser
    parser : object
        The OptionParser instance.
    """
    valid_cutoffs = pnutils.named_frequency_cutoffs.keys()
    if opts.filter_cutoff is not None and opts.filter_cutoff not in valid_cutoffs:
        parser.error("Need a valid cutoff formula to calculate ethinca or "
                     "assign filter f_final values! Possible values are "
                     + str(valid_cutoffs))
    ethinca_requested = (opts.calculate_ethinca_metric
                         or opts.calculate_time_metric_components)
    if ethinca_requested and not opts.ethinca_frequency_step:
        parser.error("Need to specify a cutoff frequency step to calculate "
                     "ethinca!")
    if not ethinca_requested and opts.ethinca_pn_order:
        parser.error("Can't specify an ethinca PN order if not "
                     "calculating ethinca metric!")
"def",
"verify_ethinca_metric_options",
"(",
"opts",
",",
"parser",
")",
":",
"if",
"opts",
".",
"filter_cutoff",
"is",
"not",
"None",
"and",
"not",
"(",
"opts",
".",
"filter_cutoff",
"in",
"pnutils",
".",
"named_frequency_cutoffs",
".",
"keys",
"(",
")",
")... | 46.68 | 21.8 |
def log(self, sequence, infoarray) -> None:
        """Log the given |IOSequence| object either for reading or writing
        data.
        The optional `array` argument allows for passing alternative data
        in an |InfoArray| object replacing the series of the |IOSequence|
        object, which is useful for writing modified (e.g. spatially
        averaged) time series.
        Logged time series data is available via attribute access:
        >>> from hydpy.core.netcdftools import NetCDFVariableBase
        >>> from hydpy import make_abc_testable
        >>> NCVar = make_abc_testable(NetCDFVariableBase)
        >>> ncvar = NCVar('flux_nkor', isolate=True, timeaxis=1)
        >>> from hydpy.core.examples import prepare_io_example_1
        >>> nodes, elements = prepare_io_example_1()
        >>> nkor = elements.element1.model.sequences.fluxes.nkor
        >>> ncvar.log(nkor, nkor.series)
        >>> 'element1' in dir(ncvar)
        True
        >>> ncvar.element1.sequence is nkor
        True
        >>> 'element2' in dir(ncvar)
        False
        >>> ncvar.element2
        Traceback (most recent call last):
        ...
        AttributeError: The NetCDFVariable object `flux_nkor` does \
neither handle time series data under the (sub)device name `element2` \
nor does it define a member named `element2`.
        """
        # Register both the sequence and its data under the device name.
        device_name = sequence.descr_device
        self.sequences[device_name] = sequence
        self.arrays[device_name] = infoarray
"def",
"log",
"(",
"self",
",",
"sequence",
",",
"infoarray",
")",
"->",
"None",
":",
"descr_device",
"=",
"sequence",
".",
"descr_device",
"self",
".",
"sequences",
"[",
"descr_device",
"]",
"=",
"sequence",
"self",
".",
"arrays",
"[",
"descr_device",
"]"... | 41.571429 | 18.057143 |
def search_function(root1, q, s, f, l, o='g'):
    """
    Run the search, stash the resulting links in the module-global
    ``links``, then tear down the Tk window `root1`.
    """
    global links
    links = search(q, o, s, f, l)
    root1.destroy()
    root1.quit()
"def",
"search_function",
"(",
"root1",
",",
"q",
",",
"s",
",",
"f",
",",
"l",
",",
"o",
"=",
"'g'",
")",
":",
"global",
"links",
"links",
"=",
"search",
"(",
"q",
",",
"o",
",",
"s",
",",
"f",
",",
"l",
")",
"root1",
".",
"destroy",
"(",
... | 18.5 | 14 |
def options_absent(name, sections=None, separator='='):
    '''
    Ensure the given keys/options are absent from an ini file.

    name
        Path of the ini file to edit.
    sections
        Mapping of section name -> list of keys to remove; a non-dict entry
        is treated as a top-level (sectionless) option name.
    separator
        Key/value separator used by the ini file (default ``=``).

    .. code-block:: yaml

        /home/saltminion/api-paste.ini:
          ini.options_absent:
            - separator: '='
            - sections:
                test:
                  - testkey
                  - secondoption
                test1:
                  - testkey1

    options present in file and not specified in sections
    dict will be untouched
    changes dict will contain the list of changes made
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'No anomaly detected'
           }
    if __opts__['test']:
        # Dry-run mode: report what *would* be removed without touching
        # the file.
        ret['result'] = True
        ret['comment'] = ''
        for section in sections or {}:
            section_name = ' in section ' + section if section else ''
            try:
                cur_section = __salt__['ini.get_section'](name, section, separator)
            except IOError as err:
                ret['comment'] = "{0}".format(err)
                ret['result'] = False
                return ret
            except AttributeError:
                # `section` was not a real section name (plain option).
                cur_section = section
            if isinstance(sections[section], (dict, OrderedDict)):
                for key in sections[section]:
                    cur_value = cur_section.get(key)
                    if not cur_value:
                        ret['comment'] += 'Key {0}{1} does not exist.\n'.format(key, section_name)
                        continue
                    ret['comment'] += 'Deleted key {0}{1}.\n'.format(key, section_name)
                    ret['result'] = None
            else:
                # Sectionless option: the "section" name is the option itself.
                option = section
                if not __salt__['ini.get_option'](name, None, option, separator):
                    ret['comment'] += 'Key {0} does not exist.\n'.format(option)
                    continue
                ret['comment'] += 'Deleted key {0}.\n'.format(option)
                ret['result'] = None
        if ret['comment'] == '':
            ret['comment'] = 'No changes detected.'
        return ret
    # Real run: actually remove the options and record the old values.
    sections = sections or {}
    for section, keys in six.iteritems(sections):
        for key in keys:
            try:
                current_value = __salt__['ini.remove_option'](name, section, key, separator)
            except IOError as err:
                ret['comment'] = "{0}".format(err)
                ret['result'] = False
                return ret
            if not current_value:
                # Key was not present; nothing to record.
                continue
            if section not in ret['changes']:
                ret['changes'].update({section: {}})
            ret['changes'][section].update({key: current_value})
            if not isinstance(sections[section], (dict, OrderedDict)):
                ret['changes'].update({section: current_value})
            # break
    ret['comment'] = 'Changes take effect'
    return ret
"def",
"options_absent",
"(",
"name",
",",
"sections",
"=",
"None",
",",
"separator",
"=",
"'='",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'No anomaly detec... | 37.453333 | 17.906667 |
def vmomentdensity(self,*args,**kwargs):
        """
        NAME:

           vmomentdensity

        PURPOSE:

           calculate an arbitrary moment of the velocity distribution
           at R times the density

        INPUT:

           R - radius at which to calculate the moment(/ro)

           n - vR^n

           m - vT^m

           o - vz^o

        OPTIONAL INPUT:

           nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit numerical integral; default: 4)

           vTmax - upper limit for integration over vT (default: 1.5)

           mc= if True, calculate using Monte Carlo integration

           nmc= if mc, use nmc samples

           gl= use Gauss-Legendre

           _returngl= if True, return the evaluated DF

           _return_actions= if True, return the evaluated actions (does not work with _returngl currently)

           _return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)

        OUTPUT:

           <vR^n vT^m x density> at R,z (no support for units)

        HISTORY:

           2012-08-06 - Written - Bovy (IAS@MPIA)
        """
        use_physical= kwargs.pop('use_physical',True)
        # Resolve the distance scale: explicit kwarg wins, otherwise use the
        # instance's configured ro (if set).
        ro= kwargs.pop('ro',None)
        if ro is None and hasattr(self,'_roSet') and self._roSet:
            ro= self._ro
        if _APY_LOADED and isinstance(ro,units.Quantity):
            ro= ro.to(units.kpc).value
        # Same resolution for the velocity scale vo.
        vo= kwargs.pop('vo',None)
        if vo is None and hasattr(self,'_voSet') and self._voSet:
            vo= self._vo
        if _APY_LOADED and isinstance(vo,units.Quantity):
            vo= vo.to(units.km/units.s).value
        if use_physical and not vo is None and not ro is None:
            # Physical-output path: scale the internal moment by
            # vo^(n+m+o) / ro^3.  args[2:5] appear to be the n, m, o
            # exponents (args[0:2] the coordinates) -- TODO confirm against
            # _vmomentdensity's signature.
            fac= vo**(args[2]+args[3]+args[4])/ro**3
            if _APY_UNITS:
                u= 1/units.kpc**3*(units.km/units.s)**(args[2]+args[3]+args[4])
            out= self._vmomentdensity(*args,**kwargs)
            if _APY_UNITS:
                return units.Quantity(out*fac,unit=u)
            else:
                return out*fac
        else:
            return self._vmomentdensity(*args,**kwargs)
"def",
"vmomentdensity",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"use_physical",
"=",
"kwargs",
".",
"pop",
"(",
"'use_physical'",
",",
"True",
")",
"ro",
"=",
"kwargs",
".",
"pop",
"(",
"'ro'",
",",
"None",
")",
"if",
"ro... | 29.985714 | 26.3 |
def get_type(type_name):
    """Import and return an object given its fully-qualified name.

    Parameters
    ----------
    type_name : `str`
        Name of the Python type, such as ``mypackage.MyClass``.

    Returns
    -------
    object
        The object.

    Raises
    ------
    SphinxError
        If ``type_name`` is not a dotted, fully-qualified name.
    """
    if '.' not in type_name:
        raise SphinxError(
            'Type must be fully-qualified, '
            'of the form ``module.MyClass``. Got: {}'.format(type_name)
        )
    # Split off the attribute after the last dot; the rest is the module.
    module_name, _, attr_name = type_name.rpartition('.')
    return getattr(import_module(module_name), attr_name)
"def",
"get_type",
"(",
"type_name",
")",
":",
"parts",
"=",
"type_name",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"2",
":",
"raise",
"SphinxError",
"(",
"'Type must be fully-qualified, '",
"'of the form ``module.MyClass``. Got: {}'",
... | 25.136364 | 19.909091 |
def check_precomputed_distance_matrix(X):
    """Validate a precomputed distance matrix with ``check_array`` after
    substituting its infinite entries (``numpy.inf``), which would
    otherwise be rejected.  ``X`` itself is left unmodified.
    """
    finite = X.copy()
    finite[np.isinf(finite)] = 1
    check_array(finite)
check_array(tmp) | [
"def",
"check_precomputed_distance_matrix",
"(",
"X",
")",
":",
"tmp",
"=",
"X",
".",
"copy",
"(",
")",
"tmp",
"[",
"np",
".",
"isinf",
"(",
"tmp",
")",
"]",
"=",
"1",
"check_array",
"(",
"tmp",
")"
] | 36 | 9.5 |
def add_child(self, child, logical_block_size, allow_duplicate=False):
    # type: (DirectoryRecord, int, bool) -> bool
    '''
    A method to add a new child to this directory record.

    Parameters:
     child - The child directory record object to add.
     logical_block_size - The size of a logical block for this volume descriptor.
     allow_duplicate - Whether to allow duplicate names, as there are
                       situations where duplicate children are allowed.
    Returns:
     True if adding this child caused the directory to overflow into another
     extent, False otherwise.
    Raises:
     PyCdlibInternalError - If this directory record has not been initialized.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Directory Record not yet initialized')
    # Delegate to the private helper; the trailing True is an extra flag whose
    # meaning is defined by _add_child — NOTE(review): not visible here, confirm.
    return self._add_child(child, logical_block_size, allow_duplicate, True)
"def",
"add_child",
"(",
"self",
",",
"child",
",",
"logical_block_size",
",",
"allow_duplicate",
"=",
"False",
")",
":",
"# type: (DirectoryRecord, int, bool) -> bool",
"if",
"not",
"self",
".",
"_initialized",
":",
"raise",
"pycdlibexception",
".",
"PyCdlibInternalE... | 47.333333 | 29.222222 |
def find_instance_and_eni_by_ip(vpc_info, ip):
    """
    Locate the EC2 instance and ENI that own the given private IP address.

    We need this information for setting the route.  Returns the matching
    (instance, eni) tuple, or raises VpcRouteSetError when no interface in
    the VPC carries that address.
    """
    for inst in vpc_info['instances']:
        for interface in inst.interfaces:
            if any(addr.private_ip_address == ip
                   for addr in interface.private_ip_addresses):
                return inst, interface
    raise VpcRouteSetError("Could not find instance/eni for '%s' "
                           "in VPC '%s'." % (ip, vpc_info['vpc'].id))
"def",
"find_instance_and_eni_by_ip",
"(",
"vpc_info",
",",
"ip",
")",
":",
"for",
"instance",
"in",
"vpc_info",
"[",
"'instances'",
"]",
":",
"for",
"eni",
"in",
"instance",
".",
"interfaces",
":",
"for",
"pa",
"in",
"eni",
".",
"private_ip_addresses",
":",... | 35.25 | 14.5 |
def building(shape=None, gray=False):
    """Photo of the Centre for Mathematical Sciences in Cambridge.

    Parameters
    ----------
    shape : optional
        Target shape passed to ``convert``; native size is kept if omitted.
    gray : bool, optional
        If True, a gray-scale image is returned instead of color.

    Returns
    -------
    An image with the following properties:

        image type: color (or gray scales if `gray=True`)
        size: [442, 331] (if not specified by `size`)
        scale: [0, 1]
        type: float64
    """
    # TODO: Store data in some ODL controlled url
    name = 'cms.mat'
    url = URL_CAM + name
    # Fetch (and possibly cache) the .mat file holding the raw image.
    dct = get_data(name, subset=DATA_SUBSET, url=url)
    # Rotate by 270 degrees (np.rot90 with k=3) to orient the stored array.
    im = np.rot90(dct['im'], k=3)
    return convert(im, shape, gray=gray)
"def",
"building",
"(",
"shape",
"=",
"None",
",",
"gray",
"=",
"False",
")",
":",
"# TODO: Store data in some ODL controlled url",
"name",
"=",
"'cms.mat'",
"url",
"=",
"URL_CAM",
"+",
"name",
"dct",
"=",
"get_data",
"(",
"name",
",",
"subset",
"=",
"DATA_S... | 30.333333 | 15.388889 |
def get_next_step(self, step=None):
    """
    Return the first visible step that follows the given `step`.

    When `step` is None, the wizard's current step is used as the
    starting point.  Returns None if no later step is visible.
    """
    current = self.steps.current if step is None else step
    ordered = list(self.get_form_list().keys())
    start = ordered.index(current) + 1
    for candidate in ordered[start:]:
        if self.is_step_visible(candidate):
            return candidate
    return None
"def",
"get_next_step",
"(",
"self",
",",
"step",
"=",
"None",
")",
":",
"if",
"step",
"is",
"None",
":",
"step",
"=",
"self",
".",
"steps",
".",
"current",
"form_list",
"=",
"self",
".",
"get_form_list",
"(",
")",
"keys",
"=",
"list",
"(",
"form_lis... | 37.866667 | 10.533333 |
def bed(args):
    """
    %prog bed frgscffile

    Convert the frgscf posmap file to bed format.
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    frgscffile, = args

    bedfile = frgscffile.rsplit(".", 1)[0] + ".bed"
    # Context managers guarantee both files are closed (and the output
    # fully flushed) before we report completion -- the original leaked
    # both handles.
    with open(frgscffile) as fp, open(bedfile, "w") as fw:
        for row in fp:
            f = FrgScfLine(row)
            print(f.bedline, file=fw)

    logging.debug("File written to `{0}`.".format(bedfile))
    return bedfile
"def",
"bed",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"bed",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
... | 21.125 | 19.375 |
def get_pages(parser, token):
    """Add to context the list of page links.

    Usage:

    .. code-block:: html+django

        {% get_pages %}

    This is mostly used for Digg-style pagination.  It inserts a *pages*
    variable into the template context: a sequence of page links that
    supports ``get_rendered``, ``paginated``, ``current``, ``first``,
    ``last``, ``previous``, ``next``, numeric lookup (``pages.3``),
    ``first_as_arrow``, ``last_as_arrow``, ``current_start_index``,
    ``current_end_index``, ``total_count``, ``len()`` and iteration over
    individual page objects (each exposing ``render_link``, ``url``,
    ``path``, ``number``, ``label``, ``is_current``, ``is_first``,
    ``is_last``).

    The context variable name can be customized:

    .. code-block:: html+django

        {% get_pages as page_links %}

    Must be called after ``{% paginate objects %}``.
    """
    # Validate args.
    try:
        tag_name, args = token.contents.split(None, 1)
    except ValueError:
        # Bare "{% get_pages %}": use the default variable name.
        return GetPagesNode('pages')
    pieces = args.split()
    if len(pieces) == 2 and pieces[0] == 'as':
        return GetPagesNode(pieces[1])
    msg = 'Invalid arguments for %r tag' % tag_name
    raise template.TemplateSyntaxError(msg)
"def",
"get_pages",
"(",
"parser",
",",
"token",
")",
":",
"# Validate args.",
"try",
":",
"tag_name",
",",
"args",
"=",
"token",
".",
"contents",
".",
"split",
"(",
"None",
",",
"1",
")",
"except",
"ValueError",
":",
"var_name",
"=",
"'pages'",
"else",
... | 24.95122 | 22.170732 |
def is_complex(arg):
    '''
    is_complex(x) yields True if x is a complex numeric object and False otherwise.

    Anything representable as a complex number (integers, booleans, floats)
    also counts, which in effect makes this an alias for is_number(arg).
    Quantities are unwrapped and tested on their magnitude.
    '''
    if is_quantity(arg):
        return is_complex(mag(arg))
    if isinstance(arg, numbers.Complex):
        return True
    return is_npscalar(arg, 'complex') or is_npvalue(arg, 'complex')
"def",
"is_complex",
"(",
"arg",
")",
":",
"return",
"(",
"is_complex",
"(",
"mag",
"(",
"arg",
")",
")",
"if",
"is_quantity",
"(",
"arg",
")",
"else",
"True",
"if",
"isinstance",
"(",
"arg",
",",
"numbers",
".",
"Complex",
")",
"else",
"is_npscalar",
... | 57.444444 | 36.555556 |
def get_all_child_edges(self):
    """Return tuples for all child GO IDs, containing current GO ID and child GO ID."""
    edges = set()
    for child in self.children:
        edges.add((child.item_id, self.item_id))
        edges |= child.get_all_child_edges()
    return edges
"def",
"get_all_child_edges",
"(",
"self",
")",
":",
"all_child_edges",
"=",
"set",
"(",
")",
"for",
"parent",
"in",
"self",
".",
"children",
":",
"all_child_edges",
".",
"add",
"(",
"(",
"parent",
".",
"item_id",
",",
"self",
".",
"item_id",
")",
")",
... | 48.571429 | 10.714286 |
def select_candidates(config):
    """Select candidates to download.

    Parameters
    ----------
    config: NgdConfig
        Runtime configuration object

    Returns
    -------
    list of (<candidate entry>, <taxonomic group>)
    """
    candidates = []
    for taxon_group in config.group:
        summary = get_summary(config.section, taxon_group, config.uri,
                              config.use_cache)
        for entry in filter_entries(parse_summary(summary), config):
            candidates.append((entry, taxon_group))
    return candidates
"def",
"select_candidates",
"(",
"config",
")",
":",
"download_candidates",
"=",
"[",
"]",
"for",
"group",
"in",
"config",
".",
"group",
":",
"summary_file",
"=",
"get_summary",
"(",
"config",
".",
"section",
",",
"group",
",",
"config",
".",
"uri",
",",
... | 24.26087 | 21.913043 |
def parse_filespec(fspec, sep=':', gpat='*'):
    """
    Parse given filespec `fspec` and return [(filepath, filetype)].

    Because anyconfig.load should find correct file's type to load by the file
    extension, this function will not try guessing file's type if not file type
    is specified explicitly.

    :param fspec: filespec
    :param sep: a char separating filetype and filepath in filespec
    :param gpat: a char for glob pattern

    >>> parse_filespec("base.json")
    [('base.json', None)]
    >>> parse_filespec("json:base.json")
    [('base.json', 'json')]
    >>> parse_filespec("yaml:foo.yaml")
    [('foo.yaml', 'yaml')]
    >>> parse_filespec("yaml:foo.dat")
    [('foo.dat', 'yaml')]

    TODO: Allow '*' (glob pattern) in filepath when escaped with '\\', etc.

    # >>> parse_filespec("yaml:bar/*.conf")
    # [('bar/a.conf', 'yaml'), ('bar/b.conf', 'yaml')]
    """
    if sep in fspec:
        # Split on the first separator only, so filepaths that themselves
        # contain the separator character no longer raise ValueError.
        ftype, fpath = fspec.split(sep, 1)
    else:
        ftype, fpath = None, fspec
    if gpat in fspec:
        # Expand the glob, keeping results deterministic via sorting.
        return [(fs, ftype) for fs in sorted(glob.glob(fpath))]
    return [(fpath, ftype)]
"def",
"parse_filespec",
"(",
"fspec",
",",
"sep",
"=",
"':'",
",",
"gpat",
"=",
"'*'",
")",
":",
"if",
"sep",
"in",
"fspec",
":",
"tpl",
"=",
"(",
"ftype",
",",
"fpath",
")",
"=",
"tuple",
"(",
"fspec",
".",
"split",
"(",
"sep",
")",
")",
"els... | 34.375 | 17.8125 |
def run(self):
    """
    Run the given command and yield each line(s) one by one.

    .. note::
        The difference between this method and :code:`self.execute()`
        is that :code:`self.execute()` wait for the process to end
        in order to return its output.

    .. note::
        Reading stops at the first empty line from stdout.
        NOTE(review): an empty line ends iteration even if the process is
        still producing output afterwards — confirm this is intended.
    """
    with Popen(self.command, stdout=PIPE, shell=True) as process:
        # We initiate a process and parse the command to it.
        while True:
            # We loop infinitely because we want to get the output
            # until there is none.
            # We get the current line from the process stdout.
            #
            # Note: we use rstrip() because we are paranoid :-)
            # (readline() returns bytes here, so current_line is bytes.)
            current_line = process.stdout.readline().rstrip()
            if not current_line:
                # The current line is empty or equal to None.
                # We break the loop.
                break
            # The line is not empty nor equal to None.
            # We decode and yield the current line
            yield self._decode_output(current_line)
"def",
"run",
"(",
"self",
")",
":",
"with",
"Popen",
"(",
"self",
".",
"command",
",",
"stdout",
"=",
"PIPE",
",",
"shell",
"=",
"True",
")",
"as",
"process",
":",
"# We initiate a process and parse the command to it.",
"while",
"True",
":",
"# We loop infini... | 34.78125 | 23.46875 |
def get_page_by_id_text(self, project, wiki_identifier, id, recursion_level=None, include_content=None, **kwargs):
    """GetPageByIdText.
    [Preview API] Gets metadata or content of the wiki page for the provided page id. Content negotiation is done based on the `Accept` header sent in the request.
    :param str project: Project ID or project name
    :param str wiki_identifier: Wiki Id or name.
    :param int id: Wiki page id.
    :param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
    :param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
    :param kwargs: may contain ``callback``, a progress callable forwarded to the streaming download.
    :rtype: object
    """
    # Build the URL route segments; only the parameters actually given
    # are serialized into the route.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if wiki_identifier is not None:
        route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
    if id is not None:
        route_values['id'] = self._serialize.url('id', id, 'int')
    # Optional query-string parameters.
    query_parameters = {}
    if recursion_level is not None:
        query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
    if include_content is not None:
        query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
    # Request the page as plain text (Accept: text/plain).
    response = self._send(http_method='GET',
                          location_id='ceddcf75-1068-452d-8b13-2d4d76e1f970',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          accept_media_type='text/plain')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    # Stream the body back to the caller, reporting progress via callback.
    return self._client.stream_download(response, callback=callback)
"def",
"get_page_by_id_text",
"(",
"self",
",",
"project",
",",
"wiki_identifier",
",",
"id",
",",
"recursion_level",
"=",
"None",
",",
"include_content",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",... | 60.515152 | 29.181818 |
def start_drag(self, sprite, cursor_x = None, cursor_y = None):
    """start dragging given sprite

    Falls back to the sprite's own position when cursor coordinates are
    not given.  A coordinate of 0 is valid and is no longer mistaken for
    "missing" (the previous ``cursor_x or sprite.x`` fallback did that).
    """
    # Explicit None checks: `cursor_x or sprite.x` would discard a
    # legitimate coordinate equal to 0.
    if cursor_x is None:
        cursor_x = sprite.x
    if cursor_y is None:
        cursor_y = sprite.y
    self._mouse_down_sprite = self._drag_sprite = sprite
    # Remember where the sprite was when the drag began.
    sprite.drag_x, sprite.drag_y = self._drag_sprite.x, self._drag_sprite.y
    self.__drag_start_x, self.__drag_start_y = cursor_x, cursor_y
    self.__drag_started = True
"def",
"start_drag",
"(",
"self",
",",
"sprite",
",",
"cursor_x",
"=",
"None",
",",
"cursor_y",
"=",
"None",
")",
":",
"cursor_x",
",",
"cursor_y",
"=",
"cursor_x",
"or",
"sprite",
".",
"x",
",",
"cursor_y",
"or",
"sprite",
".",
"y",
"self",
".",
"_m... | 52.125 | 23.5 |
def _ensure_update_ha_compliant(self, router, current_router,
                                r_hd_binding_db):
    """To be called in update_router() BEFORE router has been
    updated in DB.

    Validates and normalizes the HA-related attributes of an update
    request.  Pops ``ha.ENABLED`` and ``ha.DETAILS`` from `router` and
    returns a dict with the validated values to apply (possibly empty).

    Raises HADisabled, HATypeCannotBeChanged or HADisabledHAType when
    the requested HA settings conflict with the configuration or with
    the router's current HA type.
    """
    # Redundancy routers never carry their own HA configuration.
    if r_hd_binding_db.role == ROUTER_ROLE_HA_REDUNDANCY:
        return {ha.ENABLED: False}
    auto_enable_ha = r_hd_binding_db.router_type.ha_enabled_by_default
    requested_ha_details = router.pop(ha.DETAILS, {})
    # If ha_details are given then ha is assumed to be enabled even if
    # it is not explicitly specified or if auto_enable_ha says so.
    # Note that None is used to indicate that request did not include any
    # ha information was provided!
    requested_ha_enabled = router.pop(
        ha.ENABLED, True if requested_ha_details or auto_enable_ha is True
        else None)
    res = {}
    ha_currently_enabled = current_router.get(ha.ENABLED, False)
    # Note: must check for 'is True' as None implies attribute not given
    if requested_ha_enabled is True or ha_currently_enabled is True:
        if not cfg.CONF.ha.ha_support_enabled:
            raise ha.HADisabled()
        curr_ha_details = current_router.get(ha.DETAILS, {})
        if ha.TYPE in requested_ha_details:
            requested_ha_type = requested_ha_details[ha.TYPE]
            # The HA type is immutable once set, and the requested type
            # must not be administratively disabled.
            if (ha.TYPE in curr_ha_details and
                    requested_ha_type != curr_ha_details[ha.TYPE]):
                raise ha.HATypeCannotBeChanged()
            elif requested_ha_type in cfg.CONF.ha.disabled_ha_mechanisms:
                raise ha.HADisabledHAType(ha_type=requested_ha_type)
    if requested_ha_enabled:
        res[ha.ENABLED] = requested_ha_enabled
        if requested_ha_details:
            res[ha.DETAILS] = requested_ha_details
    elif requested_ha_enabled is False:
        res[ha.ENABLED] = False
    return res
"def",
"_ensure_update_ha_compliant",
"(",
"self",
",",
"router",
",",
"current_router",
",",
"r_hd_binding_db",
")",
":",
"if",
"r_hd_binding_db",
".",
"role",
"==",
"ROUTER_ROLE_HA_REDUNDANCY",
":",
"return",
"{",
"ha",
".",
"ENABLED",
":",
"False",
"}",
"auto... | 52.108108 | 17.945946 |
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild | [
"def",
"_lastRecursiveChild",
"(",
"self",
")",
":",
"lastChild",
"=",
"self",
"while",
"hasattr",
"(",
"lastChild",
",",
"'contents'",
")",
"and",
"lastChild",
".",
"contents",
":",
"lastChild",
"=",
"lastChild",
".",
"contents",
"[",
"-",
"1",
"]",
"retu... | 43 | 17 |
def _decode(self):
    """
    Convert the characters of character in value of component to standard
    value (WFN value).

    This function scans the value of component and returns a copy
    with all percent-encoded characters decoded.

    Stores the decoded string in ``self._standard_value``.

    :exception: ValueError - invalid character in value of component
    """
    result = []
    idx = 0
    s = self._encoded_value
    # Tracks whether any non-%01 character has been seen yet; used to
    # validate where embedded %01 wildcards may appear.
    embedded = False
    errmsg = []
    errmsg.append("Invalid value: ")
    # NOTE(review): `s` is appended to errmsg on every loop iteration, so a
    # raised message can contain the value multiple times — confirm intended.
    while (idx < len(s)):
        errmsg.append(s)
        errmsg_str = "".join(errmsg)
        # Get the idx'th character of s
        c = s[idx]
        # Deal with dot, hyphen and tilde: decode with quoting
        if ((c == '.') or (c == '-') or (c == '~')):
            result.append("\\")
            result.append(c)
            idx += 1
            embedded = True # a non-%01 encountered
            continue
        if (c != '%'):
            result.append(c)
            idx += 1
            embedded = True # a non-%01 encountered
            continue
        # we get here if we have a substring starting w/ '%'
        form = s[idx: idx + 3] # get the three-char sequence
        if form == CPEComponent2_3_URI.WILDCARD_ONE:
            # If %01 legal at beginning or end
            # embedded is false, so must be preceded by %01
            # embedded is true, so must be followed by %01
            if (((idx == 0) or (idx == (len(s)-3))) or
                ((not embedded) and (s[idx - 3:idx] == CPEComponent2_3_URI.WILDCARD_ONE)) or
                (embedded and (len(s) >= idx + 6) and (s[idx + 3:idx + 6] == CPEComponent2_3_URI.WILDCARD_ONE))):
                # A percent-encoded question mark is found
                # at the beginning or the end of the string,
                # or embedded in sequence as required.
                # Decode to unquoted form.
                result.append(CPEComponent2_3_WFN.WILDCARD_ONE)
                idx += 3
                continue
            else:
                raise ValueError(errmsg_str)
        elif form == CPEComponent2_3_URI.WILDCARD_MULTI:
            if ((idx == 0) or (idx == (len(s) - 3))):
                # Percent-encoded asterisk is at the beginning
                # or the end of the string, as required.
                # Decode to unquoted form.
                result.append(CPEComponent2_3_WFN.WILDCARD_MULTI)
            else:
                raise ValueError(errmsg_str)
        elif form in CPEComponent2_3_URI.pce_char_to_decode.keys():
            # Ordinary percent-encoded character: map to its WFN form.
            value = CPEComponent2_3_URI.pce_char_to_decode[form]
            result.append(value)
        else:
            errmsg.append("Invalid percent-encoded character: ")
            errmsg.append(s)
            raise ValueError("".join(errmsg))
        idx += 3
        embedded = True # a non-%01 encountered.
    self._standard_value = "".join(result)
"def",
"_decode",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"idx",
"=",
"0",
"s",
"=",
"self",
".",
"_encoded_value",
"embedded",
"=",
"False",
"errmsg",
"=",
"[",
"]",
"errmsg",
".",
"append",
"(",
"\"Invalid value: \"",
")",
"while",
"(",
"id... | 37.085366 | 21.817073 |
def run(self, options, args):
    """Prints the completion code of the given shell"""
    supported = COMPLETION_SCRIPTS.keys()
    if options.shell in supported:
        script = COMPLETION_SCRIPTS.get(options.shell, '')
        print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
        return
    # Unknown shell: tell the user which flags are accepted.
    flags = ' or '.join('--' + name for name in sorted(supported))
    sys.stderr.write(
        'ERROR: You must pass %s\n' % flags
    )
"def",
"run",
"(",
"self",
",",
"options",
",",
"args",
")",
":",
"shells",
"=",
"COMPLETION_SCRIPTS",
".",
"keys",
"(",
")",
"shell_options",
"=",
"[",
"'--'",
"+",
"shell",
"for",
"shell",
"in",
"sorted",
"(",
"shells",
")",
"]",
"if",
"options",
"... | 45.363636 | 18.363636 |
def folderitems(self):
    """TODO: Refactor to non-classic mode

    Build the folder listing via the parent class, keeping the
    accumulated category list sorted before returning.
    """
    items = super(AnalysisSpecificationView, self).folderitems()
    # Sort in place so categories come out in a stable order.
    self.categories.sort()
    return items
"def",
"folderitems",
"(",
"self",
")",
":",
"items",
"=",
"super",
"(",
"AnalysisSpecificationView",
",",
"self",
")",
".",
"folderitems",
"(",
")",
"self",
".",
"categories",
".",
"sort",
"(",
")",
"return",
"items"
] | 32.666667 | 12.666667 |
def _make_2d_array(self, data):
"""
Convert a 1D array of mesh values to a masked 2D mesh array
given the 1D mesh indices ``mesh_idx``.
Parameters
----------
data : 1D `~numpy.ndarray`
A 1D array of mesh values.
Returns
-------
result : 2D `~numpy.ma.MaskedArray`
A 2D masked array. Pixels not defined in ``mesh_idx`` are
masked.
"""
if data.shape != self.mesh_idx.shape:
raise ValueError('data and mesh_idx must have the same shape')
if np.ma.is_masked(data):
raise ValueError('data must not be a masked array')
data2d = np.zeros(self._mesh_shape).astype(data.dtype)
data2d[self.mesh_yidx, self.mesh_xidx] = data
if len(self.mesh_idx) == self.nboxes:
# no meshes were masked
return data2d
else:
# some meshes were masked
mask2d = np.ones(data2d.shape).astype(np.bool)
mask2d[self.mesh_yidx, self.mesh_xidx] = False
return np.ma.masked_array(data2d, mask=mask2d) | [
"def",
"_make_2d_array",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
".",
"shape",
"!=",
"self",
".",
"mesh_idx",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'data and mesh_idx must have the same shape'",
")",
"if",
"np",
".",
"ma",
".",
"is_masked... | 31.314286 | 19.771429 |
def _join_paragraphs(cls, lines, use_indent=False, leading_blanks=False, trailing_blanks=False):
    """Join adjacent lines together into paragraphs using either a blank line or indent as separator.

    In indent mode a line starting with a space continues the current
    paragraph and blank lines are skipped; otherwise a blank line ends
    the current paragraph.  Each finished paragraph is passed through
    ``cls._join_paragraph`` with the blank-handling flags.
    """
    curr_para = []
    paragraphs = []

    for line in lines:
        if use_indent:
            # Indented continuation line: belongs to the current paragraph.
            if line.startswith(' '):
                curr_para.append(line.lstrip())
                continue
            elif line == '':
                continue
            else:
                # New unindented line starts a fresh paragraph.
                if len(curr_para) > 0:
                    paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))

                curr_para = [line.lstrip()]
        else:
            if len(line) != 0:
                curr_para.append(line)
            else:
                # NOTE(review): consecutive blank lines append a paragraph
                # built from an empty list — confirm that is intended.
                paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))
                curr_para = []

    # Finish the last paragraph if there is one
    if len(curr_para) > 0:
        paragraphs.append(cls._join_paragraph(curr_para, leading_blanks, trailing_blanks))

    return paragraphs
"def",
"_join_paragraphs",
"(",
"cls",
",",
"lines",
",",
"use_indent",
"=",
"False",
",",
"leading_blanks",
"=",
"False",
",",
"trailing_blanks",
"=",
"False",
")",
":",
"curr_para",
"=",
"[",
"]",
"paragraphs",
"=",
"[",
"]",
"for",
"line",
"in",
"line... | 38.2 | 22.133333 |
def pre_encrypt_assertion(response):
    """
    Move the assertion to within a encrypted_assertion

    :param response: The response with one assertion
    :return: The response but now with the assertion within an
        encrypted_assertion.
    """
    moved = response.assertion
    response.assertion = None
    response.encrypted_assertion = EncryptedAssertion()
    if moved is None:
        return response
    # A list of assertions and a single assertion use different adders.
    if isinstance(moved, list):
        response.encrypted_assertion.add_extension_elements(moved)
    else:
        response.encrypted_assertion.add_extension_element(moved)
    return response
"def",
"pre_encrypt_assertion",
"(",
"response",
")",
":",
"assertion",
"=",
"response",
".",
"assertion",
"response",
".",
"assertion",
"=",
"None",
"response",
".",
"encrypted_assertion",
"=",
"EncryptedAssertion",
"(",
")",
"if",
"assertion",
"is",
"not",
"No... | 38.1875 | 13.9375 |
def resolve_variables(self, task):
    """
    Resolve task variables based on input variables and the default
    values.

    Project variables override task variables of the same key; input
    values override declared defaults.

    Raises
    ------
    LookupError
        If a variable has neither an input value nor a default.
    """
    merged = {**task.variables, **self.project.variables}
    resolved = {}
    for var in merged.values():
        chosen = self.variables.get(var.name) or var.default
        if chosen is None:
            raise LookupError(var)
        resolved[var.name] = chosen
    return resolved
"def",
"resolve_variables",
"(",
"self",
",",
"task",
")",
":",
"variables",
"=",
"{",
"*",
"*",
"task",
".",
"variables",
",",
"*",
"*",
"self",
".",
"project",
".",
"variables",
"}",
"values",
"=",
"{",
"}",
"for",
"variable",
"in",
"variables",
".... | 25.409091 | 20.590909 |
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the n-grams are
    simply the sequence itself.

    >>> ng.word_ngrams('Test!')
    [('Test!',)]

    Args:
        s: a string
        n: n-gram size (capped at the number of tokens)
        token_fn: callable splitting the string into tokens

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    # Cap n at the token count so short inputs still yield one n-gram.
    return __ngrams(tokens, n=min(len(tokens), n))
"def",
"word_ngrams",
"(",
"s",
",",
"n",
"=",
"3",
",",
"token_fn",
"=",
"tokens",
".",
"on_whitespace",
")",
":",
"tokens",
"=",
"token_fn",
"(",
"s",
")",
"return",
"__ngrams",
"(",
"tokens",
",",
"n",
"=",
"min",
"(",
"len",
"(",
"tokens",
")",... | 27.434783 | 21.347826 |
def get_similar_users(self, users=None, k=10):
    """Get the k most similar users for each entry in `users`.

    Each type of recommender has its own model for the similarity
    between users. For example, the factorization_recommender will
    return the nearest users based on the cosine similarity
    between latent user factors.  (This method is not currently
    available for item_similarity models.)

    Parameters
    ----------
    users : SArray or list; optional
        An :class:`~turicreate.SArray` or list of user ids for which to get
        similar users. If 'None', then return the `k` most similar users for
        all users in the training set.

    k : int, optional
        The number of neighbors to return for each user.

    Returns
    -------
    out : SFrame
        A SFrame with the top ranked similar users for each user. The
        columns `user`, 'similar', 'score' and 'rank', where
        `user` matches the user column name specified at training time.
        The 'rank' is between 1 and `k` and 'score' gives the similarity
        score of that user. The value of the score depends on the method
        used for computing user similarities.

    Examples
    --------
    >>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
                                'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
    >>> m = turicreate.factorization_recommender.create(sf)
    >>> nn = m.get_similar_users()
    """
    if users is None:
        get_all_users = True
        users = _SArray()
    else:
        get_all_users = False
        if isinstance(users, list):
            users = _SArray(users)

    def check_type(arg, arg_name, required_type, allowed_types):
        # Fail fast with a readable message when an argument has the
        # wrong type.
        if not isinstance(arg, required_type):
            raise TypeError("Parameter " + arg_name + " must be of type(s) "
                            + (", ".join(allowed_types) )
                            + "; Type '" + str(type(arg)) + "' not recognized.")

    check_type(users, "users", _SArray, ["SArray", "list"])
    check_type(k, "k", int, ["int"])

    # The proxy call takes the arguments directly; the `opt` dict that was
    # previously assembled here was dead code and has been removed.
    return self.__proxy__.get_similar_users(users, k, get_all_users)
"def",
"get_similar_users",
"(",
"self",
",",
"users",
"=",
"None",
",",
"k",
"=",
"10",
")",
":",
"if",
"users",
"is",
"None",
":",
"get_all_users",
"=",
"True",
"users",
"=",
"_SArray",
"(",
")",
"else",
":",
"get_all_users",
"=",
"False",
"if",
"i... | 38.359375 | 24.15625 |
def get_object_by_name(self, name):
    """Find an object by name.

    :param name: Name of object. Case-sensitive.
    :rtype: Object if found, otherwise ValueError is raised
    """
    for obj in self.objects:
        if obj.name == name:
            return obj
    # Include the name in the error so callers can tell what was missing
    # (the original raised a bare ValueError with no message).
    raise ValueError("No object named {!r}".format(name))
"def",
"get_object_by_name",
"(",
"self",
",",
"name",
")",
":",
"for",
"obj",
"in",
"self",
".",
"objects",
":",
"if",
"obj",
".",
"name",
"==",
"name",
":",
"return",
"obj",
"raise",
"ValueError"
] | 29.1 | 11.7 |
def logItems(self, level=logging.DEBUG):
    """Log the item tree starting at the root item, or note its absence."""
    root = self.rootItem()
    if root is not None:
        root.logBranch(level=level)
    else:
        logger.debug("No items in: {}".format(self))
"def",
"logItems",
"(",
"self",
",",
"level",
"=",
"logging",
".",
"DEBUG",
")",
":",
"rootItem",
"=",
"self",
".",
"rootItem",
"(",
")",
"if",
"rootItem",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"No items in: {}\"",
".",
"format",
"(",
"self... | 30.625 | 8 |
def backbone_bond_angles(self):
    """Dictionary containing backbone bond angles as lists of floats.

    Returns
    -------
    bond_angles : dict
        Keys are `n_ca_c`, `ca_c_o`, `ca_c_n` and `c_n_ca`, referring
        to the N-CA-C, CA-C=O, CA-C-N and C-N-CA angles respectively.
        Values are lists of floats : the bond angles in degrees.
        The lists of n_ca_c, ca_c_o are of length k for a `Polypeptide`
        containing k `Residues`. The list of ca_c_n and c_n_ca are of
        length k-1 for a `Polypeptide` containing k `Residues` (These
        angles are across the peptide bond, and are therefore formed
        between successive `Residue` pairs).
    """
    bond_angles = dict(
        # Angles formed within a single residue (k values).
        n_ca_c=[angle_between_vectors(r['N'] - r['CA'], r['C'] - r['CA'])
                for r in self.get_monomers(ligands=False)],
        ca_c_o=[angle_between_vectors(r['CA'] - r['C'], r['O'] - r['C'])
                for r in self.get_monomers(ligands=False)],
        # Angles spanning the peptide bond between residues i and i+1
        # (k-1 values).
        ca_c_n=[angle_between_vectors(r1['CA'] - r1['C'], r2['N'] - r1['C'])
                for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
        c_n_ca=[angle_between_vectors(r1['C'] - r2['N'], r2['CA'] - r2['N'])
                for r1, r2 in [(self[i], self[i + 1]) for i in range(len(self) - 1)]],
    )
    return bond_angles
"def",
"backbone_bond_angles",
"(",
"self",
")",
":",
"bond_angles",
"=",
"dict",
"(",
"n_ca_c",
"=",
"[",
"angle_between_vectors",
"(",
"r",
"[",
"'N'",
"]",
"-",
"r",
"[",
"'CA'",
"]",
",",
"r",
"[",
"'C'",
"]",
"-",
"r",
"[",
"'CA'",
"]",
")",
... | 54.115385 | 27.115385 |
def get_self_ip():
    """Return this machine's public IP address.

    Queries http://httpbin.org/ip and extracts the ``origin`` field from
    the JSON response.

    Returns:
        str: the public IP address as reported by the remote service.
    """
    TEST_IP = 'http://httpbin.org/ip'
    # An explicit timeout keeps the call from hanging indefinitely when
    # the service is unreachable.
    html = requests.get(TEST_IP, timeout=10)
    ip = json.loads(html.text)['origin']
    return ip
"def",
"get_self_ip",
"(",
")",
":",
"TEST_IP",
"=",
"'http://httpbin.org/ip'",
"html",
"=",
"requests",
".",
"get",
"(",
"TEST_IP",
")",
"ip",
"=",
"json",
".",
"loads",
"(",
"html",
".",
"text",
")",
"[",
"'origin'",
"]",
"return",
"ip"
] | 26.666667 | 10 |
def char_beam_search(out):
    """
    Description : apply beam search for prediction result

    Runs CTC beam search decoding over each item of the batch and keeps
    the best-scoring string per item.
    """
    out_conv = list()
    for idx in range(out.shape[0]):
        probs = out[idx]
        # softmax()/asnumpy() suggest `out` holds framework tensors
        # (e.g. MXNet NDArray) -- TODO confirm.
        prob = probs.softmax().asnumpy()
        # `k` and `beamWidth` tune the search; their exact semantics are
        # defined by ctcBeamSearch (not visible here).
        line_string_proposals = ctcBeamSearch(prob, ALPHABET, None, k=4, beamWidth=25)
        # Keep only the top-ranked proposal for this sample.
        out_conv.append(line_string_proposals[0])
    return out_conv
"def",
"char_beam_search",
"(",
"out",
")",
":",
"out_conv",
"=",
"list",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"out",
".",
"shape",
"[",
"0",
"]",
")",
":",
"probs",
"=",
"out",
"[",
"idx",
"]",
"prob",
"=",
"probs",
".",
"softmax",
"(",
... | 33.727273 | 13.363636 |
def get_leapdays(init_date, final_date):
    """
    Find the number of leap days (Feb 29ths) in the half-open interval
    ``[init_date, final_date)``.  Returns a timedelta object.

    Computed arithmetically with the ``calendar`` module instead of
    iterating day by day (resolving the old FIXME).  Also works when
    ``final_date`` precedes ``init_date`` (the count is then negative),
    whereas the iterative version looped forever in that case.
    """
    def feb29s_before(d):
        # Number of Feb 29ths occurring strictly before date `d`,
        # counted from year 1.  calendar.leapdays(y1, y2) counts leap
        # years in [y1, y2).
        count = calendar.leapdays(1, d.year)
        if calendar.isleap(d.year) and (d.month, d.day) > (2, 29):
            count += 1
        return count

    leap_days = feb29s_before(final_date) - feb29s_before(init_date)
    return datetime.timedelta(days=leap_days)
"def",
"get_leapdays",
"(",
"init_date",
",",
"final_date",
")",
":",
"curr_date",
"=",
"init_date",
"leap_days",
"=",
"0",
"while",
"curr_date",
"!=",
"final_date",
":",
"if",
"curr_date",
".",
"month",
"==",
"2",
"and",
"curr_date",
".",
"day",
"==",
"29... | 23.105263 | 19.947368 |
def output_to_terminal(sources):
    """Print statistics to the terminal, then exit the process."""
    summary = OrderedDict()
    for source in sources:
        if source.get_is_available():
            source.update()
            summary.update(source.get_summary())
    for name, value in summary.items():
        sys.stdout.write(str(name) + ": " + str(value) + ", ")
    sys.stdout.write("\n")
    sys.exit()
"def",
"output_to_terminal",
"(",
"sources",
")",
":",
"results",
"=",
"OrderedDict",
"(",
")",
"for",
"source",
"in",
"sources",
":",
"if",
"source",
".",
"get_is_available",
"(",
")",
":",
"source",
".",
"update",
"(",
")",
"results",
".",
"update",
"(... | 34.363636 | 11.090909 |
def like(self):
    """ Like a clip.

    Sends a POST to the Kippt "likes" endpoint for this clip and returns
    the decoded JSON response.
    """
    r = requests.post(
        "https://kippt.com/api/clips/%s/likes" % (self.id),
        # Authentication headers come from the parent Kippt client.
        headers=self.kippt.header
    )
    return (r.json())
"def",
"like",
"(",
"self",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"\"https://kippt.com/api/clips/%s/likes\"",
"%",
"(",
"self",
".",
"id",
")",
",",
"headers",
"=",
"self",
".",
"kippt",
".",
"header",
")",
"return",
"(",
"r",
".",
"json",
... | 23.333333 | 16.777778 |
def get_species(taxdump_file, select_divisions=None,
                exclude_divisions=None, nrows=None):
    """Get a dataframe with species information.

    Reads 'names.dmp' out of a taxdump tar archive (NCBI-style layout —
    '|'-separated, tab-padded fields) and returns a DataFrame indexed by
    ``taxon_id`` with a ``scientific_name`` column and a '|'-joined
    ``common_names`` column, restricted to the taxon ids chosen by
    ``_get_species_taxon_ids``.

    :param taxdump_file: path to the taxdump tar archive.
    :param select_divisions: divisions to keep; mutually exclusive with
        ``exclude_divisions``.
    :param exclude_divisions: divisions to drop; mutually exclusive with
        ``select_divisions``.
    :param nrows: if given, read only the first ``nrows`` rows of
        'names.dmp' (handy for quick tests).
    :raises ValueError: if both division filters are supplied.
    """
    if select_divisions and exclude_divisions:
        raise ValueError('Cannot specify "select_divisions" and '
                         '"exclude_divisions" at the same time.')
    select_taxon_ids = _get_species_taxon_ids(
        taxdump_file,
        select_divisions=select_divisions,
        exclude_divisions=exclude_divisions)
    # set membership makes the isin() filter below cheap
    select_taxon_ids = set(select_taxon_ids)
    with tarfile.open(taxdump_file) as tf:
        with tf.extractfile('names.dmp') as fh:
            # names.dmp has no header row; fields are '|'-separated
            df = pd.read_csv(fh, header=None, sep='|',
                             encoding='ascii', nrows=nrows)
    # only keep information we need (taxon id, name, name class)
    df = df.iloc[:, [0, 1, 3]]
    # only select selected species
    df = df.loc[df.iloc[:, 0].isin(select_taxon_ids)]
    # remove tab characters flanking each "name class" entry
    df.iloc[:, 2] = df.iloc[:, 2].str.strip('\t')
    # select only "scientific name" and "common name" rows
    df = df.loc[df.iloc[:, 2].isin(['scientific name', 'common name'])]
    # remove tab characters flanking each "name" entry
    df.iloc[:, 1] = df.iloc[:, 1].str.strip('\t')
    # collapse common names for each scientific name
    common_names = defaultdict(list)
    cn = df.loc[df.iloc[:, 2] == 'common name']
    for _, row in cn.iterrows():
        common_names[row.iloc[0]].append(row.iloc[1])
    # build final dataframe (this is very slow)
    sn = df.loc[df.iloc[:, 2] == 'scientific name']
    species = []
    for i, row in sn.iterrows():
        species.append([row.iloc[0], row.iloc[1],
                        '|'.join(common_names[row.iloc[0]])])
    species_df = pd.DataFrame(species).set_index(0)
    species_df.columns = ['scientific_name', 'common_names']
    species_df.index.name = 'taxon_id'
    return species_df
"def",
"get_species",
"(",
"taxdump_file",
",",
"select_divisions",
"=",
"None",
",",
"exclude_divisions",
"=",
"None",
",",
"nrows",
"=",
"None",
")",
":",
"if",
"select_divisions",
"and",
"exclude_divisions",
":",
"raise",
"ValueError",
"(",
"'Cannot specify \"s... | 37.88 | 17.06 |
def load(self, name):
    """
    Return the compiled template registered under *name*.

    A cache hit is returned directly (after purging stale entries when
    ``self.reload`` is set). On a miss the template source is read from
    disk as UTF-8, compiled, annotated with its mtime and path, stored
    in the cache, and returned.

    Raises OSError (ENOENT) when *name* cannot be resolved to a file.
    """
    if self.reload:
        self._maybe_purge_cache()
    cached = self.cache.get(name)
    if cached:
        return cached
    path = self.resolve(name)
    if not path:
        raise OSError(errno.ENOENT, "File not found: %s" % name)
    with codecs.open(path, 'r', encoding='UTF-8') as handle:
        source = handle.read()
        # capture mtime while the descriptor is still open
        modified = os.fstat(handle.fileno()).st_mtime
    compiled = self.load_string(source, filename=path)
    compiled.mtime = modified
    compiled.path = path
    self.cache[name] = compiled
    return compiled
"def",
"load",
"(",
"self",
",",
"name",
")",
":",
"if",
"self",
".",
"reload",
":",
"self",
".",
"_maybe_purge_cache",
"(",
")",
"template",
"=",
"self",
".",
"cache",
".",
"get",
"(",
"name",
")",
"if",
"template",
":",
"return",
"template",
"path"... | 26.896552 | 19.172414 |
def empty_platform(cls, arch):
    """
    Build a platform instance without loading any ELF.
    :param str arch: The architecture of the new platform
    :rtype: Linux
    """
    blank = cls(None)
    blank._init_cpu(arch)
    blank._init_std_fds()
    return blank
"def",
"empty_platform",
"(",
"cls",
",",
"arch",
")",
":",
"platform",
"=",
"cls",
"(",
"None",
")",
"platform",
".",
"_init_cpu",
"(",
"arch",
")",
"platform",
".",
"_init_std_fds",
"(",
")",
"return",
"platform"
] | 27 | 13 |
def user_feature_update(self, userid, payload):
    """Update the feature entitlements of the user with the given id;
    returns (status_code, response) from the admin endpoint."""
    result = self.__pod__.User.post_v1_admin_user_uid_features_update(
        sessionToken=self.__session__,
        uid=userid,
        payload=payload
    ).result()
    response, status_code = result
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
"def",
"user_feature_update",
"(",
"self",
",",
"userid",
",",
"payload",
")",
":",
"response",
",",
"status_code",
"=",
"self",
".",
"__pod__",
".",
"User",
".",
"post_v1_admin_user_uid_features_update",
"(",
"sessionToken",
"=",
"self",
".",
"__session__",
","... | 42.777778 | 15.222222 |
def options(self, parser, env=os.environ):
    """Register commandline options."""
    super(TimerPlugin, self).options(parser, env)
    _time_units_help = ("Default time unit is a second, but you can set "
                        "it explicitly (e.g. 1s, 500ms)")
    # How many of the slowest tests to report; -1 means all of them.
    parser.add_option("--timer-top-n", dest="timer_top_n",
                      action="store", default="-1",
                      help="When the timer plugin is enabled, only show the "
                           "N tests that consume more time. The default, -1, "
                           "shows all tests.")
    # Optional JSON dump of the per-test timing and status.
    parser.add_option("--timer-json-file", dest="json_file",
                      action="store", default=None,
                      help="Save the results of the timing and status of "
                           "each tests in said Json file.")
    # Threshold below which a test is considered fast (green).
    parser.add_option("--timer-ok", dest="timer_ok",
                      action="store", default=1,
                      help="Normal execution time. Such tests will be "
                           "highlighted in green. %s." % _time_units_help)
    # Threshold above which a test is flagged slow (yellow); anything
    # slower still is shown in red.
    parser.add_option("--timer-warning", dest="timer_warning",
                      action="store", default=3,
                      help="Warning about execution time to highlight slow "
                           "tests in yellow. Tests which take more time will "
                           "be highlighted in red. %s." % _time_units_help)
    # Windows + nosetests does not support colors (even with colorama).
    if not IS_NT:
        parser.add_option("--timer-no-color", dest="timer_no_color",
                          action="store_true", default=False,
                          help="Don't colorize output (useful for non-tty "
                               "output).")
    # Restrict the report to a subset of result categories.
    parser.add_option("--timer-filter", dest="timer_filter",
                      action="store", default=None,
                      help="Show filtered results only (ok,warning,error).")
    # Optionally turn slow tests into hard failures.
    parser.add_option("--timer-fail", dest="timer_fail",
                      action="store", default=None,
                      choices=('warning', 'error'),
                      help="Fail tests that exceed a threshold "
                           "(warning,error)")
"def",
"options",
"(",
"self",
",",
"parser",
",",
"env",
"=",
"os",
".",
"environ",
")",
":",
"super",
"(",
"TimerPlugin",
",",
"self",
")",
".",
"options",
"(",
"parser",
",",
"env",
")",
"# timer top n",
"parser",
".",
"add_option",
"(",
"\"--timer-... | 30.554217 | 20.951807 |
def parse_limits_list(path, limits):
    """Parse a structured list of flux limits as obtained from a YAML file

    Yields tuples of reaction ID, lower and upper flux bounds. Path can be
    given as a string or a context.
    """
    context = FilePathContext(path)
    for entry in limits:
        if 'include' not in entry:
            # Plain inline definition.
            yield parse_limit(entry)
        else:
            # Recurse into the referenced limits file.
            included = context.resolve(entry['include'])
            for limit in parse_limits_file(included):
                yield limit
"def",
"parse_limits_list",
"(",
"path",
",",
"limits",
")",
":",
"context",
"=",
"FilePathContext",
"(",
"path",
")",
"for",
"limit_def",
"in",
"limits",
":",
"if",
"'include'",
"in",
"limit_def",
":",
"include_context",
"=",
"context",
".",
"resolve",
"(",... | 33.125 | 17.0625 |
def dump(database, output, min_occurences=1, max_occurences=250, returncmd=False):
    """
    Dumps a kmc database into tab-delimited format via ``kmc_tools dump``.

    :param database: Database generated by kmc.
    :param output: Name for output.
    :param min_occurences: Minimum number of times kmer must be in database to be dumped.
    :param max_occurences: Maximum number of times a kmer can be seen and still be dumped.
    :param returncmd: If true, will return the command used to call KMC as well as out and err.
    :return: Stdout and stderr from kmc.
    """
    cmd = 'kmc_tools dump -ci%s -cx%s %s %s' % (
        min_occurences, max_occurences, database, output)
    out, err = accessoryfunctions.run_subprocess(cmd)
    return (out, err, cmd) if returncmd else (out, err)
"def",
"dump",
"(",
"database",
",",
"output",
",",
"min_occurences",
"=",
"1",
",",
"max_occurences",
"=",
"250",
",",
"returncmd",
"=",
"False",
")",
":",
"cmd",
"=",
"'kmc_tools dump -ci{} -cx{} {} {}'",
".",
"format",
"(",
"min_occurences",
",",
"max_occur... | 49 | 24.125 |
def validate_authorization_request(self, uri, http_method='GET', body=None,
                                   headers=None):
    """Dispatch validation to the handler registered for the request's
    response_type (falling back to the default handler)."""
    request = Request(uri, http_method=http_method, body=body,
                      headers=headers)
    request.scopes = utils.scope_to_list(request.scope)
    handler = self.response_types.get(request.response_type,
                                      self.default_response_type_handler)
    return handler.validate_authorization_request(request)
"def",
"validate_authorization_request",
"(",
"self",
",",
"uri",
",",
"http_method",
"=",
"'GET'",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"request",
"=",
"Request",
"(",
"uri",
",",
"http_method",
"=",
"http_method",
",",
"body",... | 50.545455 | 24.727273 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.