content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def maxSubArray2(nums):
    """Return the maximum sum over all contiguous subarrays of nums.

    The original prefix-sum min/max heuristic was incorrect: it tracked
    the global minimum and maximum prefix sums independently, so inputs
    such as [2, -1, 2] returned 2 instead of the correct 3. This version
    uses Kadane's algorithm (O(n) time, O(1) space).

    :type nums: List[int]
    :rtype: int (0 for an empty list, matching the original contract)
    """
    if len(nums) == 0: return 0
    best = current = nums[0]
    for x in nums[1:]:
        # Either extend the running subarray or start fresh at x.
        current = max(x, current + x)
        best = max(best, current)
    return best
import os
import yaml
def get_config(config_vars):
    """Load config.yml (located next to this script) and validate keys.

    Args:
        config_vars: iterable of key names that must exist in the config.

    Returns:
        dict parsed from config.yml.

    Exits the process with an error message if any required key is missing.
    Side effect: changes the working directory to this script's directory.
    """
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked the handle returned by the bare open()).
    with open("config.yml") as config_fh:
        config = yaml.safe_load(config_fh)
    # Check for missing config keys
    missing = [var for var in config_vars if var not in config]
    for var in missing:
        print('Missing key in config.yml: ' + var)
    if missing:
        exit('Some required keys are missing from config.yml.')
    return config
def _get2DArea(box):
"""Get area of a 2D box"""
return (box['right']-box['left']) * (box['bottom']-box['top']) | 1e6675b93717263851ac8c01edfd3c4e9d5b1889 | 690,366 |
from typing import List
def to_base_digits(value: int, base: int) -> List[int]:
    """Return `value` (base 10) as a list of digits in base `base`.

    Fixes an off-by-one in the loop condition: the original used
    ``while n > base``, so any value equal to a power of the base was
    left unconverted (e.g. to_base_digits(16, 16) returned [16]).

    :param value: non-negative integer to convert
    :param base: target base (>= 2)
    :return: most-significant-first list of digits
    """
    digits = []
    n = value
    # Peel off the least-significant digit until n is a single digit.
    while n >= base:
        n, digit = divmod(n, base)
        digits.append(digit)
    digits.append(n)
    return digits[::-1]
def get_neighbors(nodeid, adjmatrix):
    """Return the direct neighbors of nodeid and their count.

    Args:
        nodeid: index of the datapoint
        adjmatrix: adjacency matrix with true=edge, false=noedge
    Returns:
        tuple of (list of neighbor indices, number of neighbors)
    """
    neighbors = [row for row in range(adjmatrix.shape[0])
                 if adjmatrix[row][nodeid]]
    return neighbors, len(neighbors)
from pathlib import Path
import yaml
def get_vocabularies():
    """Returns dictionary of vocabularies.

    Reads ``vocabularies.yaml`` next to this module; each top-level entry
    must provide a ``pid-type`` (used as the vocabulary id) and a
    ``data-file`` (a YAML list of entries with ``id`` and ``props``).

    Returns:
        dict mapping pid-type -> {datacite string: entry id}.
    """
    path = Path(__file__).parent
    vocab_file = path / "vocabularies.yaml"
    vocabularies = {}
    with open(vocab_file) as f:
        # Tolerate an empty index file.
        data = yaml.safe_load(f) or {}
    # NOTE: `data` is rebound inside the loop below; that is safe because
    # the items() iterator keeps referencing the original dict object.
    for id_, yaml_entry in data.items():
        individual_vocab = {}
        vocab_id = yaml_entry["pid-type"]
        data_file = path / yaml_entry["data-file"]
        with open(data_file) as fp:
            # Allow empty files
            data = yaml.safe_load(fp) or []
        for entry in data:
            props = entry["props"]
            if "datacite_general" in props:
                # Resource type two layer vocab
                datacite = (
                    props["datacite_general"] + ";" + props["datacite_type"]
                )
            else:
                datacite = props["datacite"]
            individual_vocab[datacite] = entry["id"]
        vocabularies[vocab_id] = individual_vocab
    return vocabularies
import os
import jinja2
def render(tpl_path, context):
    """
    Render the Jinja2 template found at tpl_path using context (dict-like).
    """
    # source: http://matthiaseisen.com/pp/patterns/p0198/
    directory, filename = os.path.split(tpl_path)
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(directory or './')
    )
    template = environment.get_template(filename)
    return template.render(context)
def get_ucr_class_name(id):
    """
    Return the module/class path for a UCR given its id, in the form
    needed by `user.can_view_report(string)` for report permissions.

    The class name comes from corehq.reports._make_report_class; if
    something breaks, look there first.

    :param id: the id of the ucr report config
    :return: string class name
    """
    return f'corehq.reports.DynamicReport{id}'
def arguments(o):
    """Extract arguments from an expression."""
    extracted = o.args
    return extracted
def _get_hover_text(df, snpname=None, genename=None, annotationname=None):
"""Format the hover text used in Manhattan and Volcano plots.
:param (dataFrame) df: A pandas dataframe.
:param (string) snpname: A string denoting the column name for the SNP
names (e.g., rs number). More generally, this column could be anything
that identifies each point being plotted. For example,
in an Epigenomewide association study (EWAS), this could be the probe
name or cg number. This column should be a character. This argument is
optional, however it is necessary to specify it if you want to
highlight points on the plot using the highlight argument in the
figure method.
:param (string) genename: A string denoting the column name for the
GENE names.
:param (string) annotationname: A string denoting the column name for
annotations. This could be any annotation information that you
want to include in the plot (e.g., zscore, effect size, minor allele
frequency).
"""
hover_text = ''
if snpname is not None and snpname in df.columns:
hover_text = 'SNP: ' + df[snpname].astype(str)
if genename is not None and genename in df.columns:
hover_text = hover_text \
+ '<br>GENE: ' \
+ df[genename].astype(str)
if annotationname is not None and annotationname in df.columns:
hover_text = hover_text \
+ '<br>' \
+ df[annotationname].astype(str)
return hover_text | cfdc0832b8eabda5aaed70b402c023eed0bbb10d | 690,374 |
def _run_test_func(mod, funcpath):
"""
Run the given TestCase method or test function in the given module.
Parameters
----------
mod : module
The module where the test resides.
funcpath : str
Either <testcase>.<method_name> or <func_name>.
Returns
-------
object
In the case of a module level function call, returns whatever the function returns.
"""
parts = funcpath.split('.', 1)
if len(parts) == 2:
tcase_name, method_name = parts
testcase = getattr(mod, tcase_name)(methodName=method_name)
setup = getattr(testcase, 'setUp', None)
if setup is not None:
setup()
getattr(testcase, method_name)()
teardown = getattr(testcase, 'tearDown', None)
if teardown:
teardown()
else:
funcname = parts[0]
return getattr(mod, funcname)() | 9be3ad768d01a03da6038d059fa7bf9c5d845b07 | 690,375 |
def cli(ctx):
    """.. deprecated:: 0.5.2 Use :meth:`get_most_recently_used_history` instead.
    Output:
    """
    histories = ctx.gi.histories
    return histories.get_current_history()
def process_exploit(exploit):
    """Rewrite an exploit's section id as a readable category, in place.

    The category field looks like "<a>_<b>_<section>"; the third segment
    selects the human-readable name. Returns the mutated exploit dict.
    """
    section_to_name = {
        'DataListTypes': 'normal',
        'DataList1': 'greater',
        'DataList2': 'outer rift',
    }
    section = exploit['category'].split('_')[2]
    exploit['category'] = section_to_name[section]
    return exploit
def get_watershed_alliance_locations():
    """Names of the default destinations added in migration 0012.

    All of them are in the watershed alliance:
    https://www.watershedalliance.org/centers/
    """
    locations = (
        "Bartram's Garden",
        "Fairmount Waterworks Interpretive Center",
        "Independence Seaport Museum",
        "John Heinz National Wildlife Refuge",
        "NJ Academy of Aquatic Sciences",
        "Palmyra Cove Nature Park",
        "Schuylkill Environmental Education Center",
        "Schuylkill River Greenway Association",
        "Tulpehaking Nature Center at Abbott Marshland",
    )
    return list(locations)
from typing import TextIO
from typing import List
def csv_to_list(csv_file: TextIO) -> List[List[str]]:
    """Return the rows of the open CSV file csv_file as a list of lists.

    The first line is treated as a header and discarded; each remaining
    line is stripped and split on commas.
    """
    csv_file.readline()  # read and discard header
    return [line.strip().split(',') for line in csv_file]
def extract(
    ctx,
    kzip,
    extractor,
    srcs,
    opts,
    deps = [],
    vnames_config = None,
    mnemonic = "ExtractCompilation"):
    """Run the extractor tool under an environment to produce the given kzip
    output file. The extractor is passed each string from opts after expanding
    any build artifact locations and then each File's path from the srcs
    collection.

    This is Bazel Starlark (uses ctx.actions.run), not plain Python.

    Args:
      ctx: Bazel rule context used to register the action
      kzip: Declared .kzip output File
      extractor: Executable extractor tool to invoke
      srcs: Files passed to extractor tool; the compilation's source file inputs
      opts: List of options passed to the extractor tool before source files
      deps: Dependencies for the extractor's action (not passed to extractor on command-line)
      vnames_config: Optional path to a VName configuration file
      mnemonic: Mnemonic of the extractor's action

    Returns:
      The kzip output File (same object that was passed in).
    """
    # The Kythe extractor protocol is driven via environment variables.
    env = {
        "KYTHE_ROOT_DIRECTORY": ".",
        "KYTHE_OUTPUT_FILE": kzip.path,
    }
    inputs = srcs + deps
    if vnames_config:
        # The VName config participates in extraction, so it must also be
        # declared as an action input.
        env["KYTHE_VNAMES"] = vnames_config.path
        inputs += [vnames_config]
    ctx.actions.run(
        inputs = inputs,
        tools = [extractor],
        outputs = [kzip],
        mnemonic = mnemonic,
        executable = extractor,
        arguments = (
            [ctx.expand_location(o) for o in opts] +
            [src.path for src in srcs]
        ),
        env = env,
    )
    return kzip
def scope_minimal_nr_tokens(df_in, min_nr_tokens=1):
    """
    Keep only rows whose "nr_tokens" meets the minimum (default: at least 1).
    """
    keep = df_in["nr_tokens"] >= min_nr_tokens
    return df_in.loc[keep]
def data_concatenate(data, concatenated):
    """
    Concatenate every frame in `data` onto `concatenated`.

    Uses pandas.concat instead of DataFrame.append (deprecated since
    pandas 1.4 and removed in 2.0), and builds the result in one pass
    rather than one quadratic append per frame.

    :param data: iterable of DataFrames to append, in order
    :param concatenated: DataFrame the frames are appended to
    :return: the combined DataFrame (`concatenated` itself if data is empty)
    """
    import pandas as pd
    frames = [concatenated, *data]
    if len(frames) == 1:
        # Nothing to add — preserve the original identity-return behaviour.
        return concatenated
    return pd.concat(frames, sort=False)
def eval_en(x, mol):
    """
    Set the atom positions to x, then return the potential energy in a list.
    """
    mol.set_positions(x)
    energy = mol.get_potential_energy()
    return [energy]
def getind(stream, utc):
    """Index of the first trace in the stream later than the given time.

    Returns None when no trace is after utc.
    """
    for index, stamp in enumerate(stream.getHI('event.datetime')):
        if stamp > utc:
            return index
def generic(question, *comments, default_value=None, prefix='> ', suffix=' ', default_string=None, answer_type=None,
            auto_enter=False):
    """Generic interactive prompt.

    Builds a question line (with optional default hint and extra comment
    lines), reads an answer, and returns it — converted by answer_type
    when given, or default_value when the user just presses enter. With
    auto_enter=True the prompt is only printed and default_value returned.
    """
    if default_string is None:
        default_string = f'[{str(default_value)}]' if default_value else ''
    prompt = prefix + question.strip()
    if not prompt.endswith('?'):
        prompt += '?'
    if default_string:
        prompt += f' {default_string}'
    for comment in comments:
        prompt += f'\n{prefix}{comment}'
    prompt += f'\n{prefix}' if comments else suffix
    if auto_enter:
        print(prompt)
        return default_value
    answer = input(prompt).strip()
    if not answer:
        return default_value
    if callable(answer_type):
        return answer_type(answer)
    return answer
def default_before_hook(*args, **kwargs):
    """Identity before-hook: pass positional and keyword args through untouched."""
    return args, kwargs
def yml_with_no_metadata_fixture():
    """Return some yml that doesn't have any "MetaData".

    NOTE(review): the continuation lines of the literal carry the
    function-body indentation, so those leading spaces are part of the
    returned string — consumers appear to rely on YAML tolerating that
    (TODO confirm before reformatting).
    """
    data = """---
    apiVersion: v1
    kind: Deployment
    """
    return data
def parse_dict(raw_dict, ignore_keys=None):
    """
    Parses the values in the dictionary as booleans, ints, and floats as
    appropriate.

    Fixes the mutable default argument (`ignore_keys=[]`); passing
    nothing behaves exactly as before.

    Parameters
    ----------
    raw_dict : dict
        Flat dictionary whose values are mainly strings
    ignore_keys : str, list, or tuple, optional
        Key(s) in the dictionary to remove

    Returns
    -------
    dict
        Flat dictionary with values of expected dtypes

    Raises
    ------
    TypeError
        If ignore_keys is neither a string nor a list/tuple.
    """
    def __parse_str(mystery):
        # 'true'/'false' (any case) -> bool; numeric strings -> int/float;
        # everything else is returned unchanged.
        if not isinstance(mystery, str):
            return mystery
        if mystery.lower() == 'true':
            return True
        if mystery.lower() == 'false':
            return False
        try:
            number = float(mystery)
        except ValueError:
            return mystery
        # Collapse whole-valued floats ('3.0', '7') to int.
        return int(number) if number % 1 == 0 else number

    if not ignore_keys:
        ignore_keys = []
    elif isinstance(ignore_keys, str):
        ignore_keys = [ignore_keys]
    elif not isinstance(ignore_keys, (list, tuple)):
        raise TypeError('ignore_keys should be a list of strings')

    clean_dict = {}
    for key, val in raw_dict.items():
        if key in ignore_keys:
            continue
        val = __parse_str(val)
        if isinstance(val, (tuple, list)):
            val = [__parse_str(item) for item in val]
        elif isinstance(val, dict):
            # Recurse into nested dicts with the same ignore list.
            val = parse_dict(val, ignore_keys=ignore_keys)
        clean_dict[key] = val
    return clean_dict
def get_feature_names(npcs=3):
    """
    Build the feature-name list for each band from the number of
    principal components.

    Parameters
    ----------
    npcs : int
        number of principal components to use

    Returns
    -------
    list
        feature names, g-band block first, then r-band.
    """
    stems = ["coeff{}_".format(k + 1) for k in range(npcs)]
    stems += ["residuo_", "maxflux_"]
    names = []
    for band in ("g", "r"):
        for stem in stems:
            names.append(stem + band)
    return names
def _map_action_index_to_output_files(actions, artifacts):
"""Constructs a map from action index to output files.
Args:
actions: a list of actions from the action graph container
artifacts: a map {artifact_id: artifact path}
Returns:
A map from action index (in action graph container) to a string of
concatenated output artifacts paths.
"""
action_index_to_output_files = {}
for i, action in enumerate(actions):
output_files = " ".join(
sorted([artifacts[output_id] for output_id in action.output_ids]))
action_index_to_output_files[i] = output_files
return action_index_to_output_files | ee38063460654d0f30fdf400f99591198592115c | 690,394 |
def post_index(root, request, nodes):
    """
    Post-processing hook for the index view; passes the nodes through
    unchanged.
    """
    return nodes
import pwd
def name_to_uid( name ):
    """Translate a user name into its numeric user id."""
    record = pwd.getpwnam(name)
    return record.pw_uid
def find(db, description):
    """
    Look up completed jobs whose description matches, case-insensitively.

    :param db:
        a :class:`openquake.server.dbapi.Db` instance
    :param description:
        job description, used in a case-insensitive LIKE clause
    :return: whatever `db` returns for the query
    """
    query = '''-- completed jobs
    SELECT id, description, user_name,
    (julianday(stop_time) - julianday(start_time)) * 24 AS hours
    FROM job WHERE status='complete' AND description LIKE lower(?x)
    ORDER BY julianday(stop_time) - julianday(start_time)'''
    pattern = description.lower()
    return db(query, pattern)
import re
def split(ln):
    """
    Naively split text into sentences on '. ', '? ', '! ' and '."'.

    :param ln: String
    :return fout: list of non-empty sentences, stripped of surrounding
        whitespace
    """
    # horridly simple splitter
    for separator in (". ", "? ", "! "):
        ln = ln.replace(separator, separator.strip() + "\n\n")
    ln = ln.replace('."', '."\n\n')
    sentences = []
    for raw in ln.split("\n"):
        cleaned = re.sub(r'^\s+', '', raw.rstrip())
        if cleaned != "":
            sentences.append(cleaned)
    return sentences
def add_mutually_exclusive_group(*add_argument_calls):
    """ Create mutually exclusive argument group inside argparsed_func.

    Returns a marker tuple of (this function, the captured calls) for the
    argparse wiring to interpret later.
    """
    marker = (add_mutually_exclusive_group, add_argument_calls)
    return marker
def adsorption(CGF, CGRA, CET, cg, epsl, KdF, KdR, KI):
    """
    Partition total enzyme CET between facile (CGF) and recalcitrant
    (CGRA) glucan at adsorption equilibrium, accounting for inhibition
    by glucose cg (and other sugars if present) via KI.

    Returns (CEGF, CEGR): enzyme bound to facile / recalcitrant glucan.
    """
    inhibition = 1 + cg/KI
    CEGF = CET/(1 + KdF/KdR*CGRA/CGF + epsl*KdF/CGF*inhibition)
    CEGR = CET/(1 + KdR/KdF*CGF/CGRA + epsl*KdR/CGRA*inhibition)
    return CEGF, CEGR
def container_logs(client, resource_group_name, name, container_name=None):
    """Tail a container instance log. """
    # The container defaults to the instance name when not given explicitly.
    container = name if container_name is None else container_name
    log = client.container_logs.list(resource_group_name, container, name)
    return log.content
import subprocess
def RunCommand(*cmd, **kwargs):
    """Runs a command and returns its stdout, stripped of trailing newlines.

    kwargs:
      raise_on_failure: raise CalledProcessError on non-zero exit when True
        (the default); otherwise return None on failure.
    Remaining kwargs are forwarded to subprocess.Popen.
    """
    pipe_defaults = {
        'stdin': subprocess.PIPE,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'universal_newlines': True,
    }
    for option, value in pipe_defaults.items():
        kwargs.setdefault(option, value)
    raise_on_failure = kwargs.pop('raise_on_failure', True)
    process = subprocess.Popen(cmd, **kwargs)
    out, err = process.communicate()
    if process.returncode != 0:
        if raise_on_failure:
            print('Error: %s' % err)
            raise subprocess.CalledProcessError(process.returncode, cmd)
        return
    return (out or '').rstrip('\n')
import os
def compile_imgs(root_dir):
    """
    Deprecated. Used previously when custom Dataset compiles sample paths on instatiation.
    Current custom dataset instead accepts a pre-cooked path list to pos/neg samples.
    Use compile_data.

    :param root_dir: pathlib.Path-like root (must support the / operator)
    :return: sorted list of sample paths two directory levels below root_dir
    """
    # First level: the immediate children of root_dir.
    _ = [root_dir/i for i in os.listdir(root_dir)]
    # Second level: sequence folders inside each child.
    heap_main = [root_dir/j/i for j in _ for i in os.listdir(j)] #These are folders for 3862 train seqs
    heap_main.sort()
    # Third level: the individual sample files inside each sequence folder.
    heap = [i/j for i in heap_main for j in os.listdir(i)]
    heap.sort()
    return heap
import re
def customTokenizer(input):
    """Tokenise transformed Smali instructions.

    Args:
        input: newline-separated transformed instructions
    Returns:
        A list of transformed instructions (one per line)
    """
    return input.split("\n")
import sys
def functionId(nFramesUp):
    """ Create a string naming the function n frames up on the stack. """
    code = sys._getframe(nFramesUp + 1).f_code
    return "%s (%s @ %d)" % (code.co_name, code.co_filename, code.co_firstlineno)
from typing import OrderedDict
def build_dim_map(tensor):
    """Return { dim: dim_name } for each dimension of the tensor.

    Named dimensions are keyed by their name; unnamed ones by their index."""
    pairs = []
    for index, dim_name in enumerate(tensor.names):
        key = index if dim_name is None else dim_name
        pairs.append((key, dim_name))
    return OrderedDict(pairs)
def float_list_string(vals, nchar=7, ndec=3, nspaces=2, mesg='', left=False):
    """Format a list of floats for display, one fixed-width field each.

    vals    : the list of float values
    nchar   : [7] characters per float field
    ndec    : [3] decimal places to print
    nspaces : [2] spaces appended after each float
    mesg    : prefix string
    left    : left-justify each field instead of right-justifying
    """
    field_fmt = '%-*.*f%*s' if left else '%*.*f%*s'
    pieces = [mesg]
    for value in vals:
        pieces.append(field_fmt % (nchar, ndec, value, nspaces, ''))
    return ''.join(pieces)
def tiles_from(state_pkt):
    """
    Return the valid tile devices from a Tile State packet.

    Honours the packet's ``tile_devices_count`` and ``start_index``
    fields: only the first (count - start_index) devices are valid.
    """
    valid_count = state_pkt.tile_devices_count - state_pkt.start_index
    return state_pkt.tile_devices[:valid_count]
def balance_queue_modifier(count_per_day: float) -> float:
    """
    Compute a filter modifier that balances the shared queue.

    The reddit-limited queue holds only 1k posts, and no single sub
    should occupy more than 1/100th of it (reasonable with ~73 partners).
    A sub posting ~3/day comes in fully; a sub posting ~800/day (r/pics)
    gets scaled down so it cannot overwhelm the queue.
    """
    target_queue_percentage = 0.01
    actual_share = count_per_day / 1000
    return target_queue_percentage / actual_share
def seq_mult_scalar(a, s):
    """Return a new list with every element of `a` multiplied by scalar `s`.

    For a=[a0, a1, ..., an] the result is [s*a0, s*a1, ..., s*an]."""
    scaled = []
    for element in a:
        scaled.append(s * element)
    return scaled
def fix_z_dir(job):
    """
    Restrict barostat coupling to the z-direction only for an NPT run
    (instead of fixing all three directions).

    Args:
        job (LAMMPS): Lammps job object
    Returns:
        LAMMPS: the same job object, updated in place
    """
    all_dirs = "x 0.0 0.0 1.0 y 0.0 0.0 1.0 z 0.0 0.0 1.0"
    z_only = "z 0.0 0.0 1.0"
    fix_line = job.input.control["fix___ensemble"]
    job.input.control["fix___ensemble"] = fix_line.replace(all_dirs, z_only)
    return job
import json
def read_blast_json(json_file):
    """ Process input BLAST JSON and return hits as a list.

    Expects NCBI BLAST+ JSON (format 15): a top-level 'BlastOutput2' list
    whose entries hold report -> results -> search. Only the top hit's
    first HSP of each search is kept; searches with no hits are skipped.

    :param json_file: path to the BLAST JSON file
    :return: list of dicts, one per search with at least one hit
    """
    hits = []
    json_data = None
    with open(json_file, 'rt') as blast_fh:
        json_data = json.load(blast_fh)
    for entry in json_data['BlastOutput2']:
        hit = entry['report']['results']['search']
        if len(hit['hits']):
            # Only storing the top hit
            hsp = hit['hits'][0]['hsps'][0]
            # Includes mismatches and gaps
            mismatch = hsp['align_len'] - hsp['identity']
            # Hamming distance
            hd = mismatch
            if hit['query_len'] > hsp['align_len']:
                # Include those bases that weren't aligned
                hd = hit['query_len'] - hsp['align_len'] + mismatch
            hits.append({
                # NOTE(review): assumes the db name ends in '-contigs' and
                # identifies the sample — confirm against the pipeline.
                'sample': entry['report']['search_target']['db'].replace('-contigs', ''),
                'title': hit['query_title'],
                'length': hit['query_len'],
                'bitscore': int(hsp['bit_score']),
                'evalue': hsp['evalue'],
                'identity': hsp['identity'],
                'mismatch': mismatch,
                'gaps': hsp['gaps'],
                'hamming_distance': hd,
                'query_from': hsp['query_from'],
                'query_to': hsp['query_to'],
                'hit_from': hsp['hit_from'],
                'hit_to': hsp['hit_to'],
                'align_len': hsp['align_len'],
                'qseq': hsp['qseq'],
                'hseq': hsp['hseq'],
                'midline': hsp['midline']
            })
    return hits
def get_uid(instance):
    """ 获取实例的 uid (hex) — return the instance's uid as hex, or None.

    Examples::

        data = {
            'uid': get_uid(instance),
            'related_uid': get_uid(instance.related),
        }

    :rtype: str | None
    """
    if not instance:
        return None
    return instance.uid.hex
def idx_tuple_in_df(tuple_x, df):
    """Return the positional index of the first row of df equal to tuple_x.

    Returns None when no row matches."""
    for position, row in enumerate(df.values):
        if tuple(row) == tuple_x:
            return position
    return None
def account_main_purse_uref(CLIENT, account_key: bytes) -> str:
    """Return an on-chain account's main purse unforgeable reference.
    """
    queries = CLIENT.queries
    return queries.get_account_main_purse_uref(account_key)
def getDeviceInfo():
    """Returns a table of information about installed devices in the computer.

    NOTE(review): as written this returns the built-in ``dict`` *type*
    itself, not a populated dictionary — presumably a stub or an
    auto-generated API signature; confirm the intended behaviour."""
    return dict
from typing import List
def _make_pod_command() -> List[str]:
"""Generate pod command.
Returns:
List[str]: pod command.
"""
return ["./init_icscf.sh", "&"] | 72a3fc87a37166d87fcb673a0b8fb97db0583d4d | 690,420 |
def parseSendchangeArguments(args):
    """Parse the Buildbot patch-uploader "changed files" arguments.

    Each argument is expected to look like "key: value"; entries without
    a colon are skipped, matching the old silent behaviour. The original
    bare ``except:`` swallowed every exception (even KeyboardInterrupt);
    malformed input is now detected explicitly.

    Args:
        args: list of "key: value" strings.
    Returns:
        dict mapping each key to its whitespace-stripped value.
    """
    parsedArgs = {}
    for arg in args:
        # Split only on the first colon so values may themselves contain ':'.
        key, sep, value = arg.partition(":")
        if not sep:
            continue
        parsedArgs[key] = value.strip()
    return parsedArgs
def time_content_log_split(log_line):
    """
    Split a portal.log line into its Time Elapsed and Content sections.

    :param log_line: A line from the portal.log file, e.g. "[12:00] text"
    :return: (time, content) tuple
    """
    # Drop the opening bracket, then split on the closing "] " delimiter.
    parts = log_line.replace('[', '').split('] ')
    return parts[0], parts[1]
def get_number_rows(settings, star_height):
    """Determine the number of rows of stars that fit on the screen."""
    free_space_y = settings.screen_height - star_height
    return int(free_space_y / (2 * star_height))
def minify_python(s):
    """
    Minify python code.

    @param s: python code to minify
    @type s: L{str}
    @return: the minified python code
    @rtype: L{str}

    Currently a no-op: the early ``return s`` below deliberately disables
    minification because python-minifier does not work under brython.
    Everything after it is unreachable, kept for when that is resolved.
    """
    # not working with brython :(
    return s
    if python_minifier is None:
        raise NotImplementedError("Dependency 'python-minifier' required, but not found!")
    return python_minifier.minify(
        s,
        remove_annotations=True,
        remove_pass=True,
        # remove_literal_statements=
        combine_imports=True,
        hoist_literals=True,
        rename_locals=True,
        rename_globals=True,
    )
import os
import torch
def load_checkpoint(checkpoint, model, optimizer=None, key='state_dict'):
    """Load model parameters (state_dict) from a checkpoint file.

    Fixes two defects in the original: it iterated
    ``checkpoint['state_dict']`` while indexing ``checkpoint[key]``
    (breaking any non-default ``key``), and its partial-load fallback
    updated a throwaway copy returned by ``model.state_dict()`` — a
    silent no-op.

    Args:
        checkpoint: (string) filename of the checkpoint to load
        model: (torch.nn.Module) model whose parameters are restored
        optimizer: (torch.optim) optional: resume optimizer state from
            checkpoint['optim_dict']
        key: (string) key under which the model state dict is stored

    Returns:
        The loaded checkpoint dictionary.

    Raises:
        FileNotFoundError: if the checkpoint path does not exist.
    """
    if not os.path.exists(checkpoint):
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    # Map to CPU when CUDA is unavailable so GPU-saved checkpoints still load.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint)
    else:
        checkpoint = torch.load(checkpoint, map_location=torch.device('cpu'))
    try:
        # Strip DataParallel's 'module.' prefix before loading.
        cleaned_dict = {k.replace('module.', ''): v
                        for k, v in checkpoint[key].items()}
        model.load_state_dict(cleaned_dict)
    except KeyError:
        print('loading model partly')
        # `key` is absent: treat the checkpoint itself as a (possibly
        # partial) state dict and merge only the keys the model knows.
        state = model.state_dict()
        state.update({k: v for k, v in checkpoint.items() if k in state})
        model.load_state_dict(state)
    if optimizer:
        optimizer.load_state_dict(checkpoint['optim_dict'])
    return checkpoint
def unbindReferences(par, modeOnly=False):
    """
    Erase bind strings or change modes for all bindReferences of a parameter.

    :param par: the bindMaster parameter
    :param modeOnly: if True, just change the references' modes to prevMode
    :return: the references that were changed
    """
    references = par.bindReferences
    for reference in references:
        reference.mode = reference.prevMode
        if not modeOnly:
            reference.bindExpr = ''
    return references
import torch
def pearsonCorrLoss(outputs, targets):
    """
    Negative Pearson correlation between outputs and targets
    (so minimizing the loss maximizes the correlation).
    """
    dev_out = outputs - torch.mean(outputs)
    dev_tgt = targets - torch.mean(targets)
    numerator = torch.sum(dev_out * dev_tgt)
    denominator = torch.sqrt(torch.sum(dev_out ** 2)) * torch.sqrt(torch.sum(dev_tgt ** 2))
    return -1 * (numerator / denominator)
def tids_setbit_counter(tset):
    """
    Return the cardinality of a token-ids set, i.e. the number of
    elements present (set bits).

    Default for bitarrays.
    """
    cardinality = tset.count()
    return cardinality
def join_hints(hints, output):
    """Build a gwf-style job that concatenates hint files into `output`.

    Returns (inputs, outputs, options, spec) where spec is the shell
    command "cat <hints...> > <output>".
    """
    options = {
        'cores': 1,
        'memory': '4g',
        'account': 'NChain',
        'walltime': '01:00:00'
    }
    spec = "cat"
    spec += "".join(" {}".format(hint) for hint in hints)
    spec += " > {}".format(output)
    return hints, [output], options, spec
def km_to_mi(km):
    """Convert kilometres to miles (1 mile ≈ 1.61 km)."""
    return km / 1.61
def fake_fft(signal, phases=1, offset=0):
    """Beyond the halfway point each output digit is the mod-10 sum of all
    items from its position to the end, applied `phases` times after
    dropping the first `offset` items."""
    working = signal[offset:]
    for _ in range(phases):
        running = 0
        accumulated = []
        for _ in range(len(working)):
            running = (running + working.pop()) % 10
            accumulated.append(running)
        working = accumulated[::-1]
    return working
def redshiftFromScale(scale):
    """
    Convert a scale factor to redshift: z = 1/a - 1.

    :param scale: scale factor
    :type scale: float or ndarray
    :return: redshift
    :rtype: float or ndarray
    """
    inverse = 1. / scale
    return inverse - 1.
import requests
def get_rirs(url, headers):
    """
    Get dictionary of existing rirs.

    Queries an IPAM API (the /api/ipam/rirs/ path looks like NetBox —
    confirm) and reshapes each result into a dict with 'is_private',
    'name' and 'state' (always 'present', for downstream create logic).

    :param url: base URL of the service, without a trailing slash
    :param headers: HTTP headers, e.g. the Authorization token
    :return: list of RIR dicts
    """
    api_url = f"{url}/api/ipam/rirs/"
    response = requests.request("GET", api_url, headers=headers)
    # NOTE(review): assumes the response is not paginated — "results"
    # is taken to hold everything; verify against the API settings.
    all_rirs = response.json()["results"]
    rirs = []
    for rir in all_rirs:
        rir_info = dict()
        rir_info["is_private"] = bool(rir["is_private"])
        rir_info["name"] = rir["name"]
        rir_info["state"] = "present"
        rirs.append(rir_info)
    return rirs
def normalize_url(url: str) -> str:
    """
    Remove at most one leading and one trailing slash from a URL.

    :param url: URL
    :return: URL with no leading and trailing slashes
    :private:
    """
    start = 1 if url.startswith('/') else 0
    end = len(url)
    if url.endswith('/') and end > start:
        end -= 1
    return url[start:end]
def get_checksums_and_file_names(path):
    """ Reads the local checksums file.

    Each line is expected to look like "<checksum> <file name>": the
    checksum, whitespace, then the name (names may contain spaces thanks
    to maxsplit=1). Returns a zip over the parsed lines, i.e. a pair of
    parallel tuples (checksums, file_names) once unpacked.
    """
    with open(path) as in_f:
        # The list comprehension materializes all lines before the file
        # closes; the returned zip is lazy but only over in-memory data.
        return zip(*[map(lambda x: x.strip('\n\r\t '), l.strip(" ").split(" ", maxsplit=1)) for l in in_f.readlines()])
import re
def remove_brackets_text(input):
    """Replace each parenthesised chunk (brackets included) with a space.

    Args:
        input (string): text to apply the regex func to
    """
    pattern = r"\([^()]*\)"
    return re.sub(pattern, " ", input)
import os
def travers_dir(dir):
    """Walk `dir` recursively and return (directory, file name) tuples."""
    found = []
    for current_dir, _subdirs, filenames in os.walk(dir):
        found.extend((current_dir, name) for name in filenames)
    return found
def reliability_calc(RACC, ACC):
    """
    Calculate Reliability = (ACC - RACC) / (1 - RACC).

    :param RACC: random accuracy
    :type RACC: float
    :param ACC: accuracy
    :type ACC: float
    :return: reliability as float, or the string "None" when the
        computation fails (e.g. division by zero when RACC == 1)
    """
    try:
        return (ACC - RACC) / (1 - RACC)
    except Exception:
        return "None"
from typing import Union
from typing import Any
def to_list(data: Union[tuple, list, Any]):
    """
    Normalize input to a list: tuples are converted, lists are returned
    untouched, and anything else is wrapped in a single-element list.

    :return: list-ified data
    """
    if isinstance(data, tuple):
        return list(data)
    if isinstance(data, list):
        return data
    return [data]
def get_root(T):
    """
    Find the root of a tree given as (node, parent, label) triples.

    :param T: tree
    :return: the unique node whose parent is 0
    :raises ValueError: when the tree has zero or more than one root
    """
    root = None
    for (node, parent, _label) in T:
        if parent != 0:
            continue
        if root is not None:
            raise ValueError("Tree cannot have more than one root!")
        root = node
    if root is None:
        raise ValueError("Tree does not have a root! All trees must have a root")
    return root
def createJDL(jooID, directory, jobCE):
    """
    _createJDL_

    Build a simple condor/globus JDL as a list of lines.
    """
    jdl = [
        "universe = globus\n",
        "should_transfer_executable = TRUE\n",
        "notification = NEVER\n",
        "Executable = %s/submit.sh\n" % (directory),
        "Output = condor.$(Cluster).$(Process).out\n",
        "Error = condor.$(Cluster).$(Process).err\n",
        "Log = condor.$(Cluster).$(Process).log\n",
        "initialdir = %s\n" % directory,
        "globusscheduler = %s\n" % (jobCE),
        "+WMAgent_JobID = %s\n" % jooID,
        "+WMAgent_AgentName = testAgent\n",
        "Queue 1",
    ]
    return jdl
def value_to_print(value, optype):
    """String of code that represents a value according to its type.

    None becomes NULL; numerics pass through unchanged; everything else
    is single-quoted with embedded quotes escaped.
    """
    if value is None:
        return "NULL"
    if optype == 'numeric':
        return value
    escaped = value.replace("'", "\\'")
    return u"'%s'" % escaped
def _clean_annotations(annotations_dict):
"""Fix the formatting of annotation dict.
:type annotations_dict: dict[str,str] or dict[str,set[str]] or dict[str,dict[str,bool]]
:rtype: dict[str,dict[str,bool]]
"""
return {
key: (
values if isinstance(values, dict) else
{v: True for v in values} if isinstance(values, set) else
{values: True}
)
for key, values in annotations_dict.items()
} | 574b557fc05186253e4b2f0038782835af59c612 | 690,445 |
def replace(s, replace):
    """Apply each (old, new) pair from `replace` to `s` and return the result."""
    result = s
    for pair in replace:
        result = result.replace(*pair)
    return result
def gmm_kl(gmm_p, gmm_q, n_samples=10**5):
    """
    Monte-Carlo estimate of the KL-divergence KL(p || q) between two
    fitted Gaussian Mixture models.

    :param gmm_p: 1st GMM model; must support .sample() and .score_samples()
        (e.g. sklearn.mixture.GaussianMixture — confirm the exact type)
    :param gmm_q: 2nd GMM model
    :param n_samples: number of samples drawn from gmm_p for the estimate
    :return: Float indicating KL-divergence value (non-negative in
        expectation, though the sampled estimate can dip slightly negative)
    """
    # From https://stackoverflow.com/a/26079963/5768407
    # KL(p||q) ≈ E_{x~p}[log p(x) - log q(x)], estimated from samples of p.
    X = gmm_p.sample(n_samples)[0]
    log_p_X = gmm_p.score_samples(X)
    log_q_X = gmm_q.score_samples(X)
    return log_p_X.mean() - log_q_X.mean()
import ipaddress
def validate_ip_address(ip_address: str) -> bool:
    """Return True when *ip_address* parses as a valid IPv4 or IPv6 address.

    Parameters:
        ip_address: `str`, input string to be validated
    """
    try:
        ipaddress.ip_address(ip_address)
    except ValueError:
        return False
    return True
import secrets
import math
import sympy
def gen_keys(_p: int, _q: int) -> tuple:
    """Generating private and public RSA keys.

    :param _p: first prime number
    :param _q: second prime number
    :return: the public key pair (e, n) and private key pair (d, n)
    :raises ValueError: if (_p - 1) * (_q - 1) <= 2, i.e. the primes are
        too small for an exponent 1 < e < phi to exist
    """
    # modulus for public and private keys
    n = _p * _q
    # totient
    # see https://simple.wikipedia.org/wiki/Euler's_totient_function
    phi = (_p - 1) * (_q - 1)
    if phi <= 2:
        raise ValueError('primes too small: need (_p - 1) * (_q - 1) > 2')
    # Pick e with 1 < e < phi and gcd(e, phi) == 1 (coprime,
    # see https://simple.wikipedia.org/wiki/Coprime).
    # randbelow(phi - 2) + 2 draws from [2, phi - 1], so e == 1
    # (a no-op encryption exponent) can never be chosen — the previous
    # randbelow(phi) + 1 could yield 1.
    e = secrets.randbelow(phi - 2) + 2
    while math.gcd(e, phi) != 1:
        e = secrets.randbelow(phi - 2) + 2
    # Modular inverse of e mod phi via the builtin three-argument pow
    # (Python 3.8+), replacing the third-party sympy.mod_inverse call.
    d = pow(e, -1, phi)
    # (e, n) -> public key pair
    # (d, n) -> private key pair
    return (e, n), (d, n)
def dual_id_dict(dict_values, G, node_attribute):
    """
    Map measures computed on dual-graph nodes back onto primal-graph identifiers.

    Useful when an analysis (e.g. betweenness centrality) was run on the dual
    representation and its per-node values need to be keyed by the attribute
    (such as edgeID) that links each dual node to the primal graph.

    Parameters
    ----------
    dict_values: dictionary
        {nodeID: value} pairs, where value is a measure computed on the graph
    G: networkx graph
        the graph whose nodes carry *node_attribute*
    node_attribute: string
        the node attribute used as the new dictionary key

    Returns
    -------
    dictionary
        {G.nodes[nodeID][node_attribute]: value} for every entry in dict_values
    """
    return {
        G.nodes[node][node_attribute]: measure
        for node, measure in dict_values.items()
    }
from typing import Callable
import asyncio
def add_async_job(target: Callable, *args):
    """Add a callable to the event loop.

    Coroutine objects and coroutine functions are wrapped in a Task; any
    other callable is dispatched to the loop's default executor.
    """
    loop = asyncio.get_event_loop()
    if asyncio.iscoroutine(target):
        # Already a coroutine object; *args is ignored in this case.
        return loop.create_task(target)
    if asyncio.iscoroutinefunction(target):
        return loop.create_task(target(*args))
    return loop.run_in_executor(None, target, *args)
def GCF(a, b):
    """
    Finds Greatest Common Factor of two given numbers using the
    iterative Euclidean algorithm.

    :param a: arbitrary first number
    :type a: int
    :param b: arbitrary second number
    :type b: int
    :return: greatest common factor (non-negative)
    :rtype: int
    :raises TypeError: if either argument is not an int
    """
    # type() (not isinstance) deliberately rejects bool, which subclasses int.
    if type(a) is not int or type(b) is not int:
        # was: 'Input must be float type.' — wrong message for an int check
        raise TypeError('Input must be int type.')
    # Work on magnitudes so negative inputs yield the mathematical GCF
    # instead of a negative remainder artifact.
    a, b = abs(a), abs(b)
    # Iterative Euclid also handles b == 0 (GCF(a, 0) == a), which the
    # recursive a % b formulation crashed on.
    while b:
        a, b = b, a % b
    return a
def construct_pandoc_command(
    input_file=None,
    lua_filter=None,
):
    """
    Build the Pandoc CLI invocation as a list for subprocess.

    # Parameters

    input_file:pathlib.Path
    - The file that we want to apply the lua filter too.

    lua_filter:pathlib.Path
    - The path to the lua filter to use for the word counts.

    # Return

    A list of CLI elements that will be used by subprocess.
    """
    command = ["pandoc"]
    command.append("--lua-filter")
    command.append(lua_filter)
    command.append(input_file)
    return command
def is_same_behavior_id_str(behavior_id_str_1, behavior_id_str_2):
    """Return True when one behavior ID string is a prefix of the other
    (e.g. 'fd' vs. 'fd_r').

    Args:
        behavior_id_str_1: Behavior ID as a string.
        behavior_id_str_2: Behavior ID as a string.
    """
    shorter, longer = sorted((behavior_id_str_1, behavior_id_str_2), key=len)
    return longer.startswith(shorter)
import re
import requests
def get_resource_tables(resource_url):
    """
    Fetch the page documented at *resource_url* and return every HTML
    <table>...</table> block found in it (case-insensitive, non-greedy,
    dot matches newlines).
    """
    response = requests.get(resource_url)
    return re.findall(r'(?ims)(\<table\>.*?\</table\>)', response.text)
def evalMultiRefToken(mref, ixname, val):
    """Helper function for evaluating multi-reference tokens for
    given index values.

    NOTE(review): this eval()s a string produced by plain textual
    substitution, so *mref* must come from a trusted source.
    """
    expression = mref.replace(ixname, str(val))
    return eval(expression, {}, {})
def compute_score(triples1, triples2):
    """
    Compute precision, recall, and f-score between two triple collections.
    Variable names must be identical.
    """
    gold = set(triples1)
    predicted = set(triples2)
    overlap = len(gold & predicted)
    prec = overlap / float(len(predicted))
    rec = overlap / float(len(gold))
    if prec == 0.0 and rec == 0.0:
        return 0.0, 0.0, 0.0
    f = 2 * prec * rec / (prec + rec)
    return prec, rec, f
import re
def split_blurb(lines):
    """ Split blurb on horizontal rules (lines starting with 3+ asterisks)."""
    rule = re.compile(r'\*{3,}')
    blurbs = [""]
    # [:-1] drops the empty trailing element produced by a final newline.
    for line in lines.split('\n')[:-1]:
        if rule.match(line):
            blurbs.append("")
        else:
            blurbs[-1] = blurbs[-1] + line + '\n'
    return blurbs
def splitConsecutive(collection, length):
    """
    Split the elements of the list @collection into consecutive disjoint lists of length @length. If @length is greater than the no. of elements in the collection, the collection is returned as is.
    """
    # Insufficient collection size for grouping: return unchanged (not wrapped).
    if len(collection) < length:
        return collection
    return [
        collection[start: start + length]
        for start in range(0, len(collection), length)
    ]
import csv
def csv_to_fasta(csv_path, delimiter=","):
    """Convert a csv-file of the format: <sequence> <name> to a FASTA string."""
    with csv_path.open() as csv_file:
        rows = csv.reader(csv_file, delimiter=delimiter)
        return "".join('> {}\n{}\n'.format(row[1], row[0]) for row in rows)
import argparse
def get_args():
    """! Command line parser for the EMODB dataset path."""
    arg_parser = argparse.ArgumentParser(
        description='EMODB Dataset parser')
    arg_parser.add_argument("-i", "--emodb_path", type=str,
                            help="""The path where EMODB dataset is stored""",
                            required=True)
    return arg_parser.parse_args()
def style_file(stylers, path, read_fn, write_fn):
    """Run every styler over one file and report whether it violated style.

    Args:
        stylers: the stylers to use to style the file; each must expose
            ``style_string(text, path)``.
        path: the file path.
        read_fn: function used to read in the file contents.
        write_fn: function used to write out the styled file, or None

    Returns:
        Tuple (path, stylers, violation); violation is True when any styler
        changed the contents.
    """
    original = read_fn(path)
    current = original
    for styler in stylers:
        current = styler.style_string(current, path)
    changed = current != original
    # Only write back when something changed and a writer was supplied.
    if changed and callable(write_fn):
        write_fn(path, original, current)
    return path, stylers, changed
def _create_titled_group(root, key, title):
    """Create the h5py group *key* under *root* and set its TITLE attribute."""
    group = root.create_group(key)
    group.attrs['TITLE'] = title
    return group
import re
def run_and_parse_first_match(run_lambda, command, regex):
    """
    Runs command using run_lambda and returns the first capture group of
    *regex* applied to the output, or None on nonzero exit / no match.
    """
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    return match.group(1) if match else None
import re
def polydict(seq, nuc='ACGT'):
    """
    Computes the longest homopolymer run length for each specified
    nucleotide (0 when the base is absent).

    Args:
        seq (str): Nucleotide sequence (case-insensitive)

    Examples:
        >>> sequtils.polydict('AAAACCGT')
        {'A': 4, 'C': 2, 'G': 1, 'T': 1}
    """
    seq = seq.upper()
    return {
        base: max((len(run) for run in re.findall(base + '+', seq)), default=0)
        for base in nuc
    }
def __uglify(text: str) -> str:
    """
    csv and json format output contain this non human readable header string:
    lower-case the text and swap every space for an underscore.
    """
    return '_'.join(text.lower().split(' '))
def fetch_courses(soups):
    """Collect the course 'item-frame' divs from each parsed page."""
    return [soup.find_all('div', class_='item-frame') for soup in soups]
def input_a_number():
    """Prompt the user for a number and return it as an int."""
    raw = input("Saisir un numéro : ")
    return int(raw)
import requests
def get_wikipedia_description(search):
    """Fetch an intro description for *search* via the Wikipedia API.

    NOTE: the lookup is currently disabled (``disable = True``) because the
    API proved too slow during data ingestion; descriptions are fetched by
    the front-end instead, so this always returns "".
    """
    disable = True
    if disable:
        return ""
    url = ('https://en.wikipedia.org/w/api.php'
           '?format=json'
           '&action=query'
           '&prop=extracts'
           '&exintro='
           '&explaintext='
           '&titles={query}'.format(query=search))
    response = requests.get(url).json()
    pages = response['query']['pages']
    description = ""
    for page in pages.values():
        if 'extract' in page:
            description = page['extract']
        else:
            description = ""
            break
    return description
import sys
def prompt(msg):
    """Write *msg* as a prompt to stdout and return the next stdin line, stripped."""
    sys.stdout.write(f'{msg}: ')
    sys.stdout.flush()
    answer = sys.stdin.readline()
    return answer.strip()
import os
def read_config_file():
    """
    Reads app.config from the current directory and returns a dictionary of
    its key=value pairs; lines without exactly one '=' are skipped.
    :return: Dictionary
    """
    config = {}
    if not os.path.exists('app.config'):
        print('app.config not found')
        return config
    with open('app.config', 'r', encoding='utf-8') as config_file:
        for raw_line in config_file:
            parts = raw_line.strip().split('=')
            if len(parts) == 2:
                config[parts[0]] = parts[1]
    return config
def is_numeric(value):
    """
    Return True when *value* is a single int or float scalar.

    Note: bools also pass, since bool subclasses int.
    :param value:
    :return:
    """
    return isinstance(value, (int, float))
def iter_reduce_ufunc(ufunc, arr_iter, out=None):
    """
    Reduce *arr_iter* left-to-right with *ufunc* using constant memory.

    The first array from the iterator seeds the accumulator; each further
    array is folded in-place with ``ufunc(acc, arr, out=acc)``, so no
    intermediate arrays are allocated.

    Args:
        ufunc: binary numpy ufunc (e.g. ``np.maximum``).
        arr_iter: iterator of equally-shaped arrays; the yielded buffers may
            be reused between iterations (see copy note below).
        out: optional preallocated output array; when given it receives the
            result and is returned, otherwise a copy of the first array is
            used as the accumulator.

    Returns:
        The reduced array, or None when the iterator is empty.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool_ibeis.other import *  # NOQA
        >>> arrs = [np.array([0, 3]), np.array([2, 1])]
        >>> print(iter_reduce_ufunc(np.maximum, iter(arrs)).tolist())
        [2, 3]
    """
    first = next(arr_iter, None)
    if first is None:
        return None
    if out is None:
        # Copy rather than alias: the iterator may rewrite this buffer.
        out = first.copy()
    else:
        out[:] = first
    for arr in arr_iter:
        ufunc(out, arr, out=out)
    return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.