| docstring | function | __index_level_0__ |
|---|---|---|
Get the first and second lines.
Args:
f (filelike): File opened for reading.
Returns:
tuple of bytes: the first and second lines read from the file.
|
def _get_two_lines(f):
l0 = f.readline()
l1 = f.readline()
return l0, l1
| 712,219
|
Determine the format of a word embedding file from its content. This operation
only looks at the first two lines and does not check the sanity of the input
file.
Args:
f (Filelike):
Returns:
class
|
def classify_format(f):
l0, l1 = _get_two_lines(f)
if loader.glove.check_valid(l0, l1):
return _glove
elif loader.word2vec_text.check_valid(l0, l1):
return _word2vec_text
elif loader.word2vec_bin.check_valid(l0, l1):
return _word2vec_bin
else:
raise OSError(b"Invalid format")
| 712,220
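A minimal usage sketch (the file name is hypothetical, and the loader package is assumed importable as above). Since classify_format consumes the first two lines, the caller rewinds before handing the file to the selected loader:
# "vectors.txt" is illustrative; open in binary mode so word2vec_bin can be detected
with open("vectors.txt", "rb") as f:
    fmt = classify_format(f)
    f.seek(0)  # rewind: the two header lines were consumed by classify_format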
|
Reload all running or pending jobs of Grid'5000 from their ids
Args:
oargrid_jobids (list): list of ``(site, oar_jobid)`` identifying the
jobs on each site
Returns:
The list of python-grid5000 jobs retrieved
|
def grid_reload_from_ids(oargrid_jobids):
gk = get_api_client()
jobs = []
for site, job_id in oargrid_jobids:
jobs.append(gk.sites[site].jobs[job_id])
return jobs
| 712,489
|
Destroy all the jobs with a given name.
Args:
job_name (str): the job name
|
def grid_destroy_from_name(job_name):
jobs = grid_reload_from_name(job_name)
for job in jobs:
job.delete()
logger.info("Killing the job (%s, %s)" % (job.site, job.uid))
| 712,490
|
Destroy all the jobs with corresponding ids
Args:
oargrid_jobids (list): the ``(site, oar_job_id)`` list of tuple
identifying the jobs for each site.
|
def grid_destroy_from_ids(oargrid_jobids):
jobs = grid_reload_from_ids(oargrid_jobids)
for job in jobs:
job.delete()
logger.info("Killing the jobs %s" % oargrid_jobids)
| 712,491
|
Submit jobs.
Args:
job_specs (list): list of ``(site, job_spec)`` tuples, where each
job_spec (dict) is a job specification (see the Grid'5000 API reference)
|
def submit_jobs(job_specs):
gk = get_api_client()
jobs = []
try:
for site, job_spec in job_specs:
logger.info("Submitting %s on %s" % (job_spec, site))
jobs.append(gk.sites[site].jobs.create(job_spec))
except Exception as e:
logger.error("An error occured during the job submissions")
logger.error("Cleaning the jobs created")
for job in jobs:
job.delete()
raise e
return jobs
| 712,492
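A hedged sketch of the expected ``job_specs`` structure; the site name and the job specification keys below are illustrative (they follow the Grid'5000 API reference, not this source):
job_specs = [
    ("rennes", {"name": "my-experiment",        # illustrative values
                "command": "sleep 3600",
                "resources": "nodes=2,walltime=01:00"}),
]
jobs = submit_jobs(job_specs)
wait_for_jobs(jobs)  # defined below: blocks until every job is running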
|
Waits for all the jobs to be running.
Args:
jobs(list): list of the python-grid5000 jobs to wait for
Raises:
Exception: if one of the jobs gets into the error state.
|
def wait_for_jobs(jobs):
all_running = False
while not all_running:
all_running = True
time.sleep(5)
for job in jobs:
job.refresh()
scheduled = getattr(job, "scheduled_at", None)
if scheduled is not None:
logger.info("Waiting for %s on %s [%s]" % (job.uid,
job.site,
_date2h(scheduled)))
all_running = all_running and job.state == "running"
if job.state == "error":
raise Exception("The job %s is in error state" % job)
logger.info("All jobs are Running !")
| 712,493
|
Deploy and wait for the deployment to be finished.
Args:
site(str): the site
nodes(list): list of nodes (str) to deploy
options(dict): options of the deployment (refer to the Grid'5000 API
Specifications)
Returns:
tuple of deployed(list), undeployed(list) nodes.
|
def grid_deploy(site, nodes, options):
gk = get_api_client()
environment = options.pop("env_name")
options.update(environment=environment)
options.update(nodes=nodes)
key_path = DEFAULT_SSH_KEYFILE
options.update(key=key_path.read_text())
logger.info("Deploying %s with options %s" % (nodes, options))
deployment = gk.sites[site].deployments.create(options)
while deployment.status not in ["terminated", "error"]:
deployment.refresh()
print("Waiting for the end of deployment [%s]" % deployment.uid)
time.sleep(10)
deploy = []
undeploy = []
if deployment.status == "terminated":
deploy = [node for node, v in deployment.result.items()
if v["state"] == "OK"]
undeploy = [node for node, v in deployment.result.items()
if v["state"] == "KO"]
elif deployment.status == "error":
undeploy = nodes
return deploy, undeploy
| 712,494
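A usage sketch; the site, node, and environment names are illustrative, and ``env_name`` must be present in options since grid_deploy pops it:
options = {"env_name": "debian10-x64-min"}   # illustrative environment
deployed, undeployed = grid_deploy(
    "rennes",                                # illustrative site
    ["paranoia-1.rennes.grid5000.fr"],       # illustrative node
    options,
)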
|
Set the interface of the nodes in a specific vlan.
It is assumed that the same interface name is available on each node.
Args:
site(str): site to consider
nodes(list): nodes to consider
interface(str): the network interface to put in the vlan
vlan_id(str): the id of the vlan
|
def set_nodes_vlan(site, nodes, interface, vlan_id):
def _to_network_address(host):
splitted = host.split('.')
splitted[0] = splitted[0] + "-" + interface
return ".".join(splitted)
gk = get_api_client()
network_addresses = [_to_network_address(n) for n in nodes]
gk.sites[site].vlans[str(vlan_id)].submit({"nodes": network_addresses})
| 712,495
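For illustration (hypothetical inputs), the inner helper rewrites each node FQDN into its per-interface network address before the vlan submission:
# "paranoia-8.rennes.grid5000.fr" with interface "eth1"
# is submitted as "paranoia-8-eth1.rennes.grid5000.fr"
set_nodes_vlan("rennes", ["paranoia-8.rennes.grid5000.fr"], "eth1", 4)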
|
Get all the corresponding sites of the passed clusters.
Args:
clusters(list): list of cluster uids (e.g. 'rennes')
Returns:
dict mapping each cluster uid to its python-grid5000 site object
|
def clusters_sites_obj(clusters):
result = {}
all_clusters = get_all_clusters_sites()
clusters_sites = {c: s for (c, s) in all_clusters.items()
if c in clusters}
for cluster, site in clusters_sites.items():
# here we want the site python-grid5000 site object
result.update({cluster: get_site_obj(site)})
return result
| 712,496
|
Get all the nodes of a given cluster.
Args:
cluster(string): uid of the cluster (e.g. 'rennes')
|
def get_nodes(cluster):
gk = get_api_client()
site = get_cluster_site(cluster)
return gk.sites[site].clusters[cluster].nodes.list()
| 712,498
|
Get the network interfaces names corresponding to a criteria.
Note that the cluster is passed (not the individual node names), thus it is
assumed that all nodes in a cluster have the same interface names and the
same configuration. In addition to ``extra_cond``, only the mountable and
Ethernet interfaces are returned.
Args:
cluster(str): the cluster to consider
extra_cond(lambda): boolean lambda that takes the nic(dict) as
parameter
|
def get_cluster_interfaces(cluster, extra_cond=lambda nic: True):
nics = get_nics(cluster)
# NOTE(msimonin): Since 05/18 nics on g5k nodes have predictable names but
# the api description keeps the legacy name (device key) and the new
# predictable name (name key). The legacy name is still used for api
# requests to the vlan endpoint. This should be fixed in
# https://intranet.grid5000.fr/bugzilla/show_bug.cgi?id=9272
# When it's fixed we should be able to only use the new predictable name.
nics = [(nic['device'], nic['name']) for nic in nics
if nic['mountable']
and nic['interface'] == 'Ethernet'
and not nic['management']
and extra_cond(nic)]
nics = sorted(nics)
return nics
| 712,499
|
Constructor.
Args:
excluded_sites(list): sites to forget about when reloading the
jobs. The primary use case was to exclude unreachable sites and
allow the program to go on.
|
def __init__(self, excluded_sites=None, **kwargs):
super().__init__(**kwargs)
if excluded_sites is None:
excluded_sites = []
self.excluded_site = excluded_sites
| 712,503
|
Reserves and deploys the vagrant boxes.
Args:
force_deploy (bool): True iff new machines should be started
|
def init(self, force_deploy=False):
machines = self.provider_conf.machines
networks = self.provider_conf.networks
_networks = []
for network in networks:
ipnet = IPNetwork(network.cidr)
_networks.append({
"netpool": list(ipnet)[10:-10],
"cidr": network.cidr,
"roles": network.roles,
"gateway": ipnet.ip
})
vagrant_machines = []
vagrant_roles = {}
j = 0
for machine in machines:
for _ in range(machine.number):
vagrant_machine = {
"name": "enos-%s" % j,
"cpu": machine.flavour_desc["core"],
"mem": machine.flavour_desc["mem"],
"ips": [n["netpool"].pop() for n in _networks],
}
vagrant_machines.append(vagrant_machine)
# Assign the machines to the right roles
for role in machine.roles:
vagrant_roles.setdefault(role, []).append(vagrant_machine)
j = j + 1
logger.debug(vagrant_roles)
loader = FileSystemLoader(searchpath=TEMPLATE_DIR)
env = Environment(loader=loader, autoescape=True)
template = env.get_template('Vagrantfile.j2')
vagrantfile = template.render(machines=vagrant_machines,
provider_conf=self.provider_conf)
vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile")
with open(vagrantfile_path, 'w') as f:
f.write(vagrantfile)
# Build env for Vagrant with a copy of the env variables (needed by the
# subprocesses opened by vagrant).
v_env = dict(os.environ)
v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf.backend
v = vagrant.Vagrant(root=os.getcwd(),
quiet_stdout=False,
quiet_stderr=False,
env=v_env)
if force_deploy:
v.destroy()
v.up()
v.provision()
roles = {}
for role, machines in vagrant_roles.items():
for machine in machines:
keyfile = v.keyfile(vm_name=machine['name'])
port = v.port(vm_name=machine['name'])
address = v.hostname(vm_name=machine['name'])
roles.setdefault(role, []).append(
Host(address,
alias=machine['name'],
user=self.provider_conf.user,
port=port,
keyfile=keyfile))
networks = [{
'cidr': str(n["cidr"]),
'start': str(n["netpool"][0]),
'end': str(n["netpool"][-1]),
'dns': '8.8.8.8',
'gateway': str(n["gateway"]),
'roles': n["roles"]
} for n in _networks]
logger.debug(roles)
logger.debug(networks)
return (roles, networks)
| 713,010
|
Saves one environment.
Args:
env (dict): the env dict to save.
|
def _save_env(env):
env_path = os.path.join(env["resultdir"], "env")
if os.path.isdir(env["resultdir"]):
with open(env_path, "w") as f:
yaml.dump(env, f)
| 713,372
|
Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`.
Args:
mass_fractions (dict): mass fraction :class:`dict`.
The composition is specified by a dictionary.
The keys are atomic numbers and the values weight fractions.
No wildcards are accepted.
|
def convert_mass_to_atomic_fractions(mass_fractions):
atomic_fractions = {}
for z, mass_fraction in mass_fractions.items():
atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z)
total_fraction = sum(atomic_fractions.values())
for z, fraction in atomic_fractions.items():
try:
atomic_fractions[z] = fraction / total_fraction
except ZeroDivisionError:
atomic_fractions[z] = 0.0
return atomic_fractions
| 713,915
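A worked sketch for a hypothetical mixture of 53% Fe / 47% O by mass (atomic numbers 26 and 8); exact outputs depend on pyxray's atomic weights, so the figures in the comments are approximate:
atomic = convert_mass_to_atomic_fractions({26: 0.53, 8: 0.47})
# roughly {26: 0.24, 8: 0.76}: oxygen dominates by atom count
# because its atoms are much lighter than iron's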
|
Converts an atomic fraction :class:`dict` to a mass fraction :class:`dict`.
Args:
atomic_fractions (dict): atomic fraction :class:`dict`.
The composition is specified by a dictionary.
The keys are atomic numbers and the values atomic fractions.
No wildcards are accepted.
|
def convert_atomic_to_mass_fractions(atomic_fractions):
# Calculate total atomic mass
atomic_masses = {}
total_atomic_mass = 0.0
for z, atomic_fraction in atomic_fractions.items():
atomic_mass = pyxray.element_atomic_weight(z)
atomic_masses[z] = atomic_mass
total_atomic_mass += atomic_fraction * atomic_mass
# Create mass fraction
mass_fractions = {}
for z, atomic_fraction in atomic_fractions.items():
mass_fractions[z] = atomic_fraction * atomic_masses[z] / total_atomic_mass
return mass_fractions
| 713,916
|
Converts a chemical formula to an atomic fraction :class:`dict`.
Args:
formula (str): chemical formula, like Al2O3. No wildcards are accepted.
|
def convert_formula_to_atomic_fractions(formula):
mole_fractions = {}
total_mole_fraction = 0.0
for match in CHEMICAL_FORMULA_PATTERN.finditer(formula):
symbol, mole_fraction = match.groups()
z = pyxray.element_atomic_number(symbol.strip())
if mole_fraction == '':
mole_fraction = 1.0
mole_fraction = float(mole_fraction)
mole_fractions[z] = mole_fraction
total_mole_fraction += mole_fraction
# Calculate atomic fractions
atomic_fractions = {}
for z, mole_fraction in mole_fractions.items():
atomic_fractions[z] = mole_fraction / total_mole_fraction
return atomic_fractions
| 713,917
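For example, Al2O3 contains 2 moles of Al (Z=13) for every 3 of O (Z=8), so the atomic fractions are exactly 0.4 and 0.6:
fractions = convert_formula_to_atomic_fractions("Al2O3")
# {13: 0.4, 8: 0.6}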
|
Creates a pure composition.
Args:
z (int): atomic number
|
def from_pure(cls, z):
return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
| 713,920
|
Creates a composition from a mass fraction :class:`dict`.
Args:
mass_fractions (dict): mass fraction :class:`dict`.
The keys are atomic numbers and the values weight fractions.
Wildcards are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron
will get a mass fraction of 0.6.
formula (str): optional chemical formula for the composition.
If ``None``, a formula will be generated for the composition.
|
def from_mass_fractions(cls, mass_fractions, formula=None):
mass_fractions = process_wildcard(mass_fractions)
atomic_fractions = convert_mass_to_atomic_fractions(mass_fractions)
if not formula:
formula = generate_name(atomic_fractions)
return cls(cls._key, mass_fractions, atomic_fractions, formula)
| 713,921
|
Transform the roles to use enoslib.host.Host hosts.
Args:
roles (dict): roles returned by
:py:func:`enoslib.infra.provider.Provider.init`
|
def _to_enos_roles(roles):
def to_host(h):
extra = {}
# create extra_vars for the nics
# network_role = ethX
for nic, roles in h["nics"]:
for role in roles:
extra[role] = nic
return Host(h["host"], user="root", extra=extra)
enos_roles = {}
for role, hosts in roles.items():
enos_roles[role] = [to_host(h) for h in hosts]
logger.debug(enos_roles)
return enos_roles
| 713,955
|
Transform the networks returned by deploy5k.
Args:
networks (dict): networks returned by
:py:func:`enoslib.infra.provider.Provider.init`
|
def _to_enos_networks(networks):
nets = []
for roles, network in networks:
nets.append(network.to_enos(roles))
logger.debug(nets)
return nets
| 713,956
|
Reserves and deploys the nodes according to the resources section.
In comparison to the vagrant provider, networks must be characterized
as in the networks key.
Args:
force_deploy (bool): True iff the environment must be redeployed
Raises:
MissingNetworkError: If one network is missing in comparison to
what is claimed.
NotEnoughNodesError: If the `min` constraints can't be met.
|
def init(self, force_deploy=False, client=None):
_force_deploy = self.provider_conf.force_deploy
self.provider_conf.force_deploy = _force_deploy or force_deploy
self._provider_conf = self.provider_conf.to_dict()
r = api.Resources(self._provider_conf, client=client)
r.launch()
roles = r.get_roles()
networks = r.get_networks()
return (_to_enos_roles(roles),
_to_enos_networks(networks))
| 713,957
|
Reset the network constraints (latency, bandwidth, ...).
Removes any filters that have been applied to shape the traffic.
Args:
roles (dict): role->hosts mapping as returned by
:py:meth:`enoslib.infra.provider.Provider.init`
extra_vars (dict): extra variables to pass to Ansible
|
def reset_network(roles, extra_vars=None):
logger.debug('Reset the constraints')
if not extra_vars:
extra_vars = {}
tmpdir = os.path.join(os.getcwd(), TMP_DIRNAME)
_check_tmpdir(tmpdir)
utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
options = {'enos_action': 'tc_reset',
'tc_output_dir': tmpdir}
options.update(extra_vars)
run_ansible([utils_playbook], roles=roles, extra_vars=options)
| 714,124
|
Wait for all the machines to be ssh-reachable.
Lets Ansible initiate a communication and retries if needed.
Args:
roles (dict): the roles to wait for, as returned by
:py:meth:`enoslib.infra.provider.Provider.init`
retries (int): number of times an SSH connection will be retried
interval (int): interval to wait in seconds between two retries
|
def wait_ssh(roles, retries=100, interval=30):
utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
options = {'enos_action': 'ping'}
for i in range(0, retries):
try:
run_ansible([utils_playbook],
roles=roles,
extra_vars=options,
on_error_continue=False)
break
except EnosUnreachableHostsError as e:
logger.info("Hosts unreachable: %s " % e.hosts)
logger.info("Retrying... %s/%s" % (i + 1, retries))
time.sleep(interval)
else:
raise EnosSSHNotReady('Maximum retries reached')
| 714,125
|
Expand group names.
Args:
grp (string): group names to expand
Returns:
list of groups
Examples:
* grp[1-3] will be expanded to [grp1, grp2, grp3]
* grp1 will be expanded to [grp1]
|
def expand_groups(grp):
p = re.compile(r"(?P<name>.+)\[(?P<start>\d+)-(?P<end>\d+)\]")
m = p.match(grp)
if m is not None:
s = int(m.group('start'))
e = int(m.group('end'))
n = m.group('name')
return list(map(lambda x: n + str(x), range(s, e + 1)))
else:
return [grp]
| 714,126
|
Make a function compatible with xarray.DataArray.
This function is intended to be used as a decorator like::
>>> @dc.xarrayfunc
... def func(array):
... # do something
... return newarray
>>>
>>> result = func(array)
Args:
func (function): Function to be wrapped. The first argument
of the function must be an array to be processed.
Returns:
wrapper (function): Wrapped function.
|
def xarrayfunc(func):
@wraps(func)
def wrapper(*args, **kwargs):
if any(isinstance(arg, xr.DataArray) for arg in args):
newargs = []
for arg in args:
if isinstance(arg, xr.DataArray):
newargs.append(arg.values)
else:
newargs.append(arg)
return dc.full_like(args[0], func(*newargs, **kwargs))
else:
return func(*args, **kwargs)
return wrapper
| 714,447
|
Calculate the power spectral density of data.
Args:
data (np.ndarray): Input data.
dt (float): Time between each data point.
ndivide (int): Do averaging (split data into ndivide, get the psd of each, and average them).
window (function): Window function applied to each segment. Default is hanning.
overlap_half (bool): Split data into half-overlapped regions.
Returns:
vk (np.ndarray): Frequency.
psd (np.ndarray): PSD.
|
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):
logger = getLogger('decode.utils.ndarray.psd')
if overlap_half:
step = int(len(data) / (ndivide + 1))
size = step * 2
else:
step = int(len(data) / ndivide)
size = step
if bin(len(data)).count('1') != 1:
logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))
size = int(len(data) / ndivide)
if bin(size).count('1') != 1:
if overlap_half:
logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))
else:
logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))
psd = np.zeros(size)
T = (size - 1) * dt
vs = 1 / dt
vk_ = fftfreq(size, dt)
vk = vk_[np.where(vk_ >= 0)]
for i in range(ndivide):
d = data[i * step:i * step + size]
if window is None:
w = np.ones(size)
corr = 1.0
else:
w = window(size)
corr = np.mean(w**2)
psd = psd + 2 * (np.abs(fft(d * w)))**2 / size * dt / corr
return vk, psd[:len(vk)] / ndivide
| 714,595
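A usage sketch on synthetic data (assuming the module-level numpy/fft imports used above): a noisy 10 Hz sine sampled at 1 kHz. 4096 samples keep each of the 4 segments a power of two, avoiding the warnings above:
import numpy as np

dt = 1e-3
t = np.arange(4096) * dt
data = np.sin(2 * np.pi * 10 * t) + 0.1 * np.random.randn(t.size)
vk, power = psd(data, dt, ndivide=4)  # power peaks in the bin nearest 10 Hz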
|
Calculate Allan variance.
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
tmax (float): Maximum time.
Returns:
tk (np.ndarray): Time.
allanvar (np.ndarray): Allan variance.
|
def allan_variance(data, dt, tmax=10):
allanvar = []
nmax = len(data) if len(data) < tmax / dt else int(tmax / dt)
for i in range(1, nmax+1):
databis = data[len(data) % i:]
y = databis.reshape(len(data)//i, i).mean(axis=1)
allanvar.append(((y[1:] - y[:-1])**2).mean() / 2)
return dt * np.arange(1, nmax+1), np.array(allanvar)
| 714,596
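Reusing the synthetic data from the previous sketch; for white noise the Allan variance falls off roughly as 1/t:
tk, avar = allan_variance(data, dt, tmax=1.0)  # tk spans dt .. 1.0 s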
|
Convert a decode cube to a decode array.
Args:
cube (decode.cube): Decode cube to be cast.
template (decode.array): Decode array whose shape the cube is cast on.
Returns:
decode array (decode.array): Decode array.
Notes:
This function is under development.
|
def fromcube(cube, template):
array = dc.zeros_like(template)
y, x = array.y.values, array.x.values
gy, gx = cube.y.values, cube.x.values
iy = interp1d(gy, np.arange(len(gy)))(y)
ix = interp1d(gx, np.arange(len(gx)))(x)
for ch in range(len(cube.ch)):
array[:,ch] = map_coordinates(cube.values[:,:,ch], (ix, iy))
return array
| 714,652
|
Make a continuum array.
Args:
cube (decode.cube): Decode cube which will be averaged over channels.
kwargs (optional): Other arguments.
weight (xarray.DataArray or float): Channel weight used for the average
(``inchs`` and ``exchs`` are no longer supported; use weight instead).
Returns:
decode cube (decode.cube): Decode cube (2d).
|
def makecontinuum(cube, **kwargs):
### pick up kwargs
inchs = kwargs.pop('inchs', None)
exchs = kwargs.pop('exchs', None)
weight = kwargs.pop('weight', None)
if (inchs is not None) or (exchs is not None):
raise KeyError('Inchs and exchs are no longer supported. Use weight instead.')
if weight is None:
weight = 1.
cont = (cube * (1 / weight**2)).sum(dim='ch') / (1 / weight**2).sum(dim='ch')
### define coordinates
xcoords = {'x': cube.x.values}
ycoords = {'y': cube.y.values}
chcoords = {'masterid': np.array([0]),
'kidid': np.array([0]),
'kidfq': np.array([0]),
'kidtp': np.array([1])}
scalarcoords = {'coordsys': cube.coordsys.values, 'datatype': cube.datatype.values,
'xref': cube.xref.values, 'yref': cube.yref.values}
return dc.cube(cont.values, xcoords=xcoords, ycoords=ycoords, chcoords=chcoords,
scalarcoords=scalarcoords)
| 714,654
|
u"""Will ask a question and keeps prompting until
answered.
Args:
question (str): Question to ask end user
default (str): Default answer if user just press enter at prompt
answer (str): Used for testing
Returns:
(bool) Meaning:
True - Answer is yes
False - Answer is no
|
def ask_yes_no(question, default='no', answer=None):
default = default.lower()
yes = [u'yes', u'ye', u'y']
no = [u'no', u'n']
if default in no:
help_ = u'[N/y]?'
default = False
else:
default = True
help_ = u'[Y/n]?'
while 1:
display = question + '\n' + help_
if answer is None:
log.debug(u'Under None')
answer = six.moves.input(display)
answer = answer.lower()
if answer == u'':
log.debug(u'Under blank')
return default
if answer in yes:
log.debug(u'Must be true')
return True
elif answer in no:
log.debug(u'Must be false')
return False
else:
sys.stdout.write(u'Please answer yes or no only!\n\n')
sys.stdout.flush()
answer = None
six.moves.input(u'Press enter to continue')
sys.stdout.write('\n\n\n\n\n')
sys.stdout.flush()
| 714,682
|
u"""Ask user a question and confirm answer
Args:
question (str): Question to ask user
default (str): Default answer if no input from user
required (bool): Require user to input an answer
answer (str): Used for testing
is_answer_correct (str): Used for testing
|
def get_correct_answer(question, default=None, required=False,
answer=None, is_answer_correct=None):
while 1:
if default is None:
msg = u' - No Default Available'
else:
msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
u'Use Default'.format(default))
prompt = question + msg + u'\n--> '
if answer is None:
answer = six.moves.input(prompt)
if answer == '' and required and default is None:
print(u'You have to enter a value\n\n')
six.moves.input(u'Press enter to continue')
print(u'\n\n')
answer = None
continue
if answer == u'' and default is not None:
answer = default
_ans = ask_yes_no(u'You entered {}, is this '
u'correct?'.format(answer),
answer=is_answer_correct)
if _ans:
return answer
else:
answer = None
| 714,683
|
Provides hash of given filename.
Args:
filename (str): Name of file to hash
Returns:
(str): sha256 hash
|
def get_package_hashes(filename):
log.debug('Getting package hashes')
filename = os.path.abspath(filename)
with open(filename, 'rb') as f:
data = f.read()
_hash = hashlib.sha256(data).hexdigest()
log.debug('Hash for file %s: %s', filename, _hash)
return _hash
| 714,931
|
Save a cube to a 3D-cube FITS file.
Args:
cube (xarray.DataArray): Cube to be saved.
fitsname (str): Name of output FITS file.
kwargs (optional): Other arguments passed to astropy.io.fits.writeto().
|
def savefits(cube, fitsname, **kwargs):
### pick up kwargs
dropdeg = kwargs.pop('dropdeg', False)
ndim = len(cube.dims)
### load yaml
FITSINFO = get_data('decode', 'data/fitsinfo.yaml')
hdrdata = yaml.load(FITSINFO, dc.utils.OrderedLoader)
### default header
if ndim == 2:
header = fits.Header(hdrdata['dcube_2d'])
data = cube.values.T
elif ndim == 3:
if dropdeg:
header = fits.Header(hdrdata['dcube_2d'])
data = cube.values[:, :, 0].T
else:
header = fits.Header(hdrdata['dcube_3d'])
kidfq = cube.kidfq.values
freqrange = ~np.isnan(kidfq)
orderedfq = np.argsort(kidfq[freqrange])
newcube = cube[:, :, orderedfq]
data = newcube.values.T
else:
raise TypeError(ndim)
### update Header
if cube.coordsys == 'AZEL':
header.update({'CTYPE1': 'dAZ', 'CTYPE2': 'dEL'})
elif cube.coordsys == 'RADEC':
header.update({'OBSRA': float(cube.xref), 'OBSDEC': float(cube.yref)})
else:
pass
header.update({'CRVAL1': float(cube.x[0]),
'CDELT1': float(cube.x[1] - cube.x[0]),
'CRVAL2': float(cube.y[0]),
'CDELT2': float(cube.y[1] - cube.y[0]),
'DATE': datetime.now(timezone('UTC')).isoformat()})
if (ndim == 3) and (not dropdeg):
header.update({'CRVAL3': float(newcube.kidfq[0]),
'CDELT3': float(newcube.kidfq[1] - newcube.kidfq[0])})
fitsname = str(Path(fitsname).expanduser())
fits.writeto(fitsname, data, header, **kwargs)
logger.info('{} has been created.'.format(fitsname))
| 714,961
|
Load a dataarray from a NetCDF file.
Args:
filename (str): Filename (*.nc).
copy (bool): If True, dataarray is copied in memory. Default is True.
Returns:
dataarray (xarray.DataArray): Loaded dataarray.
|
def loadnetcdf(filename, copy=True):
filename = str(Path(filename).expanduser())
if copy:
dataarray = xr.open_dataarray(filename).copy()
else:
dataarray = xr.open_dataarray(filename, chunks={})
if dataarray.name is None:
# str.rstrip strips characters, not a suffix, so trim '.nc' explicitly
dataarray.name = filename[:-3] if filename.endswith('.nc') else filename
for key, val in dataarray.coords.items():
if val.dtype.kind == 'S':
dataarray[key] = val.astype('U')
elif val.dtype == np.int32:
dataarray[key] = val.astype('i8')
return dataarray
| 714,962
|
Save a dataarray to a NetCDF file.
Args:
dataarray (xarray.DataArray): Dataarray to be saved.
filename (str): Filename (used as <filename>.nc).
If not specified, a random 8-character name will be used.
|
def savenetcdf(dataarray, filename=None):
if filename is None:
if dataarray.name is not None:
filename = dataarray.name
else:
filename = uuid4().hex[:8]
else:
filename = str(Path(filename).expanduser())
if not filename.endswith('.nc'):
filename += '.nc'
dataarray.to_netcdf(filename)
logger.info('{} has been created.'.format(filename))
| 714,963
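A round-trip sketch (assuming numpy and xarray are importable as in the functions above):
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(3.0), name="demo")
savenetcdf(da, "demo")           # writes demo.nc
loaded = loadnetcdf("demo.nc")   # loaded.name == "demo"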
|
Create an array of given shape and type, filled with zeros.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array filled with zeros.
|
def zeros(shape, dtype=None, **kwargs):
data = np.zeros(shape, dtype)
return dc.array(data, **kwargs)
| 714,988
|
Create an array of given shape and type, filled with ones.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array filled with ones.
|
def ones(shape, dtype=None, **kwargs):
data = np.ones(shape, dtype)
return dc.array(data, **kwargs)
| 714,989
|
Create an array of given shape and type, filled with `fill_value`.
Args:
shape (sequence of ints): 2D shape of the array.
fill_value (scalar or numpy.ndarray): Fill value or array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array filled with `fill_value`.
|
def full(shape, fill_value, dtype=None, **kwargs):
return (dc.zeros(shape, **kwargs) + fill_value).astype(dtype)
| 714,990
|
Create an array of given shape and type, without initializing entries.
Args:
shape (sequence of ints): 2D shape of the array.
dtype (data-type, optional): Desired data-type for the array.
kwargs (optional): Other arguments of the array (*coords, attrs, and name).
Returns:
array (decode.array): Decode array without initializing entries.
|
def empty(shape, dtype=None, **kwargs):
data = np.empty(shape, dtype)
return dc.array(data, **kwargs)
| 714,991
|
Plot coordinates related to the time axis.
Args:
array (xarray.DataArray): Array in which the coordinate information is included.
coords (list): Name of x axis and y axis.
scantypes (list): Scantypes. If None, all scantypes are used.
ax (matplotlib.axes): Axis you want to plot on.
kwargs (optional): Plot options passed to ax.plot().
|
def plot_tcoords(array, coords, scantypes=None, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
if scantypes is None:
ax.plot(array[coords[0]], array[coords[1]], label='ALL', **kwargs)
else:
for scantype in scantypes:
ax.plot(array[coords[0]][array.scantype == scantype],
array[coords[1]][array.scantype == scantype], label=scantype, **kwargs)
ax.set_xlabel(coords[0])
ax.set_ylabel(coords[1])
ax.set_title('{} vs {}'.format(coords[1], coords[0]))
ax.legend()
logger.info('{} vs {} has been plotted.'.format(coords[1], coords[0]))
| 715,097
|
Plot timestream data.
Args:
array (xarray.DataArray): Array in which the timestream data are included.
kidid (int): Kidid.
xtick (str): Type of x axis.
'time': Time.
'index': Time index.
scantypes (list): Scantypes. If None, all scantypes are used.
ax (matplotlib.axes): Axis you want to plot on.
kwargs (optional): Plot options passed to ax.plot().
|
def plot_timestream(array, kidid, xtick='time', scantypes=None, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
index = np.where(array.kidid == kidid)[0]
if len(index) == 0:
raise KeyError('Such a kidid does not exist.')
index = int(index)
if scantypes is None:
if xtick == 'time':
ax.plot(array.time, array[:, index], label='ALL', **kwargs)
elif xtick == 'index':
ax.plot(np.ogrid[:len(array.time)], array[:, index], label='ALL', **kwargs)
else:
for scantype in scantypes:
if xtick == 'time':
ax.plot(array.time[array.scantype == scantype],
array[:, index][array.scantype == scantype], label=scantype, **kwargs)
elif xtick == 'index':
ax.plot(np.ogrid[:len(array.time[array.scantype == scantype])],
array[:, index][array.scantype == scantype], label=scantype, **kwargs)
ax.set_xlabel('{}'.format(xtick))
ax.set_ylabel(str(array.datatype.values))
ax.legend()
kidtpdict = {0: 'wideband', 1: 'filter', 2: 'blind'}
try:
kidtp = kidtpdict[int(array.kidtp[index])]
except KeyError:
kidtp = 'filter'
ax.set_title('ch #{} ({})'.format(kidid, kidtp))
logger.info('timestream data (ch={}) has been plotted.'.format(kidid))
| 715,098
|
Plot an intensity map.
Args:
cube (xarray.DataArray): Cube in which the spectrum information is included.
kidid (int): Kidid.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.pcolormesh().
|
def plot_chmap(cube, kidid, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
index = np.where(cube.kidid == kidid)[0]
if len(index) == 0:
raise KeyError('Such a kidid does not exist.')
index = int(index)
im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('intensity map ch #{}'.format(kidid))
return im
| 715,100
|
Plot PSD (Power Spectral Density).
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get the psd of each, and average them).
window (function): Window function applied to each segment. Default is hanning.
overlap_half (bool): Split data into half-overlapped regions.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
|
def plotpsd(data, dt, ndivide=1, window=hanning, overlap_half=False, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
vk, psddata = psd(data, dt, ndivide, window, overlap_half)
ax.loglog(vk, psddata, **kwargs)
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('PSD')
ax.legend()
| 715,101
|
Plot Allan variance.
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
tmax (float): Maximum time.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
|
def plotallanvar(data, dt, tmax=10, ax=None, **kwargs):
if ax is None:
ax = plt.gca()
tk, allanvar = allan_variance(data, dt, tmax)
ax.loglog(tk, allanvar, **kwargs)
ax.set_xlabel('Time [s]')
ax.set_ylabel('Allan Variance')
ax.legend()
| 715,102
|
Returns parent directory of mac .app
Args:
directory (str): Current directory
Returns:
(str): Parent directory of mac .app
|
def get_mac_dot_app_dir(directory):
return os.path.dirname(os.path.dirname(os.path.dirname(directory)))
| 715,166
|
Copy a function object with different name.
Args:
func (function): Function to be copied.
name (string, optional): Name of the new function.
If not specified, the same name as `func` will be used.
Returns:
newfunc (function): New function with different name.
|
def copy_function(func, name=None):
code = func.__code__
newname = name or func.__name__
newcode = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
newname,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
newfunc = FunctionType(
newcode,
func.__globals__,
newname,
func.__defaults__,
func.__closure__,
)
newfunc.__dict__.update(func.__dict__)
return newfunc
| 715,176
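A usage sketch (note the CodeType call above uses the pre-3.8 signature, so this assumes an older Python 3):
def greet():
    return "hi"

greet2 = copy_function(greet, "greet2")
assert greet2() == "hi"
assert greet2.__name__ == "greet2"
assert greet2 is not greet  # a distinct function object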
|
Open youtube.
Args:
keyword (optional): Search word.
|
def youtube(keyword=None):
if keyword is None:
web.open('https://www.youtube.com/watch?v=L_mBVT2jBFw')
else:
web.open(quote('https://www.youtube.com/results?search_query={}'.format(keyword), RESERVED))
| 715,268
|
Parse the output analysis files from MIP for adding info
to trend database
Args:
mip_config_raw (dict): raw YAML input from MIP analysis config file
qcmetrics_raw (dict): raw YAML input from MIP analysis qc metric file
sampleinfo_raw (dict): raw YAML input from MIP analysis qc sample info file
Returns:
dict: parsed data
|
def parse_mip_analysis(mip_config_raw: dict, qcmetrics_raw: dict, sampleinfo_raw: dict) -> dict:
outdata = _define_output_dict()
_config(mip_config_raw, outdata)
_qc_metrics(outdata, qcmetrics_raw)
_qc_sample_info(outdata, sampleinfo_raw)
return outdata
| 715,598
|
Parse MIP config file.
Args:
data (dict): raw YAML input from MIP analysis config file
Returns:
dict: parsed data
|
def parse_config(data: dict) -> dict:
return {
'email': data.get('email'),
'family': data['family_id'],
'samples': [{
'id': sample_id,
'type': analysis_type,
} for sample_id, analysis_type in data['analysis_type'].items()],
'config_path': data['config_file_analysis'],
'is_dryrun': 'dry_run_all' in data,
'log_path': data['log_file'],
'out_dir': data['outdata_dir'],
'priority': data['slurm_quality_of_service'],
'sampleinfo_path': data['sample_info_file'],
}
| 716,281
|
Parse MIP sample info file.
Args:
data (dict): raw YAML input from MIP qc sample info file
Returns:
dict: parsed data
|
def parse_sampleinfo(data: dict) -> dict:
genome_build = data['human_genome_build']
genome_build_str = f"{genome_build['source']}{genome_build['version']}"
if 'svdb' in data['program']:
svdb_outpath = (f"{data['program']['svdb']['path']}")
else:
svdb_outpath = ''
outdata = {
'date': data['analysis_date'],
'family': data['family'],
'genome_build': genome_build_str,
'rank_model_version': data['program']['genmod']['rank_model']['version'],
'is_finished': data['analysisrunstatus'] == 'finished',
'pedigree_path': data['pedigree_minimal'],
'peddy': {
'ped': (data['program']['peddy']['peddy']['path'] if
'peddy' in data['program'] else None),
'ped_check': (data['program']['peddy']['ped_check']['path'] if
'peddy' in data['program'] else None),
'sex_check': (data['program']['peddy']['sex_check']['path'] if
'peddy' in data['program'] else None),
},
'qcmetrics_path': data['program']['qccollect']['path'],
'samples': [],
'snv': {
'bcf': data['most_complete_bcf']['path'],
'clinical_vcf': data['vcf_binary_file']['clinical']['path'],
'gbcf': data['gbcf_file']['path'],
'research_vcf': data['vcf_binary_file']['research']['path'],
},
'svdb_outpath': svdb_outpath,
'sv': {
'bcf': data.get('sv_bcf_file', {}).get('path'),
'clinical_vcf': (data['sv_vcf_binary_file']['clinical']['path'] if
'sv_vcf_binary_file' in data else None),
'merged': svdb_outpath,
'research_vcf': (data['sv_vcf_binary_file']['research']['path'] if
'sv_vcf_binary_file' in data else None),
},
'version': data['mip_version'],
}
for sample_id, sample_data in data['sample'].items():
sample = {
'id': sample_id,
'bam': sample_data['most_complete_bam']['path'],
'sambamba': list(sample_data['program']['sambamba_depth'].values())[0]['path'],
'sex': sample_data['sex'],
# subsample mt is only for wgs data
'subsample_mt': (list(sample_data['program']['samtools_subsample_mt'].values())[0]['path'] if
'samtools_subsample_mt' in sample_data['program'] else None),
'vcf2cytosure': list(sample_data['program']['vcf2cytosure'].values())[0]['path'],
}
chanjo_sexcheck = list(sample_data['program']['chanjo_sexcheck'].values())[0]
sample['chanjo_sexcheck'] = chanjo_sexcheck['path']
outdata['samples'].append(sample)
return outdata
| 716,282
|
Parse MIP qc metrics file.
Args:
metrics (dict): raw YAML input from MIP qc metrics file
Returns:
dict: parsed data
|
def parse_qcmetrics(metrics: dict) -> dict:
data = {
'versions': {
'freebayes': metrics['program']['freebayes']['version'],
'gatk': metrics['program']['gatk']['version'],
'manta': metrics['program'].get('manta', {}).get('version'),
'bcftools': metrics['program']['bcftools']['version'],
'vep': metrics['program']['varianteffectpredictor']['version'],
},
'samples': [],
}
plink_samples = {}
plink_sexcheck = metrics['program'].get('plink_sexcheck', {}).get('sample_sexcheck')
if isinstance(plink_sexcheck, str):
sample_id, sex_number = plink_sexcheck.strip().split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
elif isinstance(plink_sexcheck, list):
for sample_raw in plink_sexcheck:
sample_id, sex_number = sample_raw.split(':', 1)
plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
for sample_id, sample_metrics in metrics['sample'].items():
## Bam stats metrics
bam_stats = [values['bamstats'] for key, values in sample_metrics.items()
if key[:-1].endswith('.lane')]
total_reads = sum(int(bam_stat['raw_total_sequences']) for bam_stat in bam_stats)
total_mapped = sum(int(bam_stat['reads_mapped']) for bam_stat in bam_stats)
## Picard metrics
main_key = [key for key in sample_metrics.keys() if '_lanes_' in key][0]
hs_metrics = sample_metrics[main_key]['collecthsmetrics']['header']['data']
multiple_inst_metrics = sample_metrics[main_key]['collectmultiplemetricsinsertsize']['header']['data']
multiple_metrics = sample_metrics[main_key]['collectmultiplemetrics']['header']['pair']
sample_data = {
'at_dropout': hs_metrics['AT_DROPOUT'],
'completeness_target': {
10: hs_metrics['PCT_TARGET_BASES_10X'],
20: hs_metrics['PCT_TARGET_BASES_20X'],
50: hs_metrics['PCT_TARGET_BASES_50X'],
100: hs_metrics['PCT_TARGET_BASES_100X'],
},
'duplicates': float(sample_metrics[main_key]['markduplicates']['fraction_duplicates']),
'gc_dropout': hs_metrics['GC_DROPOUT'],
'id': sample_id,
'median_insert_size': multiple_inst_metrics['MEDIAN_INSERT_SIZE'],
'mapped': total_mapped / total_reads,
'plink_sex': plink_samples.get(sample_id),
'predicted_sex': sample_metrics[main_key]['chanjo_sexcheck']['gender'],
'reads': total_reads,
'insert_size_standard_deviation': float(multiple_inst_metrics['STANDARD_DEVIATION']),
'strand_balance': float(multiple_metrics['STRAND_BALANCE']),
'target_coverage': float(hs_metrics['MEAN_TARGET_COVERAGE']),
}
data['samples'].append(sample_data)
return data
| 716,283
|
Start a task.
This function depends on the underlying implementation of _start, which
any subclass of ``Task`` should implement.
Args:
wait (bool): Whether or not to wait on the task to finish before
returning from this function. Default `False`.
Raises:
RuntimeError: If the task has already been started without a
subsequent call to ``reset()``.
|
def start(self, wait=False):
if self._status is not TaskStatus.IDLE:
raise RuntimeError("Cannot start %s in state %s" %
(self, self._status))
self._status = TaskStatus.STARTED
STARTED_TASKS.add(self)
self._start()
if wait:
self.wait()
return self.return_values
| 716,502
|
Send a file to a remote host with rsync.
Args:
file_name (str): The relative location of the file on the local
host.
remote_destination (str): The destination for the file on the remote
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
|
def send_file(self, file_name, remote_destination=None, **kwargs):
if not remote_destination:
remote_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', file_name, '%s:%s' % (self.hostname, remote_destination)],
**kwargs)
| 716,688
|
Get a file from a remote host with rsync.
Args:
file_name (str): The relative location of the file on the remote
host.
local_destination (str): The destination for the file on the local
host. If `None`, will be assumed to be the same as
**file_name**. Default `None`.
**kwargs: Passed to ``SubprocessTask``'s init method.
Return:
``pyrem.task.SubprocessTask``: The resulting task.
|
def get_file(self, file_name, local_destination=None, **kwargs):
if not local_destination:
local_destination = file_name
return SubprocessTask(
self._rsync_cmd() +
['-ut', '%s:%s' % (self.hostname, file_name), local_destination],
**kwargs)
| 716,689
|
Serialize a migration session state to yaml using nicer formatting
Args:
raw: object to serialize
Returns: string (of yaml)
Specifically, this forces the "output" member of state step dicts (e.g.
state[0]['output']) to use block formatting. For example, rather than this:
- migration: [app, migration_name]
output: "line 1\nline2\nline3"
You get this:
- migration: [app, migration_name]
output: |
line 1
line 2
line 3
|
def dump_migration_session_state(raw):
class BlockStyle(str): pass
class SessionDumper(yaml.SafeDumper): pass
def str_block_formatter(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
SessionDumper.add_representer(BlockStyle, str_block_formatter)
raw = deepcopy(raw)
for step in raw:
step['output'] = BlockStyle(step['output'])
step['traceback'] = BlockStyle(step['traceback'])
return yaml.dump(raw, Dumper=SessionDumper)
| 716,977
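A sketch of the expected input: a list of step dicts whose ``output`` and ``traceback`` members are strings; both get forced to block style in the dump:
state = [{
    "migration": ["app", "0001_initial"],  # illustrative app/migration names
    "output": "line 1\nline 2",
    "traceback": "",
}]
print(dump_migration_session_state(state))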
|
Add migrations to be applied.
Args:
migrations: a list of migrations to add of the form [(app, migration_name), ...]
Raises:
MigrationSessionError if called on a closed MigrationSession
|
def add_migrations(self, migrations):
if self.__closed:
raise MigrationSessionError("Can't change applied session")
self._to_apply.extend(migrations)
| 716,979
|
Create a repr from an instance of a class
Args:
inst: The class instance we are generating a repr of
attrs: The attributes that should appear in the repr
|
def make_repr(inst, attrs):
# type: (object, Sequence[str]) -> str
arg_str = ", ".join(
"%s=%r" % (a, getattr(inst, a)) for a in attrs if hasattr(inst, a))
repr_str = "%s(%s)" % (inst.__class__.__name__, arg_str)
return repr_str
| 717,445
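A usage sketch:
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __repr__(self):
        return make_repr(self, ["x", "y"])

repr(Point(1, 2))  # "Point(x=1, y=2)"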
|
Annotate a type with run-time accessible metadata
Args:
description: A one-line description for the argument
typ: The type of the Anno, can also be set via context manager
name: The name of the Anno, can also be set via context manager
|
def __init__(self, description, typ=None, name=None, default=NO_DEFAULT):
# type: (str, Any, str, Any) -> None
self._names_on_enter = None # type: Optional[Set[str]]
self.default = default # type: Any
self.typ = typ # type: Any
self.name = name # type: Optional[str]
self.is_array = None # type: Optional[bool]
self.is_mapping = None # type: Optional[bool]
self.description = description
| 717,446
|
Make a call_types dictionary that describes what arguments to pass to f
Args:
f: The function to inspect for argument names (without self)
globals_d: A dictionary of globals to lookup annotation definitions in
|
def make_call_types(f, globals_d):
# type: (Callable, Dict) -> Tuple[Dict[str, Anno], Anno]
arg_spec = getargspec(f)
args = [k for k in arg_spec.args if k != "self"]
defaults = {} # type: Dict[str, Any]
if arg_spec.defaults:
default_args = args[-len(arg_spec.defaults):]
for a, default in zip(default_args, arg_spec.defaults):
defaults[a] = default
if not getattr(f, "__annotations__", None):
# Make string annotations from the type comment if there is one
annotations = make_annotations(f, globals_d)
else:
annotations = f.__annotations__
call_types = OrderedDict() # type: Dict[str, Anno]
for a in args:
anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))
assert isinstance(anno, Anno), \
"Argument %r has type %r which is not an Anno" % (a, anno)
call_types[a] = anno
return_type = anno_with_default(annotations.get("return", None))
if return_type is Any:
return_type = Anno("Any return value", Any, "return")
assert return_type is None or isinstance(return_type, Anno), \
"Return has type %r which is not an Anno" % (return_type,)
return call_types, return_type
| 717,477
|
Create an annotations dictionary from Python2 type comments
http://mypy.readthedocs.io/en/latest/python2.html
Args:
f: The function to examine for type comments
globals_d: The globals dictionary to get type idents from. If not
specified then make the annotations dict contain strings rather
than the looked up objects
|
def make_annotations(f, globals_d=None):
# type: (Callable, Dict) -> Dict[str, Any]
locals_d = {} # type: Dict[str, Any]
if globals_d is None:
# If not given a globals_d then we should just populate annotations with
# the strings in the type comment.
globals_d = {}
# The current approach is to use eval, which means manufacturing a
# dict like object that will just echo the string back to you. This
# has a number of complexities for something like numpy.number or
# Callable[..., int], which are handled in EchoStr above, so it might be
# better off as an ast.parse in the future...
locals_d = EchoDict()
lines, _ = inspect.getsourcelines(f)
arg_spec = getargspec(f)
args = list(arg_spec.args)
if arg_spec.varargs is not None:
args.append(arg_spec.varargs)
if arg_spec.keywords is not None:
args.append(arg_spec.keywords)
it = iter(lines)
types = [] # type: List
found = None
for token in tokenize.generate_tokens(lambda: next(it)):
typ, string, start, end, line = token
if typ == tokenize.COMMENT:
found = type_re.match(string)
if found:
parts = found.groups()
# (...) is used to represent all the args so far
if parts[0] != "(...)":
expr = parts[0].replace("*", "")
try:
ob = eval(expr, globals_d, locals_d)
except Exception as e:
raise ValueError(
"Error evaluating %r: %s" % (expr, e))
if isinstance(ob, tuple):
# We got more than one argument
types += list(ob)
else:
# We got a single argument
types.append(ob)
if parts[1]:
# Got a return, done
try:
ob = eval(parts[2], globals_d, locals_d)
except Exception as e:
raise ValueError(
"Error evaluating %r: %s" % (parts[2], e))
if args and args[0] in ["self", "cls"]:
# Allow the first argument to be inferred
if len(args) == len(types) + 1:
args = args[1:]
assert len(args) == len(types), \
"Args %r Types %r length mismatch" % (args, types)
ret = dict(zip(args, types))
ret["return"] = ob
return ret
if found:
# If we have ever found a type comment, but not the return value, error
raise ValueError("Got to the end of the function without seeing ->")
return {}
| 717,478
|
Convert the confusion matrix to the Matthews correlation coefficient
Parameters:
-----------
cm : ndarray
2x2 confusion matrix with np.array([[tn, fp], [fn, tp]])
tn, fp, fn, tp : float
four scalar variables
- tn : number of true negatives
- fp : number of false positives
- fn : number of false negatives
- tp : number of true positives
Return:
-------
r : float
Matthews correlation coefficient
|
def confusion_to_mcc(*args):
if len(args) == 1:
tn, fp, fn, tp = args[0].ravel().astype(float)
elif len(args) == 4:
tn, fp, fn, tp = [float(a) for a in args]
else:
raise Exception((
"Input argument is not an 2x2 matrix, "
"nor 4 elements tn, fp, fn, tp."))
return (tp * tn - fp * fn) / np.sqrt(
(tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
| 717,794
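Two quick checks: a perfect 2x2 confusion matrix gives an MCC of 1, and the same function accepts the four scalars directly:
import numpy as np

confusion_to_mcc(np.array([[50, 0], [0, 50]]))  # 1.0
confusion_to_mcc(40, 10, 10, 40)                # (1600 - 100) / 2500 = 0.6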
|
Initialize the service registry.
Creates the database table if it does not exist.
Args:
rr (doublethink.Rethinker): a doublethink.Rethinker, which must
have `dbname` set
|
def __init__(self, rr, table='services'):
self.rr = rr
self.table = table
self._ensure_table()
| 718,347
|
Look up healthy services in the registry.
A service is considered healthy if its 'last_heartbeat' was less than
'ttl' seconds ago
Args:
role (str, optional): role name
Returns:
If `role` is supplied, returns list of healthy services for the
given role, otherwise returns list of all healthy services. May
return an empty list.
|
def healthy_services(self, role=None):
try:
query = self.rr.table(self.table)
if role:
query = query.get_all(role, index='role')
query = query.filter(
lambda svc: r.now().sub(svc["last_heartbeat"]) < svc["ttl"] #.default(20.0)
).order_by("load")
result = query.run()
return result
except r.ReqlNonExistenceError:
return []
| 718,352
|
Get the module specified by the value of option_name. The value of the
configuration option will be used to load the module by name from the known
module list or treated as a path if not found in known_modules.
Args:
option_name: name of persistence module
known_modules: dictionary of module names and module paths,
ie: {'ndb':'furious.extras.appengine.ndb_persistence'}
Returns:
module of the module path matching the name in known_modules
|
def _get_configured_module(option_name, known_modules=None):
from furious.job_utils import path_to_reference
config = get_config()
option_value = config[option_name]
# If no known_modules were given, make it an empty dict.
if not known_modules:
known_modules = {}
module_path = known_modules.get(option_value) or option_value
return path_to_reference(module_path)
| 718,408
|
Traverse directory trees to find a furious.yaml file
Begins with the location of this file then checks the
working directory if not found
Args:
config_file: location of this file, override for
testing
Returns:
the path of furious.yaml or None if not found
|
def find_furious_yaml(config_file=__file__):
checked = set()
result = _find_furious_yaml(os.path.dirname(config_file), checked)
if not result:
result = _find_furious_yaml(os.getcwd(), checked)
return result
| 718,409
|
Traverse the directory tree identified by start
until a directory already in checked is encountered or the path
of furious.yaml is found.
Checked is present both to make the loop termination easy
to reason about and so the same directories do not get
rechecked
Args:
start: the path to start looking in and work upward from
checked: the set of already checked directories
Returns:
the path of the furious.yaml file or None if it is not found
|
def _find_furious_yaml(start, checked):
directory = start
while directory not in checked:
checked.add(directory)
for fs_yaml_name in FURIOUS_YAML_NAMES:
yaml_path = os.path.join(directory, fs_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
directory = os.path.dirname(directory)
return None
| 718,410
|
Creates a segment cost function for a time series with a
Normal distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
variance (float): variance
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the ending index. Returns the cost of that segment.
|
def normal_mean(data, variance):
if not isinstance(data, np.ndarray):
data = np.array(data)
i_variance_2 = 1 / (variance ** 2)
cmm = [0.0]
cmm.extend(np.cumsum(data))
cmm2 = [0.0]
cmm2.extend(np.cumsum(np.abs(data)))
def cost(start, end):
cmm2_diff = cmm2[end] - cmm2[start]
cmm_diff = pow(cmm[end] - cmm[start], 2)
i_diff = end - start
diff = cmm2_diff - cmm_diff
return (diff/i_diff) * i_variance_2
return cost
| 718,440
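A usage sketch: the returned closure prices a candidate segment by its (start, end) indices over the cumulative sums built above:
import numpy as np

series = np.concatenate([np.zeros(50), np.ones(50)])
cost = normal_mean(series, variance=1.0)
segment_cost = cost(0, 50)  # cost of treating indices [0, 50) as one segment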
|
Creates a segment cost function for a time series with a
Normal distribution with changing variance
Args:
data (:obj:`list` of float): 1D time series data
mean (float): mean
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the ending index. Returns the cost of that segment.
|
def normal_var(data, mean):
if not isinstance(data, np.ndarray):
data = np.array(data)
cumm = [0.0]
cumm.extend(np.cumsum(np.power(np.abs(data - mean), 2)))
def cost(s, t):
dist = float(t - s)
diff = cumm[t] - cumm[s]
return dist * np.log(diff/dist)
return cost
| 718,441
|
Creates a segment cost function for a time series with a
Normal distribution with changing mean and variance
Args:
data (:obj:`list` of float): 1D time series data
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the ending index. Returns the cost of that segment.
|
def normal_meanvar(data):
data = np.hstack(([0.0], np.array(data)))
cumm = np.cumsum(data)
cumm_sq = np.cumsum([val**2 for val in data])
def cost(s, t):
ts_i = 1.0 / (t-s)
mu = (cumm[t] - cumm[s]) * ts_i
sig = (cumm_sq[t] - cumm_sq[s]) * ts_i - mu**2
sig_i = 1.0 / sig
return (t-s) * np.log(sig) + (cumm_sq[t] - cumm_sq[s]) * sig_i - 2*(cumm[t] - cumm[s])*mu*sig_i + ((t-s)*mu**2)*sig_i
return cost
| 718,442
|
Creates a segment cost function for a time series with a
Poisson distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the ending index. Returns the cost of that segment.
|
def poisson(data):
data = np.hstack(([0.0], np.array(data)))
cumm = np.cumsum(data)
def cost(s, t):
diff = cumm[t]-cumm[s]
if diff == 0:
return -2 * diff * (- np.log(t-s) - 1)
else:
return -2 * diff * (np.log(diff) - np.log(t-s) - 1)
return cost
| 718,443
|
Creates a segment cost function for a time series with an
exponential distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the ending index. Returns the cost of that segment.
|
def exponential(data):
data = np.hstack(([0.0], np.array(data)))
cumm = np.cumsum(data)
def cost(s, t):
return -1*(t-s) * (np.log(t-s) - np.log(cumm[t] - cumm[s]))
return cost
| 718,444
|
Get BEL Specification
The json file this depends on is generated by belspec_yaml2json as
part of the update_specifications function
Args:
version: e.g. 2.0.0, used to build the specification filename (e.g. bel_v2_0_0.json)
|
def get_specification(version: str) -> Mapping[str, Any]:
spec_dir = config["bel"]["lang"]["specifications"]
spec_dict = {}
bel_versions = get_bel_versions()
if version not in bel_versions:
log.error("Cannot get unknown version BEL specification")
return {"error": "unknown version of BEL"}
# use this variable to find our parser file since periods aren't recommended in python module names
version_underscored = version.replace(".", "_")
json_fn = f"{spec_dir}/bel_v{version_underscored}.json"
with open(json_fn, "r") as f:
spec_dict = json.load(f)
return spec_dict
| 721,195
|
Get belspec files from Github repo
Args:
spec_dir: directory to store the BEL Specification and derived files
force: force update of BEL Specifications from Github - skipped if local files less than 1 day old
|
def github_belspec_files(spec_dir, force: bool = False):
if not force:
dtnow = datetime.datetime.utcnow()
delta = datetime.timedelta(1)
yesterday = dtnow - delta
for fn in glob.glob(f"{spec_dir}/bel*yaml"):
if datetime.datetime.fromtimestamp(os.path.getmtime(fn)) > yesterday:
log.info("Skipping BEL Specification update - specs less than 1 day old")
return
repo_url = "https://api.github.com/repos/belbio/bel_specifications/contents/specifications"
params = {}
github_access_token = os.getenv("GITHUB_ACCESS_TOKEN", "")
if github_access_token:
params = {"access_token": github_access_token}
r = requests.get(repo_url, params=params)
if r.status_code == 200:
results = r.json()
for f in results:
url = f["download_url"]
fn = os.path.basename(url)
if "yaml" not in fn and "yml" in fn:
fn = fn.replace("yml", "yaml")
r = requests.get(url, params=params, allow_redirects=True)
if r.status_code == 200:
open(f"{spec_dir}/{fn}", "wb").write(r.content)
else:
sys.exit(
f"Could not get BEL Spec file {url} from Github -- Status: {r.status_code} Msg: {r.content}"
)
else:
sys.exit(
f"Could not get BEL Spec directory listing from Github -- Status: {r.status_code} Msg: {r.content}"
)
| 721,198
|
Enhance BEL specification and save as JSON file
Load all BEL Specification YAML files and convert to JSON files
after enhancing them. Also create a bel_versions.json file with
all available BEL versions for fast loading.
Args:
yaml_fn: original YAML version of BEL Spec
json_fn: enhanced JSON version of BEL Spec
Returns:
str: version of BEL Spec
|
def belspec_yaml2json(yaml_fn: str, json_fn: str) -> str:
try:
spec_dict = yaml.load(open(yaml_fn, "r").read(), Loader=yaml.SafeLoader)
# admin-related keys
spec_dict["admin"] = {}
spec_dict["admin"]["version_underscored"] = spec_dict["version"].replace(".", "_")
spec_dict["admin"]["parser_fn"] = yaml_fn.replace(".yaml", "_parser.py")
# add relation keys list, to_short, to_long
add_relations(spec_dict)
# add function keys list, to_short, to_long
add_functions(spec_dict)
# add namespace keys list, list_short, list_long, to_short, to_long
add_namespaces(spec_dict)
enhance_function_signatures(spec_dict)
add_function_signature_help(spec_dict)
with open(json_fn, "w") as f:
json.dump(spec_dict, f)
    except Exception as e:
        log.error(
            f"Warning: BEL Specification {yaml_fn} could not be read. Cannot proceed. Error: {e}"
        )
        sys.exit()
return spec_dict["version"]
| 721,199
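Hypothetical usage (assumes bel_v2_0_0.yaml exists and the add_* helpers used above are importable):

version = belspec_yaml2json("bel_v2_0_0.yaml", "bel_v2_0_0.json")
print(version)   # e.g. "2.0.0"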
|
Add relation keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added relation keys
|
def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["relations"]["list"] = []
spec_dict["relations"]["list_short"] = []
spec_dict["relations"]["list_long"] = []
spec_dict["relations"]["to_short"] = {}
spec_dict["relations"]["to_long"] = {}
for relation_name in spec_dict["relations"]["info"]:
abbreviated_name = spec_dict["relations"]["info"][relation_name]["abbreviation"]
spec_dict["relations"]["list"].extend((relation_name, abbreviated_name))
spec_dict["relations"]["list_long"].append(relation_name)
spec_dict["relations"]["list_short"].append(abbreviated_name)
spec_dict["relations"]["to_short"][relation_name] = abbreviated_name
spec_dict["relations"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["relations"]["to_long"][abbreviated_name] = relation_name
spec_dict["relations"]["to_long"][relation_name] = relation_name
return spec_dict
| 721,201
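A self-contained sketch of the lookup tables this builds, using a minimal made-up spec_dict. Note that to_short and to_long also map each form to itself, so either the long or short name can be looked up:

spec = {"relations": {"info": {"increases": {"abbreviation": "->"}}}}
spec = add_relations(spec)
assert spec["relations"]["to_long"]["->"] == "increases"
assert spec["relations"]["to_short"]["increases"] == "->"
assert spec["relations"]["to_short"]["->"] == "->"   # identity mapping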
|
Add function keys to spec_dict
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: bel specification dictionary with added function keys
|
def add_functions(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
# Class 'Mapping' does not define '__setitem__', so the '[]' operator cannot be used on its instances
spec_dict["functions"]["list"] = []
spec_dict["functions"]["list_long"] = []
spec_dict["functions"]["list_short"] = []
spec_dict["functions"]["primary"] = {}
spec_dict["functions"]["primary"]["list_long"] = []
spec_dict["functions"]["primary"]["list_short"] = []
spec_dict["functions"]["modifier"] = {}
spec_dict["functions"]["modifier"]["list_long"] = []
spec_dict["functions"]["modifier"]["list_short"] = []
spec_dict["functions"]["to_short"] = {}
spec_dict["functions"]["to_long"] = {}
for func_name in spec_dict["functions"]["info"]:
abbreviated_name = spec_dict["functions"]["info"][func_name]["abbreviation"]
spec_dict["functions"]["list"].extend((func_name, abbreviated_name))
spec_dict["functions"]["list_long"].append(func_name)
spec_dict["functions"]["list_short"].append(abbreviated_name)
if spec_dict["functions"]["info"][func_name]["type"] == "primary":
spec_dict["functions"]["primary"]["list_long"].append(func_name)
spec_dict["functions"]["primary"]["list_short"].append(abbreviated_name)
else:
spec_dict["functions"]["modifier"]["list_long"].append(func_name)
spec_dict["functions"]["modifier"]["list_short"].append(abbreviated_name)
spec_dict["functions"]["to_short"][abbreviated_name] = abbreviated_name
spec_dict["functions"]["to_short"][func_name] = abbreviated_name
spec_dict["functions"]["to_long"][abbreviated_name] = func_name
spec_dict["functions"]["to_long"][func_name] = func_name
return spec_dict
| 721,202
|
Enhance function signatures
Add required and optional objects to signatures objects for semantic validation
support.
Args:
spec_dict (Mapping[str, Any]): bel specification dictionary
Returns:
Mapping[str, Any]: return enhanced bel specification dict
|
def enhance_function_signatures(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
for func in spec_dict["functions"]["signatures"]:
for i, sig in enumerate(spec_dict["functions"]["signatures"][func]["signatures"]):
args = sig["arguments"]
req_args = []
pos_args = []
opt_args = []
mult_args = []
for arg in args:
# Multiple argument types
if arg.get("multiple", False):
if arg["type"] in ["Function", "Modifier"]:
mult_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
# Complex signature has this
mult_args.append(arg["type"])
# Optional, position dependent - will be added after req_args based on order in bel_specification
elif arg.get("optional", False) and arg.get("position", False):
if arg["type"] in ["Function", "Modifier"]:
pos_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
pos_args.append(arg["type"])
# Optional, position independent
elif arg.get("optional", False):
if arg["type"] in ["Function", "Modifier"]:
opt_args.extend(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
opt_args.append(arg["type"])
# Required arguments, position dependent
else:
if arg["type"] in ["Function", "Modifier"]:
req_args.append(arg.get("values", []))
elif arg["type"] in ["StrArgNSArg", "NSArg", "StrArg"]:
req_args.append(arg["type"])
spec_dict["functions"]["signatures"][func]["signatures"][i]["req_args"] = copy.deepcopy(
req_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["pos_args"] = copy.deepcopy(
pos_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i]["opt_args"] = copy.deepcopy(
opt_args
)
spec_dict["functions"]["signatures"][func]["signatures"][i][
"mult_args"
] = copy.deepcopy(mult_args)
return spec_dict
| 721,204
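A sketch with a hypothetical single-signature spec fragment, showing how arguments are sorted into the req/pos/opt/mult buckets:

sig = {"arguments": [
    {"type": "NSArg"},                          # required, position dependent
    {"type": "StrArg", "optional": True},       # optional, position independent
    {"type": "Modifier", "multiple": True,
     "values": ["loc", "var"]},                 # multiple allowed
]}
spec = {"functions": {"signatures": {"p": {"signatures": [sig]}}}}
spec = enhance_function_signatures(spec)
s = spec["functions"]["signatures"]["p"]["signatures"][0]
print(s["req_args"], s["opt_args"], s["mult_args"])
# ['NSArg'] ['StrArg'] ['loc', 'var']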
|
Find BEL function or argument at cursor location
Args:
belstr: BEL String used to create the completion_text
ast (Mapping[str, Any]): AST (dict) of BEL String
cursor_loc (int): given cursor location from input field
cursor_loc starts at 0, think of it like a block cursor covering each char
result: used to recursively return the result
Returns:
result dict
|
def cursor(
belstr: str, ast: AST, cursor_loc: int, result: Mapping[str, Any] = None
) -> Mapping[str, Any]:
log.debug(f"SubAST: {json.dumps(ast, indent=4)}")
# Recurse down through subject, object, nested to functions
log.debug(f"Cursor keys {ast.keys()}, BELStr: {belstr}")
if len(belstr) == 0:
return {"type": "Function", "replace_span": (0, 0), "completion_text": ""}
if "relation" in ast and in_span(cursor_loc, ast["relation"]["span"]):
log.debug("In relation")
completion_text = belstr[ast["relation"]["span"][0] : cursor_loc + 1]
return {
"type": "Relation",
"replace_span": ast["relation"]["span"],
"completion_text": completion_text,
}
# Handle subject, object and nested keys in tree
elif "span" not in ast and isinstance(ast, dict):
for key in ast:
if key in ["subject", "object", "nested"]:
log.debug(f"Recursing Keys {key}")
result = cursor(belstr, ast[key], cursor_loc, result=result)
if result:
return result
# Matches Functions, NSArgs and StrArgs/StrArgNSArg
if "span" in ast and in_span(cursor_loc, ast["span"]):
log.debug("Inside subject/object subAST")
if "function" in ast:
name_span = ast["function"]["name_span"]
if in_span(cursor_loc, name_span):
return {
"type": "Function",
"replace_span": name_span,
"completion_text": belstr[name_span[0] : cursor_loc + 1],
}
for idx, arg in enumerate(ast["args"]):
if (
cursor_loc == ast["function"]["parens_span"][0]
and ast["function"]["parens_span"][1] == -1
):
return {
"type": "StrArg", # default type if unknown
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": "",
}
elif in_span(cursor_loc, arg["span"]):
log.debug(
f'In argument span {arg["span"]} Cursor_loc: {cursor_loc}'
)
if arg["type"] == "Function":
if in_span(cursor_loc, arg["function"]["name_span"]):
log.debug("Found replace_span in args: Function type")
return {
"type": "Function",
"replace_span": arg["function"][
"name_span"
], # replace function name only
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
"completion_text": belstr[
arg["function"]["name_span"][0] : cursor_loc + 1
],
}
else:
log.debug(f'Recursing Function {arg["span"]}')
result = cursor(belstr, arg, cursor_loc, result=result)
elif arg["type"] == "NSArg":
result = {
"type": "NSArg",
"replace_span": arg["span"], # replace entire nsarg
"arg_idx": idx,
"args": copy.deepcopy(ast["args"]),
"parent_function": ast["function"]["name"],
}
# Filter on namespace and query on ns_val chars up to cursor_loc
if in_span(cursor_loc, arg["nsarg"]["ns_val_span"]):
result["namespace"] = arg["nsarg"][
"ns"
] # provide namespace for nsarg filtering
result["completion_text"] = belstr[
arg["nsarg"]["ns_val_span"][0] : cursor_loc + 1
]
# Query on nsarg chars up to cursor_loc
else:
result["completion_text"] = belstr[
arg["nsarg"]["ns_span"][0] : cursor_loc + 1
]
log.debug(f"Found replace_span in args: NSArg {result}")
return result
elif (
arg["type"] == "StrArg"
): # in case this is a default namespace StrArg
if (
arg["span"][0] == arg["span"][1]
): # handle case like p() cursor=2
completion_text = arg["arg"]
else:
completion_text = belstr[arg["span"][0] : cursor_loc + 1]
return {
"type": "StrArg",
"arg_idx": idx,
"replace_span": arg["span"], # replace entire strarg
"parent_function": ast["function"]["name"],
"completion_text": completion_text.lstrip(),
}
return result
| 721,236
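A usage sketch pairing cursor() with get_ast_dict() from the parser functions below. Cursor location 8 sits on the 'K' of AKT1, i.e. inside the namespace value span, so an NSArg result with a namespace filter is expected:

belstr = "p(HGNC:AKT1)"
ast, errors = get_ast_dict(belstr)
result = cursor(belstr, ast, cursor_loc=8)
print(result["type"], result["namespace"], result["completion_text"])
# expected: NSArg HGNC AK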
|
Namespace completions
Args:
    completion_text: completion string (prefix to match)
entity_types: used to filter namespace search results
bel_spec: used to search default namespaces
namespace: used to filter namespace search results
species_id: used to filter namespace search results
bel_fmt: used to select full name or abbrev for default namespaces
size: how many completions to return
Results:
list of replacement text objects
|
def nsarg_completions(
completion_text: str,
entity_types: list,
bel_spec: BELSpec,
namespace: str,
species_id: str,
bel_fmt: str,
size: int,
):
minimal_nsarg_completion_len = 1
species = [species_id]
namespaces = [namespace]
replace_list = []
if len(completion_text) >= minimal_nsarg_completion_len:
# Use BEL.bio API module if running bel module in BEL.bio API, otherwise call BEL.bio API endpoint
# is there a better way to handle this?
url = f'{config["bel_api"]["servers"]["api_url"]}/terms/completions/{url_path_param_quoting(completion_text)}'
params = {
"size": size,
"entity_types": entity_types,
"namespaces": namespaces,
"species": species,
}
r = get_url(url, params=params)
if r.status_code == 200:
ns_completions = r.json()
else:
log.error(f"Status code of {r.status_code} for {url}")
ns_completions = {}
for complete in ns_completions.get("completions", []):
replace_list.append(
{
"replacement": complete["id"],
"label": f"{complete['id']} ({complete['label']})",
"highlight": complete["highlight"][-1],
"type": "NSArg",
}
)
# Check default namespaces
for entity_type in entity_types:
default_namespace = bel_spec["namespaces"].get(entity_type, [])
if default_namespace:
for obj in default_namespace["info"]:
replacement = None
if bel_fmt == "long" and re.match(
completion_text, obj["name"], re.IGNORECASE
):
replacement = obj["name"]
elif bel_fmt in ["short", "medium"] and re.match(
completion_text, obj["abbreviation"], re.IGNORECASE
):
replacement = obj["abbreviation"]
if replacement:
highlight = replacement.replace(
completion_text, f"<em>{completion_text}</em>"
)
replace_list.insert(
0,
{
"replacement": replacement,
"label": replacement,
"highlight": highlight,
"type": "NSArg",
},
)
return replace_list[:size]
| 721,237
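Hypothetical call — requires a reachable BEL.bio API configured under bel_api.servers.api_url; all argument values below are illustrative:

replacements = nsarg_completions(
    completion_text="AKT",
    entity_types=["Protein"],
    bel_spec=bel_spec,      # a loaded BEL specification
    namespace="HGNC",
    species_id="TAX:9606",
    bel_fmt="medium",
    size=10,
)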
|
Filter BEL relations by prefix
Args:
    completion_text: completion string (prefix to match)
    bel_spec: BEL specification
    bel_fmt: short, medium, long BEL formats
    size: how many completions to return
Returns:
list: list of BEL relations that match prefix
|
def relation_completions(
completion_text: str, bel_spec: BELSpec, bel_fmt: str, size: int
) -> list:
if bel_fmt == "short":
relation_list = bel_spec["relations"]["list_short"]
else:
relation_list = bel_spec["relations"]["list_long"]
matches = []
for r in relation_list:
if re.match(completion_text, r):
matches.append(r)
replace_list = []
for match in matches:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
replace_list.append(
{
"replacement": match,
"label": match,
"highlight": highlight,
"type": "Relation",
}
)
return replace_list[:size]
| 721,238
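A self-contained sketch with a stubbed two-relation spec:

bel_spec = {"relations": {"list_short": ["->", "-|"],
                          "list_long": ["increases", "decreases"]}}
print(relation_completions("inc", bel_spec, "long", 10))
# [{'replacement': 'increases', 'label': 'increases',
#   'highlight': '<em>inc</em>reases', 'type': 'Relation'}]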
|
Filter BEL functions by prefix
Args:
    completion_text: completion string (prefix to match)
    bel_spec: BEL specification
    function_list: optional list of function names to complete against
    bel_fmt: short, medium, long BEL formats
    size: how many completions to return
Returns:
list: list of BEL functions that match prefix
|
def function_completions(
completion_text: str,
bel_spec: BELSpec,
function_list: list,
bel_fmt: str,
size: int,
) -> list:
# Convert provided function list to correct bel_fmt
if isinstance(function_list, list):
if bel_fmt in ["short", "medium"]:
function_list = [
bel_spec["functions"]["to_short"][fn] for fn in function_list
]
else:
function_list = [
bel_spec["functions"]["to_long"][fn] for fn in function_list
]
elif bel_fmt in ["short", "medium"]:
function_list = bel_spec["functions"]["primary"]["list_short"]
else:
function_list = bel_spec["functions"]["primary"]["list_long"]
matches = []
for f in function_list:
escaped_completion_text = completion_text.replace(r"(", r"\(").replace(
r")", r"\)"
)
log.debug(f"Completion match: {escaped_completion_text} F: {f}")
if re.match(escaped_completion_text, f):
matches.append(f)
replace_list = []
for match in matches:
if completion_text:
highlight = match.replace(completion_text, f"<em>{completion_text}</em>")
else:
highlight = completion_text
replace_list.append(
{
"replacement": match,
"label": f"{match}()",
"highlight": highlight,
"type": "Function",
}
)
return replace_list[:size]
| 721,239
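A similar sketch for functions; with function_list=None and bel_fmt="long", the primary long-form list from the spec is used:

bel_spec = {"functions": {"primary": {"list_long": ["proteinAbundance", "activity"],
                                      "list_short": ["p", "act"]}}}
print(function_completions("act", bel_spec, None, "long", 5))
# [{'replacement': 'activity', 'label': 'activity()',
#   'highlight': '<em>act</em>ivity', 'type': 'Function'}]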
|
Create completions to return given replacement list
Args:
replace_list: list of completion replacement values
belstr: BEL String
replace_span: start, stop of belstr to replace
completion_text: text to use for completion - used for creating highlight
Returns:
[{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": highlight,
"label": label,
}]
|
def add_completions(
replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
completions = []
for r in replace_list:
# if '(' not in belstr:
# replacement = f'{r["replacement"]}()'
# cursor_loc = len(replacement) - 1 # inside parenthesis
# elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:
if len(belstr) > 0:
belstr_end = len(belstr) - 1
else:
belstr_end = 0
log.debug(
f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
)
# Put a space between comma and following function arg
if (
r["type"] == "Function"
and replace_span[0] > 0
and belstr[replace_span[0] - 1] == ","
):
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ f"{r['replacement']}()"
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
)
        # Put a space between comma and following NSArg or StrArg
elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
log.debug("prior char is a comma")
replacement = (
belstr[0 : replace_span[0]]
+ " "
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
# Add function to end of belstr
elif r["type"] == "Function" and replace_span[1] >= belstr_end:
replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
cursor_loc = len(replacement) - 1 # inside parenthesis
log.debug(f"Replacement: {replacement}")
# Insert replacement in beginning or middle of belstr
else:
replacement = (
belstr[0 : replace_span[0]]
+ r["replacement"]
+ belstr[replace_span[1] + 1 :]
)
cursor_loc = len(
belstr[0 : replace_span[0]] + r["replacement"]
) # move cursor just past replacement
completions.append(
{
"replacement": replacement,
"cursor_loc": cursor_loc,
"highlight": r["highlight"],
"label": r["label"],
}
)
return completions
| 721,241
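A sketch of the append-a-function-at-the-end branch: the replacement gains trailing parentheses and the cursor lands just inside them:

replace_list = [{"replacement": "activity", "label": "activity()",
                 "highlight": "<em>act</em>ivity", "type": "Function"}]
completions = add_completions(replace_list, "act", (0, 2), "act")
print(completions[0]["replacement"], completions[0]["cursor_loc"])
# expected: activity() 9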
|
Get BEL Assertion completions
Args:
    belstr: BEL string or partial BEL string
    cursor_loc: cursor location in belstr (0-based)
    bel_spec: BEL specification
    bel_comp: BEL component type when completing a single field (e.g. subject, object)
    bel_fmt: short, medium, long BEL formats
    species_id: species id used to filter namespace completions
    size: how many completions to return
Results:
    (completion_text, completions, function_help, spans)
|
def get_completions(
belstr: str,
cursor_loc: int,
bel_spec: BELSpec,
bel_comp: str,
bel_fmt: str,
species_id: str,
size: int,
):
ast, errors = pparse.get_ast_dict(belstr)
spans = pparse.collect_spans(ast)
completion_text = ""
completions = []
function_help = []
log.debug(f"Cursor location BELstr: {belstr} Cursor idx: {cursor_loc}")
cursor_results = cursor(belstr, ast, cursor_loc)
log.debug(f"Cursor results: {cursor_results}")
if not cursor_results:
log.debug("Cursor results is empty")
return (completion_text, completions, function_help, spans)
completion_text = cursor_results.get("completion_text", "")
replace_span = cursor_results["replace_span"]
namespace = cursor_results.get("namespace", None)
if "parent_function" in cursor_results:
parent_function = cursor_results["parent_function"]
function_help = bel_specification.get_function_help(
cursor_results["parent_function"], bel_spec
)
args = cursor_results.get("args", [])
arg_idx = cursor_results.get("arg_idx")
replace_list = arg_completions(
completion_text,
parent_function,
args,
arg_idx,
bel_spec,
bel_fmt,
species_id,
namespace,
size,
)
elif cursor_results["type"] == "Function":
function_list = None
replace_list = function_completions(
completion_text, bel_spec, function_list, bel_fmt, size
)
elif cursor_results["type"] == "Relation":
replace_list = relation_completions(completion_text, bel_spec, bel_fmt, size)
completions.extend(
add_completions(replace_list, belstr, replace_span, completion_text)
)
return completion_text, completions, function_help, spans
| 721,242
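End-to-end hypothetical usage — assumes a loaded BEL specification and, for NSArg completions, a reachable terminology API:

text, completions, function_help, spans = get_completions(
    belstr="p(HGNC:AKT1) incr",
    cursor_loc=16,
    bel_spec=bel_spec,
    bel_comp="",
    bel_fmt="long",
    species_id="TAX:9606",
    size=10,
)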
|
Scan BEL string to map parens, quotes, commas
Args:
bels: bel string as an array of characters
errors: list of error tuples ('<type>', '<msg>')
Returns:
(char_locs, errors): character locations and errors
|
def parse_chars(bels: list, errors: Errors) -> Tuple[CharLocs, Errors]:
pstack, qstack, nested_pstack = [], [], []
parens, nested_parens, quotes, commas = {}, {}, {}, {}
notquoted_flag = True
for i, c in enumerate(bels):
        prior_char = i - 1
        # print('BEL', prior_char, bels[prior_char])
# Find starting quote
if c == '"' and bels[prior_char] != "\\" and len(qstack) == 0:
qstack.append(i)
notquoted_flag = False
# Find closing quote
elif c == '"' and bels[prior_char] != "\\":
quotes[qstack.pop()] = i
notquoted_flag = True
# Find all escaped quotes outside of quoted string
elif c == '"' and bels[prior_char] == "\\" and len(qstack) == 0:
errors.append(
(
"ERROR",
f"Escaped quote outside of quoted string at location: {i - 1}",
(i - 1, i - 1),
)
)
# Find all nested object opening parens
elif notquoted_flag and c == "(" and bels[prior_char] == " ":
if len(nested_pstack) > 1:
errors.append(
(
"ERROR",
f"More than one nested parenthesis or left parenthesis following a space character",
(i, i),
)
)
nested_pstack.append(i)
# Find all opening parens
elif notquoted_flag and c == "(" and bels[prior_char] not in ["\\"]:
pstack.append(i)
# Find all closing parens
elif notquoted_flag and c == ")" and bels[prior_char] != "\\":
if len(pstack):
if len(pstack) > 1:
parens[pstack.pop()] = (i, "child")
else:
parens[pstack.pop()] = (i, "top")
elif len(nested_pstack):
nested_parens[nested_pstack.pop()] = (i, "top")
else:
errors.append(
(
"ERROR",
f"Missing left parenthesis for right parenthesis at location {i}",
(i, i),
)
)
# Find comma outside of quoted string
elif notquoted_flag and c == "," and len(qstack) == 0:
sparen = pstack[-1]
if sparen not in commas:
commas[sparen] = [i]
else:
commas[sparen].append(i)
while len(pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for left parenthesis at location {pstack[-1]}",
(pstack[-1], pstack[-1]),
)
)
if len(pstack) > 1:
parens[pstack.pop()] = (-1, "child")
else:
parens[pstack.pop()] = (-1, "top")
while len(nested_pstack):
errors.append(
(
"ERROR",
f"Missing right parenthesis for nested object left parenthesis at location {nested_pstack[-1]}",
(nested_pstack[-1], nested_pstack[-1]),
)
)
nested_parens[nested_pstack.pop()] = (-1, "top")
if len(qstack):
missing_quote = qstack.pop()
errors.append(
(
"ERROR",
f"Missing right quote for left quote at location {missing_quote}",
(missing_quote, missing_quote),
)
)
return (
{
"parens": parens,
"nested_parens": nested_parens,
"quotes": quotes,
"commas": commas,
},
errors,
)
| 721,247
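A quick check of the character-location output on a small assertion (positions are 0-based character indices):

bels = list('p(HGNC:AKT1, "loss of function")')
char_locs, errors = parse_chars(bels, [])
print(char_locs["parens"])   # {1: (31, 'top')}
print(char_locs["quotes"])   # {13: 30}
print(char_locs["commas"])   # {1: [11]}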
|
Parse functions from BEL using paren, comma, quote character locations
Args:
bels: BEL string as list of chars
char_locs: paren, comma, quote character locations
errors: Any error messages generated during the parse
Returns:
(functions, errors): function names and locations and error messages
|
def parse_functions(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
parens = char_locs["parens"]
# Handle partial top-level function name
if not parens:
bels_len = len(bels) - 1
span = (0, bels_len)
parsed[span] = {
"name": "".join(bels),
"type": "Function",
"span": span,
"name_span": (span),
"function_level": "top",
}
return parsed, errors
for sp in sorted(parens): # sp = starting paren, ep = ending_paren
ep, function_level = parens[sp]
# Functions can't have a space between function name and left paren
if bels[sp - 1] == " ":
continue
# look in front of start paren for function name
for i in range(sp - 1, 0, -1):
if bels[i] in [" ", ",", "("]: # function name upstream boundary chars
if i < sp - 1:
if ep == -1:
span = (i + 1, len(bels) - 1)
else:
span = (i + 1, ep)
parsed[span] = {
"name": "".join(bels[i + 1 : sp]),
"type": "Function",
"span": span,
"name_span": (i + 1, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
break
else:
if ep == -1:
span = (0, len(bels) - 1)
else:
span = (0, ep)
parsed[span] = {
"name": "".join(bels[0:sp]),
"type": "Function",
"span": span,
"name_span": (0, sp - 1),
"parens_span": (sp, ep),
"function_level": function_level,
}
return parsed, errors
| 721,248
|
Parse arguments from functions
Args:
bels: BEL string as list of chars
char_locs: char locations for parens, commas and quotes
parsed: function locations
errors: error messages
Returns:
(functions, errors): function and arg locations plus error messages
|
def parse_args(
bels: list, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
commas = char_locs["commas"]
# Process each span key in parsed from beginning
for span in parsed:
        if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
            continue  # skip non-functions and partial functions without parens (no args to parse)
        sp, ep = parsed[span]["parens_span"]
        # calculate args_end position
        if ep == -1:  # supports bel completion
            args_end = len(bels) - 1
        else:
            args_end = ep - 1
# Parse arguments
args = []
arg_start = sp + 1
each_arg_end_list = sorted([end - 1 for end in commas.get(sp, [])] + [args_end])
for arg_end in each_arg_end_list:
# log.debug(f'Arg_start: {arg_start} Arg_end: {arg_end}')
# Skip blanks at beginning of argument
while arg_start < args_end and bels[arg_start] == " ":
arg_start += 1
# Trim arg_end (e.g. HGNC:AKT1 , HGNC:EGF) - if there are spaces before comma
trimmed_arg_end = arg_end
while trimmed_arg_end > arg_start and bels[trimmed_arg_end] == " ":
trimmed_arg_end -= 1
if trimmed_arg_end < arg_start:
trimmed_arg_end = arg_start
arg = "".join(bels[arg_start : trimmed_arg_end + 1])
# log.debug(f'Adding arg to args: {arg_start} {trimmed_arg_end}')
args.append({"arg": arg, "span": (arg_start, trimmed_arg_end)})
arg_start = arg_end + 2
parsed[span]["args"] = args
return parsed, errors
| 721,249
|
Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
(parsed, errors): parsed, arguments with arg types plus error messages
|
def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:
func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
for i, arg in enumerate(parsed[span]["args"]):
nsarg_matches = nsarg_pattern.match(arg["arg"])
if func_pattern.match(arg["arg"]):
parsed[span]["args"][i].update({"type": "Function"})
elif nsarg_matches:
(start, end) = arg["span"]
ns = nsarg_matches.group(1)
ns_val = nsarg_matches.group(2)
ns_span = nsarg_matches.span(1)
ns_span = (ns_span[0] + start, ns_span[1] + start - 1)
ns_val_span = nsarg_matches.span(2)
ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)
parsed[span]["args"][i].update(
{
"type": "NSArg",
"ns": ns,
"ns_span": ns_span,
"ns_val": ns_val,
"ns_val_span": ns_val_span,
}
)
else:
parsed[span]["args"][i].update({"type": "StrArg"})
return parsed, errors
| 721,250
|
Parse relations from BEL string
Args:
belstr: BEL string as one single string (not list of chars)
char_locs: paren, comma and quote char locations
parsed: data structure for parsed functions, relations, nested
errors: error messages
Returns:
(parsed, errors):
|
def parse_relations(
belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors
) -> Tuple[Parsed, Errors]:
quotes = char_locs["quotes"]
quoted_range = set([i for start, end in quotes.items() for i in range(start, end)])
for match in relations_pattern_middle.finditer(belstr):
(start, end) = match.span(1)
# log.debug(f'Relation-middle {match}')
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
            test_range = {start}
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
for match in relations_pattern_end.finditer(belstr):
(start, end) = match.span(1)
log.debug(f"Relation-end {match}")
end = end - 1 # adjust end to match actual end character index
if start != end:
test_range = set(range(start, end))
else:
            test_range = {start}
# Skip if relation overlaps with quoted string
if test_range.intersection(quoted_range):
continue
span_key = (start, end)
parsed[span_key] = {
"type": "Relation",
"name": match.group(1),
"span": (start, end),
}
return parsed, errors
| 721,251
|
Collect flattened list of spans of BEL syntax types
Provide simple list of BEL syntax type spans for highlighting.
Function names, NSargs, NS prefix, NS value and StrArgs will be
tagged.
Args:
ast: AST of BEL assertion
Returns:
List[Tuple[str, Tuple[int, int]]]: list of span objects (<type>, (<start>, <end>))
|
def collect_spans(ast: AST) -> List[Tuple[str, Tuple[int, int]]]:
spans = []
if ast.get("subject", False):
spans.extend(collect_spans(ast["subject"]))
if ast.get("object", False):
spans.extend(collect_spans(ast["object"]))
if ast.get("nested", False):
spans.extend(collect_spans(ast["nested"]))
if ast.get("function", False):
log.debug(f"Processing function")
spans.append(("Function", ast["function"]["name_span"]))
log.debug(f"Spans: {spans}")
if ast.get("args", False):
for idx, arg in enumerate(ast["args"]):
log.debug(f"Arg {arg}")
if arg.get("function", False):
log.debug(f"Recursing on arg function")
results = collect_spans(arg)
log.debug(f"Results {results}")
spans.extend(results) # Recurse arg function
elif arg.get("nsarg", False):
log.debug(f"Processing NSArg Arg {arg}")
spans.append(("NSArg", arg["span"]))
spans.append(("NSPrefix", arg["nsarg"]["ns_span"]))
spans.append(("NSVal", arg["nsarg"]["ns_val_span"]))
elif arg["type"] == "StrArg":
spans.append(("StrArg", arg["span"]))
log.debug(f"Spans: {spans}")
return spans
| 721,254
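A sketch of the flattened span list for a simple term; spans are inclusive (start, end) character indices, and the exact output depends on the parse:

ast, _ = get_ast_dict("p(HGNC:AKT1)")
print(collect_spans(ast))
# expected along the lines of:
# [('Function', (0, 0)), ('NSArg', (2, 10)),
#  ('NSPrefix', (2, 5)), ('NSVal', (7, 10))]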
|
Check full parse for errors
Args:
parsed:
errors:
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
|
def parsed_top_level_errors(parsed, errors, component_type: str = "") -> Errors:
# Error check
fn_cnt = 0
rel_cnt = 0
nested_cnt = 0
for key in parsed:
if parsed[key]["type"] == "Function":
fn_cnt += 1
if parsed[key]["type"] == "Relation":
rel_cnt += 1
if parsed[key]["type"] == "Nested":
nested_cnt += 1
if not component_type:
if nested_cnt > 1:
errors.append(
(
"Error",
"Too many nested objects - can only have one per BEL Assertion",
)
)
if nested_cnt:
if rel_cnt > 2:
errors.append(
(
"Error",
"Too many relations - can only have two in a nested BEL Assertion",
)
)
elif fn_cnt > 4:
errors.append(("Error", "Too many BEL subject and object candidates"))
else:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a BEL Assertion",
)
)
elif fn_cnt > 2:
errors.append(("Error", "Too many BEL subject and object candidates"))
elif component_type == "subject":
if rel_cnt > 0:
errors.append(
("Error", "Too many relations - cannot have any in a BEL Subject")
)
elif fn_cnt > 1:
errors.append(
("Error", "Too many BEL subject candidates - can only have one")
)
elif component_type == "object":
if nested_cnt:
if rel_cnt > 1:
errors.append(
(
"Error",
"Too many relations - can only have one in a nested BEL object",
)
)
elif fn_cnt > 2:
errors.append(
(
"Error",
"Too many BEL subject and object candidates in a nested BEL object",
)
)
else:
            if rel_cnt > 0:
                errors.append(
                    ("Error", "Too many relations - cannot have any in a BEL Object")
                )
            elif fn_cnt > 1:
                errors.append(
                    ("Error", "Too many BEL object candidates - can only have one")
                )
return errors
| 721,257
|
Convert parsed data struct to AST dictionary
Args:
parsed:
errors:
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
|
def parsed_to_ast(parsed: Parsed, errors: Errors, component_type: str = ""):
ast = {}
sorted_keys = sorted(parsed.keys())
# Setup top-level tree
for key in sorted_keys:
if parsed[key]["type"] == "Nested":
nested_component_stack = ["subject", "object"]
if component_type:
component_stack = [component_type]
else:
component_stack = ["subject", "object"]
for key in sorted_keys:
if parsed[key]["type"] == "Function" and parsed[key]["function_level"] == "top":
ast[component_stack.pop(0)] = parsed_function_to_ast(parsed, key)
elif parsed[key]["type"] == "Relation" and "relation" not in ast:
ast["relation"] = {
"name": parsed[key]["name"],
"type": "Relation",
"span": key,
}
elif parsed[key]["type"] == "Nested":
ast["nested"] = {}
for nested_key in sorted_keys:
if nested_key <= key:
continue
if (
parsed[nested_key]["type"] == "Function"
and parsed[nested_key]["function_level"] == "top"
):
ast["nested"][
nested_component_stack.pop(0)
] = parsed_function_to_ast(parsed, nested_key)
elif (
parsed[nested_key]["type"] == "Relation"
and "relation" not in ast["nested"]
):
ast["nested"]["relation"] = {
"name": parsed[nested_key]["name"],
"type": "Relation",
"span": parsed[nested_key]["span"],
}
return ast, errors
return ast, errors
| 721,258
|
Convert BEL string to AST dictionary
Args:
belstr: BEL string
component_type: Empty string or 'subject' or 'object' to indicate that we
are parsing the subject or object field input
|
def get_ast_dict(belstr, component_type: str = ""):
errors = []
parsed = {}
bels = list(belstr)
char_locs, errors = parse_chars(bels, errors)
    parsed, errors = parse_functions(bels, char_locs, parsed, errors)
parsed, errors = parse_args(bels, char_locs, parsed, errors)
parsed, errors = arg_types(parsed, errors)
parsed, errors = parse_relations(belstr, char_locs, parsed, errors)
parsed, errors = parse_nested(bels, char_locs, parsed, errors)
errors = parsed_top_level_errors(parsed, errors)
ast, errors = parsed_to_ast(parsed, errors, component_type=component_type)
return ast, errors
| 721,259
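A hypothetical end-to-end parse showing the shape of the resulting AST dictionary:

ast, errors = get_ast_dict("p(HGNC:AKT1) increases act(p(HGNC:EGF))")
print(errors)                               # expected: []
print(ast["relation"]["name"])              # increases
print(ast["subject"]["function"]["name"])   # p
print(ast["object"]["function"]["name"])    # act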
|
Convert dict AST to object AST Function
Args:
ast_fn: AST object Function
d: AST as dictionary
spec: BEL Specification
Return:
ast_fn
|
def add_ast_fn(d, spec, parent_function=None):
if d["type"] == "Function":
ast_fn = Function(d["function"]["name"], spec, parent_function=parent_function)
for arg in d["args"]:
if arg["type"] == "Function":
ast_fn.add_argument(add_ast_fn(arg, spec, parent_function=ast_fn))
elif arg["type"] == "NSArg":
ast_fn.add_argument(
NSArg(arg["nsarg"]["ns"], arg["nsarg"]["ns_val"], ast_fn)
)
elif arg["type"] == "StrArg":
ast_fn.add_argument(StrArg(arg["arg"], ast_fn))
return ast_fn
| 721,261
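Hypothetical usage converting one parsed subtree into an object-based Function. A loaded spec is assumed, and to_string() is an assumption about the Function API:

ast_dict, _ = get_ast_dict("p(HGNC:AKT1)")
fn = add_ast_fn(ast_dict["subject"], spec)   # spec: loaded BEL specification
print(fn.to_string())                        # assumed to render "p(HGNC:AKT1)"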
|
[De]Canonicalize NSArg
Args:
    nsarg (str): namespace argument to convert, e.g. HGNC:AKT1
api_url (str): BEL.bio api url to use, e.g. https://api.bel.bio/v1
namespace_targets (Mapping[str, List[str]]): formatted as in configuration file example
canonicalize (bool): use canonicalize endpoint/namespace targets
decanonicalize (bool): use decanonicalize endpoint/namespace targets
Results:
str: converted NSArg
|
def convert_nsarg(
nsarg: str,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
) -> str:
if not api_url:
api_url = config["bel_api"]["servers"]["api_url"]
if not api_url:
log.error("Missing api url - cannot convert namespace")
return None
params = None
if namespace_targets:
namespace_targets_str = json.dumps(namespace_targets)
params = {"namespace_targets": namespace_targets_str}
if not namespace_targets:
if canonicalize:
api_url = api_url + "/terms/{}/canonicalized"
elif decanonicalize:
api_url = api_url + "/terms/{}/decanonicalized"
else:
log.warning("Missing (de)canonical flag - cannot convert namespaces")
return nsarg
else:
api_url = (
api_url + "/terms/{}/canonicalized"
) # overriding with namespace_targets
request_url = api_url.format(url_path_param_quoting(nsarg))
r = get_url(request_url, params=params, timeout=10)
if r and r.status_code == 200:
nsarg = r.json().get("term_id", nsarg)
elif not r or r.status_code == 404:
log.error(f"[de]Canonicalization endpoint missing: {request_url}")
return nsarg
| 721,263
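Hypothetical call — requires a reachable BEL API, and the term id below is illustrative:

canonical = convert_nsarg("SP:AKT1_HUMAN", canonicalize=True)
print(canonical)   # e.g. an EG:... form, depending on configured namespace targets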
|
Recursively convert namespaces of BEL Entities in BEL AST using API endpoint
Canonicalization and decanonicalization is determined by endpoint used and namespace_targets.
Args:
ast (BEL): BEL AST
    api_url (str): BEL.bio API url to use, e.g. https://api.bel.bio/v1
    namespace_targets (Mapping[str, List[str]]): (de)canonical targets for converting BEL Entities
    canonicalize (bool): use canonicalize endpoint/namespace targets
    decanonicalize (bool): use decanonicalize endpoint/namespace targets
Returns:
BEL: BEL AST
|
def convert_namespaces_ast(
ast,
api_url: str = None,
namespace_targets: Mapping[str, List[str]] = None,
canonicalize: bool = False,
decanonicalize: bool = False,
):
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
# Get normalized term if necessary
if (canonicalize and not ast.canonical) or (
decanonicalize and not ast.decanonical
):
normalized_term = convert_nsarg(
given_term_id,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
if canonicalize:
ast.canonical = normalized_term
elif decanonicalize:
ast.decanonical = normalized_term
# Update normalized term
if canonicalize:
ns, value = ast.canonical.split(":")
ast.change_nsvalue(ns, value)
        elif decanonicalize:
            ns, value = ast.decanonical.split(":")
            ast.change_nsvalue(ns, value)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
convert_namespaces_ast(
arg,
api_url=api_url,
namespace_targets=namespace_targets,
canonicalize=canonicalize,
decanonicalize=decanonicalize,
)
return ast
| 721,265
|
Recursively populate NSArg AST entries for default (de)canonical values
This was added specifically for the BEL Pipeline. It is designed to
run directly against ArangoDB and not through the BELAPI.
Args:
    ast (BEL): BEL AST
    belast: top-level BEL AST, used to accumulate species ids/labels
    species_id: species id seen so far in the recursion, used for consistency checking
Returns:
BEL: BEL AST
|
def populate_ast_nsarg_defaults(ast, belast, species_id=None):
if isinstance(ast, NSArg):
given_term_id = "{}:{}".format(ast.namespace, ast.value)
r = bel.terms.terms.get_normalized_terms(given_term_id)
ast.canonical = r["canonical"]
ast.decanonical = r["decanonical"]
r = bel.terms.terms.get_terms(ast.canonical)
if len(r) > 0:
ast.species_id = r[0].get("species_id", False)
ast.species_label = r[0].get("species_label", False)
# Check to see if species is set and if it's consistent
# if species is not consistent for the entire AST - set species_id/label
# on belast to False (instead of None)
if ast.species_id and species_id is None:
species_id = ast.species_id
belast.species.add((ast.species_id, ast.species_label))
elif ast.species_id and species_id and species_id != ast.species_id:
belast.species_id = False
belast.species_label = False
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_defaults(arg, belast, species_id)
return ast
| 721,266
|
Recursively orthologize BEL Entities in BEL AST using API endpoint
NOTE: will take the first ortholog returned in the BEL.bio API result (which may return more than one ortholog)
Args:
    ast (BEL): BEL AST
    bo: BEL object carrying the top-level AST and validation messages
    species_id (str): species id to orthologize to, e.g. TAX:10090
Returns:
BEL: BEL AST
|
def orthologize(ast, bo, species_id: str):
# if species_id == 'TAX:9606' and str(ast) == 'MGI:Sult2a1':
# import pdb; pdb.set_trace()
if not species_id:
bo.validation_messages.append(
("WARNING", "No species id was provided for orthologization")
)
return ast
if isinstance(ast, NSArg):
if ast.orthologs:
# log.debug(f'AST: {ast.to_string()} species_id: {species_id} orthologs: {ast.orthologs}')
if ast.orthologs.get(species_id, None):
orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"]
ns, value = orthologized_nsarg_val.split(":")
ast.change_nsvalue(ns, value)
ast.canonical = ast.orthologs[species_id]["canonical"]
ast.decanonical = ast.orthologs[species_id]["decanonical"]
ast.orthologized = True
bo.ast.species.add(
(species_id, ast.orthologs[species_id]["species_label"])
)
else:
bo.ast.species.add((ast.species_id, ast.species_label))
bo.validation_messages.append(
("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}")
)
elif ast.species_id:
bo.ast.species.add((ast.species_id, ast.species_label))
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
orthologize(arg, bo, species_id)
return ast
| 721,267
|
Recursively collect NSArg orthologs for BEL AST
This requires bo.collect_nsarg_norms() to be run first so NSArg.canonical is available
Args:
ast: AST at recursive point in belobj
    species: dictionary of species ids vs labels for orthologs
|
def populate_ast_nsarg_orthologs(ast, species):
ortholog_namespace = "EG"
if isinstance(ast, NSArg):
if re.match(ortholog_namespace, ast.canonical):
orthologs = bel.terms.orthologs.get_orthologs(
ast.canonical, list(species.keys())
)
for species_id in species:
if species_id in orthologs:
orthologs[species_id]["species_label"] = species[species_id]
ast.orthologs = copy.deepcopy(orthologs)
# Recursively process every NSArg by processing BELAst and Functions
if hasattr(ast, "args"):
for arg in ast.args:
populate_ast_nsarg_orthologs(arg, species)
return ast
| 721,268
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.