text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""
Author: Jon Ander Gomez Adrian (jon@dsic.upv.es, http://personales.upv.es/jon)
Version: 1.0
Date: June 2014
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
import sys
import numpy
from . import MyKernel
class MyKernelClassifier:
    """
    Classifier based on Kernel Density Estimation.

    One kernel density estimator is fitted per class; each sample is assigned
    to the class whose estimator yields the highest probability density.
    """

    def __init__(self, h=None):
        """
        :param h: kernel bandwidth; when None a heuristic based on the input
                  dimensionality is chosen at fit() time.
        """
        self.num_classes = 0
        self.dim = 0
        self.targets = None     # sorted array of distinct class labels
        self.estimators = None  # Kernel Density Estimators, one per class
        self.h = h

    # ------------------------------------------------------------------------------
    def fit(self, X, Y):
        """
        Fit one density estimator per class.

        :param X: samples, array of shape (n_samples, n_features)
        :param Y: class label of each sample, shape (n_samples,)
        """
        self.dim = X.shape[1]
        # Establish the value of 'h' if not set previously.
        # NOTE(review): heuristic default bandwidth -- presumably tuned
        # empirically; confirm before changing.
        if self.h is None:
            self.h = max(7, 2.5 * self.dim)
        self.targets = numpy.unique(Y)
        self.num_classes = len(self.targets)
        # The kernel type could be a constructor parameter, but the current
        # implementation of MyKernel.py only supports the Gaussian kernel.
        kernel = 'gaussian'
        self.estimators = []
        for k in range(self.num_classes):
            # One estimator per class, fitted only on that class's samples.
            estimator = MyKernel(kernel=kernel, bandwidth=self.h)
            estimator.fit(X[Y == self.targets[k]])
            self.estimators.append(estimator)
    # ------------------------------------------------------------------------------

    # ------------------------------------------------------------------------------
    def predict(self, X):
        """
        Return the predicted label for each sample in X: the class whose
        density estimator reports the highest log-density.
        """
        # Stack the per-class log-densities into a (num_classes, n_samples)
        # matrix and take the argmax over classes.  argmax returns the first
        # maximum, which matches the original strict-'>' tie-breaking
        # (earlier class wins on ties) while replacing the O(n*k) Python
        # inner loop with a vectorized operation.
        log_dens = numpy.vstack([self.estimators[k].score_samples(X)
                                 for k in range(self.num_classes)])
        return self.targets[log_dens.argmax(axis=0)]
    # ------------------------------------------------------------------------------
|
jonandergomez/machine_learning_for_students
|
machine_learning/MyKernelClassifier.py
|
Python
|
mit
| 2,519
|
[
"Gaussian"
] |
9763e1af459817a0fc1a87ff4075b6e3c38d5c2d6a2c17ff3e2313c565cacb6c
|
"""
This pipeline is intended to extract pixel information from T2W images.
"""
import os
import numpy as np
from protoclass.data_management import T2WModality
from protoclass.data_management import GTModality
from protoclass.preprocessing import RicianNormalization
from protoclass.preprocessing import GaussianNormalization
from protoclass.extraction import IntensitySignalExtraction
# Define the path where all the patients are
path_patients = '/data/prostate/experiments'
# Define the path of the modality to normalize
path_t2w = 'T2W'
# Define the path of the ground for the prostate
path_gt = 'GT_inv/prostate'
# Define the label of the ground-truth which will be provided
label_gt = ['prostate']
# Define the path where the information for the gaussian normalization are
path_gaussian = '/data/prostate/pre-processing/mp-mri-prostate/gaussian-t2w'
# Define the path where the information for the rician normalization are
path_rician = '/data/prostate/pre-processing/mp-mri-prostate/rician-t2w'
# Define the path to store the Tofts data
path_store = '/data/prostate/extraction/mp-mri-prostate/ise-t2w'
# ID of the patient for which we need to use the Gaussian Normalization
ID_GAUSSIAN = '387'
# Generate the different path to be later treated
path_patients_list_t2w = []
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
if os.path.isdir(os.path.join(path_patients, name))]
for id_patient in id_patient_list:
# Append for the T2W data
path_patients_list_t2w.append(os.path.join(path_patients, id_patient,
path_t2w))
# Append for the GT data - Note that we need a list of gt path
path_patients_list_gt.append([os.path.join(path_patients, id_patient,
path_gt)])
# List where to store the different minimum
for id_p, (p_t2w, p_gt) in enumerate(zip(path_patients_list_t2w,
path_patients_list_gt)):
print 'Processing {}'.format(id_patient_list[id_p])
# Remove a part of the string to have only the id
nb_patient = id_patient_list[id_p].replace('Patient ', '')
# Read the image data
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(p_t2w)
# Read the GT
gt_mod = GTModality()
gt_mod.read_data_from_path(label_gt, p_gt)
if not nb_patient == ID_GAUSSIAN:
# Rician Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_rician, pat_chg)
t2w_norm = RicianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
else:
# Gaussian Normalization
# Read the normalization information
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_norm.p'
filename = os.path.join(path_gaussian, pat_chg)
t2w_norm = GaussianNormalization.load_from_pickles(filename)
# Normalize the data
t2w_mod = t2w_norm.normalize(t2w_mod)
# Create an object to extract the data in a matrix format using
# the ground-truth
ise = IntensitySignalExtraction(t2w_mod)
# Get the data
print 'Extract the signal intensity for the ROI'
data = ise.transform(t2w_mod, ground_truth=gt_mod, cat=label_gt[0])
# Store the data
print 'Store the matrix'
# Check that the path is existing
if not os.path.exists(path_store):
os.makedirs(path_store)
pat_chg = id_patient_list[id_p].lower().replace(' ', '_') + '_ise_t2w.npy'
filename = os.path.join(path_store, pat_chg)
np.save(filename, data)
|
I2Cvb/mp-mri-prostate
|
pipeline/feature-extraction/t2w/pipeline_extraction_intensity_t2w.py
|
Python
|
mit
| 3,763
|
[
"Gaussian"
] |
2bf9b94e3e021e003155712c6d824c3eee26cd8ad337cd61f976900e319c49c8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module documentation block, rendered by ansible-doc.
DOCUMENTATION = '''
---
author:
    - "Jeroen Hoekx (@jhoekx)"
    - "Alexander Bulimov (@abulimov)"
module: lvol
short_description: Configure LVM logical volumes
description:
  - This module creates, removes or resizes logical volumes.
version_added: "1.1"
options:
  vg:
    description:
    - The volume group this logical volume is part of.
    required: true
  lv:
    description:
    - The name of the logical volume.
    required: true
  size:
    description:
    - The size of the logical volume, according to lvcreate(8) --size, by
      default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or
      according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE];
      Float values must begin with a digit.
      Resizing using percentage values was not supported prior to 2.1.
  state:
    choices: [ "present", "absent" ]
    default: present
    description:
    - Control if the logical volume exists. If C(present) the C(size) option
      is required.
    required: false
  force:
    version_added: "1.5"
    choices: [ "yes", "no" ]
    default: "no"
    description:
    - Shrink or remove operations of volumes requires this switch. Ensures that
      filesystems never get corrupted/destroyed by mistake.
    required: false
  opts:
    version_added: "2.0"
    description:
    - Free-form options to be passed to the lvcreate command
  snapshot:
    version_added: "2.1"
    description:
    - The name of the snapshot volume
    required: false
  pvs:
    version_added: "2.2"
    description:
    - Comma separated list of physical volumes e.g. /dev/sda,/dev/sdb
    required: false
  shrink:
    version_added: "2.2"
    description:
    - shrink if current size is higher than size requested
    required: false
    default: yes
notes:
  - Filesystems on top of the volume are not resized.
'''
# Usage examples shown by ansible-doc.  The "Resize ... % of VG" example had a
# typo ("vg-firefly"), which would make the lvol module fail on the missing
# required 'vg' parameter; fixed to "vg=firefly".
EXAMPLES = '''
# Create a logical volume of 512m.
- lvol: vg=firefly lv=test size=512

# Create a logical volume of 512m with disks /dev/sda and /dev/sdb
- lvol: vg=firefly lv=test size=512 pvs=/dev/sda,/dev/sdb

# Create cache pool logical volume
- lvol: vg=firefly lv=lvcache size=512m opts='--type cache-pool'

# Create a logical volume of 512g.
- lvol: vg=firefly lv=test size=512g

# Create a logical volume the size of all remaining space in the volume group
- lvol: vg=firefly lv=test size=100%FREE

# Create a logical volume with special options
- lvol: vg=firefly lv=test size=512g opts="-r 16"

# Extend the logical volume to 1024m.
- lvol: vg=firefly lv=test size=1024

# Extend the logical volume to consume all remaining space in the volume group
- lvol: vg=firefly lv=test size=+100%FREE

# Extend the logical volume to take all remaining space of the PVs
- lvol: vg=firefly lv=test size=100%PVS

# Resize the logical volume to % of VG
- lvol: vg=firefly lv=test size=80%VG force=yes

# Reduce the logical volume to 512m
- lvol: vg=firefly lv=test size=512 force=yes

# Set the logical volume to 512m and do not try to shrink if size is lower than current one
- lvol: vg=firefly lv=test size=512 shrink=no

# Remove the logical volume.
- lvol: vg=firefly lv=test state=absent force=yes

# Create a snapshot volume of the test logical volume.
- lvol: vg=firefly lv=test snapshot=snap1 size=100m
'''
import re
decimal_point = re.compile(r"(\d+)")
def mkversion(major, minor, patch):
    """Pack an LVM (major, minor, patch) version triple into one comparable int."""
    return int(patch) + 1000 * (int(minor) + 1000 * int(major))
def parse_lvs(data):
    """Parse ';'-separated `lvs` output into [{'name': ..., 'size': ...}, ...].

    Sizes are truncated to their leading integer part.
    """
    lvs = []
    for raw in data.splitlines():
        fields = raw.strip().split(';')
        # Hidden/internal LVs are reported as "[name]"; strip the brackets.
        clean_name = fields[0].replace('[', '').replace(']', '')
        size_value = int(re.match(r"(\d+)", fields[1]).group(1))
        lvs.append({'name': clean_name, 'size': size_value})
    return lvs
def parse_vgs(data):
    """Parse ';'-separated `vgs` output into dicts with name/size/free/ext_size.

    Numeric fields are truncated to their leading integer part.
    """
    def leading_int(text):
        return int(re.match(r"(\d+)", text).group(1))

    vgs = []
    for raw in data.splitlines():
        name, size, free, ext = raw.strip().split(';')[:4]
        vgs.append({'name': name,
                    'size': leading_int(size),
                    'free': leading_int(free),
                    'ext_size': leading_int(ext)})
    return vgs
def get_lvm_version(module):
    """
    Return the installed LVM version packed by mkversion(), or None when the
    `lvm version` command fails or its output cannot be parsed.
    """
    ver_cmd = module.get_bin_path("lvm", required=True)
    rc, out, err = module.run_command("%s version" % (ver_cmd))
    if rc != 0:
        return None
    # Raw string: '\s'/'\d' inside a plain literal are invalid escape
    # sequences on modern Python (DeprecationWarning since 3.6, SyntaxWarning
    # since 3.12).  The trailing date group is matched but intentionally
    # unused.
    m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
    if not m:
        return None
    return mkversion(m.group(1), m.group(2), m.group(3))
def main():
    """Entry point of the lvol module: create, remove or resize an LVM
    logical volume (or snapshot) according to the module parameters.

    Note: module.fail_json()/exit_json() terminate the process, so every
    call to them below ends the run -- control flow relies on that.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
            shrink=dict(type='bool', default='yes'),
            snapshot=dict(type='str', default=None),
            pvs=dict(type='str')
        ),
        supports_check_mode=True,
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found == None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    # size_opt/size_unit default to the lvcreate -L form in megabytes; they
    # are switched to the -l/extents form below when size is a percentage.
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # lvcreate expects space-separated physical volumes
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if not '%' in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            # Validate the numeric part: must parse as float AND start with
            # a digit (rejects e.g. '.5' or '+5' leftovers).
            try:
                float(size)
                if not size[0].isdigit(): raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        # Missing VG is only an error when we were asked to create something.
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False
    lvs = parse_lvs(current_lvs)

    # When a snapshot is requested, all existence checks apply to the
    # snapshot volume, not the origin LV.
    if snapshot is None:
        check_lv = lv
    else:
        check_lv = snapshot

    # for/else: 'else' runs only when the loop found no matching LV.
    for test_lv in lvs:
        if test_lv['name'] == check_lv:
            this_lv = test_lv
            break
    else:
        this_lv = None

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")
        else:
            module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])

    msg = ''
    if this_lv is None:
        if state == 'present':
            ### create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            ### remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif size_opt == 'l':
            ### Resize LV based on % value
            # size_whole/size_percent were set in the '%' parsing branch
            # above; size_opt == 'l' implies that branch ran.
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else: # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            # A leading '+' means "grow by", so add the current size.
            if '+' in size:
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg="Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            ### resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
sysadmind/ansible-modules-extras
|
system/lvol.py
|
Python
|
gpl-3.0
| 15,209
|
[
"Firefly"
] |
8126854225c3226e3798482431323f5743057191bec0e25ca7969173327701ce
|
#!/usr/bin/env python
#
'''
Calculating densities with DF-MP2, demonstrated for the dipole moment of CH3Cl.
'''
from numpy.linalg import norm
from pyscf.gto import Mole
from pyscf.scf import RHF
from pyscf.mp.dfmp2_native import DFMP2
mol = Mole()
mol.atom = '''
C 0.000000 0.000000 0.000000
Cl 0.000000 0.000000 1.785000
H 1.019297 0.000000 -0.386177
H -0.509649 0.882737 -0.386177
H -0.509649 -0.882737 -0.386177
'''
mol.basis = 'aug-cc-pVTZ'
mol.build()
mf = RHF(mol).run()
pt = DFMP2(mf).run()
# The unrelaxed density always has got natural occupation numbers between 2 and 0.
# However, it is inaccurate for properties.
dm_ur = pt.make_rdm1_unrelaxed(ao_repr=True)
# The relaxed density is more accurate for properties when MP2 is well-behaved,
# whereas the natural occupation numbers can be above 2 or below 0 for ill-behaved systems.
dm_re = pt.make_rdm1_relaxed(ao_repr=True)
print('')
print('HF dipole moment:')
dip = mf.dip_moment() # 2.10
print('Absolute value: {0:.3f} Debye'.format(norm(dip)))
print('')
print('Unrelaxed MP2 dipole moment:')
dip = mf.dip_moment(dm=dm_ur) # 2.07
print('Absolute value: {0:.3f} Debye'.format(norm(dip)))
print('')
print('Relaxed MP2 dipole moment:')
dip = mf.dip_moment(dm=dm_re) # 1.90
print('Absolute value: {0:.3f} Debye'.format(norm(dip)))
print('')
print('Experimental reference: 1.870 Debye')
|
sunqm/pyscf
|
examples/mp/11-dfmp2-density.py
|
Python
|
apache-2.0
| 1,383
|
[
"PySCF"
] |
54c06c52f72bbac4982f7b141c13f22d4d062836dc10f6e484af9900be9836ed
|
from mpi4py import MPI
simulator = 'neuron'
import nineml
if simulator == 'neuron':
from pype9.cells.neuron import CellMetaClass # @UnusedImport
from neuron import h
else:
from pype9.cells.nest import CellMetaClass # @Reimport
import os.path
import sys
import matplotlib.pyplot as plt
h.load_file("multisplit.hoc")
LIBNRNMECHPATH = "/home/nebula/git/CerebellarNuclei/x86_64/.libs/libnrnmech.so"
class MultiCompartmentSplit:
def __init__(self, mc):
self.pc = h.ParallelContext()
self.mc = mc
self.tree = mc.tree
self.name = mc.name
self.sections = []
self.section_def_template = 'create %s[%d]'
self.complexity = 0
self.setup_time = 0
self.calc_time = 0
self.num_compartment = 0
self.tstop = 2000 # [msec]
self.vec_v = 0 # one vec_v is not good
self.vec_t = 0
#def show_mech_cost(self):
def set_vec_t(self):
self.vec_t = h.Vector()
self.vec_t.record(h._ref_t)
def set_vec_v(self, rec_sec_name):
for sec in h.allsec():
if(sec.name() == rec_sec_name):
print "Record Compartment = %s in #%d" % (rec_sec_name, self.pc.id())
self.vec_v = h.Vector()
self.vec_v.record(sec(0.5)._ref_v)
def show_all_sections(self):
for sec in self.sections:
h.psection(sec=sec)
def insert_domain(self, sec, domain):
start = h.startsw()
modlist = []
for subcomp in domain.dynamics.subcomponents:
modlist.append(subcomp.name)
sec.insert(subcomp.name)
for prop in subcomp.dynamics.properties:
############################################
# this is not good and old style
#
sec.push()
h(prop.name+'_'+subcomp.name+' = '+str(prop.value))
h.pop_section()
#
#sec.setter(prop.name+'_'+subcomp.name, prop.value)
############################################
self.setup_time += h.startsw() - start
def check_complexity_file(self):
# show mechanisms
#h.chk_mcomplex()
h.mcomplex()
def setup_sections(self):
start = h.startsw()
###################################################
# set up sections
self.sections = []
# old style, but it is need for section_name in hoc
h(self.section_def_template % (self.name, len(self.tree)))
for sec in h.allsec():
self.sections.append(sec)
###################################################
# connect sections
for i,sec in enumerate(self.sections):
parent = self.tree[i]
#print "%d to %d" % (i, tree[i])
if(parent != 0):
sec.connect(self.sections[parent-1], 1, 0)
self.num_compartment = 0
for sec in h.allsec():
self.num_compartment += 1
self.setup_time += h.startsw() - start
def setup_mechanisms(self):
start = h.startsw()
for i,sec in enumerate(self.sections):
#print "set %s to %d" % (mc.mapping.domain_name(i), i)
self.insert_domain(sec, self.mc.domain(i))
self.setup_time += h.startsw() - start
def multisplit(self):
start = h.startsw()
self.complexity = h.multisplit()
self.pc.multisplit()
self.pc.set_maxstep(10)
self.num_compartment = 0
for sec in h.allsec():
self.num_compartment += 1
self.setup_time += h.startsw() - start
def run_simulation(self):
start = h.startsw()
h.stdinit()
self.pc.psolve(self.tstop)
self.pc.barrier()
self.calc_time = h.startsw() - start
def show_info(self):
sys.stdout.flush()
self.pc.barrier()
if(self.pc.id()==0):
print "\n##############################################################"
print "# setup time = %.2f sec" % self.setup_time
print "# calc time = %.2f sec" % self.calc_time
print "#"
sys.stdout.flush()
self.pc.barrier()
for i in range(int(self.pc.nhost())):
if(i==self.pc.id()):
print "# %d/%d : %d compartment (%d)" % (self.pc.id(), self.pc.nhost(), self.num_compartment, self.complexity)
sys.stdout.flush()
self.pc.barrier()
if(self.pc.id()==0):
print "#"
def show_topology(self, id):
if id < 0:
for i in range(int(self.pc.nhost())):
if(i==pc.id()):
h.topology()
sys.stdout.flush()
self.pc.barrier()
else:
if(id == self.pc.id()):
h.topology()
def show_plot(self):
if(self.vec_v!=0):
t = self.vec_t.as_numpy()
v = self.vec_v.as_numpy()
plt.plot(t, v, color='b')
plt.title('simulation result with '+str(int(self.pc.nhost()))+" CPU cores")
plt.xlabel('time [msec]')
plt.ylabel('Membrane Potential [mv]')
plt.axis(xmin=0, xmax=max(t), ymin=-80, ymax=10)
plt.show()
def main():
    """Build the DCN cell from its 9ML description, multisplit it across
    MPI ranks, run the simulation and report/plot the result."""
    # Load the user-compiled NEURON mechanisms (.so) into the interpreter.
    h('{nrn_load_dll("'+LIBNRNMECHPATH+'")}')
    # Read the DCN multi-compartment model from its 9ML file.
    dcn = nineml.read(os.path.join(
        os.environ['HOME'], 'git', 'CerebellarNuclei', '9ml',
        'dcn.xml'))['DCN']
    mc = MultiCompartmentSplit(dcn)
    #dcn_cell = CellMetaClass(dcn)
    mc.check_complexity_file()
    mc.setup_sections()
    mc.setup_mechanisms()
    mc.multisplit()
    mc.set_vec_t()
    # Record compartment DCN[100] -- presumably an arbitrary probe point;
    # TODO confirm the index is meaningful for this morphology.
    mc.set_vec_v('DCN[100]')
    #mc.show_all_sections()
    mc.run_simulation()
    mc.show_info()
    mc.show_plot()
if __name__ == '__main__':
main()
|
DaisukeMiyamoto/nineml_test
|
nineml/dcn_test.py
|
Python
|
mit
| 5,923
|
[
"NEURON"
] |
74335441eed39f1b66fee47e9e3a08d8af026fab7a0842305c5d2325c085744b
|
#
# AUTHORS:
# Hakan Ozadam
# Rachel Brown
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import os
import subprocess
from collections import OrderedDict, defaultdict
from .step import Step
from .exceptions import *
from ..genomic_io.fastq import FastqFile
from ..genomic_io.fasta import FastaFile, FastaEntry
from ..genomic_io.functions import make_fasta_from_fastq
from ..annotation.intron import get_intron_sequences
from ..settings import *
import pysam
#################################################################
class BpReference(Step):
    '''
    Build a bowtie2 reference of branchpoint (bp) sequences.

    For each branchpoint in a BED file, extracts a sequence joining the
    region around the bp with the intron's 5' flank, writes the sequences to
    a fasta file, and assembles the bowtie2-build command for them.
    '''

    def __init__(self, name, input_files, output_directory,
                 executable='', executable_arguments='', number_of_nucleotides=150):
        super().__init__(name, [], output_directory, executable, executable_arguments)
        self.genome_fasta_file = input_files[0]   # whole-genome fasta
        self.bp_bed_file = input_files[1]         # branchpoint coordinates (BED)
        self.bp_fasta_file = os.path.join(self.output_directory, "bp_sequences.fa")
        self.number_of_nucleotides = number_of_nucleotides  # flank length per side
        self.reference_base = os.path.join(self.output_directory, settings['bp_reference_base'])

    ###################################################################

    def prepare(self):
        """Extract the bp sequences and build the indexing command line."""
        self.get_bp_sequences()
        if not os.path.isfile(self.bp_fasta_file):
            raise StepError("There was a problem in getting the bp sequences."
                            "BP reference file %s doesn't exist." % self.bp_fasta_file)
        self.command = " ".join([self.executable, self.executable_arguments,
                                 self.bp_fasta_file, self.reference_base])

    ##############################################################################

    def get_bp_sequences(self):
        """Write one fasta entry per branchpoint to self.bp_fasta_file.

        For '+' strand entries the output is (upstream flank + bp) followed
        by the intron 5' flank; for '-' strand entries both fragments are
        reverse-complemented first.  Coordinate arithmetic is kept exactly
        as in the original implementation.
        """
        with open(self.bp_bed_file, "r") as bed_input,\
                FastaFile(self.genome_fasta_file) as genome_input,\
                open(self.bp_fasta_file, "w") as bp_output:
            # first read the bed file into a dict grouped by chromosome
            bps_by_chr = defaultdict(list)
            for bp_entry in bed_input:
                bp_contents = bp_entry.rstrip().split("\t")
                bp_chr = bp_contents[0]
                bps_by_chr[bp_chr].append(bp_entry)

            # Then go through the fasta file and get the sequences
            for chr_entry in genome_input:
                this_chr = chr_entry.header
                for branchpoint in bps_by_chr[this_chr]:
                    bp_contents = branchpoint.rstrip().split("\t")
                    bp_location = int(bp_contents[1])
                    # NOTE(review): the BED name field (column 4) is assumed
                    # to hold settings['field_separator']-separated metadata
                    # whose 4th item is the intron 5' coordinate -- confirm
                    # against the producer of this BED file.
                    bp_header_contents = bp_contents[3].split(settings['field_separator'])
                    five_prime_location = int(bp_header_contents[3])
                    this_sequence = ''
                    if bp_contents[5] == '+':
                        # clamp the upstream flank at the chromosome start
                        bp_fragment_start = bp_location - self.number_of_nucleotides
                        if bp_fragment_start < 0:
                            bp_fragment_start = 0
                        this_sequence = chr_entry.sequence[bp_fragment_start: bp_location + 1] +\
                            chr_entry.sequence[five_prime_location: five_prime_location +
                                               self.number_of_nucleotides]
                    elif bp_contents[5] == '-':
                        bp_fragment_raw = chr_entry.sequence[bp_location:
                                                             bp_location + self.number_of_nucleotides + 1]
                        five_p_fragment_start = five_prime_location - self.number_of_nucleotides
                        five_p_fragment_raw = chr_entry.sequence[five_p_fragment_start + 1:
                                                                 five_prime_location + 1]
                        bp_fragment_raw_fasta = FastaEntry('bp', bp_fragment_raw)
                        bp_fragment_raw_fasta.reverse_complement()
                        five_p_fragment_raw_fasta = FastaEntry('five_p', five_p_fragment_raw)
                        five_p_fragment_raw_fasta.reverse_complement()
                        this_sequence = bp_fragment_raw_fasta.sequence + five_p_fragment_raw_fasta.sequence
                    else:
                        raise(StepError("Invalid strand type:", bp_contents[5]))
                    this_bp_sequence_entry = FastaEntry(bp_contents[3], this_sequence)
                    print(this_bp_sequence_entry, file=bp_output)

    ###############################################################################
    ###############################################################################
    ###############################################################################

    def post_run(self):
        """Verify all bowtie2 index files exist and touch the success or
        failure marker file accordingly."""
        missing_references = list()
        suffixes = ('.1.bt2', '.2.bt2', '.3.bt2', '.4.bt2', '.rev.1.bt2', '.rev.2.bt2')
        error_messages = list()
        for suffix in suffixes:
            if not os.path.isfile(self.reference_base + suffix):
                missing_references.append("Couldn't find the bowtie2 reference: " + self.reference_base + suffix)
        if len(missing_references) > 0:
            error_messages.append("Couldn't find the following bowtie2 reference(s):\n" +
                                  "\n".join(missing_references))
        # Touch the marker file directly instead of shelling out to 'touch'
        # via shell=True (which breaks on paths containing spaces and spawns
        # a process needlessly).  failure_file/success_file presumably come
        # from the Step base class -- TODO confirm.
        marker = self.failure_file if error_messages else self.success_file
        with open(marker, 'a'):
            pass
        os.utime(marker, None)
        self.error_messages = error_messages
|
hakanozadam/bal
|
bal/core/make_bp_reference.py
|
Python
|
gpl-2.0
| 5,956
|
[
"pysam"
] |
0a0512b9b14bce269238c742808f9c3f6902b027cc6fad8a0d49264df4e554b7
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
from http import cookies
import dbSession
import dbShared
import cgi
import pymysql
#
def findName(nameString):
    """
    Return the userID matching nameString for an active account
    (userState > 0), or '' when no such user exists.
    """
    conn = dbShared.ghConn()
    cursor = conn.cursor()
    # SECURITY FIX: use a parameterized query instead of concatenating the
    # (user-supplied) name into the SQL string -- the original was injectable
    # despite the caller's escaping helper.
    cursor.execute("SELECT userID FROM tUsers WHERE userID=%s AND userState > 0;",
                   (nameString,))
    row = cursor.fetchone()
    if row is None:
        userid = ""
    else:
        userid = row[0]
    cursor.close()
    conn.close()
    return userid
# Main program
# CGI endpoint: prints an empty body when the requested user name is
# available, or an explanatory message when it is already taken.
form = cgi.FieldStorage()
uname = form.getfirst("uname", "")
# Sanitize the raw CGI value before it is used in a query.
uname = dbShared.dbInsertSafe(uname)
result = ""
tmpID = findName(uname)
if (tmpID == ""):
    # No active user with that name: empty response means "available".
    result = ""
else:
    result = "That user name is not available."
print('Content-type: text/html\n')
print(result)
|
pwillworth/galaxyharvester
|
html/nameAvailable.py
|
Python
|
gpl-3.0
| 1,522
|
[
"Galaxy"
] |
cf8134643c3eda881cc0a580d741a3110b63b6165300980300d100094bde228e
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
import os
import xml.sax
import numpy as np
from gpaw import setup_paths
from gpaw.setup_data import search_for_file
from gpaw.atom.radialgd import EquidistantRadialGridDescriptor
try:
import gzip
except ImportError:
has_gzip = False
else:
has_gzip = True
def parse_basis_name(name):
    """Parse any basis type identifier: 'sz', 'dzp', 'qztp', '4z3p', ...

    Returns (zetacount, polarizationcount, canonicalname).
    """
    letter2number = {'s': 1, 'd': 2, 't': 3, 'q': 4}
    number2letter = 'Xsdtq56789'

    zeta = letter2number.get(name[0])
    if zeta is None:
        zeta = int(name[0])
    assert name[1] == 'z'

    canonical = [number2letter[zeta], 'z', '', 'p']
    if len(name) == 2:
        # e.g. 'sz': no polarization functions at all
        pol = 0
        canonical[-1] = ''
    elif len(name) == 3:
        # e.g. 'dzp': exactly one polarization shell, no count letter
        assert name[-1] == 'p'
        pol = 1
    else:
        # e.g. '4z3p' or 'qztp': explicit polarization count
        assert len(name) == 4 and name[-1] == 'p'
        pol = letter2number.get(name[2])
        if pol is None:
            pol = int(name[2])
        canonical[2] = number2letter[pol]
    return zeta, pol, ''.join(canonical)
class Basis:
    """An LCAO basis set: a list of BasisFunction objects plus the
    equidistant radial grid (spacing d, ng points) they are defined on.
    """
    def __init__(self, symbol, name, readxml=True, world=None):
        # symbol and name together determine the on-disk file name
        # "<symbol>.<name>.basis" (see write_xml / BasisSetXMLParser.parse).
        self.symbol = symbol
        self.name = name
        self.bf_j = []              # list of BasisFunction objects
        self.ng = None              # number of radial grid points
        self.d = None               # radial grid spacing
        self.generatorattrs = {}    # attributes of the XML <generator> tag
        self.generatordata = ''     # free-form text from the generator
        self.filename = None        # set when loaded from a file
        if readxml:
            self.read_xml(world=world)
    # Total number of atomic orbitals: each function of angular momentum l
    # contributes 2l+1 spherical harmonics.
    def nao(self): # implement as a property so we don't have to
        # catch all the places where Basis objects are modified without
        # updating it. (we can do that later)
        return sum([2 * bf.l + 1 for bf in self.bf_j])
    nao = property(nao)
    def get_grid_descriptor(self):
        """Return the equidistant radial grid descriptor for this basis."""
        return EquidistantRadialGridDescriptor(self.d, self.ng)
    def tosplines(self):
        """Return one radial spline per basis function."""
        gd = self.get_grid_descriptor()
        return [gd.spline(bf.phit_g, bf.l) for bf in self.bf_j]
    def read_xml(self, filename=None, world=None):
        """Populate this object from a .basis XML file (see parser)."""
        parser = BasisSetXMLParser(self)
        parser.parse(filename, world=world)
    def write_xml(self):
        """Write basis functions to file.
        Writes all basis functions in the given list of basis functions
        to the file "<symbol>.<name>.basis".
        """
        if self.name is None:
            filename = '%s.basis' % self.symbol
        else:
            filename = '%s.%s.basis' % (self.symbol, self.name)
        write = open(filename, 'w').write
        write('<paw_basis version="0.1">\n')
        # NOTE: dict.iteritems() is Python 2 only.
        generatorattrs = ' '.join(['%s="%s"' % (key, value)
                                   for key, value
                                   in self.generatorattrs.iteritems()])
        write(' <generator %s>' % generatorattrs)
        for line in self.generatordata.split('\n'):
            write('\n '+line)
        write('\n </generator>\n')
        write((' <radial_grid eq="r=d*i" d="%f" istart="0" iend="%d" ' +
               'id="lingrid"/>\n') % (self.d, self.ng - 1))
        for bf in self.bf_j:
            write(' <basis_function l="%d" rc="%f" type="%s" '
                  'grid="lingrid" ng="%d">\n'%
                  (bf.l, bf.rc, bf.type, bf.ng))
            write(' ')
            for value in bf.phit_g:
                write(' %16.12e' % value)
            write('\n')
            write(' </basis_function>\n')
        write('</paw_basis>\n')
    def reduce(self, name):
        """Reduce the number of basis functions.
        Example: basis.reduce('sz') will remove all non single-zeta
        and polarization functions."""
        zeta, pol = parse_basis_name(name)[:2]
        newbf_j = []
        N = {}   # (n, l) -> number of functions kept so far
        p = 0    # number of polarization functions kept so far
        for bf in self.bf_j:
            if 'polarization' in bf.type:
                if p < pol:
                    newbf_j.append(bf)
                    p += 1
            else:
                # bf.type presumably starts like "2s..." -- TODO confirm.
                nl = (int(bf.type[0]), 'spdf'.index(bf.type[1]))
                if nl not in N:
                    N[nl] = 0
                if N[nl] < zeta:
                    newbf_j.append(bf)
                    N[nl] += 1
        self.bf_j = newbf_j
    def get_description(self):
        """Return a human-readable multi-line summary of this basis set."""
        title = 'LCAO basis set for %s:' % self.symbol
        if self.name is not None:
            name = 'Name: %s' % self.name
        else:
            name = 'This basis set does not have a name'
        if self.filename is None:
            fileinfo = 'This basis set was not loaded from a file'
        else:
            fileinfo = 'Basis set was loaded from file %s' % self.filename
        nj = len(self.bf_j)
        count1 = 'Number of radial functions: %d' % nj
        count2 = 'Number of spherical harmonics: %d' % self.nao
        bf_lines = []
        for bf in self.bf_j:
            line = '  l=%d, rc=%.4f Bohr: %s' % (bf.l, bf.rc, bf.type)
            bf_lines.append(line)
        lines = [title, name, fileinfo, count1, count2]
        lines.extend(bf_lines)
        return '\n  '.join(lines)
class BasisFunction:
    """Encapsulates various basis function data."""

    def __init__(self, l=None, rc=None, phit_g=None, type=''):
        # Angular momentum, cutoff radius, radial values on the grid,
        # and a free-form type label.
        self.l = l
        self.rc = rc
        self.phit_g = phit_g
        self.type = type
        # Number of radial grid points; None when no values were given.
        self.ng = None if phit_g is None else len(phit_g)
class BasisSetXMLParser(xml.sax.handler.ContentHandler):
    """SAX handler that fills a Basis object from a .basis XML file."""
    def __init__(self, basis):
        xml.sax.handler.ContentHandler.__init__(self)
        self.basis = basis
        # Per-element parse state, set in startElement and consumed
        # in endElement.
        self.type = None
        self.rc = None
        self.data = None
        self.l = None
    def parse(self, filename=None, world=None):
        """Read from symbol.name.basis file.
        Example of filename: N.dzp.basis. Use sz(dzp) to read
        the sz-part from the N.dzp.basis file."""
        basis = self.basis
        # A name like 'sz(dzp)' means: parse the dzp file, then reduce
        # the result to sz afterwards.
        if '(' in basis.name:
            reduced, name = basis.name.split('(')
            name = name[:-1]
        else:
            name = basis.name
            reduced = None
        fullname = '%s.%s.basis' % (basis.symbol, name)
        if filename is None:
            basis.filename, source = search_for_file(fullname, world=world)
            if source is None:
                print """
You need to set the GPAW_SETUP_PATH environment variable to point to
the directory where the basis set files are stored. See
http://wiki.fysik.dtu.dk/gpaw/Setups
for details."""
                raise RuntimeError('Could not find "%s" basis for "%s".' %
                                   (name, basis.symbol))
        else:
            basis.filename = filename
            source = open(filename).read()
        self.data = None
        xml.sax.parseString(source, self)
        if reduced:
            basis.reduce(reduced)
    def startElement(self, name, attrs):
        """Store grid parameters and open basis-function/generator state."""
        basis = self.basis
        if name == 'paw_basis':
            basis.version = attrs['version']
        elif name == 'generator':
            basis.generatorattrs = dict(attrs)
            self.data = []
        elif name == 'radial_grid':
            # Only the equidistant grid r = d * i is supported.
            assert attrs['eq'] == 'r=d*i'
            basis.ng = int(attrs['iend']) + 1
            basis.d = float(attrs['d'])
            assert int(attrs['istart']) == 0
        elif name == 'basis_function':
            self.l = int(attrs['l'])
            self.rc = float(attrs['rc'])
            self.type = attrs.get('type')
            self.ng = int(attrs.get('ng'))
            self.data = []
    def characters(self, data):
        # Accumulate character data only while inside a tracked element.
        if self.data is not None:
            self.data.append(data)
    def endElement(self, name):
        """Convert accumulated text into a BasisFunction or generator data."""
        basis = self.basis
        if name == 'basis_function':
            phit_g = np.array([float(x) for x in ''.join(self.data).split()])
            bf = BasisFunction(self.l, self.rc, phit_g, self.type)
            # The number of parsed values must match the declared ng.
            assert bf.ng == self.ng, ('Bad grid size %d vs ng=%d!'
                                      % (bf.ng, self.ng))
            basis.bf_j.append(bf)
        elif name == 'generator':
            basis.generatordata = ''.join([line for line in self.data])
class BasisPlotter:
    """Plot the radial basis functions of a Basis object with pylab."""
    def __init__(self, premultiply=True, normalize=False,
                 show=False, save=False, ext='png'):
        # premultiply: plot r*phi(r) instead of phi(r);
        # normalize: divide each curve by its discrete norm.
        self.premultiply = premultiply
        self.show = show
        self.save = save
        self.ext = ext
        self.default_filename = '%(symbol)s.%(name)s.' + ext
        self.title = 'Basis functions: %(symbol)s %(name)s'
        self.xlabel = r'r [Bohr]'
        if premultiply:
            ylabel = r'$\tilde{\phi} r$'
        else:
            ylabel = r'$\tilde{\phi}$'
        self.ylabel = ylabel
        self.normalize = normalize
    def plot(self, basis, filename=None, **plot_args):
        """Print a summary of *basis* and plot all its functions.
        Extra keyword args are forwarded to pylab.plot."""
        import pylab # Should not import in module namespace
        if plot_args is None:
            plot_args = {}
        rc = basis.d * (basis.ng - 1)
        r_g = np.linspace(0., rc, basis.ng)
        print 'Element  :', basis.symbol
        print 'Name     :', basis.name
        print
        print 'Basis functions'
        print '---------------'
        norm_j = []
        for j, bf in enumerate(basis.bf_j):
            rphit_g = r_g[:bf.ng] * bf.phit_g
            # Discrete norm: sqrt(sum((r*phi)^2) * d).
            norm = (np.dot(rphit_g, rphit_g) * basis.d) ** .5
            norm_j.append(norm)
            print bf.type, '[norm=%0.4f]' % norm
        print
        print 'Generator'
        # NOTE: dict.iteritems() is Python 2 only.
        for key, item in basis.generatorattrs.iteritems():
            print '   ', key, ':', item
        print
        print 'Generator data'
        print basis.generatordata
        if self.premultiply:
            factor = r_g
        else:
            factor = np.ones_like(r_g)
        pylab.figure()
        for norm, bf in zip(norm_j, basis.bf_j):
            y_g = bf.phit_g * factor[:bf.ng]
            if self.normalize:
                y_g /= norm
            pylab.plot(r_g[:bf.ng], y_g, label=bf.type[:12],
                       **plot_args)
        axis = pylab.axis()
        # Clip the x axis at the largest cutoff radius.
        rc = max([bf.rc for bf in basis.bf_j])
        newaxis = [0., rc, axis[2], axis[3]]
        pylab.axis(newaxis)
        pylab.legend()
        pylab.title(self.title % basis.__dict__)
        pylab.xlabel(self.xlabel)
        pylab.ylabel(self.ylabel)
        if filename is None:
            filename = self.default_filename
        if self.save:
            pylab.savefig(filename % basis.__dict__)
        if self.show:
            pylab.show()
|
ajylee/gpaw-rtxs
|
gpaw/basis_data.py
|
Python
|
gpl-3.0
| 10,519
|
[
"GPAW"
] |
73206b717cac6d0c36c9692613bd3c59d61f8f73283d52fe94f5294ac96b072d
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        # False in subclasses marks a broken IE (warn users, skip tests).
        return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            # Already a user-facing extraction failure; re-raise unchanged
            # so its message and flags are preserved.
            raise
        except compat_http_client.IncompleteRead as e:
            # expected=True: a flaky network is not an extractor bug.
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        # Only the downloader's urlopen/to_screen/report_warning/params
        # are used by the methods in this class.
        self._downloader = downloader
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        # Intentionally a no-op in the base class.
        pass
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        # Intentionally a no-op in the base class.
        pass
    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Extractor class names end in 'IE'; strip that suffix.
        return compat_str(cls.__name__[:-2])
    @property
    def IE_NAME(self):
        # Same convention as ie_key(): class name minus the 'IE' suffix.
        return compat_str(type(self).__name__[:-2])
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle """
        # note=None: default progress message; note=False: silence.
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        if isinstance(url_or_request, compat_urllib_request.Request):
            # Existing Request: merge in the extra data/headers/query.
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            # Plain URL: build a Request only if something must be attached.
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            # errnote=False silences the error entirely; otherwise raise
            # when fatal, or warn and return False when not.
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle) """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            # _request_webpage only returns False on non-fatal failure.
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read, decode and sanity-check the body of an open response."""
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        # --dump-pages: print the raw page, base64-encoded, to the screen.
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        # --write-pages: save the raw page next to the download.
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                # Shorten over-long names; keep them unique via an md5 tail.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown codec name from detection; fall back to UTF-8.
            content = webpage_bytes.decode('utf-8', 'replace')
        # Detect well-known blocking pages and fail with a helpful message.
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        return content
    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
        """ Returns the data of the page as a string """
        success = False
        try_count = 0
        while success is False:
            try:
                res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
                success = True
            except compat_http_client.IncompleteRead as e:
                # Only truncated reads are retried, up to *tries* attempts
                # with *timeout* seconds between them.
                try_count += 1
                if try_count >= tries:
                    raise e
                self._sleep(timeout, video_id)
        if res is False:
            # Non-fatal failure sentinel from _download_webpage_handle.
            return res
        else:
            content, _ = res
            return content
    def _download_xml(self, url_or_request, video_id,
                      note='Downloading XML', errnote='Unable to download XML',
                      transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """Return the xml as an xml.etree.ElementTree.Element"""
        xml_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
        if xml_string is False:
            # Propagate the non-fatal failure sentinel.
            return xml_string
        if transform_source:
            # Optional hook to clean up broken XML before parsing.
            xml_string = transform_source(xml_string)
        return compat_etree_fromstring(xml_string.encode('utf-8'))
    def _download_json(self, url_or_request, video_id,
                       note='Downloading JSON metadata',
                       errnote='Unable to download JSON metadata',
                       transform_source=None,
                       fatal=True, encoding=None, data=None, headers={}, query={}):
        """Download a JSON document and return the parsed object."""
        json_string = self._download_webpage(
            url_or_request, video_id, note, errnote, fatal=fatal,
            encoding=encoding, data=data, headers=headers, query=query)
        if (not fatal) and json_string is False:
            # Non-fatal download failure: no document to parse.
            return None
        return self._parse_json(
            json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
raise ExtractorError(
'%s. You might want to use --proxy to workaround.' % msg,
expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)
        username = None
        password = None
        downloader_params = self._downloader.params
        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username') is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                # Best-effort: a bad or missing .netrc downgrades to a warning
                # and (None, None) credentials.
                self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
        return (username, password)
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
    @staticmethod
    def _meta_regex(prop):
        # Verbose/dotall/ignorecase regex matching a <meta> tag whose
        # itemprop/name/property/id/http-equiv attribute equals *prop*;
        # the value is captured in the named group 'content'.
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
    # Search *html* for the OpenGraph property *prop* and return its
    # HTML-unescaped content, or None when nothing matched.
    if name is None:
        name = 'OpenGraph %s' % prop
    escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
    return unescapeHTML(escaped) if escaped is not None else None
def _og_search_thumbnail(self, html, **kargs):
    # Non-fatal lookup of the og:image property (thumbnail URL).
    return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
    # Non-fatal lookup of the og:description property.
    return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
    # Lookup of the og:title property.
    return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
    # og:video and og:video:url are always tried; og:video:secure_url is
    # prepended (i.e. preferred) when *secure* is requested.
    prop_order = ['video:secure_url'] if secure else []
    prop_order += ['video', 'video:url']
    regexes = []
    for prop in prop_order:
        regexes += self._og_regexes(prop)
    return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
    # Lookup of the og:url property.
    return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
    # Search *html* for a <meta> tag named *name* (a string or a list of
    # alternative names) and return its content attribute.
    names = [name] if not isinstance(name, (list, tuple)) else name
    if display_name is None:
        display_name = names[0]
    patterns = [self._meta_regex(n) for n in names]
    return self._html_search_regex(
        patterns, html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
    # Dublin Core creator <meta> tag maps to the uploader field.
    return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
    # Map ICRA-style media rating strings to age limits; None when the
    # page carries no rating or an unknown one.
    # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
    RATING_TABLE = {
        'safe for kids': 0,
        'general': 8,
        '14 years': 14,
        'mature': 17,
        'restricted': 19,
    }
    rating = self._html_search_meta('rating', html)
    return RATING_TABLE.get(rating.lower()) if rating else None
def _family_friendly_search(self, html):
    # schema.org/VideoObject isFamilyFriendly: true/1 -> all ages,
    # false/0 -> 18+; None when absent or unrecognized.
    family_friendly = self._html_search_meta('isFamilyFriendly', html)
    if not family_friendly:
        return None
    return {
        '1': 0,
        'true': 0,
        '0': 18,
        'false': 18,
    }.get(family_friendly.lower())
def _twitter_search_player(self, html):
    # Twitter Card player URL from the twitter:player <meta> tag.
    return self._html_search_meta('twitter:player', html,
                                  'twitter card player')
def _search_json_ld(self, html, video_id, **kwargs):
    # Locate an application/ld+json <script> block in *html* and map it
    # onto info-dict fields; {} when nothing was found.
    json_ld = self._search_regex(
        r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
        html, 'JSON-LD', group='json_ld', **kwargs)
    if json_ld:
        return self._json_ld(json_ld, video_id, fatal=kwargs.get('fatal', True))
    return {}
def _json_ld(self, json_ld, video_id, fatal=True):
    """Map a JSON-LD object (or its JSON string form) onto info-dict fields.

    Only schema.org TVEpisode and Article items are handled; anything
    else yields an empty dict.  None-valued fields are stripped from
    the result.
    """
    # Accept either a raw JSON string or an already-parsed object.
    if isinstance(json_ld, compat_str):
        json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
    if not json_ld:
        return {}
    info = {}
    if json_ld.get('@context') == 'http://schema.org':
        item_type = json_ld.get('@type')
        if item_type == 'TVEpisode':
            info.update({
                'episode': unescapeHTML(json_ld.get('name')),
                'episode_number': int_or_none(json_ld.get('episodeNumber')),
                'description': unescapeHTML(json_ld.get('description')),
            })
            part_of_season = json_ld.get('partOfSeason')
            if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
                info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
            part_of_series = json_ld.get('partOfSeries')
            if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
                info['series'] = unescapeHTML(part_of_series.get('name'))
        elif item_type == 'Article':
            info.update({
                'timestamp': parse_iso8601(json_ld.get('datePublished')),
                'title': unescapeHTML(json_ld.get('headline')),
                'description': unescapeHTML(json_ld.get('articleBody')),
            })
    # Drop fields that could not be extracted.
    return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
    # Extract the body of <form id="..."> and scrape its hidden inputs.
    form_re = r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id
    form = self._search_regex(form_re, html, '%s form' % form_id, group='form')
    return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
    """Sort *formats* in place from worst to best quality.

    field_preference: optional list/tuple of format-dict keys; when
    given, formats are ordered by those fields alone (missing values
    sort first).  Raises ExtractorError when the list is empty.
    """
    if not formats:
        raise ExtractorError('No video formats found')

    for f in formats:
        # Automatically determine tbr when missing based on abr and vbr (improves
        # formats sorting in some cases)
        if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
            f['tbr'] = f['abr'] + f['vbr']

    def _formats_key(f):
        # Sort key: a tuple compared lexicographically, later elements
        # only break ties among earlier ones.
        # TODO remove the following workaround
        from ..utils import determine_ext
        if not f.get('ext') and 'url' in f:
            f['ext'] = determine_ext(f['url'])

        if isinstance(field_preference, (list, tuple)):
            # Caller-specified ordering overrides the default heuristics;
            # format_id falls back to '' (string), other fields to -1.
            return tuple(
                f.get(field)
                if f.get(field) is not None
                else ('' if field == 'format_id' else -1)
                for field in field_preference)

        preference = f.get('preference')
        if preference is None:
            preference = 0
            if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                preference -= 0.5

        # Plain HTTP(S) formats are slightly preferred over other protocols.
        proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1

        if f.get('vcodec') == 'none':  # audio only
            preference -= 50
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
            else:
                ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
            ext_preference = 0
            try:
                audio_ext_preference = ORDER.index(f['ext'])
            except ValueError:
                audio_ext_preference = -1
        else:
            if f.get('acodec') == 'none':  # video only
                preference -= 40
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['flv', 'mp4', 'webm']
            else:
                ORDER = ['webm', 'flv', 'mp4']
            try:
                ext_preference = ORDER.index(f['ext'])
            except ValueError:
                ext_preference = -1
            audio_ext_preference = 0

        return (
            preference,
            f.get('language_preference') if f.get('language_preference') is not None else -1,
            f.get('quality') if f.get('quality') is not None else -1,
            f.get('tbr') if f.get('tbr') is not None else -1,
            f.get('filesize') if f.get('filesize') is not None else -1,
            f.get('vbr') if f.get('vbr') is not None else -1,
            f.get('height') if f.get('height') is not None else -1,
            f.get('width') if f.get('width') is not None else -1,
            proto_preference,
            ext_preference,
            f.get('abr') if f.get('abr') is not None else -1,
            audio_ext_preference,
            f.get('fps') if f.get('fps') is not None else -1,
            f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
            f.get('source_preference') if f.get('source_preference') is not None else -1,
            f.get('format_id') if f.get('format_id') is not None else '',
        )
    formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
    # Drop formats whose URL does not respond; mutates *formats* in place.
    if not formats:
        return
    checked = []
    for f in formats:
        item = '%s video format' % f.get('format_id') if f.get('format_id') else 'video'
        if self._is_valid_url(f['url'], video_id, item=item):
            checked.append(f)
    formats[:] = checked
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video'):
    # Probe *url* with a request; unreachable URLs report False so the
    # caller can drop the corresponding format.
    url = self._proto_relative_url(url, scheme='http:')
    # For now assume non HTTP(S) URLs always valid
    if not url.startswith(('http://', 'https://')):
        return True
    try:
        self._request_webpage(url, video_id, 'Checking %s URL' % item)
        return True
    except ExtractorError as e:
        # Only plain URL errors mean "invalid"; everything else propagates.
        if not isinstance(e.cause, compat_urllib_error.URLError):
            raise
        self.to_screen(
            '%s: %s URL is invalid, skipping' % (video_id, item))
        return False
def http_scheme(self):
    """ Either "http:" or "https:", depending on the user's preferences """
    if self._downloader.params.get('prefer_insecure', False):
        return 'http:'
    return 'https:'
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
    # Tell the user we are waiting, then block for *timeout* seconds.
    if msg_template is None:
        msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
    self.to_screen(msg_template % {'video_id': video_id, 'timeout': timeout})
    time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                         transform_source=lambda s: fix_xml_ampersands(s).strip(),
                         fatal=True, m3u8_id=None):
    """Download an f4m (HDS) manifest and parse it into format dicts.

    Returns [] when the download fails and fatal is False.
    """
    manifest = self._download_xml(
        manifest_url, video_id, 'Downloading f4m manifest',
        'Unable to download f4m manifest',
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
        transform_source=transform_source,
        fatal=fatal)

    if manifest is False:
        return []

    return self._parse_f4m_formats(
        manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
        transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                       transform_source=lambda s: fix_xml_ampersands(s).strip(),
                       fatal=True, m3u8_id=None):
    """Parse an f4m (HDS) manifest document into a list of format dicts.

    manifest: ElementTree root of the f4m document.  Nested f4m/m3u8
    sub-manifests referenced by media entries are extracted recursively.
    """
    # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
    akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
    if akamai_pv is not None and ';' in akamai_pv.text:
        playerVerificationChallenge = akamai_pv.text.split(';')[0]
        if playerVerificationChallenge.strip() != '':
            return []

    formats = []
    # Media entries live under a version-specific namespace; probe 1.0
    # first, then fall back to 2.0.
    manifest_version = '1.0'
    media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
    if not media_nodes:
        manifest_version = '2.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
    # Remove unsupported DRM protected media from final formats
    # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
    media_nodes = remove_encrypted_media(media_nodes)
    if not media_nodes:
        return formats
    base_url = xpath_text(
        manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
        'base URL', default=None)
    if base_url:
        base_url = base_url.strip()

    bootstrap_info = xpath_element(
        manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
        'bootstrap info', default=None)

    for i, media_el in enumerate(media_nodes):
        tbr = int_or_none(media_el.attrib.get('bitrate'))
        width = int_or_none(media_el.attrib.get('width'))
        height = int_or_none(media_el.attrib.get('height'))
        format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
        # If <bootstrapInfo> is present, the specified f4m is a
        # stream-level manifest, and only set-level manifests may refer to
        # external resources.  See section 11.4 and section 4 of F4M spec
        if bootstrap_info is None:
            media_url = None
            # @href is introduced in 2.0, see section 11.6 of F4M spec
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href')
            if media_url is None:
                media_url = media_el.attrib.get('url')
            if not media_url:
                continue
            manifest_url = (
                media_url if media_url.startswith('http://') or media_url.startswith('https://')
                else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself a f4m manifest do the recursive extraction
            # since bitrates in parent manifest (this one) and media_url manifest
            # may differ leading to inability to resolve the format by requested
            # bitrate in f4m downloader
            ext = determine_ext(manifest_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(
                    manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                    transform_source=transform_source, fatal=fatal)
                # Sometimes stream-level manifest contains single media entry that
                # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                # At the same time parent's media entry in set-level manifest may
                # contain it. We will copy it from parent in such cases.
                if len(f4m_formats) == 1:
                    f = f4m_formats[0]
                    f.update({
                        'tbr': f.get('tbr') or tbr,
                        'width': f.get('width') or width,
                        'height': f.get('height') or height,
                        'format_id': f.get('format_id') if not tbr else format_id,
                    })
                formats.extend(f4m_formats)
                continue
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    manifest_url, video_id, 'mp4', preference=preference,
                    m3u8_id=m3u8_id, fatal=fatal))
                continue
        formats.append({
            'format_id': format_id,
            'url': manifest_url,
            'ext': 'flv' if bootstrap_info is not None else None,
            'tbr': tbr,
            'width': width,
            'height': height,
            'preference': preference,
        })
    return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                          entry_protocol='m3u8', preference=None,
                          m3u8_id=None, note=None, errnote=None,
                          fatal=True, live=False):
    """Download an HLS playlist and parse it into format dicts.

    Master playlists are expanded into one format per variant stream;
    media playlists are returned as a single format.  Returns [] when
    the download fails and fatal is False.
    """
    formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]

    # Resolve playlist-relative URLs against the (possibly redirected)
    # playlist location.
    format_url = lambda u: (
        u
        if re.match(r'^https?://', u)
        else compat_urlparse.urljoin(m3u8_url, u))

    res = self._download_webpage_handle(
        m3u8_url, video_id,
        note=note or 'Downloading m3u8 information',
        errnote=errnote or 'Failed to download m3u8 information',
        fatal=fatal)
    if res is False:
        return []
    m3u8_doc, urlh = res
    m3u8_url = urlh.geturl()

    # We should try extracting formats only from master playlists [1], i.e.
    # playlists that describe available qualities. On the other hand media
    # playlists [2] should be returned as is since they contain just the media
    # without qualities renditions.
    # Fortunately, master playlist can be easily distinguished from media
    # playlist based on particular tags availability. As of [1, 2] master
    # playlist tags MUST NOT appear in a media playist and vice versa.
    # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
    # and MUST NOT appear in master playlist thus we can clearly detect media
    # playlist with this criterion.
    # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
    # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
    # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
    if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
        return [{
            'url': m3u8_url,
            'format_id': m3u8_id,
            'ext': ext,
            'protocol': entry_protocol,
            'preference': preference,
        }]
    last_info = None
    last_media = None
    for line in m3u8_doc.splitlines():
        if line.startswith('#EXT-X-STREAM-INF:'):
            last_info = parse_m3u8_attributes(line)
        elif line.startswith('#EXT-X-MEDIA:'):
            last_media = parse_m3u8_attributes(line)
        elif line.startswith('#') or not line.strip():
            continue
        else:
            # A non-comment line is the variant URI belonging to the
            # preceding #EXT-X-STREAM-INF attributes (if any).
            if last_info is None:
                formats.append({'url': format_url(line)})
                continue
            tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
            format_id = []
            if m3u8_id:
                format_id.append(m3u8_id)
            last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') not in ('SUBTITLES', 'CLOSED-CAPTIONS') else None
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF it still sometimes may be present
            stream_name = last_info.get('NAME') or last_media_name
            # Bandwidth of live streams may differ over time thus making
            # format_id unpredictable. So it's better to keep provided
            # format_id intact.
            if not live:
                format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
            f = {
                'format_id': '-'.join(format_id),
                'url': format_url(line.strip()),
                'tbr': tbr,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }
            resolution = last_info.get('RESOLUTION')
            if resolution:
                width_str, height_str = resolution.split('x')
                f['width'] = int(width_str)
                f['height'] = int(height_str)
            codecs = last_info.get('CODECS')
            if codecs:
                vcodec, acodec = [None] * 2
                va_codecs = codecs.split(',')
                if len(va_codecs) == 1:
                    # Audio only entries usually come with single codec and
                    # no resolution. For more robustness we also check it to
                    # be mp4 audio.
                    if not resolution and va_codecs[0].startswith('mp4a'):
                        vcodec, acodec = 'none', va_codecs[0]
                    else:
                        vcodec = va_codecs[0]
                else:
                    vcodec, acodec = va_codecs[:2]
                f.update({
                    'acodec': acodec,
                    'vcodec': vcodec,
                })
            if last_media is not None:
                f['m3u8_media'] = last_media
                last_media = None
            formats.append(f)
            last_info = {}
    return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
    # Download a SMIL document and return only its formats.
    smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

    if smil is False:
        # _download_smil only returns False when fatal is disabled.
        assert not fatal
        return []

    namespace = self._parse_smil_namespace(smil)

    return self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
    # Download a SMIL document and return a full info dict ({} on failure).
    smil = self._download_smil(smil_url, video_id, fatal=fatal)
    if smil is False:
        return {}
    return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
    # Fetch and XML-parse the SMIL document; callers check for a False
    # return on non-fatal failure.
    return self._download_xml(
        smil_url, video_id, 'Downloading SMIL file',
        'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
    """Turn a parsed SMIL document into an info dict: formats, subtitles,
    head metadata (title/description/upload date) and thumbnails."""
    namespace = self._parse_smil_namespace(smil)

    formats = self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
    subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

    # NOTE(review): the video_id argument is discarded here in favor of
    # the SMIL file's basename — confirm this is intentional for callers.
    video_id = os.path.splitext(url_basename(smil_url))[0]
    title = None
    description = None
    upload_date = None
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        name = meta.attrib.get('name')
        content = meta.attrib.get('content')
        if not name or not content:
            continue
        # Only the first occurrence of each metadata field is kept.
        if not title and name == 'title':
            title = content
        elif not description and name in ('description', 'abstract'):
            description = content
        elif not upload_date and name == 'date':
            upload_date = unified_strdate(content)

    thumbnails = [{
        'id': image.get('type'),
        'url': image.get('src'),
        'width': int_or_none(image.get('width')),
        'height': int_or_none(image.get('height')),
    } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

    return {
        'id': video_id,
        'title': title or video_id,
        'description': description,
        'upload_date': upload_date,
        'thumbnails': thumbnails,
        'formats': formats,
        'subtitles': subtitles,
    }
def _parse_smil_namespace(self, smil):
    # Pull the XML namespace out of the root tag, e.g.
    # '{http://www.w3.org/ns/SMIL}smil' -> 'http://www.w3.org/ns/SMIL'.
    return self._search_regex(
        r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
    """Extract format dicts from a SMIL document's <video>/<audio> nodes.

    RTMP entries, HLS (m3u8) and HDS (f4m) sub-manifests and plain HTTP
    sources are all handled; duplicate src values are skipped.
    """
    # The effective base URL may be overridden by a head/meta base attr.
    base = smil_url
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        b = meta.get('base') or meta.get('httpBase')
        if b:
            base = b
            break

    formats = []
    rtmp_count = 0
    http_count = 0
    m3u8_count = 0

    srcs = []
    media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
    for medium in media:
        src = medium.get('src')
        if not src or src in srcs:
            continue
        srcs.append(src)

        bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
        filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
        width = int_or_none(medium.get('width'))
        height = int_or_none(medium.get('height'))
        proto = medium.get('proto')
        ext = medium.get('ext')
        src_ext = determine_ext(src)
        streamer = medium.get('streamer') or base

        if proto == 'rtmp' or streamer.startswith('rtmp'):
            rtmp_count += 1
            formats.append({
                'url': streamer,
                'play_path': src,
                'ext': 'flv',
                'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            if transform_rtmp_url:
                streamer, src = transform_rtmp_url(streamer, src)
                formats[-1].update({
                    'url': streamer,
                    'play_path': src,
                })
            continue

        src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
        src_url = src_url.strip()

        if proto == 'm3u8' or src_ext == 'm3u8':
            m3u8_formats = self._extract_m3u8_formats(
                src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
            # A single-entry result carries no quality metadata of its
            # own, so copy it over from this SMIL medium.
            if len(m3u8_formats) == 1:
                m3u8_count += 1
                m3u8_formats[0].update({
                    'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'width': width,
                    'height': height,
                })
            formats.extend(m3u8_formats)
            continue

        if src_ext == 'f4m':
            f4m_url = src_url
            if not f4m_params:
                f4m_params = {
                    'hdcore': '3.2.0',
                    'plugin': 'flowplayer-3.2.0.1',
                }
            f4m_url += '&' if '?' in f4m_url else '?'
            f4m_url += compat_urllib_parse_urlencode(f4m_params)
            formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            continue

        # NOTE(review): the validity probe uses the raw (possibly relative)
        # src rather than the resolved src_url — confirm this is intended.
        if src_url.startswith('http') and self._is_valid_url(src, video_id):
            http_count += 1
            formats.append({
                'url': src_url,
                'ext': ext or src_ext or 'flv',
                'format_id': 'http-%d' % (bitrate or http_count),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            continue

    return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
    # Collect <textstream> elements into a {lang: [{url, ext}, ...]}
    # mapping, skipping duplicate URLs.
    subtitles = {}
    seen_srcs = []
    for textstream in smil.findall(self._xpath_ns('.//textstream', namespace)):
        src = textstream.get('src')
        if not src or src in seen_srcs:
            continue
        seen_srcs.append(src)
        ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
        lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
        subtitles.setdefault(lang, []).append({
            'url': src,
            'ext': ext,
        })
    return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
    """Download an XSPF playlist and parse it into a list of entries.

    Returns [] when the download fails and fatal is False.
    """
    # Fix: the progress/error notes previously misspelled "xspf" as "xpsf".
    xspf = self._download_xml(
        playlist_url, playlist_id, 'Downloading xspf playlist',
        'Unable to download xspf manifest', fatal=fatal)
    if xspf is False:
        return []
    return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
    """Parse an XSPF playlist document into a list of entry info dicts.

    Formats pick up StreamOne (s1:) extension attributes for label,
    width and height when present.
    """
    NS_MAP = {
        'xspf': 'http://xspf.org/ns/0/',
        's1': 'http://static.streamone.nl/player/ns/0',
    }

    entries = []
    for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
        title = xpath_text(
            track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
        description = xpath_text(
            track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
        thumbnail = xpath_text(
            track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
        duration = float_or_none(
            xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

        formats = [{
            'url': location.text,
            'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
            'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
            'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
        } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
        self._sort_formats(formats)

        entries.append({
            'id': playlist_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        })
    return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
    """Download a DASH MPD manifest and parse it into format dicts.

    Returns [] when the download fails and fatal is False.  NOTE:
    formats_dict uses a mutable default; it is only read (never
    mutated) downstream.
    """
    res = self._download_webpage_handle(
        mpd_url, video_id,
        note=note or 'Downloading MPD manifest',
        errnote=errnote or 'Failed to download MPD manifest',
        fatal=fatal)
    if res is False:
        return []
    mpd, urlh = res
    # Base for resolving relative manifest URLs: scheme://host/...path/
    # of the final (post-redirect) manifest location.
    mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()

    return self._parse_mpd_formats(
        compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
    """Parse a DASH MPD document (ElementTree root) into format dicts.

    Live ('dynamic') manifests and DRM-protected representations are
    skipped.  formats_dict supplies per-representation defaults merged
    under the extracted fields (read-only; mutable default is safe here).

    Fixes over the previous revision:
    * the segment-duration check compared against the truthiness of the
      string 'segment_duration' (always true) instead of testing dict
      membership, raising KeyError when the attribute was absent;
    * the '$$' -> '$' unescaping of the media template discarded its
      result, leaving literal '$$' in segment URLs.
    """
    if mpd_doc.get('type') == 'dynamic':
        return []

    namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

    def _add_ns(path):
        return self._xpath_ns(path, namespace)

    def is_drm_protected(element):
        return element.find(_add_ns('ContentProtection')) is not None

    def extract_multisegment_info(element, ms_parent_info):
        # SegmentList/SegmentTemplate data, inheriting from the parent
        # level (Period -> AdaptationSet -> Representation).
        ms_info = ms_parent_info.copy()
        segment_list = element.find(_add_ns('SegmentList'))
        if segment_list is not None:
            segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
            if segment_urls_e:
                ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
            initialization = segment_list.find(_add_ns('Initialization'))
            if initialization is not None:
                ms_info['initialization_url'] = initialization.attrib['sourceURL']
        else:
            segment_template = element.find(_add_ns('SegmentTemplate'))
            if segment_template is not None:
                start_number = segment_template.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        for s in s_e:
                            # @r is a repeat count: one <S> stands for 1 + r segments.
                            ms_info['total_number'] += 1 + int(s.get('r', '0'))
                else:
                    timescale = segment_template.get('timescale')
                    if timescale:
                        ms_info['timescale'] = int(timescale)
                    segment_duration = segment_template.get('duration')
                    if segment_duration:
                        ms_info['segment_duration'] = int(segment_duration)
                media_template = segment_template.get('media')
                if media_template:
                    ms_info['media_template'] = media_template
                initialization = segment_template.get('initialization')
                if initialization:
                    ms_info['initialization_url'] = initialization
                else:
                    initialization = segment_template.find(_add_ns('Initialization'))
                    if initialization is not None:
                        ms_info['initialization_url'] = initialization.attrib['sourceURL']
        return ms_info

    mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
    formats = []
    for period in mpd_doc.findall(_add_ns('Period')):
        period_duration = parse_duration(period.get('duration')) or mpd_duration
        period_ms_info = extract_multisegment_info(period, {
            'start_number': 1,
            'timescale': 1,
        })
        for adaptation_set in period.findall(_add_ns('AdaptationSet')):
            if is_drm_protected(adaptation_set):
                continue
            adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
            for representation in adaptation_set.findall(_add_ns('Representation')):
                if is_drm_protected(representation):
                    continue
                # Representation attributes override AdaptationSet ones.
                representation_attrib = adaptation_set.attrib.copy()
                representation_attrib.update(representation.attrib)
                # According to page 41 of ISO/IEC 29001-1:2014, @mimeType is mandatory
                mime_type = representation_attrib['mimeType']
                content_type = mime_type.split('/')[0]
                if content_type == 'text':
                    # TODO implement WebVTT downloading
                    pass
                elif content_type == 'video' or content_type == 'audio':
                    # Resolve <BaseURL> hierarchically from the innermost
                    # element outwards until an absolute URL is formed.
                    base_url = ''
                    for element in (representation, adaptation_set, period, mpd_doc):
                        base_url_e = element.find(_add_ns('BaseURL'))
                        if base_url_e is not None:
                            base_url = base_url_e.text + base_url
                            if re.match(r'^https?://', base_url):
                                break
                    if mpd_base_url and not re.match(r'^https?://', base_url):
                        if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
                            mpd_base_url += '/'
                        base_url = mpd_base_url + base_url
                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    f = {
                        'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                        'url': base_url,
                        'ext': mimetype2ext(mime_type),
                        'width': int_or_none(representation_attrib.get('width')),
                        'height': int_or_none(representation_attrib.get('height')),
                        'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                        'fps': int_or_none(representation_attrib.get('frameRate')),
                        'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
                        'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
                        'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                        'format_note': 'DASH %s' % content_type,
                        'filesize': filesize,
                    }
                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
                    if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
                        # FIX: test membership of 'segment_duration' in the
                        # dict (the old code tested the string's truthiness,
                        # which is always True and could raise KeyError).
                        if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                            segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
                            representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
                        media_template = representation_ms_info['media_template']
                        media_template = media_template.replace('$RepresentationID$', representation_id)
                        media_template = re.sub(r'\$(Number|Bandwidth)\$', r'%(\1)d', media_template)
                        media_template = re.sub(r'\$(Number|Bandwidth)%([^$]+)\$', r'%(\1)\2', media_template)
                        # FIX: keep the result of unescaping '$$' (it was
                        # previously discarded).
                        media_template = media_template.replace('$$', '$')
                        representation_ms_info['segment_urls'] = [
                            media_template % {
                                'Number': segment_number,
                                'Bandwidth': representation_attrib.get('bandwidth')}
                            for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                    if 'segment_urls' in representation_ms_info:
                        f.update({
                            'segment_urls': representation_ms_info['segment_urls'],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
                            f.update({
                                'initialization_url': initialization_url,
                            })
                            if not f.get('url'):
                                f['url'] = initialization_url
                    # Merge with an already-seen representation (e.g. from
                    # another Period) or create a new entry seeded from
                    # formats_dict defaults.
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == representation_id)
                    except StopIteration:
                        full_info = formats_dict.get(representation_id, {}).copy()
                        full_info.update(f)
                        formats.append(full_info)
                    else:
                        existing_format.update(f)
                else:
                    self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
    return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
    """Parse *v* as an int via int_or_none (kwargs are forwarded).

    On failure either raise ExtractorError (fatal) or report a warning
    and return None.

    Fix: removed a stray debug ``print(getattr(v, kwargs['get_attr']))``
    which leaked to stdout whenever a get_attr kwarg was passed.
    """
    res = int_or_none(v, **kwargs)
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
    # Parse *v* as a float via float_or_none; on failure either raise
    # ExtractorError (fatal) or report a warning and return None.
    res = float_or_none(v, **kwargs)
    if res is not None:
        return res
    msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
    if fatal:
        raise ExtractorError(msg)
    self._downloader.report_warning(msg)
    return res
def _set_cookie(self, domain, name, value, expire_time=None):
    # Install a cookie into the downloader's cookiejar for *domain*,
    # valid for all paths ('/'); session cookie when expire_time is None.
    # The long positional argument list follows cookiejar.Cookie's
    # constructor signature — the order is significant, do not reorder.
    cookie = compat_cookiejar.Cookie(
        0, name, value, None, None, domain, None,
        None, '/', True, False, expire_time, '', None, None, None)
    self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
    """ Return a compat_cookies.SimpleCookie with the cookies for the url """
    # Build a throwaway request so the cookiejar computes the Cookie
    # header it would send for this URL, then parse that header back.
    req = sanitized_Request(url)
    self._downloader.cookiejar.add_cookie_header(req)
    return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
    # Yield this extractor's test cases, taken from _TEST (single case)
    # or _TESTS (a list), tagging each with the extractor's name.
    t = getattr(self, '_TEST', None)
    if t:
        assert not hasattr(self, '_TESTS'), \
            '%s has _TEST and _TESTS' % type(self).__name__
        tests = [t]
    else:
        tests = getattr(self, '_TESTS', [])
    for t in tests:
        if t.get('only_matching', False) and not include_onlymatching:
            continue
        t['name'] = type(self).__name__[:-len('IE')]
        yield t
    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """
        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if 'playlist' in tc:
                # judge a playlist test by its first entry
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                # one unrestricted test case is enough to call it suitable
                return True
            any_restricted = any_restricted or is_restricted
        # no test cases at all -> any_restricted stays False -> suitable
        return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
    def _get_subtitles(self, *args, **kwargs):
        """Subclass hook: return the subtitles dict for this extractor."""
        raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
    def _get_automatic_captions(self, *args, **kwargs):
        """Subclass hook: return the automatic-captions dict."""
        raise NotImplementedError('This method must be implemented by subclasses')
    def mark_watched(self, *args, **kwargs):
        """Mark the video as watched on the site, when possible.

        Only delegates to the subclass hook _mark_watched when the user
        enabled 'mark_watched' AND is authenticated (login credentials or a
        cookie file are present).
        """
        if (self._downloader.params.get('mark_watched', False) and
                (self._get_login_info()[0] is not None or
                    self._downloader.params.get('cookiefile') is not None)):
            self._mark_watched(*args, **kwargs)
    def _mark_watched(self, *args, **kwargs):
        """Subclass hook: perform the site-specific mark-as-watched request."""
        raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        # prefix is empty (one result), a positive integer, or 'all'
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        """Return True when *url* is a search query handled by this extractor."""
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        """Parse the prefix/query and fetch the requested number of results."""
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)
        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                # clamp to the site's limit rather than failing
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        # public read-only alias for the class-level _SEARCH_KEY
        return self._SEARCH_KEY
|
maleficarium/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 81,417
|
[
"VisIt"
] |
9815230fc33d9eda6d4d350119261e0336f96e315abf2d78ccffdfff74128b9a
|
from __future__ import division, print_function
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.mplot3d import Axes3D
import streakline
#import streakline2
import myutils
import ffwd
from streams import load_stream, vcirc_potential, store_progparams, wrap_angles, progenitor_prior
#import streams
import astropy
import astropy.units as u
from astropy.constants import G
from astropy.table import Table
import astropy.coordinates as coord
import gala.coordinates as gc
import scipy.linalg as la
import scipy.interpolate
import scipy.optimize
import zscale
import itertools
import copy
import pickle
# observers
# defaults taken as in astropy v2.0 icrs
# Milky Way observer: solar position/orientation w.r.t. the Galactic center
mw_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
# solar motion: circular velocity + LSR peculiar velocity (two copies so one
# can be mutated without losing the fiducial values)
vsun = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vsun0 = {'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
# near-Galactocentric observer (0.1 kpc from the center), used for tests
gc_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 0.1*u.kpc, 'roll': 0*u.deg, 'galcen_coord': coord.SkyCoord(ra=266.4051*u.deg, dec=-28.936175*u.deg, frame='icrs')}
vgc = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
vgc0 = {'vcirc': 0*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}
# sentinel for missing observational values
MASK = -9999
# fiducial potential + progenitor parameters; entries 0 and 2 are log10 masses
# (converted to linear in stream_model), entries 26-31 are progenitor offsets
pparams_fid = [np.log10(0.5e10)*u.Msun, 0.7*u.kpc, np.log10(6.8e10)*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0.*u.Gyr**-2*u.kpc**-1, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
#pparams_fid = [0.5e-5*u.Msun, 0.7*u.kpc, 6.8e-5*u.Msun, 3*u.kpc, 0.28*u.kpc, 430*u.km/u.s, 30*u.kpc, 1.57*u.rad, 1*u.Unit(1), 1*u.Unit(1), 1*u.Unit(1), 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.pc/u.Myr**2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0.*u.Gyr**-2, 0*u.deg, 0*u.deg, 0*u.kpc, 0*u.km/u.s, 0*u.mas/u.yr, 0*u.mas/u.yr]
class Stream():
    """Streakline model of a tidal stream.

    Wraps the compiled ``streakline`` module: stores the model setup,
    generates leading/trailing tails, converts them to observables, and
    provides plotting and (de)serialization helpers.
    """

    def __init__(self, x0=[]*u.kpc, v0=[]*u.km/u.s, progenitor={'coords': 'galactocentric', 'observer': {}, 'pm_polar': False}, potential='nfw', pparams=[], minit=2e4*u.Msun, mfinal=2e4*u.Msun, rcl=20*u.pc, dr=0.5, dv=2*u.km/u.s, dt=1*u.Myr, age=6*u.Gyr, nstars=600, integrator='lf'):
        """Initialize """
        # NOTE(review): mutable default arguments (progenitor, pparams) are
        # shared across calls -- safe only while callers never mutate them.
        setup = {}
        if progenitor['coords']=='galactocentric':
            setup['x0'] = x0
            setup['v0'] = v0
        elif (progenitor['coords']=='equatorial') & (len(progenitor['observer'])!=0):
            if progenitor['pm_polar']:
                # proper motion supplied as (amplitude, position angle);
                # convert to (mu_alpha, mu_delta) in place
                a = v0[1].value
                phi = v0[2].value
                v0[1] = a*np.sin(phi)*u.mas/u.yr
                v0[2] = a*np.cos(phi)*u.mas/u.yr
            # convert positions
            xeq = coord.SkyCoord(x0[0], x0[1], x0[2], **progenitor['observer'])
            xgal = xeq.transform_to(coord.Galactocentric)
            setup['x0'] = [xgal.x.to(u.kpc), xgal.y.to(u.kpc), xgal.z.to(u.kpc)]*u.kpc
            # convert velocities
            setup['v0'] = gc.vhel_to_gal(xeq.icrs, rv=v0[0], pm=v0[1:], **vsun)
            #setup['v0'] = [v.to(u.km/u.s) for v in vgal]*u.km/u.s
        else:
            raise ValueError('Observer position needed!')
        setup['dr'] = dr
        setup['dv'] = dv
        setup['minit'] = minit
        setup['mfinal'] = mfinal
        setup['rcl'] = rcl
        setup['dt'] = dt
        setup['age'] = age
        setup['nstars'] = nstars
        setup['integrator'] = integrator
        setup['potential'] = potential
        setup['pparams'] = pparams
        self.setup = setup
        self.setup_aux = {}
        self.fill_intid()
        self.fill_potid()
        self.st_params = self.format_input()

    def fill_intid(self):
        """Assign integrator ID for a given integrator choice
        Assumes setup dictionary has an 'integrator' key"""
        # 0 = leapfrog, 1 = Runge-Kutta; any other choice leaves 'iaux' unset
        if self.setup['integrator']=='lf':
            self.setup_aux['iaux'] = 0
        elif self.setup['integrator']=='rk':
            self.setup_aux['iaux'] = 1

    def fill_potid(self):
        """Assign potential ID for a given potential choice
        Assumes d has a 'potential' key"""
        # numeric codes understood by the compiled streakline module
        if self.setup['potential']=='nfw':
            self.setup_aux['paux'] = 3
        elif self.setup['potential']=='log':
            self.setup_aux['paux'] = 2
        elif self.setup['potential']=='point':
            self.setup_aux['paux'] = 0
        elif self.setup['potential']=='gal':
            self.setup_aux['paux'] = 4
        elif self.setup['potential']=='lmc':
            self.setup_aux['paux'] = 6
        elif self.setup['potential']=='dipole':
            self.setup_aux['paux'] = 8
        elif self.setup['potential']=='quad':
            self.setup_aux['paux'] = 9
        elif self.setup['potential']=='octu':
            self.setup_aux['paux'] = 10

    def format_input(self):
        """Format input parameters for streakline.stream"""
        # 12-element positional argument list, all values in SI units
        p = [None]*12
        # progenitor position
        p[0] = self.setup['x0'].si.value
        p[1] = self.setup['v0'].si.value
        # potential parameters
        p[2] = [x.si.value for x in self.setup['pparams']]
        # stream smoothing offsets
        p[3] = [self.setup['dr'], self.setup['dv'].si.value]
        # potential and integrator choice
        p[4] = self.setup_aux['paux']
        p[5] = self.setup_aux['iaux']
        # number of steps and stream stars
        p[6] = int(self.setup['age']/self.setup['dt'])
        p[7] = int(p[6]/self.setup['nstars'])
        # cluster properties
        p[8] = self.setup['minit'].si.value
        p[9] = self.setup['mfinal'].si.value
        p[10] = self.setup['rcl'].si.value
        # time step
        p[11] = self.setup['dt'].si.value
        return p

    def generate(self):
        """Create streakline model for a stream of set parameters"""
        #xm1, xm2, xm3, xp1, xp2, xp3, vm1, vm2, vm3, vp1, vp2, vp3 = streakline.stream(*p)
        # streakline.stream returns 12 arrays: leading x (0:3), trailing x
        # (3:6), leading v (6:9), trailing v (9:12), all in SI units
        stream = streakline.stream(*self.st_params)
        self.leading = {}
        self.leading['x'] = stream[:3]*u.m
        self.leading['v'] = stream[6:9]*u.m/u.s
        self.trailing = {}
        self.trailing['x'] = stream[3:6]*u.m
        self.trailing['v'] = stream[9:12]*u.m/u.s

    def observe(self, mode='cartesian', wangle=0*u.deg, units=[], errors=[], nstars=-1, sequential=False, present=[], logerr=False, observer={'z_sun': 0.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 0*u.deg, 'galcen_ra': 300*u.deg, 'galcen_dec': 20*u.deg}, vobs={'vcirc': 237.8*u.km/u.s, 'vlsr': [11.1, 12.2, 7.3]*u.km/u.s}, footprint='none', rotmatrix=None):
        """Observe the stream
        stream.obs holds all observations
        stream.err holds all errors"""
        # concatenate leading + trailing tails into single position/velocity arrays
        x = np.concatenate((self.leading['x'].to(u.kpc).value, self.trailing['x'].to(u.kpc).value), axis=1) * u.kpc
        v = np.concatenate((self.leading['v'].to(u.km/u.s).value, self.trailing['v'].to(u.km/u.s).value), axis=1) * u.km/u.s
        if mode=='cartesian':
            # returns coordinates in following order
            # x(x, y, z), v(vx, vy, vz)
            if len(units)<2:
                units.append(self.trailing['x'].unit)
                units.append(self.trailing['v'].unit)
            if len(errors)<2:
                errors.append(0.2*u.kpc)
                errors.append(2*u.km/u.s)
            # positions
            x = x.to(units[0])
            ex = np.ones(np.shape(x))*errors[0]
            ex = ex.to(units[0])
            # velocities
            v = v.to(units[1])
            ev = np.ones(np.shape(v))*errors[1]
            ev = ev.to(units[1])
            self.obs = np.concatenate([x,v]).value
            self.err = np.concatenate([ex,ev]).value
        elif mode=='equatorial':
            # assumes coordinates in the following order:
            # ra, dec, distance, vrad, mualpha, mudelta
            if len(units)!=6:
                units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
            if len(errors)!=6:
                errors = [0.2*u.deg, 0.2*u.deg, 0.5*u.kpc, 1*u.km/u.s, 0.2*u.mas/u.yr, 0.2*u.mas/u.yr]
            # define reference frame
            xgal = coord.Galactocentric(x, **observer)
            #frame = coord.Galactocentric(**observer)
            # convert
            xeq = xgal.transform_to(coord.ICRS)
            veq = gc.vgal_to_hel(xeq, v, **vobs)
            # store coordinates
            ra, dec, dist = [xeq.ra.to(units[0]).wrap_at(wangle), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
            vr, mua, mud = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
            obs = np.hstack([ra, dec, dist, vr, mua, mud]).value
            obs = np.reshape(obs,(6,-1))
            if footprint=='sdss':
                # keep only stars inside the (approximate) SDSS footprint
                infoot = dec > -2.5*u.deg
                obs = obs[:,infoot]
            if np.allclose(rotmatrix, np.eye(3))!=1:
                # rotate sky coordinates into the stream-aligned (xi, eta) frame
                xi, eta = myutils.rotate_angles(obs[0], obs[1], rotmatrix)
                obs[0] = xi
                obs[1] = eta
            self.obs = obs
            # store errors
            err = np.ones(np.shape(self.obs))
            if logerr:
                for i in range(6):
                    err[i] *= np.exp(errors[i].to(units[i]).value)
            else:
                for i in range(6):
                    err[i] *= errors[i].to(units[i]).value
            self.err = err
        self.obsunit = units
        self.obserror = errors
        # randomly select nstars from the stream
        if nstars>-1:
            if sequential:
                select = np.linspace(0, np.shape(self.obs)[1], nstars, endpoint=False, dtype=int)
            else:
                select = np.random.randint(low=0, high=np.shape(self.obs)[1], size=nstars)
            self.obs = self.obs[:,select]
            self.err = self.err[:,select]
        # include only designated dimensions
        if len(present)>0:
            self.obs = self.obs[present]
            self.err = self.err[present]
            self.obsunit = [ self.obsunit[x] for x in present ]
            self.obserror = [ self.obserror[x] for x in present ]

    def prog_orbit(self):
        """Generate progenitor orbital history"""
        # final -1 argument integrates the orbit backwards in time
        orbit = streakline.orbit(self.st_params[0], self.st_params[1], self.st_params[2], self.st_params[4], self.st_params[5], self.st_params[6], self.st_params[11], -1)
        self.orbit = {}
        self.orbit['x'] = orbit[:3]*u.m
        self.orbit['v'] = orbit[3:]*u.m/u.s

    def project(self, name, N=1000, nbatch=-1):
        """Project the stream from observed to native coordinates"""
        # NOTE(review): uses scipy.integrate and self.catalog, neither of
        # which is set up in this class/file as shown -- confirm callers
        # provide them before relying on this method.
        poly = np.loadtxt("../data/{0:s}_all.txt".format(name))
        self.streak = np.poly1d(poly)
        self.streak_x = np.linspace(np.min(self.obs[0])-2, np.max(self.obs[0])+2, N)
        self.streak_y = np.polyval(self.streak, self.streak_x)
        self.streak_b = np.zeros(N)
        self.streak_l = np.zeros(N)
        pdot = np.polyder(poly)
        for i in range(N):
            # arc length along the polynomial track up to streak_x[i]
            length = scipy.integrate.quad(self._delta_path, self.streak_x[0], self.streak_x[i], args=(pdot,))
            self.streak_l[i] = length[0]
        XB = np.transpose(np.vstack([self.streak_x, self.streak_y]))
        n = np.shape(self.obs)[1]
        if nbatch<0:
            nstep = 0
            nbatch = -1
        else:
            # np.int is removed in numpy>=1.24; kept as written here
            nstep = np.int(n/nbatch)
        i1 = 0
        i2 = nbatch
        for i in range(nstep):
            XA = np.transpose(np.vstack([np.array(self.obs[0][i1:i2]), np.array(self.obs[1][i1:i2])]))
            self.emdist(XA, XB, i1=i1, i2=i2)
            i1 += nbatch
            i2 += nbatch
        XA = np.transpose(np.vstack([np.array(self.catalog['ra'][i1:]), np.array(self.catalog['dec'][i1:])]))
        self.emdist(XA, XB, i1=i1, i2=n)
        #self.catalog.write("../data/{0:s}_footprint_catalog.txt".format(self.name), format='ascii.commented_header')

    def emdist(self, XA, XB, i1=0, i2=-1):
        """Store minimum distances from points XA to the track XB in
        self.catalog['b'] (signed by side of the track) and the matching
        arc-length in self.catalog['l'], for catalog rows i1:i2."""
        distances = scipy.spatial.distance.cdist(XA, XB)
        self.catalog['b'][i1:i2] = np.min(distances, axis=1)
        imin = np.argmin(distances, axis=1)
        # negative b for stars below the track
        self.catalog['b'][i1:i2][self.catalog['dec'][i1:i2]<self.streak_y[imin]] *= -1
        self.catalog['l'][i1:i2] = self.streak_l[imin]

    def _delta_path(self, x, pdot):
        """Return integrand for calculating length of a path along a polynomial"""
        return np.sqrt(1 + np.polyval(pdot, x)**2)

    def plot(self, mode='native', fig=None, color='k', **kwargs):
        """Plot stream"""
        # Plotting
        if fig==None:
            plt.close()
            plt.figure()
            ax = plt.axes([0.12,0.1,0.8,0.8])
        if mode=='native':
            # Color setup
            cindices = np.arange(self.setup['nstars']) # colors of stream particles
            nor = mpl.colors.Normalize(vmin=0, vmax=self.setup['nstars']) # colormap normalization
            plt.plot(self.setup['x0'][0].to(u.kpc).value, self.setup['x0'][2].to(u.kpc).value, 'wo', ms=10, mew=2, zorder=3)
            plt.scatter(self.trailing['x'][0].to(u.kpc).value, self.trailing['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='winter', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
            plt.scatter(self.leading['x'][0].to(u.kpc).value, self.leading['x'][2].to(u.kpc).value, s=30, c=cindices, cmap='autumn', norm=nor, marker='o', edgecolor='none', lw=0, alpha=0.1)
            plt.xlabel("X (kpc)")
            plt.ylabel("Z (kpc)")
        elif mode=='observed':
            plt.subplot(221)
            plt.plot(self.obs[0], self.obs[1], 'o', color=color, **kwargs)
            plt.xlabel("RA")
            plt.ylabel("Dec")
            plt.subplot(223)
            plt.plot(self.obs[0], self.obs[2], 'o', color=color, **kwargs)
            plt.xlabel("RA")
            plt.ylabel("Distance")
            plt.subplot(222)
            plt.plot(self.obs[3], self.obs[4], 'o', color=color, **kwargs)
            plt.xlabel("V$_r$")
            plt.ylabel("$\mu\\alpha$")
            plt.subplot(224)
            plt.plot(self.obs[3], self.obs[5], 'o', color=color, **kwargs)
            plt.xlabel("V$_r$")
            plt.ylabel("$\mu\delta$")
        plt.tight_layout()
        #plt.minorticks_on()

    def read(self, fname, units={'x': u.kpc, 'v': u.km/u.s}):
        """Read stream star positions from a file"""
        # file layout: column 0 = progenitor, then ns leading + ns trailing stars
        t = np.loadtxt(fname).T
        n = np.shape(t)[1]
        ns = int((n-1)/2)
        self.setup['nstars'] = ns
        # progenitor
        self.setup['x0'] = t[:3,0] * units['x']
        self.setup['v0'] = t[3:,0] * units['v']
        # leading tail
        self.leading = {}
        self.leading['x'] = t[:3,1:ns+1] * units['x']
        self.leading['v'] = t[3:,1:ns+1] * units['v']
        # trailing tail
        self.trailing = {}
        self.trailing['x'] = t[:3,ns+1:] * units['x']
        self.trailing['v'] = t[3:,ns+1:] * units['v']

    def save(self, fname):
        """Save stream star positions to a file"""
        # define table
        t = Table(names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
        # add progenitor info
        t.add_row(np.ravel([self.setup['x0'].to(u.kpc).value, self.setup['v0'].to(u.km/u.s).value]))
        # add leading tail info
        tt = Table(np.concatenate((self.leading['x'].to(u.kpc).value, self.leading['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
        t = astropy.table.vstack([t,tt])
        # add trailing tail info
        tt = Table(np.concatenate((self.trailing['x'].to(u.kpc).value, self.trailing['v'].to(u.km/u.s).value)).T, names=('x', 'y', 'z', 'vx', 'vy', 'vz'))
        t = astropy.table.vstack([t,tt])
        # save to file
        t.write(fname, format='ascii.commented_header')
# make a streakline model of a stream
# make a streakline model of a stream
def stream_model(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
    """Create a streakline model of a stream
    baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc"""
    # vary progenitor parameters
    mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
    for i in range(3):
        # pparams0[26:32] are progenitor position/velocity offsets
        mock['x0'][i] += pparams0[26+i]
        mock['v0'][i] += pparams0[29+i]
    # vary potential parameters
    potential = 'octu'
    pparams = pparams0[:26]
    #print(pparams[0])
    # entries 0 and 2 are stored as log10 masses -- convert to linear
    pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
    pparams[2] = (10**pparams0[2].value)*pparams0[2].unit
    #pparams[0] = pparams0[0]*1e15
    #pparams[2] = pparams0[2]*1e15
    #print(pparams[0])
    # adjust circular velocity in this halo
    # NOTE(review): mutates the caller-supplied (module-level default) vobs
    # dict in place -- side effect persists across calls.
    vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)
    # create a model stream with these parameters
    params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}
    stream = Stream(**params['generate'])
    stream.generate()
    stream.observe(**params['observe'])
    ################################
    # Plot observed stream and model
    if graph:
        observed = load_stream(name)
        Ndim = np.shape(observed.obs)[0]
        modcol = 'k'
        obscol = 'orange'
        ylabel = ['Dec (deg)', 'Distance (kpc)', 'Radial velocity (km/s)']
        plt.close()
        fig, ax = plt.subplots(1, 3, figsize=(12,4))
        for i in range(3):
            plt.sca(ax[i])
            plt.gca().invert_xaxis()
            plt.xlabel('R.A. (deg)')
            plt.ylabel(ylabel[i])
            plt.plot(observed.obs[0], observed.obs[i+1], 's', color=obscol, mec='none', ms=8, label='Observed stream')
            plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=modcol, mec='none', ms=4, label='Fiducial model')
            if i==0:
                plt.legend(frameon=False, handlelength=0.5, fontsize='small')
        plt.tight_layout()
        if graphsave:
            plt.savefig('../plots/mock_observables_{}_p{}.png'.format(name, potential), dpi=150)
    return stream
def progenitor_params(n):
    """Return progenitor parameters for a given stream

    n -- stream index: -1 GD-1, -2 Pal 5, -3 Triangulum, -4 ATLAS
    Returns a dict with keys 'x0', 'v0', 'age', 'mi' (initial mass),
    'mf' (final mass).  Raises NameError-like failure for other n.
    """
    if n==-1:
        age = 1.6*u.Gyr
        mi = 1e4*u.Msun
        mf = 2e-1*u.Msun
        x0, v0 = gd1_coordinates(observer=mw_observer)
    elif n==-2:
        age = 2.7*u.Gyr
        mi = 1e5*u.Msun
        mf = 2e4*u.Msun
        x0, v0 = pal5_coordinates(observer=mw_observer, vobs=vsun0)
    elif n==-3:
        age = 3.5*u.Gyr
        mi = 5e4*u.Msun
        mf = 2e-1*u.Msun
        x0, v0 = tri_coordinates(observer=mw_observer)
    elif n==-4:
        age = 2*u.Gyr
        mi = 2e4*u.Msun
        mf = 2e-1*u.Msun
        x0, v0 = atlas_coordinates(observer=mw_observer)
    out = {'x0': x0, 'v0': v0, 'age': age, 'mi': mi, 'mf': mf}
    return out
def gal2eq(x, v, observer=mw_observer, vobs=vsun0):
    """Convert galactocentric position x (kpc) and velocity v (km/s) to
    equatorial observables (ra, dec, distance) and (vr, mu_alpha, mu_delta)."""
    # define reference frame
    xgal = coord.Galactocentric(np.array(x)[:,np.newaxis]*u.kpc, **observer)
    # convert
    xeq = xgal.transform_to(coord.ICRS)
    veq = gc.vgal_to_hel(xeq, np.array(v)[:,np.newaxis]*u.km/u.s, **vobs)
    # store coordinates
    units = [u.deg, u.deg, u.kpc, u.km/u.s, u.mas/u.yr, u.mas/u.yr]
    xobs = [xeq.ra.to(units[0]), xeq.dec.to(units[1]), xeq.distance.to(units[2])]
    # NOTE: rebinding 'vobs' here shadows the solar-motion parameter
    vobs = [veq[2].to(units[3]), veq[0].to(units[4]), veq[1].to(units[5])]
    return(xobs, vobs)
def gd1_coordinates(observer=mw_observer):
    """Approximate GD-1 progenitor coordinates"""
    x = coord.SkyCoord(ra=154.377*u.deg, dec=41.5309*u.deg, distance=8.2*u.kpc, **observer)
    x_ = x.galactocentric
    x0 = [x_.x.value, x_.y.value, x_.z.value]
    # rough galactocentric velocity guess, km/s
    v0 = [-90, -250, -120]
    return (x0, v0)
def pal5_coordinates(observer=mw_observer, vobs=vsun0):
    """Pal5 coordinates

    Returns galactocentric position (kpc list) and velocity (km/s list).
    """
    # sdss
    ra = 229.0128*u.deg
    dec = -0.1082*u.deg
    # bob's rrlyrae
    d = 21.7*u.kpc
    # harris
    #d = 23.2*u.kpc
    # odenkirchen 2002
    vr = -58.7*u.km/u.s
    # fritz & kallivayalil 2015
    mua = -2.296*u.mas/u.yr
    mud = -2.257*u.mas/u.yr
    # NOTE: this overrides the RR Lyrae distance above
    d = 24*u.kpc
    x = coord.SkyCoord(ra=ra, dec=dec, distance=d, **observer)
    x0 = x.galactocentric
    v0 = gc.vhel_to_gal(x.icrs, rv=vr, pm=[mua, mud], **vobs).to(u.km/u.s)
    return ([x0.x.value, x0.y.value, x0.z.value], v0.value.tolist())
def tri_coordinates(observer=mw_observer):
    """Approximate Triangulum progenitor coordinates"""
    x = coord.SkyCoord(ra=22.38*u.deg, dec=30.26*u.deg, distance=33*u.kpc, **observer)
    x_ = x.galactocentric
    x0 = [x_.x.value, x_.y.value, x_.z.value]
    # rough galactocentric velocity guess, km/s
    v0 = [-40, 155, 155]
    return (x0, v0)
def atlas_coordinates(observer=mw_observer):
    """Approximate ATLAS progenitor coordinates"""
    x = coord.SkyCoord(ra=20*u.deg, dec=-27*u.deg, distance=20*u.kpc, **observer)
    x_ = x.galactocentric
    x0 = [x_.x.value, x_.y.value, x_.z.value]
    # rough galactocentric velocity guess, km/s
    v0 = [40, 150, -120]
    return (x0, v0)
# great circle orientation
# great circle orientation
def find_greatcircle(stream=None, name='gd1', pparams=pparams_fid, dt=0.2*u.Myr, save=True, graph=True):
    """Find the great-circle frame of a stream and save its rotation matrix.

    Fits a plane through the on-sky track, builds Euler rotations aligning the
    stream with the new equator, then shifts the origin so the track begins
    near xi = 50 deg.

    stream  -- pre-computed Stream model; generated via stream_model when None
    name    -- stream name used for data/plot file paths
    pparams -- parameters forwarded to stream_model
    dt      -- integration time step forwarded to stream_model
    save    -- store R on disk and inside the mock params pickle
    graph   -- plot the track in the equatorial and rotated frames

    Returns the 3x3 rotation matrix R.
    """
    # Fix: 'stream==None' -> 'stream is None' (PEP 8; '==' can broadcast
    # element-wise on array-like objects).
    if stream is None:
        stream = stream_model(name, pparams0=pparams, dt=dt)
    # find the pole: unit vectors of the observed sky positions
    ra = np.radians(stream.obs[0])
    dec = np.radians(stream.obs[1])
    rx = np.cos(ra) * np.cos(dec)
    ry = np.sin(ra) * np.cos(dec)
    rz = np.sin(dec)
    r = np.column_stack((rx, ry, rz))
    # fit the plane through the points (normal vector = stream pole)
    x0 = np.array([0, 1, 0])
    lsq = scipy.optimize.minimize(wfit_plane, x0, args=(r,))
    x0 = lsq.x/np.linalg.norm(lsq.x)
    ra0 = np.arctan2(x0[1], x0[0])
    dec0 = np.arcsin(x0[2])
    ra0 += np.pi
    dec0 = np.pi/2 - dec0
    # euler rotations
    R0 = myutils.rotmatrix(np.degrees(-ra0), 2)
    R1 = myutils.rotmatrix(np.degrees(dec0), 1)
    R2 = myutils.rotmatrix(0, 2)
    R = np.dot(R2, np.matmul(R1, R0))
    xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
    # put xi = 50 at the beginning of the stream
    xi[xi>180] -= 360
    xi += 360
    xi0 = np.min(xi) - 50
    R2 = myutils.rotmatrix(-xi0, 2)
    R = np.dot(R2, np.matmul(R1, R0))
    xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
    if save:
        np.save('../data/rotmatrix_{}'.format(name), R)
        f = open('../data/mock_{}.params'.format(name), 'rb')
        mock = pickle.load(f)
        mock['rotmatrix'] = R
        f.close()
        f = open('../data/mock_{}.params'.format(name), 'wb')
        pickle.dump(mock, f)
        f.close()
    if graph:
        plt.close()
        fig, ax = plt.subplots(1,2,figsize=(10,5))
        plt.sca(ax[0])
        plt.plot(stream.obs[0], stream.obs[1], 'ko')
        plt.xlabel('R.A. (deg)')
        plt.ylabel('Dec (deg)')
        plt.sca(ax[1])
        plt.plot(xi, eta, 'ko')
        plt.xlabel('$\\xi$ (deg)')
        plt.ylabel('$\\eta$ (deg)')
        plt.ylim(-5, 5)
        plt.tight_layout()
        plt.savefig('../plots/gc_orientation_{}.png'.format(name))
    return R
def wfit_plane(x, r, p=None):
    """Weighted cost for fitting a plane through the origin to 3D points.

    x -- candidate plane normal (normalized internally)
    r -- (N, 3) array of points
    p -- optional per-point weights; defaults to uniform weights

    Returns sum_i p_i^2 (x_hat . r_i)^2, minimized when x is the plane normal.
    """
    Np = np.shape(r)[0]
    # Fix: original guard 'if np.any(p)==None' compares a bool to None and
    # never fires, so the default p=None crashed at p[i]; test identity.
    if p is None:
        p = np.ones(Np)
    Q = np.zeros((3,3))
    for i in range(Np):
        Q += p[i]**2 * np.outer(r[i], r[i])
    x = x/np.linalg.norm(x)
    lsq = np.inner(x, np.inner(Q, x))
    return lsq
# observed streams
#def load_stream(n):
#"""Load stream observations"""
#if n==-1:
#observed = load_gd1(present=[0,1,2,3])
#elif n==-2:
#observed = load_pal5(present=[0,1,2,3])
#elif n==-3:
#observed = load_tri(present=[0,1,2,3])
#elif n==-4:
#observed = load_atlas(present=[0,1,2,3])
#return observed
def endpoints(name):
    """Find the RA endpoints of an observed stream, rotate them into the
    stream frame, and store 'ra_range'/'xi_range' in the mock params pickle."""
    stream = load_stream(name)
    # find endpoints
    amin = np.argmin(stream.obs[0])
    amax = np.argmax(stream.obs[0])
    ra = np.array([stream.obs[0][i] for i in [amin, amax]])
    dec = np.array([stream.obs[1][i] for i in [amin, amax]])
    f = open('../data/mock_{}.params'.format(name), 'rb')
    mock = pickle.load(f)
    # rotate endpoints
    R = mock['rotmatrix']
    xi, eta = myutils.rotate_angles(ra, dec, R)
    #xi, eta = myutils.rotate_angles(stream.obs[0], stream.obs[1], R)
    mock['ra_range'] = ra
    mock['xi_range'] = xi #np.percentile(xi, [10,90])
    f.close()
    # rewrite the pickle with the new keys
    f = open('../data/mock_{}.params'.format(name), 'wb')
    pickle.dump(mock, f)
    f.close()
def load_pal5(present, nobs=50, potential='gal'):
    """Load Pal 5 observations into a Stream container.

    present -- list of observed dimensions; its LENGTH selects the dataset:
               2 -> positions only (random subsample with mock distances),
               3 -> ra/dec/d, 4 -> ra/dec/d/vr from the all-members catalog.
    """
    if len(present)==2:
        t = Table.read('../data/pal5_members.txt', format='ascii.commented_header')
        dist = 21.7
        deltadist = 0.7
        # fixed seed so the subsample is reproducible
        np.random.seed(34)
        t = t[np.random.randint(0, high=len(t), size=nobs)]
        nobs = len(t)
        d = np.random.randn(nobs)*deltadist + dist
        obs = np.array([t['ra'], t['dec'], d])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.repeat( np.array([2e-4, 2e-4, 0.7]), nobs ).reshape(3, -1)
        # NOTE(review): err uses 0.7 kpc but obserr records 0.5 kpc -- confirm
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    if len(present)==3:
        #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d']])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    if len(present)==4:
        #t = Table.read('../data/pal5_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/pal5_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    return observed
def load_gd1(present, nobs=50, potential='gal'):
    """Load GD-1 observations into a Stream container.

    present -- list of observed dimensions; its LENGTH selects the dataset:
               3 -> ra/dec with mock distances, 4 -> ra/dec/d/vr catalog.
    """
    if len(present)==3:
        t = Table.read('../data/gd1_members.txt', format='ascii.commented_header')
        dist = 0
        deltadist = 0.5
        # fixed seed so the subsample is reproducible
        np.random.seed(34)
        t = t[np.random.randint(0, high=len(t), size=nobs)]
        nobs = len(t)
        d = np.random.randn(nobs)*deltadist + dist
        # linear distance gradient with galactic longitude
        d += t['l']*0.04836 + 9.86
        obs = np.array([t['ra'], t['dec'], d])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.repeat( np.array([2e-4, 2e-4, 0.5]), nobs ).reshape(3, -1)
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    if len(present)==4:
        #t = Table.read('../data/gd1_kinematic.txt', format='ascii.commented_header')
        t = Table.read('../data/gd1_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    # mask of rows with no missing values (currently unused)
    ind = np.all(obs!=MASK, axis=0)
    observed = Stream(potential=potential)
    observed.obs = obs#[np.array(present)]
    observed.obsunit = obsunit
    observed.err = err#[np.array(present)]
    observed.obserror = obserr
    return observed
def load_tri(present, nobs=50, potential='gal'):
    """Load Triangulum observations into a Stream container.

    present -- list of observed dimensions; its LENGTH selects the columns
               read from the all-members catalog (3 or 4).
    """
    if len(present)==4:
        t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d'], t['vr']])
        obsunit = [u.deg, u.deg, u.kpc, u.km/u.s]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d'], t['err_vr']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    if len(present)==3:
        t = Table.read('../data/tri_allmembers.txt', format='ascii.commented_header')
        obs = np.array([t['ra'], t['dec'], t['d']])
        obsunit = [u.deg, u.deg, u.kpc]
        err = np.array([t['err_ra'], t['err_dec'], t['err_d']])
        obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc]
    # mask of rows with no missing values (currently unused)
    ind = np.all(obs!=MASK, axis=0)
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    return observed
def load_atlas(present, nobs=50, potential='gal'):
    """Load the ATLAS stream track (with mock distances) into a Stream
    container.  'present' and 'nobs' are accepted for interface parity with
    the other load_* functions but are not used here."""
    ra, dec = atlas_track()
    n = np.size(ra)
    # mock distances: 20 kpc with 2 kpc scatter
    d = np.random.randn(n)*2 + 20
    obs = np.array([ra, dec, d])
    obsunit = [u.deg, u.deg, u.kpc]
    err = np.array([np.ones(n)*0.05, np.ones(n)*0.05, np.ones(n)*2])
    obserr = [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s]
    observed = Stream(potential=potential)
    observed.obs = obs
    observed.obsunit = obsunit
    observed.err = err
    observed.obserror = obserr
    return observed
def atlas_track():
    """Return (ra, dec) in degrees along the ATLAS stream great-circle track,
    restricted to 17 < ra < 30."""
    ra0, dec0 = np.radians(77.16), np.radians(46.92 - 90)
    # euler rotations
    D = np.array([[np.cos(ra0), np.sin(ra0), 0], [-np.sin(ra0), np.cos(ra0), 0], [0, 0, 1]])
    C = np.array([[np.cos(dec0), 0, np.sin(dec0)], [0, 1, 0], [-np.sin(dec0), 0, np.cos(dec0)]])
    B = np.diag(np.ones(3))
    R = np.dot(B, np.dot(C, D))
    Rinv = np.linalg.inv(R)
    # sample the native great circle (b=0) and rotate back to equatorial
    l0 = np.linspace(0, 2*np.pi, 500)
    b0 = np.zeros(500)
    xeq, yeq, zeq = myutils.eq2car(l0, b0)
    eq = np.column_stack((xeq, yeq, zeq))
    eq_rot = np.zeros(np.shape(eq))
    for i in range(np.size(l0)):
        eq_rot[i] = np.dot(Rinv, eq[i])
    l0_rot, b0_rot = myutils.car2eq(eq_rot[:, 0], eq_rot[:, 1], eq_rot[:, 2])
    ra_s, dec_s = np.degrees(l0_rot), np.degrees(b0_rot)
    # keep only the observed RA range of the stream
    ind_s = (ra_s>17) & (ra_s<30)
    ra_s = ra_s[ind_s]
    dec_s = dec_s[ind_s]
    return (ra_s, dec_s)
def fancy_name(n):
    """Return nicely formatted stream name for stream index *n*.

    Raises KeyError for unknown indices.
    """
    lookup = {
        -1: 'GD-1',
        -2: 'Palomar 5',
        -3: 'Triangulum',
        -4: 'ATLAS',
    }
    return lookup[n]
# model parameters
# model parameters
def get_varied_pars(vary):
    """Return indices and steps for a preset of varied parameters, plus a
    label for the combination.

    vary -- parameter-group name ('potential', 'progenitor', 'halo', ...)
            or a list of such names; group lookups are delegated to
            get_varied_bytype.
    """
    if type(vary) is not list:
        vary = [vary]
    vlabel = '_'.join(vary)
    pid = []
    dp = []
    for group in vary:
        ids, steps = get_varied_bytype(group)
        pid.extend(ids)
        dp.extend(steps)
    return (pid, dp, vlabel)
def get_varied_bytype(vary):
    """Get varied parameter of a particular type

    vary -- group name; returns (pid, dp) = (parameter indices into
    pparams_fid, step sizes).  Unknown names return empty lists.
    """
    if vary=='potential':
        pid = [5,6,8,10,11]
        dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1), 0.4e11*u.Msun]
    elif vary=='bary':
        pid = [0,1,2,3,4]
        # gd1
        dp = [1e-1*u.Msun, 0.005*u.kpc, 1e-1*u.Msun, 0.002*u.kpc, 0.002*u.kpc]
        ## atlas & triangulum
        #dp = [0.4e5*u.Msun, 0.0005*u.kpc, 0.5e6*u.Msun, 0.0002*u.kpc, 0.002*u.kpc]
        # pal5
        # NOTE: the following reassignments override the values above; only
        # the last dp list is ever returned (kept as-is deliberately)
        dp = [1e-2*u.Msun, 0.000005*u.kpc, 1e-2*u.Msun, 0.000002*u.kpc, 0.00002*u.kpc]
        dp = [1e-7*u.Msun, 0.5*u.kpc, 1e-7*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
        dp = [1e-2*u.Msun, 0.5*u.kpc, 1e-2*u.Msun, 0.5*u.kpc, 0.5*u.kpc]
    elif vary=='halo':
        pid = [5,6,8,10]
        # NOTE: second assignment overrides the first (kept as-is)
        dp = [20*u.km/u.s, 2*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
        dp = [35*u.km/u.s, 2.9*u.kpc, 0.05*u.Unit(1), 0.05*u.Unit(1)]
    elif vary=='progenitor':
        pid = [26,27,28,29,30,31]
        dp = [1*u.deg, 1*u.deg, 0.5*u.kpc, 20*u.km/u.s, 0.3*u.mas/u.yr, 0.3*u.mas/u.yr]
    elif vary=='dipole':
        pid = [11,12,13]
        #dp = [1e-11*u.Unit(1), 1e-11*u.Unit(1), 1e-11*u.Unit(1)]
        dp = [0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2, 0.05*u.pc/u.Myr**2]
    elif vary=='quad':
        pid = [14,15,16,17,18]
        dp = [0.5*u.Gyr**-2 for x in range(5)]
    elif vary=='octu':
        pid = [19,20,21,22,23,24,25]
        dp = [0.001*u.Gyr**-2*u.kpc**-1 for x in range(7)]
    else:
        pid = []
        dp = []
    return (pid, dp)
def get_parlabel(pid):
    """Return label for a list of parameter ids

    Parameter:
    pid - list of parameter ids (or a single integer id)"""
    master = ['log $M_b$', '$a_b$', 'log $M_d$', '$a_d$', '$b_d$', '$V_h$', '$R_h$', '$\phi$', '$q_x$', '$q_y$', '$q_z$', '$a_{1,-1}$', '$a_{1,0}$', '$a_{1,1}$', '$a_{2,-2}$', '$a_{2,-1}$', '$a_{2,0}$', '$a_{2,1}$', '$a_{2,2}$', '$a_{3,-3}$', '$a_{3,-2}$', '$a_{3,-1}$', '$a_{3,0}$', '$a_{3,1}$', '$a_{3,2}$', '$a_{3,3}$', '$RA_p$', '$Dec_p$', '$d_p$', '$V_{r_p}$', '$\mu_{\\alpha_p}$', '$\mu_{\delta_p}$', ]
    master_units = ['dex', 'kpc', 'dex', 'kpc', 'kpc', 'km/s', 'kpc', 'rad', '', '', '', 'pc/Myr$^2$', 'pc/Myr$^2$', 'pc/Myr$^2$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'Gyr$^{-2}$ kpc$^{-1}$', 'deg', 'deg', 'kpc', 'km/s', 'mas/yr', 'mas/yr', ]
    if type(pid) is list:
        # vectorized lookup for a list of ids
        labels = [master[i] for i in pid]
        units = [master_units[i] for i in pid]
    else:
        # scalar lookup
        labels = master[pid]
        units = master_units[pid]
    return (labels, units)
def get_steps(Nstep=50, log=False):
    """Return deltax steps in both directions

    Parameters:
    Nstep - number of steps in one direction (default: 50)
    log - if True, steps are logarithmically spaced (default: False)
    Returns:
    (Nstep, step) where step has 2*Nstep entries, antisymmetric about zero"""
    half = np.logspace(-10, 1, Nstep) if log else np.linspace(0.1, 10, Nstep)
    # mirror the positive steps to negative values, largest-negative first
    return (Nstep, np.concatenate([-half[::-1], half]))
def lmc_position():
    """Print the Galactocentric cartesian position of the LMC (no return value)."""
    ra = 80.8939*u.deg
    dec = -69.7561*u.deg
    dm = 18.48  # distance modulus
    # distance in pc from the distance modulus: d = 10^(1 + dm/5)
    d = 10**(1 + dm/5)*u.pc
    x = coord.SkyCoord(ra=ra, dec=dec, distance=d)
    xgal = [x.galactocentric.x.si, x.galactocentric.y.si, x.galactocentric.z.si]
    print(xgal)
def lmc_properties():
    """Return the LMC mass and Galactocentric position (Penarrubia et al. 2016)."""
    # penarrubia 2016
    mass = 2.5e11*u.Msun
    dm = 18.48  # distance modulus
    dist = 10**(1 + dm/5)*u.pc
    c1 = coord.SkyCoord(ra=80.8939*u.deg, dec=-69.7561*u.deg, distance=dist)
    cgal1 = c1.transform_to(coord.Galactocentric)
    components = [cgal1.x.to(u.kpc).value, cgal1.y.to(u.kpc).value, cgal1.z.to(u.kpc).value]
    xgal = np.array(components)*u.kpc
    return (mass, xgal)
# fit bspline to a stream model
def fit_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
    """Fit bspline to a stream model and save to file

    Parameters:
    n - stream id (see fancy_name)
    pparams - model parameters passed to stream_model (default: pparams_fid)
    dt - orbit integration time step (default: 0.2 Myr)
    align - if True, rotate into the stream-aligned frame stored on disk
    save - if non-empty, filename stem under ../data/ for the saved spline fits
    graph - if True, plot the model and the fits
    graphsave - if non-empty, filename stem under ../plots/ for the figure
    fiducial - if True, overplot and compare against the fiducial model
    """
    Ndim = 6
    fits = [None]*(Ndim-1)  # one spline per observable (all but the abscissa)
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
    Nobs = 10  # number of interior spline knots
    k = 3      # cubic splines
    isort = np.argsort(stream.obs[0])
    ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
    # clamped knot vector: end points repeated k+1 times
    t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
    for j in range(Ndim-1):
        fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
    if len(save)>0:
        np.savez('../data/{:s}'.format(save), fits=fits)
    if graph:
        xlims, ylims = get_stream_limits(n, align)
        ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
        if align:
            ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
        if fiducial:
            # fiducial model fitted with its own knot vector
            stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
            fidsort = np.argsort(stream_fid.obs[0])
            ra = np.linspace(np.min(stream_fid.obs[0])*1.05, np.max(stream_fid.obs[0])*0.95, Nobs)
            tfid = np.r_[(stream_fid.obs[0][fidsort][0],)*(k+1), ra, (stream_fid.obs[0][fidsort][-1],)*(k+1)]
            llabel = 'b-spline fit'
        else:
            llabel = ''
        plt.close()
        fig, ax = plt.subplots(2,5,figsize=(20,5), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
        for i in range(Ndim-1):
            # top row: model points with spline fit(s)
            plt.sca(ax[0][i])
            plt.plot(stream.obs[0], stream.obs[i+1], 'ko')
            plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]), 'r-', lw=2, label=llabel)
            if fiducial:
                fits_fid = scipy.interpolate.make_lsq_spline(stream_fid.obs[0][fidsort], stream_fid.obs[i+1][fidsort], tfid, k=k)
                plt.plot(stream_fid.obs[0], stream_fid.obs[i+1], 'wo', mec='k', alpha=0.1)
                plt.plot(stream_fid.obs[0][fidsort], fits_fid(stream_fid.obs[0][fidsort]), 'b-', lw=2, label='Fiducial')
            plt.ylabel(ylabel[i+1])
            plt.xlim(xlims[0], xlims[1])
            plt.ylim(ylims[i][0], ylims[i][1])
            # bottom row: residuals from the reference spline (fiducial if given)
            plt.sca(ax[1][i])
            if fiducial:
                yref = fits_fid(stream.obs[0])
                ycolor = 'b'
            else:
                yref = fits[i](stream.obs[0])
                ycolor = 'r'
            plt.axhline(0, color=ycolor, lw=2)
            if fiducial: plt.plot(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], 'wo', mec='k', alpha=0.1)
            plt.plot(stream.obs[0], stream.obs[i+1] - yref, 'ko')
            if fiducial:
                # spline through the model-minus-fiducial residuals
                fits_diff = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[i+1][isort] - stream_fid.obs[i+1][fidsort], t, k=k)
                plt.plot(stream.obs[0][isort], fits_diff(stream.obs[0][isort]), 'r--')
                plt.plot(stream.obs[0][isort], fits[i](stream.obs[0][isort]) - yref[isort], 'r-', lw=2, label=llabel)
            plt.xlabel(ylabel[0])
            plt.ylabel('$\Delta$ {}'.format(ylabel[i+1].split(' ')[0]))
        if fiducial:
            plt.sca(ax[0][Ndim-2])
            plt.legend(fontsize='small')
        plt.tight_layout()
        if len(graphsave)>0:
            plt.savefig('../plots/{:s}.png'.format(graphsave))
def fitbyt_bspline(n, pparams=pparams_fid, dt=0.2*u.Myr, align=False, save='', graph=False, graphsave='', fiducial=False):
    """Fit each tail individually

    Parameters mirror fit_bspline; plots observables against sample index,
    splitting the sample in half (leading/trailing tail).
    Returns the figure only when graph is True and graphsave is empty.
    """
    Ndim = 6
    fits = [None]*(Ndim-1)  # one spline per observable
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
    Nobs = 10  # number of interior spline knots
    k = 3      # cubic splines
    isort = np.argsort(stream.obs[0])
    ra = np.linspace(np.min(stream.obs[0])*1.05, np.max(stream.obs[0])*0.95, Nobs)
    # clamped knot vector: end points repeated k+1 times
    t = np.r_[(stream.obs[0][isort][0],)*(k+1), ra, (stream.obs[0][isort][-1],)*(k+1)]
    for j in range(Ndim-1):
        fits[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][isort], stream.obs[j+1][isort], t, k=k)
    if len(save)>0:
        np.savez('../data/{:s}'.format(save), fits=fits)
    if graph:
        xlims, ylims = get_stream_limits(n, align)
        ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
        if align:
            ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
        if fiducial:
            stream_fid = stream_model(n, pparams0=pparams_fid, dt=dt, rotmatrix=rotmatrix)
        plt.close()
        fig, ax = plt.subplots(2,Ndim,figsize=(20,4), sharex=True, gridspec_kw = {'height_ratios':[3, 1]})
        for i in range(Ndim):
            # top row: each tail (first/second half of samples) separately
            plt.sca(ax[0][i])
            Nhalf = int(0.5*np.size(stream.obs[i]))
            plt.plot(stream.obs[i][:Nhalf], 'o')
            plt.plot(stream.obs[i][Nhalf:], 'o')
            if fiducial:
                plt.plot(stream_fid.obs[i][:Nhalf], 'wo', mec='k', mew=0.2, alpha=0.5)
                plt.plot(stream_fid.obs[i][Nhalf:], 'wo', mec='k', mew=0.2, alpha=0.5)
            plt.ylabel(ylabel[i])
            # bottom row: per-tail difference from the fiducial model
            plt.sca(ax[1][i])
            if fiducial:
                plt.plot(stream.obs[i][:Nhalf] - stream_fid.obs[i][:Nhalf], 'o')
                plt.plot(stream.obs[i][Nhalf:] - stream_fid.obs[i][Nhalf:], 'o')
        if fiducial:
            plt.sca(ax[0][Ndim-1])
            plt.legend(fontsize='small')
        plt.tight_layout()
        if len(graphsave)>0:
            plt.savefig('../plots/{:s}.png'.format(graphsave))
        else:
            # NOTE(review): figure is only returned when not saved to disk
            return fig
def get_stream_limits(n, align=False):
    """Return lists with limiting values in different dimensions

    Parameters:
    n - stream id: -1 (GD-1), -2 (Pal 5), -3 (Triangulum), -4 (ATLAS)
    align - if True, replace the first y-limit and the x-limits with
            stream-aligned-frame values
    Returns:
    (xlims, ylims) - x-axis limits, and per-observable y-axis limits
    Raises:
    ValueError - for an unrecognized stream id (the original code raised an
                 opaque UnboundLocalError in that case)
    """
    if n==-1:
        xlims = [260, 100]
        ylims = [[-20, 70], [5, 15], [-400, 400], [-15,5], [-15, 5]]
    elif n==-2:
        xlims = [250, 210]
        ylims = [[-20, 15], [17, 27], [-80, -20], [-5,0], [-5, 0]]
    elif n==-3:
        xlims = [27, 17]
        ylims = [[10, 50], [34, 36], [-175, -50], [0.45, 1], [0.1, 0.7]]
    elif n==-4:
        xlims = [35, 10]
        ylims = [[-40, -20], [15, 25], [50, 200], [-0.5,0.5], [-1.5, -0.5]]
    else:
        raise ValueError('unknown stream id: {}'.format(n))
    if align:
        ylims[0] = [-5, 5]
        # per-stream upper x-limit in the aligned frame, indexed by |n|-1
        xup = [110, 110, 80, 80]
        xlims = [xup[np.abs(n)-1], 40]
    return (xlims, ylims)
# step sizes for derivatives
def iterate_steps(n):
    """Calculate derivatives for different parameter classes, and plot"""
    for parclass in ('bary', 'halo', 'progenitor'):
        print(n, parclass)
        step_convergence(n, Nstep=10, vary=parclass)
        choose_step(n, Nstep=10, vary=parclass)
def iterate_plotsteps(n):
    """Plot stream models for a variety of model parameters"""
    for parclass in ('bary', 'halo', 'progenitor'):
        print(n, parclass)
        pid, dp, vlabel = get_varied_pars(parclass)
        # one plot per varied parameter in this class
        for idx in range(len(pid)):
            plot_steps(n, p=idx, Nstep=5, vary=parclass, log=False)
def plot_steps(n, p=0, Nstep=20, log=True, dt=0.2*u.Myr, vary='halo', verbose=False, align=True, observer=mw_observer, vobs=vsun):
    """Plot stream for different values of a potential parameter

    Parameters:
    n - stream id
    p - index into the varied-parameter list selected by vary (default: 0)
    Nstep - number of steps on each side of the fiducial value
    log - logarithmic step spacing if True
    dt - orbit integration time step
    vary - parameter class to vary (see get_varied_pars)
    verbose - not used in the body (kept for API compatibility)
    align - rotate into the stream-aligned frame stored on disk
    observer, vobs - observer position/velocity forwarded to stream_model
    Saves the figure to ../plots/observable_steps_*.png.
    """
    if align:
        rotmatrix = np.load('../data/rotmatrix_{}.npy'.format(n))
    else:
        rotmatrix = None
    pparams0 = pparams_fid
    pid, dp, vlabel = get_varied_pars(vary)
    plabel, punit = get_parlabel(pid[p])
    Nstep, step = get_steps(Nstep=Nstep, log=log)
    plt.close()
    fig, ax = plt.subplots(5,5,figsize=(20,10), sharex=True, gridspec_kw = {'height_ratios':[3, 1, 1, 1, 1]})
    # fiducial model
    stream0 = stream_model(n, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix, observer=observer, vobs=vobs)
    Nobs = 10  # number of interior spline knots
    k = 3      # cubic splines
    isort = np.argsort(stream0.obs[0])
    ra = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, Nobs)
    # clamped knot vector: end points repeated k+1 times
    t = np.r_[(stream0.obs[0][isort][0],)*(k+1), ra, (stream0.obs[0][isort][-1],)*(k+1)]
    fits = [None]*5
    for j in range(5):
        fits[j] = scipy.interpolate.make_lsq_spline(stream0.obs[0][isort], stream0.obs[j+1][isort], t, k=k)
    # excursions: re-run the model at each perturbed parameter value
    stream_fits = [[None] * 5 for x in range(2 * Nstep)]
    for i, s in enumerate(step[:]):
        pparams = [x for x in pparams0]
        pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
        stream = stream_model(n, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
        color = mpl.cm.RdBu(i/(2*Nstep-1))
        #print(i, dp[p], pparams)
        # fits
        iexsort = np.argsort(stream.obs[0])
        raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
        tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
        fits_ex = [None]*5
        for j in range(5):
            fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
            stream_fits[i][j] = fits_ex[j]
            # row 0: observables; row 1: residual from fiducial spline;
            # row 2: spline-minus-fiducial-spline; row 3: one-sided derivative
            plt.sca(ax[0][j])
            plt.plot(stream.obs[0], stream.obs[j+1], 'o', color=color, ms=2)
            plt.sca(ax[1][j])
            plt.plot(stream.obs[0], stream.obs[j+1] - fits[j](stream.obs[0]), 'o', color=color, ms=2)
            plt.sca(ax[2][j])
            plt.plot(stream.obs[0], fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]), 'o', color=color, ms=2)
            plt.sca(ax[3][j])
            plt.plot(stream.obs[0], (fits_ex[j](stream.obs[0]) - fits[j](stream.obs[0]))/(s*dp[p]), 'o', color=color, ms=2)
    # symmetric derivatives (row 4): pair step +s (index i) with -s (index -i-1)
    ra_der = np.linspace(np.min(stream0.obs[0])*1.05, np.max(stream0.obs[0])*0.95, 100)
    for i in range(Nstep):
        color = mpl.cm.Greys_r(i/Nstep)
        for j in range(5):
            dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
            dydx = -dy / np.abs(2*step[i]*dp[p])
            plt.sca(ax[4][j])
            plt.plot(ra_der, dydx, '-', color=color, lw=2, zorder=Nstep-i)
    # labels, limits
    xlims, ylims = get_stream_limits(n, align)
    ylabel = ['R.A. (deg)', 'Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas/yr)', '$\mu_\delta$ (mas/yr)']
    if align:
        ylabel[:2] = ['$\\xi$ (deg)', '$\\eta$ (deg)']
    for j in range(5):
        plt.sca(ax[0][j])
        plt.ylabel(ylabel[j+1])
        plt.xlim(xlims[0], xlims[1])
        plt.ylim(ylims[j][0], ylims[j][1])
        plt.sca(ax[1][j])
        plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
        plt.sca(ax[2][j])
        plt.ylabel('$\Delta$ {}'.format(ylabel[j+1].split(' ')[0]))
        plt.sca(ax[3][j])
        plt.ylabel('$\Delta${}/$\Delta${}'.format(ylabel[j+1].split(' ')[0], plabel))
        plt.sca(ax[4][j])
        plt.xlabel(ylabel[0])
        plt.ylabel('$\langle$$\Delta${}/$\Delta${}$\\rangle$'.format(ylabel[j+1].split(' ')[0], plabel))
    #plt.suptitle('Varying {}'.format(plabel), fontsize='small')
    plt.tight_layout()
    plt.savefig('../plots/observable_steps_{:d}_{:s}_p{:d}_Ns{:d}.png'.format(n, vlabel, p, Nstep))
def step_convergence(name='gd1', Nstep=20, log=True, layer=1, dt=0.2*u.Myr, vary='halo', align=True, graph=False, verbose=False, Nobs=10, k=3, ra_der=np.nan, Nra=50):
    """Check deviations in numerical derivatives for consecutive step sizes

    Parameters:
    name - stream name, used to locate ../data/mock_<name>.params
    Nstep - number of steps on each side of the fiducial value
    log - logarithmic step spacing if True
    layer - number of neighboring steps compared on each side
    dt - orbit integration time step
    vary - parameter class to vary (see get_varied_pars)
    align - use the stream-aligned frame from the mock file if True
    graph - if True, plot deviation vs step size
    verbose - print progress
    Nobs - number of interior spline knots
    k - spline degree
    ra_der - abscissa for derivative evaluation (default: auto grid)
    Nra - number of points of the auto grid
    Saves derivatives and their step-to-step deviations to
    ../data/step_convergence_*.
    """
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = mock['xi_range']
    else:
        rotmatrix = np.eye(3)
        xmm = mock['ra_range']
    # fiducial model
    pparams0 = pparams_fid
    stream0 = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
    if np.any(~np.isfinite(ra_der)):
        ra_der = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nra)
    Nra = np.size(ra_der)
    # parameters to vary
    pid, dp, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dpvec = np.array([x.value for x in dp])
    Nstep, step = get_steps(Nstep=Nstep, log=log)
    dydx_all = np.empty((Np, Nstep, 5, Nra))
    dev_der = np.empty((Np, Nstep-2*layer))
    step_der = np.empty((Np, Nstep-2*layer))
    for p in range(Np):
        plabel = get_parlabel(pid[p])
        if verbose: print(p, plabel)
        # excursions: model at each perturbed parameter value
        stream_fits = [[None] * 5 for x in range(2 * Nstep)]
        for i, s in enumerate(step[:]):
            if verbose: print(i, s)
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            # fits
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            fits_ex = [None]*5
            for j in range(5):
                fits_ex[j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
                stream_fits[i][j] = fits_ex[j]
        # symmetric derivatives: pair step +s (index i) with -s (index -i-1)
        dydx = np.empty((Nstep, 5, Nra))
        for i in range(Nstep):
            color = mpl.cm.Greys_r(i/Nstep)  # NOTE(review): unused in this loop
            for j in range(5):
                dy = stream_fits[i][j](ra_der) - stream_fits[-i-1][j](ra_der)
                dydx[i][j] = -dy / np.abs(2*step[i]*dp[p])
        dydx_all[p] = dydx
        # deviations from adjacent steps (summed squared differences over
        # `layer` neighbors on each side)
        step_der[p] = -step[layer:Nstep-layer] * dp[p]
        for i in range(layer, Nstep-layer):
            dev_der[p][i-layer] = 0
            for j in range(5):
                for l in range(layer):
                    dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i-l-1][j])**2)
                    dev_der[p][i-layer] += np.sum((dydx[i][j] - dydx[i+l+1][j])**2)
    np.savez('../data/step_convergence_{}_{}_Ns{}_log{}_l{}'.format(name, vlabel, Nstep, log, layer), step=step_der, dev=dev_der, ders=dydx_all, steps_all=np.outer(dpvec,step[Nstep:]))
    if graph:
        plt.close()
        fig, ax = plt.subplots(1,Np,figsize=(4*Np,4))
        for p in range(Np):
            plt.sca(ax[p])
            plt.plot(step_der[p], dev_der[p], 'ko')
            #plabel = get_parlabel(pid[p])
            #plt.xlabel('$\Delta$ {}'.format(plabel))
            plt.ylabel('D')
            plt.gca().set_yscale('log')
        plt.tight_layout()
        plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def choose_step(name='gd1', tolerance=2, Nstep=20, log=True, layer=1, vary='halo'):
    """Pick the optimal derivative step size for each varied parameter.

    Reads the output of step_convergence, selects for each parameter the
    smallest step whose derivative deviation is within `tolerance` times the
    minimum deviation, plots diagnostics, and saves the chosen steps to
    ../data/optimal_step_<name>_<vlabel>.npy (read by read_optimal_step).
    """
    pid, dp, vlabel = get_varied_pars(vary)
    Np = len(pid)
    plabels, units = get_parlabel(pid)
    punits = ['({})'.format(x) if len(x) else '' for x in units]
    t = np.load('../data/step_convergence_{}_{}_Ns{}_log{}_l{}.npz'.format(name, vlabel, Nstep, log, layer))
    dev = t['dev']
    step = t['step']
    dydx = t['ders']
    steps_all = t['steps_all'][:,::-1]
    Nra = np.shape(dydx)[-1]
    best = np.empty(Np)
    # plot setup
    da = 4
    nrow = 2
    ncol = Np
    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(da*ncol, da*1.3), squeeze=False, sharex='col', gridspec_kw = {'height_ratios':[1.2, 3]})
    for p in range(Np):
        # choose step: smallest step within `tolerance` of the minimum deviation
        dmin = np.min(dev[p])
        dtol = tolerance * dmin
        opt_step = np.min(step[p][dev[p]<dtol])
        opt_id = step[p]==opt_step
        best[p] = opt_step
        ## largest step w deviation smaller than 1e-4
        #opt_step = np.max(step[p][dev[p]<1e-4])
        #opt_id = step[p]==opt_step
        #best[p] = opt_step
        # top panel: derivatives vs step size (tanh-compressed for display)
        plt.sca(ax[0][p])
        for i in range(5):
            for j in range(10):
                plt.plot(steps_all[p], np.tanh(dydx[p,:,i,np.int64(j*Nra/10)]), '-', color='{}'.format(i/5), lw=0.5, alpha=0.5)
        plt.axvline(opt_step, ls='-', color='r', lw=2)
        plt.ylim(-1,1)
        plt.ylabel('Derivative')
        plt.title('{}'.format(plabels[p])+'$_{best}$ = '+'{:2.2g}'.format(opt_step), fontsize='small')
        # bottom panel: deviation vs step size with chosen step highlighted
        plt.sca(ax[1][p])
        plt.plot(step[p], dev[p], 'ko')
        plt.axvline(opt_step, ls='-', color='r', lw=2)
        plt.plot(step[p][opt_id], dev[p][opt_id], 'ro')
        plt.axhline(dtol, ls='-', color='orange', lw=1)
        y0, y1 = plt.gca().get_ylim()
        plt.axhspan(y0, dtol, color='orange', alpha=0.3, zorder=0)
        plt.gca().set_yscale('log')
        plt.gca().set_xscale('log')
        plt.xlabel('$\Delta$ {} {}'.format(plabels[p], punits[p]))
        plt.ylabel('Derivative deviation')
    np.save('../data/optimal_step_{}_{}'.format(name, vlabel), best)
    plt.tight_layout(h_pad=0)
    plt.savefig('../plots/step_convergence_{}_{}_Ns{}_log{}_l{}.png'.format(name, vlabel, Nstep, log, layer))
def read_optimal_step(name, vary, equal=False):
    """Return optimal steps for a range of parameter types

    Parameters:
    name - stream name
    vary - parameter class or list of classes (see get_varied_pars)
    equal - if True, ignore the stored steps and return a fixed preset array"""
    if type(vary) is not list:
        vary = [vary]
    # stack the per-class optimal steps saved by choose_step
    stored = [np.load('../data/optimal_step_{}_{}.npy'.format(name, parclass)) for parclass in vary]
    dp = np.concatenate([np.empty(0)] + stored)
    if equal:
        dp = np.array([0.05, 0.05, 0.2, 1, 0.01, 0.01, 0.05, 0.1, 0.05, 0.1, 0.1, 10, 1, 0.01, 0.01])
    return dp
def visualize_optimal_steps(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, dt=0.2*u.Myr, Nobs=50, k=3):
    """Plot the change in the stream centerline for excursions of each varied
    parameter by (roughly) its optimal derivative step.

    Parameters:
    name - stream name
    vary - parameter classes (see get_varied_pars)
    align - use the stream-aligned frame from the mock file if True
    dt - orbit integration time step
    Nobs - number of interior spline knots
    k - spline degree
    Saves the figure to ../plots/<name>_optimal_steps.png.
    """
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = mock['xi_range']
    else:
        rotmatrix = np.eye(3)
        xmm = mock['ra_range']
    # varied parameters
    pparams0 = pparams_fid
    pid, dp_fid, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dp_opt = read_optimal_step(name, vary)
    # attach the fiducial units to the stored (unitless) optimal steps
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    fiducial = stream_model(name=name, pparams0=pparams0, dt=dt, rotmatrix=rotmatrix)
    iexsort = np.argsort(fiducial.obs[0])
    raex = np.linspace(np.percentile(fiducial.obs[0], 10), np.percentile(fiducial.obs[0], 90), Nobs)
    tex = np.r_[(fiducial.obs[0][iexsort][0],)*(k+1), raex, (fiducial.obs[0][iexsort][-1],)*(k+1)]
    fit = scipy.interpolate.make_lsq_spline(fiducial.obs[0][iexsort], fiducial.obs[1][iexsort], tex, k=k)
    nrow = 2
    ncol = np.int64((Np+1)/nrow)
    da = 4
    # blue for negative excursions, red for positive
    c = ['b', 'b', 'b', 'r', 'r', 'r']
    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(ncol*da, nrow*da), squeeze=False)
    for p in range(Np):
        plt.sca(ax[p%2][int(p/2)])
        for i, s in enumerate([-1.1, -1, -0.9, 0.9, 1, 1.1]):
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            # bspline fits to stream centerline
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            fitex = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[1][iexsort], tex, k=k)
            plt.plot(raex, fitex(raex) - fit(raex), '-', color=c[i])
        plt.xlabel('R.A. (deg)')
        plt.ylabel('Dec (deg)')
        #print(get_parlabel(p))
        plt.title('$\Delta$ {} = {:.2g}'.format(get_parlabel(p)[0], dp[p]), fontsize='medium')
    plt.tight_layout()
    plt.savefig('../plots/{}_optimal_steps.png'.format(name), dpi=200)
# observing modes
def define_obsmodes():
    """Output a pickled dictionary with typical uncertainties and dimensionality of data for a number of observing modes

    sig_obs entries are per-observable uncertainties; Ndim lists the data
    dimensionalities available in each mode.
    Fix: the output file is now opened in a context manager, so the handle is
    closed deterministically (the original left the file object open).
    """
    obsmodes = {}
    obsmodes['fiducial'] = {'sig_obs': np.array([0.1, 2, 5, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['binospec'] = {'sig_obs': np.array([0.1, 2, 10, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['hectochelle'] = {'sig_obs': np.array([0.1, 2, 1, 0.1, 0.1]), 'Ndim': [3,4,6]}
    obsmodes['desi'] = {'sig_obs': np.array([0.1, 2, 10, np.nan, np.nan]), 'Ndim': [4,]}
    obsmodes['gaia'] = {'sig_obs': np.array([0.1, 0.2, 10, 0.2, 0.2]), 'Ndim': [6,]}
    obsmodes['exgal'] = {'sig_obs': np.array([0.5, np.nan, 20, np.nan, np.nan]), 'Ndim': [3,]}
    with open('../data/observing_modes.info', 'wb') as f:
        pickle.dump(obsmodes, f)
def obsmode_name(mode):
    """Return full name of the observing mode

    Parameters:
    mode - observing-mode key or list of keys
    Returns:
    list of display names; unknown keys pass through unchanged"""
    if type(mode) is not list:
        mode = [mode]
    full_names = {'fiducial': 'Fiducial',
                  'binospec': 'Binospec',
                  'hectochelle': 'Hectochelle',
                  'desi': 'DESI-like',
                  'gaia': 'Gaia-like',
                  'exgal': 'Extragalactic'}
    # fall back to the raw key when there is no registered display name
    return [full_names.get(m, m) for m in mode]
# crbs using bspline
def calculate_crb(name='gd1', dt=0.2*u.Myr, vary=['progenitor', 'bary', 'halo'], ra=np.nan, dd=0.5, Nmin=15, verbose=False, align=True, scale=False, errmode='fiducial', k=3):
    """Calculate the Fisher matrix (and thus Cramer-Rao bounds) for the varied
    model parameters, from symmetric numerical derivatives of spline-fitted
    stream observables.

    Parameters:
    name - stream name
    dt - orbit integration time step
    vary - parameter classes (see get_varied_pars)
    ra - observation abscissa (default: auto grid spaced by dd)
    dd - abscissa spacing (degrees); shrunk if fewer than Nmin points result
    Nmin - minimum number of mock observation points
    verbose - print inversion diagnostics
    align - use the stream-aligned frame from the mock file if True
    scale - rescale steps to order unity via unity_scale before differencing
    errmode - key into the observing-modes file (falls back to 'fiducial')
    k - spline degree
    Saves derivative components and the Fisher matrices (one per data
    dimensionality) under ../data/crb/.
    """
    mock = pickle.load(open('../data/mock_{}.params'.format(name),'rb'))
    if align:
        rotmatrix = mock['rotmatrix']
        xmm = np.sort(mock['xi_range'])
    else:
        rotmatrix = np.eye(3)
        xmm = np.sort(mock['ra_range'])
    # typical uncertainties and data availability
    obsmodes = pickle.load(open('../data/observing_modes.info', 'rb'))
    if errmode not in obsmodes.keys():
        errmode = 'fiducial'
    sig_obs = obsmodes[errmode]['sig_obs']
    data_dim = obsmodes[errmode]['Ndim']
    # mock observations
    if np.any(~np.isfinite(ra)):
        if (np.int64((xmm[1]-xmm[0])/dd + 1) < Nmin):
            # too few points at the requested spacing -> tighten dd
            dd = (xmm[1]-xmm[0])/Nmin
        ra = np.arange(xmm[0], xmm[1]+dd, dd)
        #ra = np.linspace(xmm[0]*1.05, xmm[1]*0.95, Nobs)
        #else:
    Nobs = np.size(ra)
    print(name, Nobs)
    err = np.tile(sig_obs, Nobs).reshape(Nobs,-1)
    # varied parameters
    pparams0 = pparams_fid
    pid, dp_fid, vlabel = get_varied_pars(vary)
    Np = len(pid)
    dp_opt = read_optimal_step(name, vary)
    # attach fiducial units to the stored (unitless) optimal steps
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    fits_ex = [[[None]*5 for x in range(2)] for y in range(Np)]
    if scale:
        dp_unit = unity_scale(dp)
        dps = [x*y for x,y in zip(dp, dp_unit)]
    # calculate derivatives for all parameters (one model per +/- excursion)
    for p in range(Np):
        for i, s in enumerate([-1, 1]):
            pparams = [x for x in pparams0]
            pparams[pid[p]] = pparams[pid[p]] + s*dp[p]
            stream = stream_model(name=name, pparams0=pparams, dt=dt, rotmatrix=rotmatrix)
            # bspline fits to stream centerline
            iexsort = np.argsort(stream.obs[0])
            raex = np.linspace(np.percentile(stream.obs[0], 10), np.percentile(stream.obs[0], 90), Nobs)
            tex = np.r_[(stream.obs[0][iexsort][0],)*(k+1), raex, (stream.obs[0][iexsort][-1],)*(k+1)]
            for j in range(5):
                fits_ex[p][i][j] = scipy.interpolate.make_lsq_spline(stream.obs[0][iexsort], stream.obs[j+1][iexsort], tex, k=k)
    # populate matrix of derivatives and calculate CRB
    for Ndim in data_dim:
    #for Ndim in [6,]:
        Ndata = Nobs * (Ndim - 1)
        cyd = np.empty(Ndata)
        dydx = np.empty((Np, Ndata))
        dy2 = np.empty((2, Np, Ndata))
        for j in range(1, Ndim):
            for p in range(Np):
                # symmetric difference between the +/- excursion splines
                dy = fits_ex[p][0][j-1](ra) - fits_ex[p][1][j-1](ra)
                dy2[0][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][0][j-1](ra)
                dy2[1][p][(j-1)*Nobs:j*Nobs] = fits_ex[p][1][j-1](ra)
                #positive = np.abs(dy)>0
                #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dy[positive])), np.max(np.abs(dy)), np.median(np.abs(dy))))
                if scale:
                    dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dps[p].value)
                else:
                    dydx[p][(j-1)*Nobs:j*Nobs] = -dy / np.abs(2*dp[p].value)
                #if verbose: print('{:d},{:d} {:s} min{:.1e} max{:1e} med{:.1e}'.format(j, p, get_parlabel(pid[p])[0], np.min(np.abs(dydx[p][(j-1)*Nobs:j*Nobs][positive])), np.max(np.abs(dydx[p][(j-1)*Nobs:j*Nobs])), np.median(np.abs(dydx[p][(j-1)*Nobs:j*Nobs]))))
                #print(j, p, get_parlabel(pid[p])[0], dp[p], np.min(np.abs(dy)), np.max(np.abs(dy)), np.median(dydx[p][(j-1)*Nobs:j*Nobs]))
            # observational variances for this dimension
            cyd[(j-1)*Nobs:j*Nobs] = err[:,j-1]**2
        np.savez('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dydx=dydx, y=dy2, cyd=cyd, dp=dp_opt)
        # data component of the Fisher matrix: J^T C^-1 J
        cy = np.diag(cyd)
        cyi = np.diag(1. / cyd)
        caux = np.matmul(cyi, dydx.T)
        dxi = np.matmul(dydx, caux)
        # component based on prior knowledge of model parameters
        pxi = priors(name, vary)
        # full Fisher matrix
        cxi = dxi + pxi
        if verbose:
            cx = np.linalg.inv(cxi)
            cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
            sx = np.sqrt(np.diag(cx))
            print('CRB', sx)
            print('condition {:g}'.format(np.linalg.cond(cxi)))
            print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
            cx = stable_inverse(cxi)
            print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
        np.savez('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), cxi=cxi, dxi=dxi, pxi=pxi)
def priors(name, vary):
    """Return covariance matrix with prior knowledge about parameters

    Parameters:
    name - stream name (to locate the progenitor prior in the mock file)
    vary - list of parameter classes (see get_varied_pars)
    Returns:
    diagonal matrix of inverse prior variances for the varied parameters"""
    mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
    # per-class prior blocks; zeros mean no prior information
    prior_blocks = {'progenitor': mock['prog_prior'],
                    'bary': np.array([0.1*x.value for x in pparams_fid[:5]])**-2,
                    'halo': np.zeros(4),
                    'dipole': np.zeros(3),
                    'quad': np.zeros(5),
                    'octu': np.zeros(7)}
    stacked = np.concatenate([np.empty(0)] + [prior_blocks[v] for v in vary])
    return np.diag(stacked)
def scale2invert(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], verbose=False, align=True, errmode='fiducial'):
    """Diagnostic: rescale the derivative matrix (per parameter, then per data
    dimension) and report how the rescaling affects the condition number and
    invertibility of the resulting Fisher matrix. Prints results; no return.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    #dp = read_optimal_step(name, vary)
    d = np.load('../data/crb/components_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    dydx = d['dydx']
    cyd = d['cyd']
    y = d['y']
    dp = d['dp']
    # recompute symmetric-difference derivatives from the stored +/- splines
    dy = (y[1,:,:] - y[0,:,:])
    dydx = (y[1,:,:] - y[0,:,:]) / (2*dp[:,np.newaxis])
    # normalize each parameter row by its median absolute derivative
    scaling_par = np.median(np.abs(dydx), axis=1)
    dydx = dydx / scaling_par[:,np.newaxis]
    # normalize each data dimension by its median absolute derivative
    dydx_ = np.reshape(dydx, (len(dp), Ndim-1, -1))
    scaling_dim = np.median(np.abs(dydx_), axis=(2,0))
    dydx_ = dydx_ / scaling_dim[np.newaxis,:,np.newaxis]
    # rescale the data variances consistently
    cyd_ = np.reshape(cyd, (Ndim-1, -1))
    cyd_ = cyd_ / scaling_dim[:,np.newaxis]
    cyd = np.reshape(cyd_, (-1))
    dydx = np.reshape(dydx_, (len(dp), -1))
    # dynamic range of the raw differences (diagnostic only)
    mmin = np.min(np.abs(dy), axis=0)
    mmax = np.max(np.abs(dy), axis=0)
    mmed = np.median(np.abs(dydx), axis=1)
    dyn_range = mmax/mmin
    #print(dyn_range)
    print(np.min(dyn_range), np.max(dyn_range), np.std(dyn_range))
    # Fisher matrix J^T C^-1 J from the rescaled derivatives
    cy = np.diag(cyd)
    cyi = np.diag(1. / cyd)
    caux = np.matmul(cyi, dydx.T)
    cxi = np.matmul(dydx, caux)
    print('condition {:e}'.format(np.linalg.cond(cxi)))
    cx = np.linalg.inv(cxi)
    cx = np.matmul(np.linalg.inv(np.matmul(cx, cxi)), cx) # iteration to improve inverse at large cond numbers
    print('standard inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
    cx = stable_inverse(cxi, maxiter=30)
    print('stable inverse', np.allclose(cxi, cxi.T), np.allclose(cx, cx.T), np.allclose(np.matmul(cx,cxi), np.eye(np.shape(cx)[0])))
def unity_scale(dp):
    """Return multiplicative factors that bring each parameter step in dp to
    order unity.

    NOTE(review): the first dim_scale assignment is a dead store, immediately
    overwritten by the 16-element array below — presumably matched to the
    progenitor+bary+halo parameter ordering; TODO confirm against callers.
    """
    dim_scale = 10**np.array([2, 3, 3, 2, 4, 3, 7, 7, 5, 7, 7, 4, 4, 4, 4, 3, 3, 3, 4, 3, 4, 4, 4])
    dim_scale = 10**np.array([3, 2, 3, 4, 0, 2, 2, 3, 2, 2, 2, 4, 3, 2, 2, 3])
    #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3])
    #dim_scale = 10**np.array([2, 3, 3, 1, 3, 2, 5, 5, 3, 5, 5, 2, 2, 4, 4, 3, 3, 3])
    dp_unit = [(dp[x].value*dim_scale[x])**-1 for x in range(len(dp))]
    return dp_unit
def test_inversion(name='gd1', Ndim=6, vary=['progenitor', 'bary', 'halo'], align=True, errmode='fiducial'):
    """Compare numpy's direct inverse with the iteratively refined
    stable_inverse on a stored Fisher matrix; prints diagnostics."""
    pid, dp, vlabel = get_varied_pars(vary)
    data = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    fisher = data['cxi']
    N = np.shape(fisher)[0]
    plain = np.linalg.inv(fisher)
    refined = stable_inverse(fisher, verbose=True, maxiter=100)
    print('condition {:g}'.format(np.linalg.cond(fisher)))
    print('linalg inverse', np.allclose(np.matmul(plain,fisher), np.eye(N)))
    print('stable inverse', np.allclose(np.matmul(refined,fisher), np.eye(N)))
def stable_inverse(a, maxiter=20, verbose=False):
    """Invert a matrix with a bad condition number

    Iteratively refines the numpy inverse until q @ a is close to identity
    or maxiter refinement steps have been taken.

    Parameters:
    a - square matrix to invert
    maxiter - maximum number of refinement iterations (default: 20)
    verbose - print the residual norm at each iteration"""
    N = np.shape(a)[0]
    # starting guess from the direct inverse
    q = np.linalg.inv(a)
    qa = np.matmul(q, a)
    for it in range(maxiter):
        if verbose: print(it, np.sqrt(np.sum((qa - np.eye(N))**2)), np.allclose(qa, np.eye(N)))
        if np.allclose(qa, np.eye(N)):
            return q
        # refine: fold the inverse of the residual product back into q
        q = np.matmul(np.linalg.inv(qa), q)
        qa = np.matmul(q, a)
    return q
def crb_triangle(n, vary, Ndim=6, align=True, plot='all', fast=False, name='gd1', errmode='fiducial'):
    """Show CRB correlation ellipses for a chosen set of parameters in a
    triangle plot (single data dimensionality).

    Parameters:
    n - stream id (used in the output filename)
    vary - parameter classes (see get_varied_pars)
    Ndim - data dimensionality of the stored Fisher matrix
    align - aligned-frame flag used in the stored filename
    plot - 'all', 'halo', 'bary' or 'progenitor' parameter subset
    fast - use the direct numpy inverse instead of stable_inverse
    name - stream name used in the stored filename (default: 'gd1')
    errmode - observing-mode key used in the stored filename

    Bug fix: the original body referenced `name` and `errmode` without
    defining them, so every call raised NameError; they are now keyword
    parameters with the defaults used elsewhere in this module. The ellipse
    angle now uses arctan2 of the full eigenvector (as in
    crb_triangle_alldim) instead of arccos, which loses the rotation sign.
    """
    pid, dp, vlabel = get_varied_pars(vary)
    plabels, units = get_parlabel(pid)
    params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
    if align:
        alabel = '_align'
    else:
        alabel = ''
    fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
    cxi = fm['cxi']
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    #print(cx[0][0])
    # restrict to the requested parameter subset
    if plot=='halo':
        cx = cx[:4, :4]
        params = params[:4]
    elif plot=='bary':
        cx = cx[4:9, 4:9]
        params = params[4:9]
    elif plot=='progenitor':
        cx = cx[9:, 9:]
        params = params[9:]
    Nvar = len(params)
    plt.close()
    dax = 2
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar):
            plt.sca(ax[j-1][i])
            # 2x2 marginal covariance -> error ellipse via eigendecomposition
            cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
            w, v = np.linalg.eig(cx_2d)
            if np.all(np.isreal(v)):
                theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                width = np.sqrt(w[0])*2
                height = np.sqrt(w[1])*2
                e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.5), lw=2)
                plt.gca().add_patch(e)
                plt.gca().autoscale_view()
            #plt.xlim(-ylim[i],ylim[i])
            #plt.ylim(-ylim[j], ylim[j])
            if j==Nvar-1:
                plt.xlabel(params[i])
            if i==0:
                plt.ylabel(params[j])
    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')
    plt.tight_layout()
    plt.savefig('../plots/crb_triangle_{:s}_{:d}_{:s}_{:d}_{:s}.pdf'.format(alabel, n, vlabel, Ndim, plot))
def crb_triangle_alldim(name='gd1', vary=['progenitor', 'bary', 'halo'], align=True, plot='all', fast=False, scale=False, errmode='fiducial'):
    """Show correlations in CRB between a chosen set of parameters in a triangle plot

    Overlays error ellipses for the 3-, 4- and 6-dimensional data sets.
    Parameters:
    name - stream name
    vary - parameter classes (see get_varied_pars)
    align - aligned-frame flag used in the stored filename
    plot - 'all', 'halo', 'bary', 'progenitor' or 'dipole' parameter subset
    fast - use the direct numpy inverse instead of stable_inverse
    scale - rescale covariances by unity_scale factors before plotting
    errmode - observing-mode key used in the stored filename
    Saves the figure to ../plots/cxi_*.pdf.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    dp_opt = read_optimal_step(name, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    plabels, units = get_parlabel(pid)
    punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    # index range of the requested parameter subset
    if plot=='halo':
        i0 = 11
        i1 = 15
    elif plot=='bary':
        i0 = 6
        i1 = 11
    elif plot=='progenitor':
        i0 = 0
        i1 = 6
    elif plot=='dipole':
        i0 = 15
        i1 = len(params)
    else:
        i0 = 0
        i1 = len(params)
    Nvar = i1 - i0
    params = params[i0:i1]
    if scale:
        dp_unit = unity_scale(dp)
        #print(dp_unit)
        dp_unit = dp_unit[i0:i1]
        pid = pid[i0:i1]
    label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']
    plt.close()
    dax = 2
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')
    # one overlaid ellipse set per data dimensionality
    for l, Ndim in enumerate([3, 4, 6]):
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        #cxi = np.load('../data/crb/bspline_cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npy'.format(errmode, Ndim, name, align, vlabel))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cx = cx[i0:i1,i0:i1]
        for i in range(0,Nvar-1):
            for j in range(i+1,Nvar):
                plt.sca(ax[j-1][i])
                # 2x2 marginal covariance -> error ellipse via eigendecomposition
                if scale:
                    cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
                else:
                    cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2
                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
                    plt.gca().add_patch(e)
                # rescale axes once, on the middle (4D) layer
                if l==1:
                    plt.gca().autoscale_view()
                if j==Nvar-1:
                    plt.xlabel(params[i])
                if i==0:
                    plt.ylabel(params[j])
    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')
    plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
    plt.legend(loc=2, bbox_to_anchor=(1,1))
    plt.tight_layout()
    plt.savefig('../plots/cxi_{:s}_{:s}_a{:1d}_{:s}_{:s}.pdf'.format(errmode, name, align, vlabel, plot))
def compare_optimal_steps():
    """Print the stored optimal derivative steps for the GD-1 and Triangulum streams."""
    vary = ['progenitor', 'bary', 'halo']
    for stream_name in ('gd1', 'tri'):
        print(stream_name)
        print(read_optimal_step(stream_name, vary))
def get_crb(name, Nstep=10, vary=['progenitor', 'bary', 'halo'], first=True):
    """Run the full Cramer-Rao-bound pipeline for one stream.

    When first is True, the per-stream preprocessing is (re)done:
    progenitor parameters, angle wrapping, priors, great-circle frame,
    endpoints, and the derivative step-size convergence/selection for each
    varied parameter group.  The CRB itself and its triangle plot are always
    computed.

    name -- stream identifier (e.g. 'gd1')
    Nstep -- number of step sizes tried in the convergence test
    vary -- parameter groups to vary
    first -- rerun preprocessing before computing the CRB
    """
    if first:
        store_progparams(name)
        wrap_angles(name, save=True)
        progenitor_prior(name)
        find_greatcircle(name=name)
        endpoints(name)
        # pick a numerically stable differentiation step per parameter group
        for v in vary:
            step_convergence(name=name, Nstep=Nstep, vary=v)
            choose_step(name=name, Nstep=Nstep, vary=v)
    calculate_crb(name=name, vary=vary, verbose=True)
    crb_triangle_alldim(name=name, vary=vary)
########################
# cartesian coordinates
# accelerations
def acc_kepler(x, p=1*u.Msun):
    """Keplerian acceleration at position x (kpc) for a point mass p.

    NOTE(review): the 1e11 factor suggests p is interpreted in units of
    1e11 Msun -- confirm against callers.
    Returns the acceleration vector in pc Myr^-2.
    """
    r = np.linalg.norm(x)*u.kpc
    a = -G * p * 1e11 * r**-3 * x
    return a.to(u.pc*u.Myr**-2)
def acc_bulge(x, p=[pparams_fid[j] for j in range(2)]):
    """Acceleration of a Hernquist bulge with mass p[0] and scale radius p[1]
    at position x (kpc); returns pc Myr^-2.

    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    r = np.linalg.norm(x)*u.kpc
    a = -(G*p[0]*x/(r * (r + p[1])**2)).to(u.pc*u.Myr**-2)
    return a
def acc_disk(x, p=[pparams_fid[j] for j in range(2,5)]):
    """Acceleration of a Miyamoto-Nagai disk (mass p[0], scale lengths p[1],
    p[2]) at position x (kpc); returns pc Myr^-2.

    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    R = np.linalg.norm(x[:2])*u.kpc
    z = x[2]
    a = -(G*p[0]*x * (R**2 + (p[1] + np.sqrt(z**2 + p[2]**2))**2)**-1.5).to(u.pc*u.Myr**-2)
    # the vertical component carries an extra flattening factor
    a[2] *= (1 + p[2]/np.sqrt(z**2 + p[2]**2))
    return a
def acc_nfw(x, p=[pparams_fid[j] for j in [5,6,8,10]]):
    """Acceleration of a triaxial NFW halo (Vh=p[0], Rh=p[1], axis ratios
    qy=p[2], qz=p[3]) at position x (kpc); returns pc Myr^-2.

    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    r = np.linalg.norm(x)*u.kpc
    # per-axis flattening; q**-2 scales each Cartesian component
    q = np.array([1*u.Unit(1), p[2], p[3]])
    a = (p[0]**2 * p[1] * r**-3 * (1/(1+p[1]/r) - np.log(1+r/p[1])) * x * q**-2).to(u.pc*u.Myr**-2)
    return a
def acc_dipole(x, p=[pparams_fid[j] for j in range(11,14)]):
    """Acceleration due to outside dipole perturbation.

    Spatially constant; x is accepted only for a uniform acc_* interface.
    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    pv = [x.value for x in p]
    # reorder (p1, p2, p3) -> (a_x, a_y, a_z) per the Y_1^m convention
    a = np.sqrt(3/(4*np.pi)) * np.array([pv[2], pv[0], pv[1]])*u.pc*u.Myr**-2
    return a
def acc_quad(x, p=[pparams_fid[j] for j in range(14,19)]):
    """Acceleration due to outside quadrupole perturbation.

    Linear in position x; p holds the five Y_2^m coefficients.
    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    a = np.zeros(3)*u.pc*u.Myr**-2
    f = 0.5*np.sqrt(15/np.pi)
    a[0] = x[0]*(f*p[4] - f/np.sqrt(3)*p[2]) + x[1]*f*p[0] + x[2]*f*p[3]
    a[1] = x[0]*f*p[0] - x[1]*(f*p[4] + f/np.sqrt(3)*p[2]) + x[2]*f*p[1]
    a[2] = x[0]*f*p[3] + x[1]*f*p[1] + x[2]*2*f/np.sqrt(3)*p[2]
    return a.to(u.pc*u.Myr**-2)
def acc_octu(x, p=[pparams_fid[j] for j in range(19,26)]):
    """Acceleration due to outside octupole perturbation.

    Quadratic in position x; p holds the seven Y_3^m coefficients.
    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    # this initial value is overwritten by the einsum below; kept as-is
    a = np.zeros(3)*u.pc*u.Myr**-2
    # spherical-harmonic normalization per coefficient, m = -3..3
    f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
    xu = x.unit
    pu = p[0].unit
    pvec = np.array([i.value for i in p]) * pu
    dmat = np.ones((3,7)) * f * pvec * xu**2
    # strip units for the polynomial factors, reapply via dmat.unit below
    x = np.array([i.value for i in x])
    dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
    dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
    dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
    # sum the seven coefficient contributions for each Cartesian component
    a = np.einsum('ij->i', dmat) * dmat.unit
    return a.to(u.pc*u.Myr**-2)
# derivatives
def der_kepler(x, p=1*u.Msun):
    """Derivative of Kepler potential parameters wrt cartesian components of the acceleration.

    Returns a (3, 1) float array dq/da; the 1e-11 factor is the inverse of
    the 1e11 mass scaling used in acc_kepler.
    """
    r = np.linalg.norm(x)*u.kpc
    dmat = np.zeros((3,1)) * u.pc**-1 * u.Myr**2 * u.Msun
    # elementwise reciprocal of pder_kepler
    dmat[:,0] = (-r**3/(G*x)).to(u.pc**-1 * u.Myr**2 * u.Msun) * 1e-11
    return dmat.value
def pder_kepler(x, p=1*u.Msun):
    """Derivative of cartesian components of the acceleration wrt to Kepler potential parameter.

    Returns a (3, 1) float array da/dq; the 1e11 factor matches the mass
    scaling in acc_kepler.
    """
    r = np.linalg.norm(x)*u.kpc
    dmat = np.zeros((3,1)) * u.pc * u.Myr**-2 * u.Msun**-1
    dmat[:,0] = (-G*x*r**-3).to(u.pc * u.Myr**-2 * u.Msun**-1) * 1e11
    return dmat.value
def pder_nfw(x, pu=[pparams_fid[j] for j in [5,6,8,10]]):
    """Calculate derivatives of cartesian components of the acceleration wrt halo potential parameters.

    Columns of the returned (3, 4) matrix correspond to Vh, Rh, qy, qz.
    NOTE(review): Quantity expressions are assigned into a plain float
    array, which implicitly drops units -- confirm the intended unit
    convention matches the CRB bookkeeping.
    """
    p = pu
    q = np.array([1, p[2], p[3]])
    # physical quantities
    r = np.linalg.norm(x)*u.kpc
    a = acc_nfw(x, p=pu)
    # derivatives
    dmat = np.zeros((3, 4))
    # Vh
    dmat[:,0] = 2*a/p[0]
    # Rh
    dmat[:,1] = a/p[1] + p[0]**2 * p[1] * r**-3 * (1/(p[1]+p[1]**2/r) - 1/(r*(1+p[1]/r)**2)) * x * q**-2
    # qy, qz
    for i in [1,2]:
        dmat[i,i+1] = (-2*a[i]/q[i]).value
    return dmat
def pder_bulge(x, pu=[pparams_fid[j] for j in range(2)]):
    """Calculate derivarives of cartesian components of the acceleration wrt Hernquist bulge potential parameters.

    Columns of the returned (3, 2) matrix correspond to Mb and ab.
    """
    # coordinates
    r = np.linalg.norm(x)*u.kpc
    # accelerations
    ab = acc_bulge(x, p=pu[:2])
    # derivatives
    dmat = np.zeros((3, 2))
    # Mb: acceleration is linear in mass
    dmat[:,0] = ab/pu[0]
    # ab
    dmat[:,1] = 2 * ab / (r + pu[1])
    return dmat
def pder_disk(x, pu=[pparams_fid[j] for j in range(2,5)]):
    """Calculate derivarives of cartesian components of the acceleration wrt Miyamoto-Nagai disk potential parameters.

    Columns of the returned (3, 3) matrix correspond to Md, ad, bd.
    """
    # coordinates
    R = np.linalg.norm(x[:2])*u.kpc
    z = x[2]
    aux = np.sqrt(z**2 + pu[2]**2)
    # accelerations
    ad = acc_disk(x, p=pu)
    # derivatives
    dmat = np.zeros((3, 3))
    # Md: acceleration is linear in mass
    dmat[:,0] = ad / pu[0]
    # ad
    dmat[:,1] = 3 * ad * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2)
    # bd: the vertical component has an extra term from the flattening factor
    dmat[:2,2] = 3 * ad[:2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux
    dmat[2,2] = (3 * ad[2] * (pu[1] + aux) / (R**2 + (pu[1] + aux)**2) * pu[2] / aux - G * pu[0] * z * (R**2 + (pu[1] + aux)**2)**-1.5 * z**2 * (pu[2]**2 + z**2)**-1.5).value
    return dmat
def der_dipole(x, pu=None):
    """Calculate derivatives of dipole potential parameters wrt (Cartesian)
    components of the acceleration vector a.

    Parameters
    ----------
    x : position vector (unused; the dipole term is spatially constant)
    pu : dipole parameters (unused; kept for a uniform der_* interface);
         defaults to the fiducial dipole parameters

    Returns
    -------
    dmat : (3, 3) ndarray, dq/da
    """
    # Evaluate the default lazily: a list default would snapshot pparams_fid
    # at import time and be shared between calls.
    if pu is None:
        pu = [pparams_fid[j] for j in range(11,14)]
    # shape: 3, Npar
    dmat = np.zeros((3,3))
    f = np.sqrt((4*np.pi)/3)
    # inverse of the (a_x<-p3, a_y<-p1, a_z<-p2) mapping used in acc_dipole
    dmat[0,2] = f
    dmat[1,0] = f
    dmat[2,1] = f
    return dmat
def pder_dipole(x, pu=None):
    """Calculate derivatives of (Cartesian) components of the acceleration
    vector a wrt dipole potential parameters.

    Parameters
    ----------
    x : position vector (unused; the dipole term is spatially constant)
    pu : dipole parameters (unused; kept for a uniform pder_* interface);
         defaults to the fiducial dipole parameters

    Returns
    -------
    dmat : (3, 3) ndarray, da/dq
    """
    # Evaluate the default lazily: a list default would snapshot pparams_fid
    # at import time and be shared between calls.
    if pu is None:
        pu = [pparams_fid[j] for j in range(11,14)]
    # shape: 3, Npar
    dmat = np.zeros((3,3))
    f = np.sqrt(3/(4*np.pi))
    # a_x couples to p3, a_y to p1, a_z to p2 (matches acc_dipole)
    dmat[0,2] = f
    dmat[1,0] = f
    dmat[2,1] = f
    return dmat
def der_quad(x, p=None):
    """Calculate derivatives of quadrupole potential parameters wrt
    (Cartesian) components of the acceleration vector a.

    Parameters
    ----------
    x : position vector of astropy Quantities (only .value is used; the
        1e-3 factor converts kpc-scale values, inverted, as in pder_quad)
    p : quadrupole parameters (unused; kept for a uniform der_* interface);
        defaults to the fiducial quadrupole parameters

    Returns
    -------
    dmat : (3, 5) ndarray, dq/da
    """
    # Evaluate the default lazily: a list default would snapshot pparams_fid
    # at import time and be shared between calls.
    if p is None:
        p = [pparams_fid[j] for j in range(14,19)]
    f = 2/np.sqrt(15/np.pi)
    s = np.sqrt(3)
    x = [1e-3/i.value for i in x]
    dmat = np.ones((3,5)) * f
    # BUG FIX: rows were previously assigned with '=', which discarded the
    # normalization factor f entirely (it was computed but unused); use
    # in-place '*=' to mirror pder_quad.
    dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
    dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
    dmat[2] *= np.array([0, x[1], 0.5*s*x[2], x[0], 0])
    return dmat
def pder_quad(x, p=None):
    """Calculate derivatives of (Cartesian) components of the acceleration
    vector a wrt quadrupole potential parameters.

    Parameters
    ----------
    x : position vector of astropy Quantities (only .value is used; the
        1e-3 factor converts kpc-scale values)
    p : quadrupole parameters (unused; kept for a uniform pder_* interface);
        defaults to the fiducial quadrupole parameters

    Returns
    -------
    dmat : (3, 5) ndarray, da/dq
    """
    # Evaluate the default lazily: a list default would snapshot pparams_fid
    # at import time and be shared between calls.
    if p is None:
        p = [pparams_fid[j] for j in range(14,19)]
    f = 0.5*np.sqrt(15/np.pi)
    s = 1/np.sqrt(3)
    x = [1e-3*i.value for i in x]
    dmat = np.ones((3,5)) * f
    # each row: gradient of one acceleration component over the 5 coefficients
    dmat[0] *= np.array([x[1], 0, -s*x[0], x[2], x[0]])
    dmat[1] *= np.array([x[0], x[2], -s*x[1], 0, -x[1]])
    dmat[2] *= np.array([0, x[1], 2*s*x[2], x[0], 0])
    return dmat
def pder_octu(x, p=None):
    """Calculate derivatives of (Cartesian) components of the acceleration
    vector a wrt octupole potential parameters.

    Parameters
    ----------
    x : position vector of astropy Quantities (only .value is used; the
        1e-3 factor converts kpc-scale values)
    p : octupole parameters (unused; kept for a uniform pder_* interface);
        defaults to the fiducial octupole parameters

    Returns
    -------
    dmat : (3, 7) ndarray, da/dq
    """
    # Evaluate the default lazily: a list default would snapshot pparams_fid
    # at import time and be shared between calls.
    if p is None:
        p = [pparams_fid[j] for j in range(19,26)]
    # spherical-harmonic normalization per coefficient, m = -3..3
    f = np.array([0.25*np.sqrt(35/(2*np.pi)), 0.5*np.sqrt(105/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(7/np.pi), 0.25*np.sqrt(21/(2*np.pi)), 0.25*np.sqrt(105/np.pi), 0.25*np.sqrt(35/(2*np.pi))])
    x = [1e-3*i.value for i in x]
    dmat = np.ones((3,7)) * f
    # quadratic polynomial factors, matching acc_octu
    dmat[0] *= np.array([6*x[0]*x[1], x[1]*x[2], -2*x[0]*x[1], -6*x[0]*x[2], 4*x[2]**2-x[1]**2-3*x[0]**2, 2*x[0]*x[2], 3*x[0]**2-3*x[1]**2])
    dmat[1] *= np.array([3*x[0]**2-3*x[1]**2, x[0]*x[2], 4*x[2]**2-x[0]**2-3*x[1]**2, -6*x[1]*x[2], -2*x[0]*x[1], -2*x[1]*x[2], -6*x[0]*x[1]])
    dmat[2] *= np.array([0, x[0]*x[1], 8*x[1]*x[2], 6*x[2]**2-3*x[0]**2-3*x[1]**2, 8*x[0]*x[2], x[0]**2-x[1]**2, 0])
    return dmat
def crb_ax(n, Ndim=6, vary=['halo', 'bary', 'progenitor'], align=True, fast=False):
    """Map the Cramer-Rao bounds on the 3D acceleration in a halo potential
    over three coordinate planes through the GD-1 progenitor, and save the
    resulting 3x3 panel figure.

    Parameters
    ----------
    n : int -- model/stream index used in the CRB file name
    Ndim : int -- number of observed dimensions used in the stored CRB
    vary : list of str -- parameter groups varied when the CRB was computed
    align : bool -- use the CRB computed in the stream-aligned frame
    fast : bool -- use np.linalg.inv instead of stable_inverse
    """
    pid, dp, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    # subset halo parameters
    Nhalo = 4
    cq = cx[:Nhalo,:Nhalo]
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    x0, v0 = gd1_coordinates()
    d = 50
    Nb = 20
    # grid of offsets spanning +-d kpc around the origin
    # (dead stores centered on x0 were removed -- they were overwritten)
    x = np.linspace(-d, d, Nb)
    y = np.linspace(-d, d, Nb)
    xv, yv = np.meshgrid(x, y)
    xf = np.ravel(xv)
    yf = np.ravel(yv)
    af = np.empty((Nb**2, 3))
    plt.close()
    fig, ax = plt.subplots(3,3,figsize=(11,10))
    dimension = ['x', 'y', 'z']
    xlabel = ['y', 'x', 'x']
    ylabel = ['z', 'z', 'y']
    for j in range(3):
        # hold coordinate j fixed at the progenitor value, vary the other two
        if j==0:
            xin = np.array([np.repeat(x0[j], Nb**2), xf, yf]).T
        elif j==1:
            xin = np.array([xf, np.repeat(x0[j], Nb**2), yf]).T
        elif j==2:
            xin = np.array([xf, yf, np.repeat(x0[j], Nb**2)]).T
        for i in range(Nb**2):
            xi = xin[i]*u.kpc
            dqda = halo_accelerations(xi)
            # propagate the parameter covariance to acceleration space
            cai = np.matmul(dqda, np.matmul(cqi, dqda.T))
            if fast:
                ca = np.linalg.inv(cai)
            else:
                ca = stable_inverse(cai)
            a_crb = (np.sqrt(np.diag(ca)) * u.km**2 * u.kpc**-1 * u.s**-2).to(u.pc*u.Myr**-2)
            # store the absolute CRB; a dead store of the relative value
            # |a_crb/a| (immediately overwritten) was removed
            af[i] = a_crb
        for i in range(3):
            plt.sca(ax[j][i])
            im = plt.imshow(af[:,i].reshape(Nb,Nb), extent=[-d, d, -d, d], cmap=mpl.cm.gray) #, norm=mpl.colors.LogNorm(), vmin=1e-2, vmax=0.1)
            plt.xlabel(xlabel[j]+' (kpc)')
            plt.ylabel(ylabel[j]+' (kpc)')
            divider = make_axes_locatable(plt.gca())
            cax = divider.append_axes("top", size="4%", pad=0.05)
            plt.colorbar(im, cax=cax, orientation='horizontal')
            plt.gca().xaxis.set_ticks_position('top')
            cax.tick_params(axis='x', labelsize='xx-small')
            if j==0:
                plt.title('a$_{}$'.format(dimension[i]), y=4)
    plt.tight_layout(rect=[0,0,1,0.95])
    plt.savefig('../plots/acc_{}_{}_{}.png'.format(n, vlabel, Ndim))
def acc_cart(x, components=['bary', 'halo', 'dipole']):
    """Total Cartesian acceleration at position x, summed over the requested
    potential components ('bary', 'halo', 'dipole', 'quad', 'octu', 'point')."""
    dict_acc = {'bary': [acc_bulge, acc_disk], 'halo': [acc_nfw], 'dipole': [acc_dipole], 'quad': [acc_quad], 'octu': [acc_octu], 'point': [acc_kepler]}
    acart = np.zeros(3) * u.pc*u.Myr**-2
    # accumulate each component's contribution in order
    for c in components:
        for term in dict_acc[c]:
            acart = acart + term(x)
    return acart
def acc_rad(x, components=['bary', 'halo', 'dipole']):
    """Return radial acceleration: the total acceleration at x projected onto
    the outward radial unit vector."""
    r = np.linalg.norm(x) * x.unit
    theta = np.arccos(x[2].value/r.value)
    phi = np.arctan2(x[1].value, x[0].value)
    st = np.sin(theta)
    # radial unit vector in Cartesian coordinates
    rhat = np.array([st*np.cos(phi), st*np.sin(phi), np.cos(theta)])
    return np.dot(acc_cart(x, components=components), rhat)
def ader_cart(x, components=['bary', 'halo', 'dipole']):
    """Stack dq/da matrices (potential parameters wrt acceleration) for the
    requested components into one (3, Npar) matrix.

    NOTE(review): unlike apder_cart, the dispatch table has no 'octu' entry
    (no der_octu appears to exist) -- requesting 'octu' raises KeyError;
    confirm this asymmetry is intended.
    """
    dacart = np.empty((3,0))
    dict_der = {'bary': [der_bulge, der_disk], 'halo': [der_nfw], 'dipole': [der_dipole], 'quad': [der_quad], 'point': [der_kepler]}
    derivatives = []
    for c in components:
        derivatives += dict_der[c]
    # concatenate per-component Jacobians column-wise
    for ader in derivatives:
        da_ = ader(x)
        dacart = np.hstack((dacart, da_))
    return dacart
def apder_cart(x, components=['bary', 'halo', 'dipole']):
    """Stack da/dq matrices (acceleration wrt potential parameters) for the
    requested components into one (3, Npar) matrix."""
    dict_der = {'bary': [pder_bulge, pder_disk], 'halo': [pder_nfw], 'dipole': [pder_dipole], 'quad': [pder_quad], 'octu': [pder_octu], 'point': [pder_kepler]}
    # collect one Jacobian per sub-potential, in component order
    blocks = []
    for c in components:
        for pder in dict_der[c]:
            blocks.append(pder(x))
    if blocks:
        return np.hstack(blocks)
    return np.empty((3,0))
def apder_rad(x, components=['bary', 'halo', 'dipole']):
    """Return dar/dx_pot (radial acceleration/potential parameters) evaluated at vector x."""
    r = np.linalg.norm(x) * x.unit
    theta = np.arccos(x[2].value/r.value)
    phi = np.arctan2(x[1].value, x[0].value)
    st = np.sin(theta)
    # radial unit vector in Cartesian coordinates
    rhat = np.array([st*np.cos(phi), st*np.sin(phi), np.cos(theta)])
    jac = apder_cart(x, components=components)
    # project each parameter's acceleration derivative onto the radial direction
    return np.einsum('ij,i->j', jac, rhat)
def crb_acart(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', align=True, d=20, Nb=50, fast=False, scale=False, relative=True, progenitor=False, errmode='fiducial'):
    """Map Cramer-Rao bounds on the Cartesian acceleration components over a
    vertical plane through x0, save the data (npz) and a 3-panel figure.

    n -- stream index in the CRB file name; component -- which potential
    block to propagate; d, Nb -- half-width (kpc) and resolution of the map;
    relative -- plot |da/a| instead of absolute bounds.
    NOTE(review): the scale branch rescales cqi, but the covariance used
    below is cq, so scaling appears to have no effect here -- confirm.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    if relative:
        vmin = 1e-2
        vmax = 1
        rlabel = ' / a'
    else:
        vmin = 3e-1
        vmax = 1e1
        rlabel = ' (pc Myr$^{-2}$)'
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Npoint = [6, 5, 4, 3, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'all': np.shape(cx)[0], 'point': 1}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole
    if component in ['bary', 'halo', 'dipole', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    # covariance sub-block for the chosen potential parameters
    cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
    Npot = np.shape(cq)[0]
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    if scale:
        dp_opt = read_optimal_step(n, vary)
        dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
        scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
        scale_mat = np.outer(scale_vec, scale_vec)
        cqi *= scale_mat
    if progenitor:
        x0, v0 = gd1_coordinates()
    else:
        x0 = np.array([4, 4, 0])
    Rp = np.linalg.norm(x0[:2])
    zp = x0[2]
    # vertical plane through x0 and the z-axis, sampled on an Nb x Nb grid
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    z = np.linspace(-d, d, Nb)
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    Npix = np.size(xv)
    af = np.empty((Npix, 3))
    derf = np.empty((Npix, 3, Npot))
    for i in range(Npix):
        xi = xin[i]*u.kpc
        a = acc_cart(xi, components=components)
        dadq = apder_cart(xi, components=components)
        derf[i] = dadq
        # propagate the parameter covariance to acceleration space
        ca = np.matmul(dadq, np.matmul(cq, dadq.T))
        a_crb = np.sqrt(np.diag(ca)) * u.pc * u.Myr**-2
        if relative:
            af[i] = np.abs(a_crb/a)
        else:
            af[i] = a_crb
        #print(xi, a_crb)
    # save
    np.savez('../data/crb_acart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative), acc=af, x=xin, der=derf)
    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(15, 5))
    label = ['$\Delta$ $a_X$', '$\Delta$ $a_Y$', '$\Delta$ $a_Z$']
    for i in range(3):
        plt.sca(ax[i])
        im = plt.imshow(af[:,i].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=mpl.colors.LogNorm())
        if progenitor:
            plt.plot(Rp, zp, 'r*', ms=10)
        plt.xlabel('R (kpc)')
        plt.ylabel('Z (kpc)')
        divider = make_axes_locatable(plt.gca())
        cax = divider.append_axes("right", size="3%", pad=0.1)
        plt.colorbar(im, cax=cax)
        plt.ylabel(label[i] + rlabel)
    plt.tight_layout()
    plt.savefig('../plots/crb_acc_cart{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative))
def crb_acart_cov(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', j=0, align=True, d=20, Nb=30, fast=False, scale=False, relative=True, progenitor=False, batch=False, errmode='fiducial'):
    """Eigen-decompose the full acceleration covariance over a plane and map
    either the combined constraint (j=0) or a single eigenvector (j!=0).

    Saves the covariance data (npz) and either returns the figure
    (batch=True) or saves it to disk.
    NOTE(review): as in crb_acart, the scale branch rescales cqi while the
    covariance used below is cq -- confirm scaling is meant to be inert here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    if relative:
        vmin = 1e-2
        vmax = 1
        rlabel = ' / a'
    else:
        vmin = -0.005
        vmax = 0.005
        #vmin = 1e-2
        #vmax = 1e0
        rlabel = ' (pc Myr$^{-2}$)'
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole
    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
    Npot = np.shape(cq)[0]
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    if scale:
        dp_opt = read_optimal_step(n, vary)
        dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
        scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
        scale_mat = np.outer(scale_vec, scale_vec)
        cqi *= scale_mat
    if progenitor:
        # n is negative for progenitor-anchored runs
        prog_coords = {-1: gd1_coordinates(), -2: pal5_coordinates(), -3: tri_coordinates(), -4: atlas_coordinates()}
        x0, v0 = prog_coords[n]
        print(x0)
    else:
        x0 = np.array([4, 4, 0])
    Rp = np.linalg.norm(x0[:2])
    zp = x0[2]
    # vertical plane through x0 and the z-axis, sampled on an Nb x Nb grid
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    z = np.linspace(-d, d, Nb)
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    Npix = np.size(xv)
    af = np.empty((Npix, 3))
    # all pixels' Jacobians stacked row-wise: (3*Npix, Npot)
    derf = np.empty((Npix*3, Npot))
    for i in range(Npix):
        xi = xin[i]*u.kpc
        a = acc_cart(xi, components=components)
        dadq = apder_cart(xi, components=components)
        derf[i*3:(i+1)*3] = dadq
    # full covariance between all acceleration components at all pixels
    ca = np.matmul(derf, np.matmul(cq, derf.T))
    Nx = Npot
    Nw = Npix*3
    # keep only the top Nx+2 eigenmodes (ca has rank <= Npot)
    vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
    ## check orthogonality:
    #for i in range(Npot-1):
        #for k in range(i+1, Npot):
            #print(i, k)
            #print(np.dot(vecs[:,i], vecs[:,k]))
            #print(np.dot(vecs[::3,i], vecs[::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]), np.dot(vecs[1::3,i], vecs[1::3,k]))
    # save
    np.savez('../data/crb_acart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}'.format(alabel, errmode, n, vlabel, component, Ndim, d, Nb, relative, progenitor), x=xin, der=derf, c=ca)
    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(15, 5))
    if j==0:
        # quadrature sum over eigenmodes weighted by their eigenvalues
        vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
        label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
        vmin = 1e-2
        vmax = 5e0
        norm = mpl.colors.LogNorm()
    else:
        vcomb = vecs[:,j]
        label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
        vmin = -0.025
        vmax = 0.025
        norm = None
    for i in range(3):
        plt.sca(ax[i])
        #im = plt.imshow(vecs[i::3,j].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax)
        im = plt.imshow(vcomb[i::3].reshape(Nb, Nb), origin='lower', extent=[-d, d, -d, d], cmap=mpl.cm.gray, vmin=vmin, vmax=vmax, norm=norm)
        if progenitor:
            plt.plot(Rp, zp, 'r*', ms=10)
        plt.xlabel('R (kpc)')
        plt.ylabel('Z (kpc)')
        divider = make_axes_locatable(plt.gca())
        cax = divider.append_axes("right", size="3%", pad=0.1)
        plt.colorbar(im, cax=cax)
        plt.ylabel(label[i])
    plt.tight_layout()
    if batch:
        return fig
    else:
        plt.savefig('../plots/crb_acc_cart_cov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative, progenitor))
def a_vecfield(vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], component='all', d=20, Nb=10):
    """Plot acceleration field in R,z plane"""
    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    x0 = np.array([4, 4, 0])
    # vertical plane through x0 and the z-axis, sampled on an Nb x Nb grid
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    z = np.linspace(-d, d, Nb)
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    # signed in-plane radius for plotting
    Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
    zin = xin[:,2]
    Npix = np.size(xv)
    acart_pix = np.empty((Npix, 3))
    acyl_pix = np.empty((Npix, 2))
    for i in range(Npix):
        xi = xin[i]*u.kpc
        acart = acc_cart(xi, components=components)
        acart_pix[i] = acart
    # project Cartesian accelerations onto (R, z)
    acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
    acyl_pix[:,1] = acart_pix[:,2]
    plt.close()
    plt.figure()
    plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1])
    plt.tight_layout()
def a_crbcov_vecfield(n, Ndim=6, vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all', j=0, align=True, d=20, Nb=10, fast=False, scale=True, relative=False, progenitor=False, batch=False):
    """Quiver plot of the acceleration field next to the field of a CRB
    covariance eigenvector (j!=0) or the combined constraint (j=0).

    Returns the figure when batch=True, otherwise saves it to disk.
    NOTE(review): as in crb_acart, the scale branch rescales cqi while the
    covariance used below is cq -- confirm scaling is meant to be inert here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''
    if relative:
        vmin = 1e-2
        vmax = 1
        rlabel = ' / a'
    else:
        vmin = -0.005
        vmax = 0.005
        #vmin = 1e-2
        #vmax = 1e0
        rlabel = ' (pc Myr$^{-2}$)'
    # read in full inverse CRB for stream modeling
    cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
    if fast:
        cx = np.linalg.inv(cxi)
    else:
        cx = stable_inverse(cxi)
    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole
    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
    Npot = np.shape(cq)[0]
    if fast:
        cqi = np.linalg.inv(cq)
    else:
        cqi = stable_inverse(cq)
    if scale:
        dp_opt = read_optimal_step(n, vary)
        dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
        scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
        scale_mat = np.outer(scale_vec, scale_vec)
        cqi *= scale_mat
    if progenitor:
        x0, v0 = gd1_coordinates()
    else:
        x0 = np.array([4, 4, 0])
    Rp = np.linalg.norm(x0[:2])
    zp = x0[2]
    # vertical plane through x0 and the z-axis, sampled on an Nb x Nb grid
    R = np.linspace(-d, d, Nb)
    k = x0[1]/x0[0]
    x = R/np.sqrt(1+k**2)
    y = k * x
    z = np.linspace(-d, d, Nb)
    xv, zv = np.meshgrid(x, z)
    yv, zv = np.meshgrid(y, z)
    xin = np.array([np.ravel(xv), np.ravel(yv), np.ravel(zv)]).T
    # signed in-plane radius for plotting
    Rin = np.linalg.norm(xin[:,:2], axis=1) * np.sign(xin[:,0])
    zin = xin[:,2]
    Npix = np.size(xv)
    acart_pix = np.empty((Npix, 3))
    acyl_pix = np.empty((Npix, 2))
    vcomb_pix = np.empty((Npix, 2))
    af = np.empty((Npix, 3))
    derf = np.empty((Npix*3, Npot))
    for i in range(Npix):
        xi = xin[i]*u.kpc
        a = acc_cart(xi, components=components)
        acart_pix[i] = a
        dadq = apder_cart(xi, components=components)
        derf[i*3:(i+1)*3] = dadq
    acyl_pix[:,0] = np.linalg.norm(acart_pix[:,:2], axis=1) * -np.sign(xin[:,0])
    acyl_pix[:,1] = acart_pix[:,2]
    # full covariance between all acceleration components at all pixels
    ca = np.matmul(derf, np.matmul(cq, derf.T))
    Nx = Npot
    Nw = Npix*3
    # keep only the top Nx+2 eigenmodes (ca has rank <= Npot)
    vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))
    if j==0:
        # quadrature sum over eigenmodes weighted by their eigenvalues
        vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
        label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
        vmin = 1e-3
        vmax = 1e-1
        norm = mpl.colors.LogNorm()
    else:
        vcomb = vecs[:,j]*np.sqrt(vals[j])
        label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]
        vmin = -0.025
        vmax = 0.025
        norm = None
    # project eigenvector field onto (R, z) for the quiver plot
    vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(xin[:,0])
    #vcomb_pix[:,0] = np.sqrt(vcomb[0::3]**2 + vcomb[1::3]**2) * -np.sign(vcomb[0::3])
    vcomb_pix[:,1] = vcomb[2::3]
    plt.close()
    fig, ax = plt.subplots(1,2,figsize=(10,5))
    plt.sca(ax[0])
    plt.quiver(Rin, zin, acyl_pix[:,0], acyl_pix[:,1], pivot='middle')
    plt.xlabel('R (kpc)')
    plt.ylabel('Z (kpc)')
    plt.title('Acceleration {}'.format(component), fontsize='medium')
    plt.sca(ax[1])
    plt.quiver(Rin, zin, vcomb_pix[:,0], vcomb_pix[:,1], pivot='middle', headwidth=0, headlength=0, headaxislength=0, scale=0.02, scale_units='xy')
    plt.xlabel('R (kpc)')
    plt.ylabel('Z (kpc)')
    plt.title('Eigenvector {}'.format(np.abs(j)), fontsize='medium')
    plt.tight_layout()
    if batch:
        return fig
    else:
        plt.savefig('../plots/afield_crbcov{:s}_{:s}_{:d}_{:s}_{:s}_{:d}_{:d}_{:d}_{:d}_{:d}.png'.format(alabel, errmode, n, vlabel, component, np.abs(j), Ndim, d, Nb, relative))
def summary(n, mode='scalar', vary=['progenitor', 'bary', 'halo', 'dipole', 'quad'], errmode='fiducial', component='all'):
    """Collect acceleration-constraint figures (combined plus one page per
    eigenmode) into a single PDF.

    mode selects the figure generator: 'scalar' -> crb_acart_cov,
    'vector' -> a_crbcov_vecfield.
    NOTE(review): n appears only in the output file name; the figures are
    always generated with stream index -1 (GD-1) -- confirm intended.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    fn = {'scalar': crb_acart_cov, 'vector': a_crbcov_vecfield}
    bins = {'scalar': 30, 'vector': 10}
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    Npars = {'bary': Nbary, 'halo': Nhalo, 'dipole': Ndipole, 'quad': Nquad, 'point': Npoint}
    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    # one page per potential parameter, plus the combined (j=0) page
    Niter = [Npars[x] for x in components]
    Niter = sum(Niter) + 1
    pp = PdfPages('../plots/acceleration_{}_{}_{}_{}_{}.pdf'.format(n, errmode, vlabel, component, mode))
    for i in range(Niter):
        print(i, Niter)
        fig = fn[mode](-1, progenitor=True, batch=True, errmode=errmode, vary=vary, component=component, j=-i, d=20, Nb=bins[mode])
        pp.savefig(fig)
    pp.close()
#########
# Summary
def full_names():
    """Map short stream identifiers to their display names."""
    return {
        'gd1': 'GD-1',
        'atlas': 'ATLAS',
        'tri': 'Triangulum',
        'ps1a': 'PS1A',
        'ps1b': 'PS1B',
        'ps1c': 'PS1C',
        'ps1d': 'PS1D',
        'ps1e': 'PS1E',
        'ophiuchus': 'Ophiuchus',
        'hermus': 'Hermus',
        'kwando': 'Kwando',
        'orinoco': 'Orinoco',
        'sangarius': 'Sangarius',
        'scamander': 'Scamander',
    }
def full_name(name):
    """Return the display name for a short stream identifier.

    Raises KeyError for unknown identifiers.
    """
    return full_names()[name]
def get_done(sort_length=False):
    """Return the list of finished streams, sorted by increasing on-sky
    length (sort_length=True) or, by default, by decreasing CRB on the halo
    circular-velocity parameter (pid 5)."""
    done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'ophiuchus', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']
    # working set: ophiuchus excluded (previous list kept above for reference)
    done = ['gd1', 'tri', 'atlas', 'ps1a', 'ps1c', 'ps1e', 'kwando', 'orinoco', 'sangarius', 'hermus', 'ps1d']
    # length
    if sort_length:
        tosort = []
        for name in done:
            mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
            tosort += [np.max(mock['xi_range']) - np.min(mock['xi_range'])]
        done = [x for _,x in sorted(zip(tosort,done))]
    else:
        tosort = []
        vary = ['progenitor', 'bary', 'halo']
        Ndim = 6
        errmode = 'fiducial'
        align = True
        pid, dp_fid, vlabel = get_varied_pars(vary)
        # index of the halo circular-velocity parameter within the CRB vector
        pid_vh = myutils.wherein(np.array(pid), np.array([5]))
        for name in done:
            fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
            cxi = fm['cxi']
            cx = stable_inverse(cxi)
            crb = np.sqrt(np.diag(cx))
            tosort += [crb[pid_vh]]
        # best-constrained (smallest CRB) streams last -> reverse for descending
        done = [x for _,x in sorted(zip(tosort,done))][::-1]
    return done
def store_mocks():
    """Save the observed coordinates of the mock stream model for every
    finished stream to ../data/streams/."""
    done = get_done()
    for name in done:
        stream = stream_model(name)
        np.save('../data/streams/mock_observed_{}'.format(name), stream.obs)
def period(name):
    """Return orbital period in units of stepsize and number of complete periods"""
    orbit = stream_orbit(name=name)
    # galactocentric radius as a function of time
    r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
    # dominant radial-oscillation frequency (skip the DC term of the rfft)
    a = np.abs(np.fft.rfft(r))
    f = np.argmax(a[1:]) + 1
    # NOTE(review): np.size(a) is Nstep/2 + 1 (rfft output length), not the
    # original number of steps -- confirm whether p should use np.size(r)/f
    p = np.size(a)/f
    return (p, f)
def extract_crbs(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', j=0, align=True, fast=False, scale=False):
    """Plot every finished stream on a Mollweide sky map, color-coded by its
    fractional CRB on each parameter of the chosen component, and save a
    per-component summary figure.

    NOTE(review): the j and scale arguments are currently unused here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    names = get_done()
    tout = Table(names=('name', 'crb'))
    pparams0 = pparams_fid
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    Np = len(pid_comp)
    # positions of the component's parameters within the full CRB vector
    pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
    plt.close()
    fig, ax = plt.subplots(Np,1,figsize=(10,15), subplot_kw=dict(projection='mollweide'))
    for name in names[:]:
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        crb = np.sqrt(np.diag(cx))
        #print([pparams0[pid_comp[i]] for i in range(Np)])
        # CRB as a fraction of the fiducial parameter value
        crb_frac = [crb[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)]
        print(name, crb_frac)
        stream = stream_model(name=name)
        for i in range(Np):
            plt.sca(ax[i])
            # saturate the color scale at 20% fractional CRB
            color_index = np.array(crb_frac[:])
            color_index[color_index>0.2] = 0.2
            color_index /= 0.2
            color = mpl.cm.viridis(color_index[i])
            plt.plot(np.radians(stream.obs[0]), np.radians(stream.obs[1]), 'o', color=color, ms=4)
    for i in range(Np):
        plt.sca(ax[i])
        #plt.xlabel('RA')
        plt.ylabel('Dec')
        plt.text(0.9, 0.9, '$\Delta$ {}'.format(get_parlabel(pid_comp[i])[0]), fontsize='medium', transform=plt.gca().transAxes, va='bottom', ha='left')
        plt.grid()
    plt.xlabel('RA')
    # add custom colorbar
    sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis, norm=plt.Normalize(vmin=0, vmax=20))
    # fake up the array of the scalar mappable. Urgh...
    sm._A = []
    if component=='bary':
        cb_pad = 0.1
    else:
        cb_pad = 0.06
    cb = fig.colorbar(sm, ax=ax.ravel().tolist(), pad=cb_pad, aspect=40, ticks=np.arange(0,21,5))
    cb.set_label('Cramer $-$ Rao bounds (%)')
    #plt.tight_layout()
    plt.savefig('../plots/crb_onsky_{}.png'.format(component))
def vhrh_correlation(Ndim=6, vary=['progenitor', 'bary', 'halo'], component='halo', errmode='fiducial', align=True):
    """Plot the correlation coefficient between the first two parameters of
    the chosen component (for 'halo': Vh and Rh) against each stream's
    apocenter radius."""
    names = get_done()
    t = Table.read('../data/crb/ar_orbital_summary.fits')
    N = len(names)
    p = np.empty(N)
    pid, dp_fid, vlabel = get_varied_pars(vary)
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    i = pid_comp[0]
    j = pid_comp[1]
    for e, name in enumerate(names):
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        cx = stable_inverse(cxi)
        # Pearson correlation coefficient from the covariance matrix
        p[e] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j])
    plt.close()
    plt.figure()
    plt.plot(t['rapo'], p, 'ko')
def allstream_2d(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, relative=False):
    """Compare 2D constraints between all streams"""
    pid, dp_fid, vlabel = get_varied_pars(vary)
    names = get_done()
    N = len(names)
    # plot setup: near-square grid of panels, one per stream
    ncol = np.int64(np.ceil(np.sqrt(N)))
    nrow = np.int64(np.ceil(N/ncol))
    w_ = 8
    h_ = 1.1 * w_*nrow/ncol
    alpha = 1
    lw = 2
    frac = [0.8, 0.5, 0.2]
    # parameter pairs
    paramids = [8, 11, 12, 13, 14]
    all_comb = list(itertools.combinations(paramids, 2))
    comb = sorted(list(set(all_comb)))
    Ncomb = len(comb)
    #print(comb)
    pp = PdfPages('../plots/allstreams_2d_{}_a{:1d}_{}_r{:1d}.pdf'.format(errmode, align, vlabel, relative))
    for c in range(Ncomb):
        l, k = comb[c]
        plt.close()
        fig, ax = plt.subplots(nrow, ncol, figsize=(w_, h_), sharex=True, sharey=True)
        for i in range(N):
            plt.sca(ax[np.int64(i/ncol)][i%ncol])
            # one confidence ellipse per observed dimensionality
            for e, Ndim in enumerate([3,4,6]):
                color = mpl.cm.bone(frac[e])
                fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, names[i], align, vlabel))
                cxi = fm['cxi']
                cx = stable_inverse(cxi)
                cx_2d = np.array([[cx[k][k], cx[k][l]], [cx[l][k], cx[l][l]]])
                if relative:
                    # express the covariance in percent of the fiducial values
                    pk = pparams_fid[pid[k]].value
                    pl = pparams_fid[pid[l]].value
                    fid_2d = np.array([[pk**2, pk*pl], [pk*pl, pl**2]])
                    cx_2d = cx_2d / fid_2d * 100**2
                # ellipse axes/orientation from the 2x2 eigendecomposition
                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2
                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw)
                    plt.gca().add_patch(e)
            txt = plt.text(0.9, 0.9, full_name(names[i]), fontsize='small', transform=plt.gca().transAxes, ha='right', va='top')
            txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))
            if relative:
                plt.xlim(-20, 20)
                plt.ylim(-20,20)
            else:
                plt.gca().autoscale_view()
        plabels, units = get_parlabel([pid[k],pid[l]])
        if relative:
            punits = [' (%)' for x in units]
        else:
            punits = [' ({})'.format(x) if len(x) else '' for x in units]
        params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
        for i in range(ncol):
            plt.sca(ax[nrow-1][i])
            plt.xlabel(params[0])
        for i in range(nrow):
            plt.sca(ax[i][0])
            plt.ylabel(params[1])
        # turn off unused axes
        for i in range(N, ncol*nrow):
            plt.sca(ax[np.int64(i/ncol)][i%ncol])
            plt.axis('off')
        plt.tight_layout(h_pad=0, w_pad=0)
        pp.savefig(fig)
    pp.close()
# circular velocity
def pder_vc(x, p=[pparams_fid[j] for j in [0,1,2,3,4,5,6,8,10]], components=['bary', 'halo']):
    """Derivatives of the squared circular velocity wrt potential parameters
    at galactocentric radii x.

    With 'bary' in components the rows cover bulge (Mb, ab), disk (Md, ad,
    bd) and NFW (Vh, Rh, qy, qz) parameters; otherwise p is interpreted as
    the 4 NFW parameters only.
    NOTE(review): the default list is evaluated at import time, freezing a
    snapshot of pparams_fid.
    """
    N = np.size(x)
    # components
    if 'bary' in components:
        bulge = np.array([G*x*(x+p[1])**-2, -2*G*p[0]*x*(x+p[1])**-3])
        aux = p[3] + p[4]
        disk = np.array([G*x**2*(x**2 + aux**2)**-1.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5, -3*G*p[2]*x**2*aux*(x**2 + aux**2)**-2.5])
        nfw = np.array([2*p[5]*(p[6]/x*np.log(1+x.value/p[6].value) - (1+x.value/p[6].value)**-1), p[5]**2*(np.log(1+x.value/p[6].value)/x - (x+p[6])**-1 - x*(x+p[6])**-2), np.zeros(N), np.zeros(N)])
        pder = np.vstack([bulge, disk, nfw])
    else:
        # flattening parameters do not affect the in-plane circular velocity
        pder = np.array([2*p[0]*(p[1]/x*np.log(1+x.value/p[1].value) - (1+x.value/p[1].value)**-1), p[0]**2*(np.log(1+x.value/p[1].value)/x - (x+p[1])**-1 - x*(x+p[1])**-2), np.zeros(N), np.zeros(N)])
    return pder
def delta_vc_vec(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, fast=False, scale=False, ascale=False):
    """Plot the CRB uncertainty on the circular velocity curve for each stream.

    For every finished stream, loads the saved inverse-CRB matrix, projects the
    selected parameter block onto the circular velocity via pder_vc, and plots
    the absolute (left panel) and relative (right panel) V_c uncertainty vs
    radius. Per-stream curves and orbital summaries are saved to
    ../data/crb/vcirc_*.npz.

    Ndim/vary/errmode/align -- select which saved CRB file to load
    component -- parameter block to project ('all', 'bary', 'halo', ...)
    j -- 0: combine all eigenmodes in quadrature; otherwise plot mode j alone
    d, Nb -- maximum radius (kpc) and number of radial samples
    fast -- use np.linalg.inv instead of stable_inverse
    scale/ascale -- rescale parameters by optimal steps / radii by apocenter
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    names = get_done()
    labels = full_names()
    colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}
    #colors = {'gd1': mpl.cm.bone(0), 'atlas': mpl.cm.bone(0.5), 'tri': mpl.cm.bone(0.8)}

    plt.close()
    fig, ax = plt.subplots(1,2,figsize=(10,5))

    for name in names:
        # read in full inverse CRB for stream modeling
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)

        # choose the appropriate components:
        Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
        if 'progenitor' not in vary:
            Nprog = 0
        # start/end indices of each component's block within the full matrix
        nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
        nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}

        if 'progenitor' not in vary:
            nstart['dipole'] = Npoint
            nend['dipole'] = Npoint + Ndipole

        if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
            components = [component]
        else:
            components = [x for x in vary if x!='progenitor']
        cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
        Npot = np.shape(cq)[0]

        if fast:
            cqi = np.linalg.inv(cq)
        else:
            cqi = stable_inverse(cq)

        if scale:
            dp_opt = read_optimal_step(name, vary)
            dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
            scale_vec = np.array([x.value for x in dp[nstart[component]:nend[component]]])
            scale_mat = np.outer(scale_vec, scale_vec)
            # NOTE(review): the scaling is applied to cqi, but the projection
            # below uses cq -- so 'scale' currently has no effect on the
            # plotted curves; confirm whether ca should use the scaled matrix
            cqi *= scale_mat

        # radial sampling grid
        x = np.linspace(0.01, d, Nb)*u.kpc
        Npix = np.size(x)
        derf = np.transpose(pder_vc(x, components=components))
        # project the parameter covariance onto the circular-velocity observable
        ca = np.matmul(derf, np.matmul(cq, derf.T))

        Nx = Npot
        Nw = Nb
        # keep only the largest Nx+2 eigenmodes of the projected covariance
        vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))

        if j==0:
            # combine all eigenmodes in quadrature
            vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))
            #label = ['($\Sigma$ Eigval $\\times$ Eigvec$^2$ $a_{}$'.format(x)+')$^{1/2}$' for x in ['X', 'Y', 'Z']]
        else:
            # a single eigenmode j
            vcomb = vecs[:,j]*np.sqrt(vals[j])
            #label = ['Eig {} $a_{}$'.format(np.abs(j), x) for x in ['X', 'Y', 'Z']]

        # equivalent enclosed-mass uncertainty (computed but not plotted below)
        mcomb = (vcomb*u.km**2*u.s**-2 * x / G).to(u.Msun)
        vc_true = vcirc_potential(x, pparams=pparams_fid)

        # relate to orbit
        orbit = stream_orbit(name=name)
        r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
        rmin = np.min(r)
        rmax = np.max(r)
        rcur = r[0]
        r0 = r[-1]
        print(name, rcur, r0)

        e = (rmax - rmin)/(rmax + rmin)
        l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
        p, Np = period(name)

        np.savez('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}'.format(errmode, Ndim, name, align, vlabel), dvc=np.sqrt(vcomb), vc=vc_true.value, r=x.value, rperi=rmin, rapo=rmax, rcur=rcur, r0=r0, ecc=e, l=l, p=p, Np=Np)

        if ascale:
            # express radii in units of the apocenter
            x = x * rmax**-1
            #x = x * rcur**-1

        # plot
        plt.sca(ax[0])
        plt.plot(x, np.sqrt(vcomb), '-', lw=3, color=colors[name], label=labels[name])
        #plt.plot(x, vc_true, 'r-')

        plt.sca(ax[1])
        plt.plot(x, np.sqrt(vcomb)/vc_true, '-', lw=3, color=colors[name], label=labels[name])
        #plt.plot(x, mcomb, '-', lw=3, color=colors[name], label=labels[name])

    plt.sca(ax[0])
    if ascale:
        plt.xlim(0,5)
        plt.xlabel('r/r$_{apo}$')
    else:
        plt.xlabel('r (kpc)')
    plt.ylabel('$\Delta$ $V_c$ (km s$^{-1}$)')
    #plt.ylim(0, 100)

    plt.sca(ax[1])
    plt.legend(loc=1, frameon=True, handlelength=1, fontsize='small')
    if ascale:
        plt.xlim(0,5)
        plt.xlabel('r/r$_{apo}$')
    else:
        plt.xlabel('r (kpc)')
    plt.ylabel('$\Delta$ $V_c$ / $V_c$')
    #plt.ylabel('$\Delta$ $M_{enc}$ ($M_\odot$)')
    #plt.ylim(0, 1e11)

    plt.tight_layout()
    plt.savefig('../plots/vc_r_summary_apo{:d}.pdf'.format(ascale))
def delta_vc_correlations(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='all', j=0, align=True, d=200, Nb=1000, r=False, fast=False, scale=False):
    """Correlate each stream's minimal V_c uncertainty with orbital properties.

    Reads the per-stream vcirc_*.npz summaries (written by delta_vc_vec) and
    plots min(dVc/Vc) -- or, with r=True, the radius of that minimum -- against
    apocenter, current radius, eccentricity, orbital-plane orientation, number
    of completed periods, and stream length.

    Note: component, j, d, Nb, fast and scale are accepted for signature
    compatibility but not used here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    elabel = ''
    ylabel = 'min ($\Delta$ $V_c$ / $V_c$)'
    if r:
        ylabel = 'r(min($\Delta$ $V_c$ / $V_c$)) (kpc)'
        elabel = 'r'

    names = get_done()
    labels = full_names()
    colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}

    plt.close()
    fig, ax = plt.subplots(2,3,figsize=(15,9))

    for name in names:
        # d shadows the keyword argument from here on
        d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        rel_dvc = np.min(d['dvc'] / d['vc'])
        if r:
            # plot the radius of the minimum instead of the minimum itself
            idmin = np.argmin(d['dvc'] / d['vc'])
            rel_dvc = d['r'][idmin]

        mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
        dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])

        plt.sca(ax[0][0])
        if r:
            # identity line for reference
            plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
        plt.plot(d['rapo'], rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
        plt.xlabel('$r_{apo}$ (kpc)')
        plt.ylabel(ylabel)

        plt.sca(ax[0][1])
        #plt.plot(d['rcur']/d['rapo'], rel_dvc, 'o', ms=10, color=colors[name])
        if r:
            # NOTE(review): reference line still uses rapo on the x-axis in
            # this r_current panel -- confirm it shouldn't be d['rcur']
            plt.plot(d['rapo'], d['rapo'], 'r.', zorder=0, lw=1.5)
        plt.plot(d['rcur'], rel_dvc, 'o', ms=10, color=colors[name])
        #plt.plot(d['r0'], rel_dvc, 'ro')
        plt.xlabel('$r_{current}$')
        plt.ylabel(ylabel)

        plt.sca(ax[0][2])
        # first estimate is immediately overwritten by the stored value
        ecc = np.sqrt(1 - (d['rperi']/d['rapo'])**2)
        ecc = d['ecc']
        plt.plot(ecc, rel_dvc, 'o', ms=10, color=colors[name], label=labels[name])
        plt.xlabel('Eccentricity')
        plt.ylabel(ylabel)

        plt.sca(ax[1][0])
        plt.plot(np.median(np.abs(d['l'][:,2])/np.linalg.norm(d['l'], axis=1)), rel_dvc, 'o', ms=10, color=colors[name])
        plt.xlabel('|L_z|/|L|')
        plt.ylabel(ylabel)

        plt.sca(ax[1][1])
        plt.plot(d['Np'], rel_dvc, 'o', ms=10, color=colors[name])
        #plt.xlabel('$r_{peri}$ (kpc)')
        plt.xlabel('Completed periods')
        plt.ylabel(ylabel)

        plt.sca(ax[1][2])
        plt.plot(dlambda, rel_dvc, 'o', ms=10, color=colors[name])
        plt.xlabel('$\Delta$ $\\xi$ (deg)')
        plt.ylabel(ylabel)

    plt.sca(ax[0][2])
    plt.legend(fontsize='small', handlelength=0.1)

    plt.tight_layout()
    plt.savefig('../plots/delta_vc{}_correlations.pdf'.format(elabel))
def collate_orbit(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True):
    """Collect per-stream circular-velocity constraints and orbital properties
    from the vcirc_*.npz files into a single summary table
    (../data/crb/vc_orbital_summary.fits)."""
    pid, dp_fid, vlabel = get_varied_pars(vary)
    names = get_done()
    nstreams = len(names)
    name_width = len(max(names, key=len))

    # per-stream scalar summaries
    tname = np.chararray(nstreams, itemsize=name_width)
    vcmin = np.empty(nstreams)
    r_vcmin = np.empty(nstreams)
    Labs = np.empty((nstreams, 3))
    lx = np.empty(nstreams)
    ly = np.empty(nstreams)
    lz = np.empty(nstreams)
    Lmod = np.empty(nstreams)
    period = np.empty(nstreams)
    Nperiod = np.empty(nstreams)
    ecc = np.empty(nstreams)
    rperi = np.empty(nstreams)
    rapo = np.empty(nstreams)
    rcur = np.empty(nstreams)
    length = np.empty(nstreams)

    for k, name in enumerate(names):
        d = np.load('../data/crb/vcirc_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))

        rel_dvc = d['dvc'] / d['vc']
        idmin = np.argmin(rel_dvc)

        if k==0:
            # radial grids share a common size; allocate once it is known
            Nr = np.size(d['r'])
            dvc = np.empty((nstreams, Nr))
            vc = np.empty((nstreams, Nr))
            r = np.empty((nstreams, Nr))

        tname[k] = name
        vcmin[k] = rel_dvc[idmin]
        r_vcmin[k] = d['r'][idmin]
        dvc[k] = d['dvc']
        # NOTE: the 'vc' column stores the relative uncertainty, not V_c itself
        vc[k] = rel_dvc
        r[k] = d['r']

        # angular-momentum statistics along the orbit
        lnorm = np.linalg.norm(d['l'], axis=1)
        Labs[k] = np.median(np.abs(d['l']), axis=0)
        Lmod[k] = np.median(lnorm)
        lx[k] = np.abs(np.median(d['l'][:,0]/lnorm))
        ly[k] = np.abs(np.median(d['l'][:,1]/lnorm))
        lz[k] = np.abs(np.median(d['l'][:,2]/lnorm))

        period[k] = d['p']
        Nperiod[k] = d['Np']
        ecc[k] = d['ecc']
        rperi[k] = d['rperi']
        rapo[k] = d['rapo']
        rcur[k] = d['rcur']
        # stream length in degrees
        length[k] = np.max(mock['xi_range']) - np.min(mock['xi_range'])

    t = Table([tname, vcmin, r_vcmin, dvc, vc, r, Labs, Lmod, lx, ly, lz, period, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'vcmin', 'rmin', 'dvc', 'vc', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur'))
    t.pprint()
    t.write('../data/crb/vc_orbital_summary.fits', overwrite=True)
# radial acceleration
def ar_r(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39):
    """Calculate precision in radial acceleration as a function of galactocentric radius

    For every finished stream, projects its inverse-CRB matrix onto the radial
    acceleration along Nsight sightlines (the mock progenitor position when
    Nsight==1, random isotropic directions otherwise), then stores the curves
    and orbital summaries in ../data/crb/ar_orbital_summary_*.fits.
    """
    np.random.seed(seed)
    pid, dp_fid, vlabel = get_varied_pars(vary)
    components = [c for c in vary if c!='progenitor']

    names = get_done()
    N = len(names)
    Nmax = len(max(names, key=len))

    # per-stream summary arrays
    tname = np.chararray(N, itemsize=Nmax)
    armin = np.empty((N, Nsight))
    r_armin = np.empty((N, Nsight))
    Labs = np.empty((N,3))
    lx = np.empty(N)
    ly = np.empty(N)
    lz = np.empty(N)
    Lmod = np.empty(N)
    period_ = np.empty(N)
    Nperiod = np.empty(N)
    ecc = np.empty(N)
    rperi = np.empty(N)
    rapo = np.empty(N)
    rcur = np.empty(N)
    length = np.empty(N)

    # radial sampling grid (kpc)
    Npix = 300
    r = np.linspace(0.1, 200, Npix)
    dar = np.empty((N, Nsight, Npix))
    ar = np.empty((N, Nsight, Npix))
    rall = np.empty((N, Nsight, Npix))

    plt.close()
    # NOTE(review): the figure is created but nothing is drawn into it here
    fig, ax = plt.subplots(1,3, figsize=(15,5))

    for e, name in enumerate(names[:]):
        # read in full inverse CRB for stream modeling
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        cx = stable_inverse(cxi)
        # drop the first 6 (progenitor) parameters; keep the potential block
        cq = cx[6:,6:]
        Npot = np.shape(cq)[0]

        # relate to orbit
        orbit = stream_orbit(name=name)
        ro = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
        rmin = np.min(ro)
        rmax = np.max(ro)
        rcur_ = ro[0]
        r0 = ro[-1]
        e_ = (rmax - rmin)/(rmax + rmin)

        l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
        p, Np = period(name)

        mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))

        for s in range(Nsight):
            if Nsight==1:
                # single sightline
                x0 = mock['x0']
                xeq = coord.SkyCoord(ra=x0[0], dec=x0[1], distance=x0[2])
                xg = xeq.transform_to(coord.Galactocentric)
                rg = np.linalg.norm(np.array([xg.x.value, xg.y.value, xg.z.value]))
                theta = np.arccos(xg.z.value/rg)
                phi = np.arctan2(xg.y.value, xg.x.value)
            else:
                # uniformly random direction on the sphere
                u_ = np.random.random(1)
                v_ = np.random.random(1)
                theta = np.arccos(2*u_ - 1)
                phi = 2 * np.pi * v_

            # Cartesian sampling points along the sightline
            xin = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]).T
            arad_pix = np.empty((Npix, 1))
            af = np.empty(Npix)
            derf = np.empty((Npix, Npot))
            for i in range(Npix):
                xi = xin[i]*u.kpc
                a = acc_rad(xi, components=components)
                af[i] = a
                dadq = apder_rad(xi, components=components)
                derf[i] = dadq

            # project the parameter covariance onto the radial acceleration
            ca = np.matmul(derf, np.matmul(cq, derf.T))

            Nx = Npot
            Nw = Npix
            # largest Nx+2 eigenmodes of the projected covariance
            vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))

            # combine the leading eigenmodes in quadrature
            vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))

            # store
            idmin = np.argmin(vcomb / np.abs(af))
            armin[e][s] = (vcomb / np.abs(af))[idmin]
            r_armin[e][s] = r[idmin]

            dar[e][s] = vcomb
            ar[e][s] = vcomb / np.abs(af)
            rall[e][s] = r

        # stream length in degrees
        dlambda = np.max(mock['xi_range']) - np.min(mock['xi_range'])

        tname[e] = name
        Labs[e] = np.median(np.abs(l), axis=0)
        Lmod[e] = np.median(np.linalg.norm(l, axis=1))
        lx[e] = np.abs(np.median(l[:,0]/np.linalg.norm(l, axis=1)))
        ly[e] = np.abs(np.median(l[:,1]/np.linalg.norm(l, axis=1)))
        lz[e] = np.abs(np.median(l[:,2]/np.linalg.norm(l, axis=1)))

        period_[e] = p
        Nperiod[e] = Np
        ecc[e] = e_
        rperi[e] = rmin
        rapo[e] = rmax
        rcur[e] = rcur_
        length[e] = dlambda

    t = Table([tname, armin, r_armin, dar, ar, rall, Labs, Lmod, lx, ly, lz, period_, Nperiod, length, ecc, rperi, rapo, rcur], names=('name', 'armin', 'rmin', 'dar', 'ar', 'r', 'Labs', 'Lmod', 'lx', 'ly', 'lz', 'period', 'Nperiod', 'length', 'ecc', 'rperi', 'rapo', 'rcur'))
    t.pprint()
    t.write('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight), overwrite=True)

    plt.tight_layout()
def plot_ar(current=False, vary=['progenitor', 'bary', 'halo'], Nsight=1):
    """Explore constraints on radial acceleration, along the progenitor line

    Reads the ar_orbital_summary_*.fits table (written by ar_r) and plots the
    precision curves plus their minima against stream length, current radius
    and apocenter; also writes a per-stream constraints table.
    `current` is accepted but unused here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    t = Table.read('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight))
    N = len(t)

    # color by apocenter, line width by stream length
    # (first fapo definition is immediately overwritten)
    fapo = t['rapo']/np.max(t['rapo'])
    fapo = t['rapo']/100
    flen = t['length']/(np.max(t['length']) + 10)
    fcolor = fapo

    plt.close()
    fig, ax = plt.subplots(1, 4, figsize=(20,5))

    for i in range(N):
        color = mpl.cm.bone(fcolor[i])
        lw = flen[i] * 5

        plt.sca(ax[0])
        # NOTE(review): x uses sightline 0 but y uses sightline 1 -- this
        # requires Nsight>=2; confirm it shouldn't be [i][0] for both
        plt.plot(t['r'][i][0], t['ar'][i][1], '-', color=color, lw=lw)

    plt.xlabel('R (kpc)')
    plt.ylabel('$\Delta$ $a_r$ / $a_r$')
    plt.ylim(0, 3.5)

    # medians and half the 16th-84th percentile spread across sightlines
    armin = np.median(t['armin'], axis=1)
    armin_err = 0.5 * (np.percentile(t['armin'], 84, axis=1) - np.percentile(t['armin'], 16, axis=1))

    rmin = np.median(t['rmin'], axis=1)
    rmin_err = 0.5 * (np.percentile(t['rmin'], 84, axis=1) - np.percentile(t['rmin'], 16, axis=1))

    plt.sca(ax[1])
    plt.scatter(t['length'], armin, c=fcolor, cmap='bone', vmin=0, vmax=1)
    plt.errorbar(t['length'], armin, yerr=armin_err, color='k', fmt='none', zorder=0)
    plt.xlabel('Length (deg)')
    plt.ylabel('min $\Delta$ $a_r$')
    plt.ylim(0, 3.5)

    plt.sca(ax[2])
    # identity line for reference
    a = np.linspace(0,90,100)
    plt.plot(a, a, 'k-')
    #plt.plot(a, 2*a, 'k--')
    #plt.plot(a, 3*a, 'k:')
    plt.scatter(t['rcur'], rmin, c=fcolor, cmap='bone', vmin=0, vmax=1)
    plt.errorbar(t['rcur'], rmin, yerr=rmin_err, color='k', fmt='none', zorder=0)
    plt.xlabel('$R_{cur}$ (kpc)')
    plt.ylabel('$R_{min}$ (kpc)')
    #for i in range(len(t)):
        #plt.text(t['rcur'][i], rmin[i]+5, t['name'][i], fontsize='small')
    plt.xlim(0,90)
    plt.ylim(0,90)

    plt.sca(ax[3])
    a = np.linspace(0,90,100)
    plt.plot(a, a, 'k-')
    #plt.plot(a, 2*a, 'k--')
    #plt.plot(a, 3*a, 'k:')
    plt.scatter(t['rapo'], rmin, c=fcolor, cmap='bone', vmin=0, vmax=1)
    plt.errorbar(t['rapo'], rmin, yerr=rmin_err, color='k', fmt='none', zorder=0)
    plt.xlabel('$R_{apo}$ (kpc)')
    plt.ylabel('$R_{min}$ (kpc)')
    plt.xlim(0,90)
    plt.ylim(0,90)

    plt.tight_layout()
    plt.savefig('../plots/ar_crb_{}_sight{:d}.pdf'.format(vlabel, Nsight))

    # save stream constraints
    tout = Table([t['name'], t['rapo'], t['rcur'], t['length'], rmin, rmin_err, armin, armin_err], names=('name', 'rapo', 'rcur', 'length', 'rmin', 'rmin_err', 'armin', 'armin_err'))
    tout.write('../data/ar_constraints_{}_sight{}.fits'.format(vlabel, Nsight), overwrite=True)
def plot_all_ar(Nsight=50):
    """Explore constraints on radial acceleration, along the progenitor line

    Overlays radial-acceleration constraints for progressively richer galaxy
    models and fits an exponential to the minimum-precision vs stream-length
    relation. Output: ../plots/ar_crb_all_sight*.pdf and ../paper/ar_crb_all.pdf.
    """
    # first styling set (4 models) is superseded by the 3-model set below
    alist = [0.2, 0.4, 0.7, 1]
    mslist = [11, 9, 7, 5]
    lwlist = [8, 6, 4, 2]
    fc = [0.8, 0.6, 0.4, 0.2]
    vlist = [['progenitor', 'bary', 'halo'], ['progenitor', 'bary', 'halo', 'dipole'], ['progenitor', 'bary', 'halo', 'dipole', 'quad'], ['progenitor', 'bary', 'halo', 'dipole', 'quad', 'octu']]
    labels = ['Fiducial Galaxy', '+ dipole', '++ quadrupole', '+++ octupole']

    alist = [0.2, 0.55, 1]
    #mslist = [11, 8, 5]
    mslist = [13, 10, 7]
    #lwlist = [8, 5, 2]
    lwlist = [9, 6, 3]
    fc = [0.8, 0.5, 0.2]
    vlist = [['progenitor', 'bary', 'halo'], ['progenitor', 'bary', 'halo', 'dipole', 'quad'], ['progenitor', 'bary', 'halo', 'dipole', 'quad', 'octu']]
    labels = ['Fiducial Galaxy', '++ quadrupole', '+++ octupole']

    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(13.5,4.5))

    for e, vary in enumerate(vlist):
        pid, dp_fid, vlabel = get_varied_pars(vary)
        t = Table.read('../data/crb/ar_orbital_summary_{}_sight{:d}.fits'.format(vlabel, Nsight))
        N = len(t)

        color = mpl.cm.viridis(fc[e])
        lw = lwlist[e]
        ms = mslist[e]
        alpha = alist[e]

        plt.sca(ax[0])
        # example curves for streams 0 and 4 only
        for i in range(0,5,4):
            # NOTE(review): x uses sightline 0 but y uses sightline 1 -- confirm
            plt.plot(t['r'][i][0], t['ar'][i][1], '-', color=color, lw=lw, alpha=alpha)

        plt.xlabel('r (kpc)')
        plt.ylabel('$\Delta$ $a_r$ / $a_r$')
        plt.ylim(0, 3.5)

        # medians and half the 16th-84th percentile spread across sightlines
        armin = np.median(t['armin'], axis=1)
        armin_err = 0.5 * (np.percentile(t['armin'], 84, axis=1) - np.percentile(t['armin'], 16, axis=1))

        rmin = np.median(t['rmin'], axis=1)
        rmin_err = 0.5 * (np.percentile(t['rmin'], 84, axis=1) - np.percentile(t['rmin'], 16, axis=1))

        # fit exponential
        p = np.polyfit(t['length'], np.log(armin), 1)
        print(1/p[0], np.exp(p[1]))
        poly = np.poly1d(p)
        x_ = np.linspace(np.min(t['length']), np.max(t['length']), 100)
        y_ = poly(x_)

        plt.sca(ax[1])
        plt.plot(x_, np.exp(y_), '-', color=color, alpha=alpha, lw=lw, label='')
        plt.plot(t['length'], armin, 'o', color=color, ms=ms, alpha=alpha, label=labels[e])
        plt.errorbar(t['length'], armin, yerr=armin_err, color=color, fmt='none', zorder=0, alpha=alpha)
        #plt.plot(t['length'], np.log(armin), 'o', color=color, ms=ms, alpha=alpha, label=labels[e])
        #plt.errorbar(t['length'], np.log(armin), yerr=np.log(armin_err), color=color, fmt='none', zorder=0, alpha=alpha)
        if e==len(vlist)-1:
            plt.legend(loc=1, fontsize='small', handlelength=0.5, frameon=False)
        plt.xlabel('Stream length (deg)')
        plt.ylabel('min $\Delta$ $a_r$')
        plt.ylim(0, 3.5)

        plt.sca(ax[2])
        # identity line for reference
        a = np.linspace(0,90,100)
        plt.plot(a, a, 'k-', alpha=0.4)
        plt.plot(t['rcur'], rmin, 'o', color=color, ms=ms, alpha=alpha)
        plt.errorbar(t['rcur'], rmin, yerr=rmin_err, color=color, fmt='none', zorder=0, alpha=alpha)
        plt.xlabel('$R_{cur}$ (kpc)')
        plt.ylabel('$R_{min}$ (kpc)')
        plt.xlim(0,90)
        plt.ylim(0,90)

    #plt.sca(ax[3])
    #a = np.linspace(0,90,100)
    #plt.plot(a, a, 'k-')
    #plt.plot(t['rapo'], rmin, 'o', color=color, ms=ms, alpha=alpha)
    #plt.errorbar(t['rapo'], rmin, yerr=rmin_err, color=color, fmt='none', zorder=0, alpha=alpha)
    #plt.xlabel('$R_{apo}$ (kpc)')
    #plt.ylabel('$R_{min}$ (kpc)')
    #plt.xlim(0,90)
    #plt.ylim(0,90)

    plt.tight_layout()
    plt.savefig('../plots/ar_crb_all_sight{:d}.pdf'.format(Nsight))
    plt.savefig('../paper/ar_crb_all.pdf')
def ar_multi(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', align=True, Nsight=1, seed=39, verbose=True):
    """Calculate precision in radial acceleration as a function of galactocentric radius for multiple streams

    Accumulates the Fisher (inverse-CRB) matrices of the first k+1 streams for
    k = 0..N-1, projects each combined matrix onto the radial acceleration, and
    plots/stores how the constraint tightens as streams are added.
    """
    np.random.seed(seed)
    pid, dp_fid, vlabel = get_varied_pars(vary)
    components = [c for c in vary if c!='progenitor']
    Npar = len(pid)

    names = get_done()
    N = len(names)
    Nmax = len(max(names, key=len))

    armin = np.empty((N, Nsight))
    r_armin = np.empty((N, Nsight))

    # radial sampling grid (kpc)
    Npix = 300
    r = np.linspace(0.1, 200, Npix)
    dar = np.empty((N, Nsight, Npix))
    ar = np.empty((N, Nsight, Npix))
    rall = np.empty((N, Nsight, Npix))

    plt.close()
    fig, ax = plt.subplots(1,1, figsize=(8,6))
    plt.sca(ax)

    for k in range(N):
        # combine the first k+1 streams
        names_in = [names[x] for x in range(k+1)]
        if verbose: print(k, names_in)

        # Fisher matrices are summed over streams
        cxi_all = np.zeros((Npar, Npar))
        for e, name in enumerate(names_in):
            # read in full inverse CRB for stream modeling
            fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
            cxi = fm['cxi']
            cxi_all = cxi_all + cxi

        cx_all = stable_inverse(cxi_all)
        # drop the first 6 (progenitor) parameters; keep the potential block
        cq = cx_all[6:,6:]
        Npot = np.shape(cq)[0]

        for s in range(Nsight):
            if Nsight==1:
                # single sightline, taken from the 'gd1' mock's x0 position
                mock = pickle.load(open('../data/mock_{}.params'.format('gd1'), 'rb'))
                x0 = mock['x0']
                xeq = coord.SkyCoord(ra=x0[0], dec=x0[1], distance=x0[2])
                xg = xeq.transform_to(coord.Galactocentric)
                rg = np.linalg.norm(np.array([xg.x.value, xg.y.value, xg.z.value]))
                theta = np.arccos(xg.z.value/rg)
                phi = np.arctan2(xg.y.value, xg.x.value)
            else:
                # uniformly random direction on the sphere
                u_ = np.random.random(1)
                v_ = np.random.random(1)
                theta = np.arccos(2*u_ - 1)
                phi = 2 * np.pi * v_

            # Cartesian sampling points along the sightline
            xin = np.array([r*np.sin(theta)*np.cos(phi), r*np.sin(theta)*np.sin(phi), r*np.cos(theta)]).T
            arad_pix = np.empty((Npix, 1))
            af = np.empty(Npix)
            derf = np.empty((Npix, Npot))
            for i in range(Npix):
                xi = xin[i]*u.kpc
                a = acc_rad(xi, components=components)
                af[i] = a
                dadq = apder_rad(xi, components=components)
                derf[i] = dadq

            # project the parameter covariance onto the radial acceleration
            ca = np.matmul(derf, np.matmul(cq, derf.T))

            Nx = Npot
            Nw = Npix
            # largest Nx+2 eigenmodes of the projected covariance
            vals, vecs = la.eigh(ca, eigvals=(Nw - Nx - 2, Nw - 1))

            # combine the leading eigenmodes in quadrature
            vcomb = np.sqrt(np.sum(vecs**2*vals, axis=1))

            # store
            idmin = np.argmin(vcomb / np.abs(af))
            armin[k][s] = (vcomb / np.abs(af))[idmin]
            r_armin[k][s] = r[idmin]

            dar[k][s] = vcomb
            ar[k][s] = vcomb / np.abs(af)
            rall[k][s] = r

            # relative precision in percent, colored by number of streams
            plt.plot(rall[k][s], ar[k][s]*100, '-', color=mpl.cm.viridis_r(k/12.), lw=2)

    t = Table([armin, r_armin, dar, ar, rall], names=('armin', 'rmin', 'dar', 'ar', 'r'))
    t.pprint()
    t.write('../data/crb/ar_multistream{}_{}_sight{:d}.fits'.format(N, vlabel, Nsight), overwrite=True)

    plt.xlabel('r (kpc)')
    plt.ylabel('$\Delta$ $a_r$ / $a_r$ (%)')
    plt.ylim(0,100)

    # add custom colorbar
    sm = plt.cm.ScalarMappable(cmap=mpl.cm.viridis_r, norm=plt.Normalize(vmin=1, vmax=12))
    # fake up the array of the scalar mappable. Urgh...
    sm._A = []

    divider = make_axes_locatable(plt.gca())
    cax = divider.append_axes('right', size='4%', pad=0.05)
    #cb = fig.colorbar(sm, ax=cax, pad=0.1, aspect=40, ticks=np.arange(1,13,3))
    cb = plt.colorbar(sm, cax=cax, ticks=np.arange(1,13,3))
    cb.set_label('Number of streams')

    plt.tight_layout()
    plt.savefig('../plots/ar_multistream{}_{}_sight{:d}.png'.format(N, vlabel, Nsight))
# flattening
def delta_q(q='x', Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', j=0, align=True, fast=False, scale=False):
    """Plot the CRB uncertainty on a halo shape parameter against orbital
    properties of each stream.

    q -- which axis to examine ('x' or 'z'); selects both the halo-parameter
         index (iq) and the angular-momentum component (kq)
    j and scale are accepted for signature compatibility but unused here.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)

    # kq: angular-momentum column; iq: index within the halo parameter block
    kq = {'x': 0, 'z': 2}
    iq = {'x': 2, 'z': 3}
    labelq = {'x': '$_x$', 'z': '$_z$'}

    component = 'halo'
    pparams0 = pparams_fid
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    Np = len(pid_comp)
    # positions of the halo parameters within the full varied-parameter list
    pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))

    names = get_done()
    labels = full_names()
    colors = {x: mpl.cm.bone(e/len(names)) for e, x in enumerate(names)}

    plt.close()
    fig, ax = plt.subplots(1,3,figsize=(15,5))

    for name in names:
    #for n in [-1,]:
        # read in full inverse CRB for stream modeling
        fm = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, name, align, vlabel))
        cxi = fm['cxi']
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)

        # marginalized 1-sigma uncertainties; crb_frac is computed but unused
        crb_all = np.sqrt(np.diag(cx))
        crb = [crb_all[pid_crb[i]] for i in range(Np)]
        crb_frac = [crb_all[pid_crb[i]]/pparams0[pid_comp[i]].value for i in range(Np)]

        delta_q = crb[iq[q]]

        ## choose the appropriate components:
        #Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
        #if 'progenitor' not in vary:
            #Nprog = 0
        #nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
        #nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': np.shape(cx)[0], 'point': 1}

        #if 'progenitor' not in vary:
            #nstart['dipole'] = Npoint
            #nend['dipole'] = Npoint + Ndipole

        #if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
            #components = [component]
        #else:
            #components = [x for x in vary if x!='progenitor']
        #cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
        #if ('progenitor' not in vary) & ('bary' not in vary):
            #cq = cx
        #Npot = np.shape(cq)[0]

        #if scale:
            #dp_opt = read_optimal_step(n, vary)
            #dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
            #dp_unit = unity_scale(dp)
            #scale_vec = np.array([x.value for x in dp_unit[nstart[component]:nend[component]]])
            #scale_mat = np.outer(scale_vec, scale_vec)
            #cqi /= scale_mat

        #delta_q = np.sqrt(cq[iq[q], iq[q]])

        # relate to orbit
        orbit = stream_orbit(name=name)
        r = np.linalg.norm(orbit['x'].to(u.kpc), axis=0)
        rmin = np.min(r)
        rmax = np.max(r)
        # first eccentricity estimate is overwritten by the rmin/rmax ratio
        e = (rmax - rmin)/(rmax + rmin)
        e = rmin/rmax

        l = np.cross(orbit['x'].to(u.kpc), orbit['v'].to(u.km/u.s), axisa=0, axisb=0)
        # orientation of the orbital plane: median and scatter of L_q/|L|
        ltheta = np.median(l[:,kq[q]]/np.linalg.norm(l, axis=1))
        langle = np.degrees(np.arccos(ltheta))
        sigltheta = np.std(l[:,kq[q]]/np.linalg.norm(l, axis=1))

        plt.sca(ax[0])
        plt.plot(e, delta_q, 'o', color=colors[name], label=labels[name])

        plt.sca(ax[1])
        plt.plot(sigltheta, delta_q, 'o', color=colors[name], label=labels[name])

        plt.sca(ax[2])
        plt.plot(np.abs(ltheta), delta_q, 'o', color=colors[name], label=labels[name])

    plt.sca(ax[0])
    plt.legend(frameon=False, handlelength=1, fontsize='small')
    plt.xlabel('Eccentricity')
    plt.ylabel('$\Delta$ q{}'.format(labelq[q]))
    plt.xlim(0,1)
    #plt.ylim(0, 1e11)

    plt.sca(ax[1])
    plt.xlabel('$\sigma$ L{}'.format(labelq[q]) + ' (kpc km s$^{-1}$)')
    plt.ylabel('$\Delta$ q{}'.format(labelq[q]))

    plt.sca(ax[2])
    plt.xlabel('|L{}| / |L|'.format(labelq[q]))
    plt.ylabel('$\Delta$ q{}'.format(labelq[q]))

    plt.tight_layout()
    plt.savefig('../plots/delta_q{}.pdf'.format(q))
###
# multiple streams
###
def pairs_pdf(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, summary=False):
    """Corner plots of CRB constraints for every pair of finished streams.

    For each pair, sums the two streams' inverse-CRB matrices and draws the
    combined error ellipses together with the individual ones (one page per
    pair); with summary=True, all combined ellipses go on a single page.
    Output: ../plots/corner_pairs_*.pdf.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)

    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    # start/end indices of each component's block within the full matrix
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}

    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']

    # axis labels for the selected component's parameters
    pid_comp = pid[nstart[component]:nend[component]]
    plabels, units = get_parlabel(pid_comp)
    punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]

    done = get_done()
    N = len(done)

    pp = PdfPages('../plots/corner_pairs_{:s}{:1d}_a{:1d}_{:s}_{:s}_{:d}.pdf'.format(errmode, Ndim, align, vlabel, component, summary))
    fig = None
    ax = None

    for i in range(N):
        di = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[i], align, vlabel))
        cxi_i = di['cxi']

        for j in range(i+1,N):
            dj = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[j], align, vlabel))
            cxi_j = dj['cxi']

            # inverse-CRB (Fisher) matrices are summed to combine streams
            cxi = cxi_i + cxi_j

            cx = stable_inverse(cxi)
            cx_i = stable_inverse(cxi_i)
            cx_j = stable_inverse(cxi_j)

            # select component of the parameter space
            cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
            cq_i = cx_i[nstart[component]:nend[component], nstart[component]:nend[component]]
            cq_j = cx_j[nstart[component]:nend[component], nstart[component]:nend[component]]

            Nvar = np.shape(cq)[0]

            # marginalized 1-sigma uncertainties: combined, then individual
            print(done[i], done[j])
            print(np.sqrt(np.diag(cq)))
            print(np.sqrt(np.diag(cq_i)))
            print(np.sqrt(np.diag(cq_j)))

            if summary==False:
                # fresh figure per pair
                fig = None
                ax = None

                # plot ellipses
                fig, ax = corner_ellipses(cq, fig=fig, ax=ax)
                fig, ax = corner_ellipses(cq_i, alpha=0.5, fig=fig, ax=ax)
                fig, ax = corner_ellipses(cq_j, alpha=0.5, fig=fig, ax=ax)

                # labels
                plt.title('{} & {}'.format(done[i], done[j]))
                for k in range(Nvar-1):
                    plt.sca(ax[-1][k])
                    plt.xlabel(params[k])

                    plt.sca(ax[k][0])
                    plt.ylabel(params[k+1])

                pp.savefig(fig)
            else:
                # accumulate all pairs on a single shared figure
                fig, ax = corner_ellipses(cq, fig=fig, ax=ax, alpha=0.5)

    if summary:
        # labels
        for k in range(Nvar-1):
            plt.sca(ax[-1][k])
            plt.xlabel(params[k])

            plt.sca(ax[k][0])
            plt.ylabel(params[k+1])

        pp.savefig(fig)
    pp.close()
def multi_pdf(Nmulti=3, Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True):
    """Create a pdf with each page containing a corner plot with constraints on a given component of the model from multiple streams

    Every combination of Nmulti streams gets a page: individual-stream ellipses
    (faint) plus the combined constraint. Collated covariances and parameter
    uncertainties are also stored in ../data/crb/cx_collate_multi*.npz.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    Ntot = len(pid)

    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    # start/end indices of each component's block within the full matrix
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad} #, 'all': np.shape(cx)[0], 'point': 1}

    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']

    # axis labels for the selected component's parameters
    pid_comp = pid[nstart[component]:nend[component]]
    plabels, units = get_parlabel(pid_comp)
    punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    Nvar = len(pid_comp)

    # fiducial values, used for relative uncertainties
    pparams0 = pparams_fid
    pparams_comp = [pparams0[x] for x in pid_comp]
    pparams_arr = np.array([x.value for x in pparams_comp])

    pp = PdfPages('../plots/corner_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}.pdf'.format(Nmulti, errmode, Ndim, align, vlabel, component))
    fig = None
    ax = None

    done = get_done()
    N = len(done)
    if Nmulti>N:
        Nmulti = N

    t = np.arange(N, dtype=np.int64).tolist()
    all_comb = list(itertools.combinations(t, Nmulti))
    comb = sorted(list(set(all_comb)))
    Ncomb = len(comb)

    comb_all = np.ones((Ncomb, N)) * np.nan
    cx_all = np.empty((Ncomb, Nvar, Nvar))
    p_all = np.empty((Ncomb, Nvar))
    prel_all = np.empty((Ncomb, Nvar))

    for i in range(Ncomb):
        print(i, [done[i_] for i_ in comb[i]])
        cxi = np.zeros((Ntot, Ntot))
        fig = None
        ax = None
        for j in range(Nmulti):
            ind = comb[i][j]
            #print('{} '.format(done[ind]), end='')

            dj = np.load('../data/crb/cxi_{:s}{:1d}_{:s}_a{:1d}_{:s}.npz'.format(errmode, Ndim, done[ind], align, vlabel))
            # NOTE(review): reads the 'dxi' entry here, while other functions in
            # this file read 'cxi' -- confirm the npz file provides this key
            cxi_ = dj['dxi']
            cxi = cxi + cxi_

            # select component of the parameter space
            cx_ = stable_inverse(cxi_)
            cq_ = cx_[nstart[component]:nend[component], nstart[component]:nend[component]]
            if Ncomb==1:
                np.save('../data/crb/cx_multi1_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}'.format(errmode, Ndim, done[ind], align, vlabel, component), cq_)

            print(np.sqrt(np.diag(cq_)))

            fig, ax = corner_ellipses(cq_, alpha=0.5, fig=fig, ax=ax)

        # combined constraint; 'pxi' is added from dj -- NOTE(review): this
        # relies on dj from the final loop iteration; confirm that is intended
        cx = stable_inverse(cxi + dj['pxi'])
        cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]
        print(np.sqrt(np.diag(cq)))
        #label = '.'.join([done[comb[i][i_]] for i_ in range(Nmulti)])
        #np.save('../data/crb/cx_multi{:d}_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, label, align, vlabel, component), cq)

        cx_all[i] = cq
        p_all[i] = np.sqrt(np.diag(cq))
        prel_all[i] = p_all[i]/pparams_arr
        comb_all[i][:Nmulti] = np.array(comb[i])

        fig, ax = corner_ellipses(cq, fig=fig, ax=ax)

        # labels
        title = ' + '.join([done[comb[i][i_]] for i_ in range(Nmulti)])
        plt.suptitle(title)
        for k in range(Nvar-1):
            plt.sca(ax[-1][k])
            plt.xlabel(params[k])

            plt.sca(ax[k][0])
            plt.ylabel(params[k+1])

        plt.tight_layout(rect=(0,0,1,0.95))
        pp.savefig(fig)

    np.savez('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, align, vlabel, component), comb=comb_all, cx=cx_all, p=p_all, p_rel=prel_all)
    pp.close()
def collate(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, Nmax=None):
    """Gather the saved per-combination CRB covariance files (cx_multi*.npy)
    into one collated npz per combination size, recording covariances and
    absolute/relative parameter uncertainties."""
    done = get_done()
    N = len(done)
    if Nmax==None:
        Nmax = N
    stream_ids = np.arange(N, dtype=np.int64).tolist()

    pid, dp_fid, vlabel = get_varied_pars(vary)
    Ntot = len(pid)

    # fiducial parameter values of the selected component
    pparams0 = pparams_fid
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    Np = len(pid_comp)
    pid_crb = myutils.wherein(np.array(pid), np.array(pid_comp))
    pparams_arr = np.array([pparams0[x].value for x in pid_comp])

    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    # start/end indices of each component's block within the full matrix
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']

    # parameter ids/labels of the selected component (shadows earlier pid_comp)
    pid_comp = pid[nstart[component]:nend[component]]
    plabels, units = get_parlabel(pid_comp)
    punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    Nvar = len(pid_comp)

    for Nmulti in range(1, Nmax+1):
        # all combinations of Nmulti streams, in a stable order
        comb = sorted(set(itertools.combinations(stream_ids, Nmulti)))
        Ncomb = len(comb)

        comb_all = np.ones((Ncomb, N)) * np.nan
        cx_all = np.empty((Ncomb, Nvar, Nvar))
        p_all = np.empty((Ncomb, Nvar))
        prel_all = np.empty((Ncomb, Nvar))

        for j, members in enumerate(comb):
            label = '.'.join([done[i_] for i_ in members])
            cx = np.load('../data/crb/cx_multi{:d}_{:s}{:1d}_{:s}_a{:1d}_{:s}_{:s}.npy'.format(Nmulti, errmode, Ndim, label, align, vlabel, component))
            cx_all[j] = cx
            p_all[j] = np.sqrt(np.diag(cx))
            prel_all[j] = p_all[j]/pparams_arr
            comb_all[j][:Nmulti] = np.array(members)

        np.savez('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}'.format(Nmulti, errmode, Ndim, align, vlabel, component), comb=comb_all, cx=cx_all, p=p_all, p_rel=prel_all)
def nstream_improvement(Ndim=6, vary=['progenitor', 'bary', 'halo'], errmode='fiducial', component='halo', align=True, relative=False):
    """Show how much parameters improve by including additional streams

    For every potential parameter of the chosen component, plot the CRB
    uncertainty of each stream combination against the number of streams in
    the combination, highlight the median over combinations, and label the
    best-performing combinations of up to 3 streams.
    Reads the collated CRB files written earlier
    (../data/crb/cx_collate_multi*.npz) and saves a PDF figure.
    If relative is True, uncertainties are shown in percent of the fiducial
    parameter values.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    done = get_done()
    N = len(done)

    # choose the appropriate components:
    # number of parameters in each model component (fixed by the model setup)
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    # start/end indices of each component's block inside the full parameter vector
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    pid_comp = pid[nstart[component]:nend[component]]
    plabels, units = get_parlabel(pid_comp)
    if relative:
        punits = [' (%)' for x in units]
    else:
        punits = [' ({})'.format(x) if len(x) else '' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    Nvar = len(pid_comp)

    # fiducial parameter values, used to convert to relative uncertainties
    pparams0 = pparams_fid
    pparams_comp = [pparams0[x] for x in pid_comp]
    pparams_arr = np.array([x.value for x in pparams_comp])

    median = np.empty((Nvar, N))
    x = np.arange(N) + 1

    # figure layout: Nvar panels on an ncol-wide grid
    da = 3
    ncol = 2
    nrow = np.int64(Nvar/ncol)
    w = 4 * da
    h = nrow * da

    plt.close()
    fig, ax = plt.subplots(nrow, ncol, figsize=(w,h), sharex='col')

    for i in range(N):
        Nmulti = i+1
        t = np.arange(N, dtype=np.int64).tolist()
        all_comb = list(itertools.combinations(t, Nmulti))
        comb = sorted(list(set(all_comb)))
        Ncomb = len(comb)

        coll = np.load('../data/crb/cx_collate_multi{:d}_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(Nmulti, errmode, Ndim, align, vlabel, component))
        comb_all = coll['comb']
        cq_all = coll['cx']
        p_all = coll['p']
        if relative:
            p_all = p_all * 100 / pparams_arr
        # median over all combinations of Nmulti streams, per parameter
        median = np.median(p_all, axis=0)
        Ncomb = np.shape(comb_all)[0]
        nst = np.ones(Ncomb) * Nmulti

        for k in range(Nvar):
            plt.sca(ax[k%ncol][np.int64(k/ncol)])
            # attach legend labels only once (first panel, first multiplicity)
            if (i==0) & (k==0):
                plt.plot(nst, p_all[:,k], 'o', color='0.8', ms=10, label='Single combination of N streams')
                plt.plot(Nmulti, median[k], 'wo', mec='k', mew=2, ms=10, label='Median over different\ncombinations of N streams')
            else:
                plt.plot(nst, p_all[:,k], 'o', color='0.8', ms=10)
                plt.plot(Nmulti, median[k], 'wo', mec='k', mew=2, ms=10)

            # annotate the best combinations for small multiplicities
            if Nmulti<=3:
                if Nmulti==1:
                    Nmin = 3
                else:
                    Nmin = 1
                ids_min = p_all[:,k].argsort()[:Nmin]

                for j_ in range(Nmin):
                    best_names = [done[np.int64(i_)] for i_ in comb[ids_min[j_]][:Nmulti]]
                    print(k, j_, best_names)
                    label = ', '.join(best_names)

                    plt.text(Nmulti, p_all[ids_min[j_],k], '{}'.format(label), fontsize='xx-small')

    # axis cosmetics, applied once per panel
    for k in range(Nvar):
        plt.sca(ax[k%ncol][np.int64(k/ncol)])
        plt.gca().set_yscale('log')
        plt.gca().set_xscale('log')
        if relative:
            plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
        plt.ylabel(params[k])
        if k==0:
            plt.legend(frameon=False, fontsize='small', loc=1)
        if k%ncol==nrow-1:
            plt.xlabel('Number of streams in a combination')

    plt.tight_layout()
    plt.savefig('../plots/nstream_improvement_{:s}{:1d}_a{:1d}_{:s}_{:s}_{:1d}.pdf'.format(errmode, Ndim, align, vlabel, component, relative))
def corner_ellipses(cx, dax=2, color='k', alpha=1, lw=2, fig=None, ax=None, autoscale=True, correlate=False):
    """Corner plot with ellipses given by an input matrix

    Parameters
    ----------
    cx : square covariance matrix (Nvar x Nvar)
    dax : panel size in inches (only used when a new figure is created)
    color, alpha, lw : ellipse styling
    fig, ax : existing figure/axes grid to draw into; created when None
    autoscale : rescale each panel after adding its ellipse
    correlate : also return the Pearson correlation coefficient of each pair

    Returns
    -------
    (fig, ax) or (fig, ax, pcc) when correlate is True, where pcc is a
    (3, Npair) array of (i, j, corr_ij) rows.
    """
    # assert square matrix
    Nvar = np.shape(cx)[0]
    if correlate:
        Npair = np.int64(Nvar*(Nvar - 1)/2)
        pcc = np.empty((3, Npair))
        k = 0

    # bug fix: the original tested `(np.any(fig)==None) | (np.any(ax)==None)`,
    # which evaluates to False even when fig/ax are None, so no figure was
    # ever created for the default arguments and plt.sca crashed below
    if (fig is None) or (ax is None):
        plt.close()
        fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')

    for i in range(0, Nvar-1):
        for j in range(i+1, Nvar):
            plt.sca(ax[j-1][i])
            # 2x2 marginal covariance of parameters i and j
            cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])
            if correlate:
                pcc[0,k] = i
                pcc[1,k] = j
                pcc[2,k] = cx[i][j]/np.sqrt(cx[i][i]*cx[j][j])
                k += 1

            # ellipse axes along the eigenvectors, semi-axes = sqrt(eigenvalues)
            w, v = np.linalg.eig(cx_2d)
            if np.all(np.isreal(v)):
                theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                width = np.sqrt(w[0])*2
                height = np.sqrt(w[1])*2

                e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=color, alpha=alpha, lw=lw)
                plt.gca().add_patch(e)

            if autoscale:
                plt.gca().autoscale_view()

    # turn off unused axes
    for i in range(0, Nvar-1):
        for j in range(i+1, Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')

    plt.tight_layout()

    if correlate:
        return (fig, ax, pcc)
    else:
        return (fig, ax)
###
# compare observing modes
###
def comp_errmodes_old(n, errmodes=['binospec', 'fiducial', 'hectochelle'], Ndim=4, vary=['progenitor', 'bary', 'halo'], plot='halo', align=True, fast=False, scale=False):
    """Triangle plot comparing 2D CRB ellipses of one stream under different
    observational error models (superseded by comp_obsmodes).

    n : stream id used in the stored Fisher-matrix filenames
    plot : which parameter block to show ('halo', 'bary', 'progenitor',
           'dipole', or everything otherwise)
    fast : use numpy inversion instead of the stabilized inverse
    scale : rescale the covariance by the optimal finite-difference steps
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    dp_opt = read_optimal_step(n, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    plabels, units = get_parlabel(pid)
    params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
    if align:
        alabel = '_align'
    else:
        alabel = ''

    # index range of the parameter block to plot
    if plot=='halo':
        i0 = 11
        i1 = 15
    elif plot=='bary':
        i0 = 6
        i1 = 11
    elif plot=='progenitor':
        i0 = 0
        i1 = 6
    elif plot=='dipole':
        i0 = 15
        i1 = len(params)
    else:
        i0 = 0
        i1 = len(params)

    Nvar = i1 - i0
    params = params[i0:i1]
    if scale:
        dp_unit = unity_scale(dp)
        dp_unit = dp_unit[i0:i1]
    pid = pid[i0:i1]

    label = errmodes

    plt.close()
    dax = 2
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(dax*Nvar, dax*Nvar), sharex='col', sharey='row')

    for l, errmode in enumerate(errmodes):
        cxi = np.load('../data/crb/bspline_cxi{:s}_{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, errmode, n, vlabel, Ndim))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cx = cx[i0:i1,i0:i1]

        for i in range(0,Nvar-1):
            for j in range(i+1,Nvar):
                plt.sca(ax[j-1][i])
                # 2x2 marginal covariance, optionally rescaled to unit steps
                if scale:
                    cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
                else:
                    cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])

                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2

                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.bone(0.1+l/4), lw=2, label=label[l])
                    plt.gca().add_patch(e)

                if l==1:
                    plt.gca().autoscale_view()

                if j==Nvar-1:
                    plt.xlabel(params[i])
                if i==0:
                    plt.ylabel(params[j])

    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')

    plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
    plt.legend(loc=2, bbox_to_anchor=(1,1))

    plt.tight_layout()
    plt.savefig('../plots/crb_triangle_alldim{:s}_comparison_{:d}_{:s}_{:s}.pdf'.format(alabel, n, vlabel, plot))
def comp_obsmodes(vary=['progenitor', 'bary', 'halo'], align=True, component='halo'):
    """Compare CRBs from different observing modes

    For each potential parameter of `component`, plot the relative CRB per
    stream for five observing modes (fiducial 3/4/6D, DESI-like 4D,
    Gaia-like 6D), plus a smaller panel below each with the ratio to the
    fiducial 6D mode. Reads the collated single-stream CRB files and writes
    ../plots/obsmode_comparison.pdf.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    Nvar = len(pid_comp)
    plabels, units = get_parlabel(pid_comp)
    punits = [' (%)' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]
    plainlabels = ['V_h', 'R_h', 'q_x', 'q_z']

    names = get_done()

    # observing modes: error model + number of observed dimensions, in step
    errmodes = ['fiducial', 'fiducial', 'fiducial', 'desi', 'gaia']
    Ndims = [ 3, 4, 6, 4, 6]
    Nmode = len(errmodes)

    # fiducial 6D mode, the reference for the ratio panels
    errmode = 'fiducial'
    Ndim = 6
    coll_fiducial = np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))

    # styling per mode
    labels = {'desi': 'DESI-like', 'gaia': 'Gaia-like', 'fiducial': 'Fiducial'}
    cfrac = {'desi': 0.8, 'gaia': 0.6, 'fiducial': 0.2}
    cmap = {'fiducial': mpl.cm.bone, 'desi': mpl.cm.pink, 'gaia': mpl.cm.pink}
    frac = [0.8, 0.5, 0.2, 0.5, 0.2]
    ls_all = ['-', '-', '-', '--', '--']
    a = 0.7

    # layout: for each parameter, a tall value panel and a short ratio panel
    da = 3
    ncol = 2
    nrow = np.int64(Nvar/ncol)
    w = 4 * da
    h = nrow * da * 1.3

    plt.close()
    fig, ax = plt.subplots(nrow+2, ncol, figsize=(w, h), sharex=True, gridspec_kw = {'height_ratios':[3, 1.2, 3, 1.2]})

    for i in range(Nmode):
        errmode = errmodes[i]
        Ndim = Ndims[i]
        coll = np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))
        # line width encodes the number of observed dimensions
        lw = np.sqrt(Ndims[i]) * 2
        ls = ls_all[i]
        color = cmap[errmode](frac[i])

        for j in range(Nvar):
            # value panel for parameter j
            plt.sca(ax[j%ncol*2][np.int64(j/ncol)])
            if labels[errmode]=='Fiducial':
                label = '{} {}D'.format(labels[errmode], Ndims[i])
            else:
                label = '{} ({}D)'.format(labels[errmode], Ndims[i])
            plt.plot(coll['p_rel'][:,j]*100, '-', ls=ls, alpha=a, lw=lw, color=color, label=label)

            # ratio panel: this mode relative to fiducial 6D
            plt.sca(ax[j%ncol*2+1][np.int64(j/ncol)])
            plt.plot(coll['p_rel'][:,j]/coll_fiducial['p_rel'][:,j], '-', ls=ls, alpha=a, lw=lw, color=color)

    # axis cosmetics
    for j in range(Nvar):
        plt.sca(ax[j%ncol*2][np.int64(j/ncol)])
        plt.ylabel(params[j])
        plt.gca().set_yscale('log')
        plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
        plt.gca().xaxis.set_major_locator(plt.NullLocator())

        plt.sca(ax[j%ncol*2+1][np.int64(j/ncol)])
        plt.ylabel('$\\frac{\Delta %s}{\Delta {%s}_{,\,Fid\,6D}}$'%(plainlabels[j], plainlabels[j]), fontsize='medium')
        plt.ylim(0.5, 10)
        plt.gca().set_yscale('log')
        plt.gca().yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda y,pos: ('{{:.{:1d}f}}'.format(int(np.maximum(-np.log10(y),0)))).format(y)))
        plt.gca().xaxis.set_major_locator(plt.NullLocator())

    plt.sca(ax[nrow][ncol-1])
    plt.legend(loc=0, fontsize='x-small', handlelength=0.8, frameon=True)

    # stream names
    for j in range(ncol):
        plt.sca(ax[0][j])
        y0, y1 = plt.gca().get_ylim()
        fp = 0.8
        yp = y0 + fp*(y1-y0)

        for e, name in enumerate(names):
            txt = plt.text(e, yp, name, ha='center', va='top', rotation=90, fontsize='x-small', color='0.2')
            txt.set_bbox(dict(facecolor='w', alpha=0.7, ec='none'))

    plt.tight_layout()
    plt.savefig('../plots/obsmode_comparison.pdf')
def vel_improvement(vary=['progenitor', 'bary', 'halo'], align=True, component='halo', errmode='fiducial'):
    """Plot how parameter constraints improve when velocity data are added.

    Left column: improvement from adding radial velocities (3D -> 4D) vs the
    progenitor's radial velocity. Middle/right columns: improvement from
    adding proper motions (4D -> 6D) vs the rv/pm ratio and the largest
    proper-motion component, respectively.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    pid_comp, dp_fid2, vlabel2 = get_varied_pars(component)
    Nvar = len(pid_comp)
    plabels, units = get_parlabel(pid_comp)
    punits = [' (%)' for x in units]
    params = ['$\Delta$ {}{}'.format(x, y) for x,y in zip(plabels, punits)]

    names = get_done()

    # collated single-stream CRBs for 3D, 4D and 6D observations
    coll = []
    for Ndim in [3,4,6]:
        coll += [np.load('../data/crb/cx_collate_multi1_{:s}{:1d}_a{:1d}_{:s}_{:s}.npz'.format(errmode, Ndim, align, vlabel, component))]

    # improvement ratios: >1 means the extra dimension helps
    rv = coll[0]['p_rel'] / coll[1]['p_rel']
    pm = coll[1]['p_rel'] / coll[2]['p_rel']

    N = len(names)
    prog_rv = np.empty(N)
    prog_pm = np.empty(N)

    for i in range(N):
        # NOTE(review): file handle is not closed explicitly
        mock = pickle.load(open('../data/mock_{}.params'.format(names[i]), 'rb'))
        # v0 is (radial velocity, pm components); take |rv| and max |pm|
        pms = np.array([x.value for x in mock['v0'][1:]])
        prog_rv[i] = np.abs(mock['v0'][0].value)
        prog_pm[i] = max(np.abs(pms))

    da = 2
    plt.close()
    fig, ax = plt.subplots(Nvar, 3, figsize=(da*3, da*Nvar), sharex='col')

    for j in range(Nvar):
        plt.sca(ax[j][0])
        plt.plot(prog_rv, rv[:,j], 'ko')

        plt.sca(ax[j][1])
        plt.plot(prog_rv/prog_pm, pm[:,j], 'ko')

        plt.sca(ax[j][2])
        plt.plot(prog_pm, pm[:,j], 'ko')

    plt.tight_layout()
###
# Referee's report
###
def mass_age(name='atlas', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), graph=False, graphsave=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
    """Create a streakline model of a stream
    baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc

    Top row of the output figure varies the progenitor's initial mass by
    +-20% at fixed age; bottom row varies the stream age by the same
    fractions at fixed mass. Saves ../paper/age_mass_{name}.png.
    """
    # vary progenitor parameters
    # NOTE(review): file handle is not closed explicitly
    mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))
    for i in range(3):
        mock['x0'][i] += pparams0[26+i]
        mock['v0'][i] += pparams0[29+i]

    # vary potential parameters
    potential = 'octu'
    pparams = pparams0[:26]
    # entries 0 and 2 are stored as log10 -- convert back to linear values
    pparams[0] = (10**pparams0[0].value)*pparams0[0].unit
    pparams[2] = (10**pparams0[2].value)*pparams0[2].unit

    # adjust circular velocity in this halo
    vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)

    ylabel = ['Dec (deg)', 'd (kpc)', '$V_r$ (km/s)', '$\mu_\\alpha$ (mas yr$^{-1}$)', '$\mu_\delta$ (mas yr$^{-1}$)']

    plt.close()
    fig, ax = plt.subplots(2, 5, figsize=(20,7), sharex='col', sharey='col', squeeze=False)

    # loop over mass/age scale factors 1.2 ... 0.8 (descending)
    for e, f in enumerate(np.arange(0.8,1.21,0.1)[::-1]):
        # create a model stream with these parameters (scaled initial mass)
        params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': f*mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}

        stream = Stream(**params['generate'])
        stream.generate()
        stream.observe(**params['observe'])

        # top row: observables vs R.A. for each mass
        for i in range(5):
            plt.sca(ax[0][i])
            plt.gca().invert_xaxis()
            plt.ylabel(ylabel[i])

            plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=mpl.cm.viridis(e/5), mec='none', ms=4, label='{:.2g}$\\times$10$^3$ M$_\odot$'.format(f*mock['mi'].to(u.Msun).value*1e-3))

            if (i==0) & (e==4):
                plt.legend(frameon=True, handlelength=0.5, fontsize='small', markerscale=1.5)
            if i==2:
                plt.title('Age = {:.2g}'.format(mock['age'].to(u.Gyr)), fontsize='medium')

        # same stream, but now with scaled age and fiducial mass
        params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': f*mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'wangle': mock['wangle'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}

        stream = Stream(**params['generate'])
        stream.generate()
        stream.observe(**params['observe'])

        # bottom row: observables vs R.A. for each age
        for i in range(5):
            plt.sca(ax[1][i])
            plt.gca().invert_xaxis()
            plt.xlabel('R.A. (deg)')
            plt.ylabel(ylabel[i])

            plt.plot(stream.obs[0], stream.obs[i+1], 'o', color=mpl.cm.viridis(e/5), mec='none', ms=4, label='{:.2g}'.format(f*mock['age'].to(u.Gyr)))

            if (i==0) & (e==4):
                plt.legend(frameon=True, handlelength=0.5, fontsize='small', markerscale=1.5)
            if i==2:
                plt.title('Initial mass = {:.2g}$\\times$10$^3$ M$_\odot$'.format(mock['mi'].to(u.Msun).value*1e-3), fontsize='medium')

    plt.tight_layout(w_pad=0)
    plt.savefig('../paper/age_mass_{}.png'.format(name))
# progenitor's orbit
def prog_orbit(n):
    """Plot the progenitor orbit of stream n in the three Cartesian projections."""
    orbit = stream_orbit(n)

    R = np.linalg.norm(orbit['x'][:2,:].to(u.kpc), axis=0)[::-1]
    x = orbit['x'][0].to(u.kpc)[::-1]
    y = orbit['x'][1].to(u.kpc)[::-1]
    z = orbit['x'][2].to(u.kpc)[::-1]
    c = np.arange(np.size(z))[::-1]

    plt.close()
    fig, ax = plt.subplots(1, 3, figsize=(15,5))

    # one panel per coordinate pair
    panels = [(x, y, 'X (kpc)', 'Y (kpc)'),
              (x, z, 'X (kpc)', 'Z (kpc)'),
              (y, z, 'Y (kpc)', 'Z (kpc)')]
    for ipanel, (h_coord, v_coord, h_label, v_label) in enumerate(panels):
        plt.sca(ax[ipanel])
        plt.scatter(h_coord, v_coord, c=c, cmap=mpl.cm.gray)
        plt.xlabel(h_label)
        plt.ylabel(v_label)

    plt.tight_layout()
    plt.savefig('../plots/orbit_cartesian_{}.png'.format(n))
def prog_orbit3d(name, symmetry=False):
    """Plot the progenitor orbit of a stream in 3D Cartesian coordinates.

    name : stream identifier passed to stream_orbit
    symmetry : use a hand-tuned viewing angle for the stream, when available
    """
    orbit = stream_orbit(name)

    R = np.linalg.norm(orbit['x'][:2,:].to(u.kpc), axis=0)[::-1]
    x = orbit['x'][0].to(u.kpc)[::-1].value
    y = orbit['x'][1].to(u.kpc)[::-1].value
    z = orbit['x'][2].to(u.kpc)[::-1].value
    c = np.arange(np.size(z))[::-1]

    plt.close()
    fig = plt.figure(figsize=(9,9))
    ax = fig.add_subplot(1,1,1, projection='3d')
    if symmetry:
        # hand-tuned viewing angles per stream id; the original indexed these
        # dicts with an undefined variable `n` (NameError) -- look up by the
        # function argument and fall back to the generic viewpoint
        azimuth = {-1: 119, -2: -39, -3: -5, -4: -11}
        elevation = {-1: 49, -2: -117, -3: 49, -4: 60}
        ax.view_init(azim=azimuth.get(name, -10), elev=elevation.get(name, 30))
    else:
        ax.view_init(azim=-10, elev=30)
    ax.set_frame_on(False)

    # color encodes time along the orbit
    ax.scatter(x, y, z, 'o', depthshade=False, c=c, cmap=mpl.cm.YlOrBr_r)

    ax.set_xlabel('X (kpc)')
    ax.set_ylabel('Y (kpc)')
    ax.set_zlabel('Z (kpc)')
    plt.title('{}'.format(name))

    plt.tight_layout()
    plt.savefig('../plots/orbit_3d_{}_{:d}.png'.format(name, symmetry))
def stream_orbit(name='gd1', pparams0=pparams_fid, dt=0.2*u.Myr, rotmatrix=np.eye(3), diagnostic=False, observer=mw_observer, vobs=vsun, footprint='', obsmode='equatorial'):
    """Create a streakline model of a stream
    baryonic component as in kupper+2015: 3.4e10*u.Msun, 0.7*u.kpc, 1e11*u.Msun, 6.5*u.kpc, 0.26*u.kpc

    Integrates only the progenitor orbit (no stream stars) and returns it.
    When diagnostic is True, also prints the orbit's peri/apocenter and
    eccentricity.
    """
    # vary progenitor parameters
    # NOTE(review): file handle is not closed explicitly
    mock = pickle.load(open('../data/mock_{}.params'.format(name), 'rb'))

    # vary potential parameters
    potential = 'quad'
    pparams = pparams0[:19]
    # entries 0 and 2 are stored scaled down by 1e10 -- restore linear values
    pparams[0] = pparams0[0]*1e10
    pparams[2] = pparams0[2]*1e10

    # adjust circular velocity in this halo
    vobs['vcirc'] = vcirc_potential(observer['galcen_distance'], pparams=pparams)

    # create a model stream with these parameters
    params = {'generate': {'x0': mock['x0'], 'v0': mock['v0'], 'progenitor': {'coords': 'equatorial', 'observer': mock['observer'], 'pm_polar': False}, 'potential': potential, 'pparams': pparams, 'minit': mock['mi'], 'mfinal': mock['mf'], 'rcl': 20*u.pc, 'dr': 0., 'dv': 0*u.km/u.s, 'dt': dt, 'age': mock['age'], 'nstars': 400, 'integrator': 'lf'}, 'observe': {'mode': mock['obsmode'], 'nstars':-1, 'sequential':True, 'errors': [2e-4*u.deg, 2e-4*u.deg, 0.5*u.kpc, 5*u.km/u.s, 0.5*u.mas/u.yr, 0.5*u.mas/u.yr], 'present': [0,1,2,3,4,5], 'observer': mock['observer'], 'vobs': mock['vobs'], 'footprint': mock['footprint'], 'rotmatrix': rotmatrix}}

    stream = Stream(**params['generate'])
    stream.prog_orbit()

    if diagnostic:
        # galactocentric radius along the orbit -> pericenter, apocenter, eccentricity
        r = np.linalg.norm(stream.orbit['x'].to(u.kpc), axis=0)
        rmin = np.min(r)
        rmax = np.max(r)
        e = (rmax - rmin)/(rmax + rmin)
        print(rmin, rmax, e)

    return stream.orbit
def check_rcur():
    """Print the current galactocentric radius of each stream's progenitor,
    next to the value stored in the orbital summary table, for a consistency
    check."""
    done = get_done()[::-1]
    N = len(done)

    t = Table.read('../data/crb/ar_orbital_summary.fits')

    for i, name in enumerate(done):
        # bug fix: close the parameter file after reading (the original
        # pickle.load(open(...)) leaked the file handle)
        with open('../data/mock_{}.params'.format(name), 'rb') as f:
            mock = pickle.load(f)
        c = coord.ICRS(ra=mock['x0'][0], dec=mock['x0'][1], distance=mock['x0'][2])
        gal = c.transform_to(coord.Galactocentric)
        # galactocentric distance of the progenitor today
        rcur = np.sqrt(gal.x**2 + gal.y**2 + gal.z**2).to(u.kpc)

        print(done[i], rcur, np.array(t[t['name']==name]['rcur']))
# summary of parameter constraints
def relative_crb(vary=['progenitor', 'bary', 'halo'], component='all', Ndim=6, align=True, fast=False, scale=False):
    """Plot crb_param/param for 3 streams

    component : which parameter block to show ('bary', 'halo', 'dipole',
                'quad', 'point', or 'all')
    fast : use numpy inversion instead of the stabilized inverse
    scale : rescale the covariance by the optimal finite-difference steps
    """
    pid, dp, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''

    # choose the appropriate components:
    # number of parameters per model component (fixed by the model setup)
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': len(pid), 'point': 1}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    plabels, units = get_parlabel(pid)
    params = [x for x in plabels]
    params = params[nstart[component]:nend[component]]
    Nvar = len(params)
    xpos = np.arange(Nvar)

    # fiducial parameter values, to express CRBs as fractions
    params_fid = np.array([pparams_fid[x].value for x in pid[nstart[component]:nend[component]]])

    plt.close()
    plt.figure(figsize=(10,6))

    for n in [-1,-2,-3]:
        cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]

        if scale:
            dp_opt = read_optimal_step(n, vary)
            # bug fix: the fiducial steps were unpacked into `dp` above; the
            # original referenced an undefined `dp_fid` (NameError)
            dp_scaled = [x*y.unit for x,y in zip(dp_opt, dp)]
            scale_vec = np.array([x.value for x in dp_scaled[nstart[component]:nend[component]]])
            scale_mat = np.outer(scale_vec, scale_vec)
            cq /= scale_mat

        crb = np.sqrt(np.diag(cq))
        crb_rel = crb / params_fid

        print(fancy_name(n))
        print(crb_rel)

        plt.plot(xpos, crb_rel, 'o', label='{}'.format(fancy_name(n)))

    plt.legend(fontsize='small')
    plt.ylabel('Relative CRB')
    plt.xticks(xpos, params, rotation='horizontal', fontsize='medium')
    plt.xlabel('Parameter')

    plt.ylim(0, 0.2)

    plt.tight_layout()
    plt.savefig('../plots/relative_crb_{:s}_{:s}_{:d}.png'.format(vlabel, component, Ndim))
def relative_crb_sky(vary=['progenitor', 'bary', 'halo'], component='all', Ndim=6, align=True, fast=False, scale=False):
    """Show the relative CRB of each parameter as colored stream tracks on the
    sky: each panel is one parameter, with each stream colored by how well
    that stream constrains it."""
    pid, dp, vlabel = get_varied_pars(vary)
    if align:
        alabel = '_align'
    else:
        alabel = ''

    # choose the appropriate components:
    Nprog, Nbary, Nhalo, Ndipole, Nquad, Npoint = [6, 5, 4, 3, 5, 1]
    if 'progenitor' not in vary:
        Nprog = 0
    nstart = {'bary': Nprog, 'halo': Nprog + Nbary, 'dipole': Nprog + Nbary + Nhalo, 'quad': Nprog + Nbary + Nhalo + Ndipole, 'all': Nprog, 'point': 0}
    nend = {'bary': Nprog + Nbary, 'halo': Nprog + Nbary + Nhalo, 'dipole': Nprog + Nbary + Nhalo + Ndipole, 'quad': Nprog + Nbary + Nhalo + Ndipole + Nquad, 'all': len(pid), 'point': 1}
    if 'progenitor' not in vary:
        nstart['dipole'] = Npoint
        nend['dipole'] = Npoint + Ndipole

    if component in ['bary', 'halo', 'dipole', 'quad', 'point']:
        components = [component]
    else:
        components = [x for x in vary if x!='progenitor']
    plabels, units = get_parlabel(pid)
    params = [x for x in plabels]
    params = params[nstart[component]:nend[component]]
    Nvar = len(params)
    xpos = np.arange(Nvar)

    params_fid = np.array([pparams_fid[x].value for x in pid[nstart[component]:nend[component]]])

    dd = 5
    plt.close()
    fig, ax = plt.subplots(Nvar, 2, figsize=(dd, 0.5*dd*Nvar), sharex='col', sharey='col', gridspec_kw = {'width_ratios':[6, 1]})

    for n in [-1,-2,-3]:
        cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cq = cx[nstart[component]:nend[component], nstart[component]:nend[component]]

        if scale:
            dp_opt = read_optimal_step(n, vary)
            # bug fix: the fiducial steps were unpacked into `dp` above; the
            # original referenced an undefined `dp_fid` (NameError)
            dp_scaled = [x*y.unit for x,y in zip(dp_opt, dp)]
            scale_vec = np.array([x.value for x in dp_scaled[nstart[component]:nend[component]]])
            scale_mat = np.outer(scale_vec, scale_vec)
            cq /= scale_mat

        crb = np.sqrt(np.diag(cq))
        crb_rel = crb / params_fid

        stream = stream_model(n)
        for i in range(Nvar):
            # map log10 of the relative CRB onto the colormap range
            vmin, vmax = -2, 2
            cind = (np.log10(crb_rel[i]) - vmin)/(vmax - vmin)
            color = mpl.cm.magma_r(cind)

            # bug fix: ax is 2D (Nvar rows x 2 columns); the original
            # plt.sca(ax[i]) passed a whole row of axes -- use the wide
            # left-column panel
            plt.sca(ax[i][0])
            plt.plot(stream.obs[0], stream.obs[1], 'o', color=color)

    for i in range(Nvar):
        plt.sca(ax[i][0])
        # set_axis_bgcolor was removed in matplotlib 2.2; set_facecolor is
        # the supported equivalent
        plt.gca().set_facecolor(mpl.cm.magma(0))
        plt.gca().invert_xaxis()

        plt.title(params[i], fontsize='medium')
        plt.ylabel('Dec (deg)')
        if i==Nvar-1:
            plt.xlabel('R.A. (deg)')

    plt.tight_layout()
    plt.savefig('../plots/relative_crb_sky_{:s}_{:s}_{:d}.png'.format(vlabel, component, Ndim))
# toy problem: kepler + dipole
import sklearn.datasets
def create_fmi(n, Ndim=4, niter=20, alabel='_align', vlabel='point_dipole', Nobsdim=6):
    """Generate a random, invertible symmetric positive-definite Fisher matrix
    for the toy problem and store it under id n; also store a decoupled
    version (first parameter independent of the rest) under id n+1.

    Retries up to niter times with fresh random seeds until the matrix
    inverts cleanly.
    """
    state = n
    invertible = False
    cnt = 0
    for cnt in range(niter):
        cxi = sklearn.datasets.make_spd_matrix(Ndim, random_state=state)
        cx = stable_inverse(cxi)
        # accept only if the inverse really is an inverse
        invertible = np.allclose(np.matmul(cxi, cx), np.eye(Ndim))
        if invertible:
            break
        else:
            # bug fix: np.random.get_state() returns a tuple, which
            # make_spd_matrix's random_state rejects (ValueError);
            # draw a fresh integer seed instead
            state = np.random.randint(2**31 - 1)

    np.save('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n, vlabel, Nobsdim), cxi)

    # decouple the first parameter from the rest and store the new Fisher matrix
    cx[0,1:] = 0
    cx[1:,0] = 0
    cxi = stable_inverse(cx)

    np.save('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n+1, vlabel, Nobsdim), cxi)
def basic_fmi(n=0, alabel='_align', vlabel='point_dipole', Nobsdim=6):
    """Store a simple diagonal 4-parameter Fisher matrix for the toy problem."""
    Ndim = 4
    diagonal = [1.5, 3, 1, 1]
    fisher = np.diag(diagonal)

    fname = '../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}'.format(alabel, n, vlabel, Nobsdim)
    np.save(fname, fisher)
def crb_toy(n, alabel='_align', Nobsdim=6, vlabel='point_dipole'):
    """Stub: toy-problem CRB analysis (body not implemented)."""
def talk_crb_triangle(n=-1, vary=['progenitor', 'bary', 'halo'], plot='all', reveal=0, fast=False, scale=False):
    """Produce a triangle plot of 2D Cramer-Rao bounds for all model parameters using a given stream

    Overlays ellipses for 3D, 4D and 6D observations of stream n.
    plot selects the parameter block; scale rescales by the optimal steps.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    dp_opt = read_optimal_step(n, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    plabels, units = get_parlabel(pid)
    params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
    alabel='_align'

    # index range of the parameter block to plot
    if plot=='halo':
        i0 = 11
        i1 = 15
    elif plot=='bary':
        i0 = 6
        i1 = 11
    elif plot=='progenitor':
        i0 = 0
        i1 = 6
    elif plot=='dipole':
        i0 = 15
        i1 = len(params)
    else:
        i0 = 0
        i1 = len(params)

    Nvar = i1 - i0
    params = params[i0:i1]
    # bug fix: dp_unit was referenced below but never defined when scale=True
    # (NameError); compute it the same way comp_errmodes_old does
    if scale:
        dp_unit = unity_scale(dp)
        dp_unit = dp_unit[i0:i1]
    pid = pid[i0:i1]

    label = ['RA, Dec, d', 'RA, Dec, d, $V_r$', 'RA, Dec, d, $V_r$, $\mu_\\alpha$, $\mu_\delta$']

    ylim = [150, 20, 0.5, 0.5, 5e11]
    ylim = [20, 10, 0.1, 0.1]

    plt.close()
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(8,8), sharex='col', sharey='row')

    # plot 2d bounds in a triangle fashion
    Ndim = 3
    streams = np.array([-1,-2,-3,-4])
    slist = streams[:reveal+1]
    for l, Ndim in enumerate([3, 4, 6]):
        cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cx = cx[i0:i1,i0:i1]

        for i in range(0,Nvar-1):
            for j in range(i+1,Nvar):
                plt.sca(ax[j-1][i])
                if scale:
                    cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
                else:
                    cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])

                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    # consistency fix: use arctan2 (as every other triangle
                    # plotter in this file does) -- arccos(v[0][0]) drops the
                    # sign of the rotation angle
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2

                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.PuBu((l+3)/6), lw=3, label=label[l])
                    plt.gca().add_patch(e)

                if l==1:
                    plt.gca().autoscale_view()

                if j==Nvar-1:
                    plt.xlabel(params[i])
                if i==0:
                    plt.ylabel(params[j])

    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')

    plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
    plt.legend(loc=2, bbox_to_anchor=(1,1))

    plt.tight_layout(h_pad=0.0, w_pad=0.0)
    plt.savefig('../plots/talk2/triangle_{}.png'.format(n))
def talk_stream_comp(n=-1, vary=['progenitor', 'bary', 'halo'], plot='all', reveal=0, fast=False, scale=False):
    """Produce a triangle plot of 2D Cramer-Rao bounds for all model parameters using a given stream

    Overlays ellipses for the first reveal+1 streams (GD-1, Pal 5,
    Triangulum, ...) at fixed Ndim=3. plot selects the parameter block;
    scale rescales by the optimal steps.
    """
    pid, dp_fid, vlabel = get_varied_pars(vary)
    dp_opt = read_optimal_step(n, vary)
    dp = [x*y.unit for x,y in zip(dp_opt, dp_fid)]
    plabels, units = get_parlabel(pid)
    params = ['$\Delta$' + x + '({})'.format(y) for x,y in zip(plabels, units)]
    alabel='_align'

    # index range of the parameter block to plot
    if plot=='halo':
        i0 = 11
        i1 = 15
    elif plot=='bary':
        i0 = 6
        i1 = 11
    elif plot=='progenitor':
        i0 = 0
        i1 = 6
    elif plot=='dipole':
        i0 = 15
        i1 = len(params)
    else:
        i0 = 0
        i1 = len(params)

    Nvar = i1 - i0
    params = params[i0:i1]
    # bug fix: dp_unit was referenced below but never defined when scale=True
    # (NameError); compute it the same way comp_errmodes_old does
    if scale:
        dp_unit = unity_scale(dp)
        dp_unit = dp_unit[i0:i1]
    pid = pid[i0:i1]

    label = ['GD-1', 'Pal 5', 'Triangulum']

    ylim = [150, 20, 0.5, 0.5, 5e11]
    ylim = [20, 10, 0.1, 0.1]

    plt.close()
    fig, ax = plt.subplots(Nvar-1, Nvar-1, figsize=(8,8), sharex='col', sharey='row')

    # plot 2d bounds in a triangle fashion
    Ndim = 3
    streams = np.array([-1,-2,-3,-4])
    slist = streams[:reveal+1]
    for l, n in enumerate(slist):
        cxi = np.load('../data/crb/bspline_cxi{:s}_{:d}_{:s}_{:d}.npy'.format(alabel, n, vlabel, Ndim))
        if fast:
            cx = np.linalg.inv(cxi)
        else:
            cx = stable_inverse(cxi)
        cx = cx[i0:i1,i0:i1]

        for i in range(0,Nvar-1):
            for j in range(i+1,Nvar):
                plt.sca(ax[j-1][i])
                if scale:
                    cx_2d = np.array([[cx[i][i]/dp_unit[i]**2, cx[i][j]/(dp_unit[i]*dp_unit[j])], [cx[j][i]/(dp_unit[j]*dp_unit[i]), cx[j][j]/dp_unit[j]**2]])
                else:
                    cx_2d = np.array([[cx[i][i], cx[i][j]], [cx[j][i], cx[j][j]]])

                w, v = np.linalg.eig(cx_2d)
                if np.all(np.isreal(v)):
                    theta = np.degrees(np.arctan2(v[1][0], v[0][0]))
                    width = np.sqrt(w[0])*2
                    height = np.sqrt(w[1])*2

                    e = mpl.patches.Ellipse((0,0), width=width, height=height, angle=theta, fc='none', ec=mpl.cm.YlOrBr((l+3)/6), lw=3, label=label[l])
                    plt.gca().add_patch(e)

                if l==0:
                    plt.gca().autoscale_view()

                if j==Nvar-1:
                    plt.xlabel(params[i])
                if i==0:
                    plt.ylabel(params[j])

    # turn off unused axes
    for i in range(0,Nvar-1):
        for j in range(i+1,Nvar-1):
            plt.sca(ax[i][j])
            plt.axis('off')

    plt.sca(ax[int(Nvar/2-1)][int(Nvar/2-1)])
    plt.legend(loc=2, bbox_to_anchor=(1,1))

    plt.tight_layout(h_pad=0.0, w_pad=0.0)
    plt.savefig('../plots/talk2/comparison_{}.png'.format(reveal))
def test_ellipse():
    """Visual check that the ellipse angle is recovered from a rotation matrix."""
    angle_rad = np.radians(60)
    # 2D rotation matrix built from the chosen angle
    rot = np.array([[np.cos(angle_rad), np.sin(angle_rad)],
                    [-np.sin(angle_rad), np.cos(angle_rad)]])
    semi_axes = np.array([2, 1])

    plt.close()
    plt.figure()

    # recovered angle should match the input angle
    angle_deg = np.degrees(np.arctan2(rot[0][1], rot[0][0]))
    print(angle_deg, np.degrees(angle_rad))
    patch = mpl.patches.Ellipse((0,0), width=semi_axes[0]*2, height=semi_axes[1]*2, angle=angle_deg, fc='none', ec='k', lw=2)
    plt.gca().add_artist(patch)
    plt.xlim(-5,5)
    plt.ylim(-5,5)
def test_ellipse2():
    """Draw the error ellipse of a rank-2 covariance built from two vectors."""
    v1 = np.array([1.5, -0.05])
    v2 = np.array([0.01, 0.3])
    cov = np.outer(v1, v1) + np.outer(v2, v2)
    eigval, eigvec = np.linalg.eig(cov)
    print(eigval)
    print(eigvec)

    plt.close()
    plt.figure()

    # ellipse orientation from the first eigenvector, size from eigenvalues
    tilt = np.degrees(np.arctan2(eigvec[1][0], eigvec[0][0]))
    full_width = 2 * np.sqrt(eigval[0])
    full_height = 2 * np.sqrt(eigval[1])
    print(full_width/full_height)
    patch = mpl.patches.Ellipse((0,0), width=full_width, height=full_height, angle=tilt, fc='none', ec='k', lw=2)
    plt.gca().add_artist(patch)
    plt.xlim(-5,5)
    plt.ylim(-5,5)
    plt.savefig('../plots/test_ellipse.png')
def test_ellipse3():
    """Same as test_ellipse2 but with larger vectors and a wider view box."""
    vec_a = np.array([-28., -8.])
    vec_b = np.array([6., -21.])
    cov = np.outer(vec_a, vec_a) + np.outer(vec_b, vec_b)
    evals, evecs = np.linalg.eig(cov)
    print(evals)
    print(evecs)
    plt.close()
    plt.figure()
    angle = np.degrees(np.arctan2(evecs[1][0], evecs[0][0]))
    ell_width = np.sqrt(evals[0]) * 2
    ell_height = np.sqrt(evals[1]) * 2
    print(ell_width, ell_height, ell_width / ell_height)
    ellipse = mpl.patches.Ellipse((0, 0), width=ell_width, height=ell_height,
                                  angle=angle, fc='none', ec='k', lw=2)
    plt.gca().add_artist(ellipse)
    plt.gca().autoscale_view()
    plt.xlim(-40, 40)
    plt.ylim(-40, 40)
    plt.savefig('../plots/test_ellipse3.png')
|
abonaca/stream_information
|
scripts/stream_info/stream_info.py
|
Python
|
mit
| 191,260
|
[
"Galaxy"
] |
702d493c395eea7adafb124655d8a038a68a71a90980eafd6187546120e5d151
|
# Copyright 2014 Roberto Brian Sarrionandia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import tusers
from google.appengine.ext import ndb
class DeleteCustomStatusHandler(webapp2.RequestHandler):
    """Handles GET /delete_status?t=<tournament id>&s=<status>.

    Removes the named custom room status from the tournament, provided the
    current user is one of the tournament's owners; otherwise redirects to
    the login page.
    """
    def get(self):
        user = tusers.get_current_user()

        #Get the requested tournament
        tid = self.request.get('t')
        t_key = ndb.Key('Tournament', int(tid))
        t = t_key.get()

        # Fix: previously a missing/unknown tournament id made t None and the
        # owner check below raised AttributeError (HTTP 500). Answer 404.
        if t is None:
            self.abort(404)

        if (user and user.key in t.owner):
            status = self.request.get('s')
            if status in t.customRoomStatus:
                t.customRoomStatus.remove(status)
                t.put()

            #Send the user back to the institution list
            self.redirect('/custom_room_status?t=' + tid)
        else:
            self.redirect(tusers.create_login_url(self.request.uri))
# WSGI application: routes /delete_status to the handler above.
app = webapp2.WSGIApplication([
    ('/delete_status', DeleteCustomStatusHandler)
], debug=True)
|
sarrionandia/tournatrack
|
deletestatus.py
|
Python
|
apache-2.0
| 1,320
|
[
"Brian"
] |
a608e1c13e88ba7e2ea3fd411e3324998eb64b0cfdcd45828f2b1526897f2a33
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Google Inc. All Rights Reserved.
#
# Authors:
# Arkadiusz Socała <as277575@mimuw.edu.pl>
# Michael Cohen <scudette@google.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""A visitor computing layout from a type definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fractions
from layout_expert.c_ast import c_ast
from layout_expert.lib import parsers
from layout_expert.layout import layout as layouts
class LayoutComputingVisitor(object):
    """A visitor computing layout from a type definition.

    Each visit_* method returns a layouts.Layout (or subclass) describing the
    bit size and bit alignment of the visited C type. All sizes/alignments
    are expressed in bits.
    """

    def __init__(self, type_manager):
        self.type_manager = type_manager

    def compute_layout(self, element):
        """Computes the layout of an AST element via double dispatch."""
        return element.accept(self)

    def visit_c_program(self, program):
        # NOTE(review): returns after visiting only the FIRST element of the
        # program -- presumably a program here wraps a single top-level type;
        # confirm against callers.
        for element in program.content:
            return element.accept(self)

    def visit_c_enum(self, enum):
        # An enum is laid out exactly like an int.
        _ = enum
        return self.compute_layout(c_ast.CTypeReference('int'))

    def visit_c_struct(self, struct):
        """A method visiting a struct definition and returning the layout.

        Args:
            struct: an object representing a struct definition in AST.
        Returns:
            An object representing the layout of the struct.
        """
        fields = self.type_manager.collect_fields(struct)
        packed = self._is_packed(struct.attributes)
        bit_alignment = self._get_attributes_alignment(struct.attributes)
        bit_offset = 0
        for field, type_definition in zip(fields, struct.content):
            bit_offset = self._align_field(
                bit_offset,
                field.layout,
                type_definition,
                packed,
            )
            # Packed structs ignore member alignment unless a member carries
            # an explicit aligned attribute.
            if not packed or self._is_alignment_overriden(type_definition):
                bit_alignment = self._lcm(
                    bit_alignment, field.layout.bit_alignment)
            field.bit_offset = bit_offset
            bit_offset += field.layout.bit_size

        # Total size is padded up to the struct's own alignment.
        bit_size = self._align(bit_offset, bit_alignment)
        return layouts.Layout(
            bit_size=bit_size,
            bit_alignment=bit_alignment,
            fields=fields,
        )

    def visit_c_union(self, union):
        """A method visiting a union definition and returning the layout.

        Args:
            union: an object representing a union definition in AST.
        Returns:
            An object representing the layout of the union.
        """
        fields = self.type_manager.collect_fields(union)
        packed = self._is_packed(union.attributes)
        bit_alignment = self._get_attributes_alignment(union.attributes)
        bit_size = 0
        for field, type_definition in zip(fields, union.content):
            if not packed or self._is_alignment_overriden(type_definition):
                bit_alignment = self._lcm(
                    bit_alignment, field.layout.bit_alignment)
            # All union members start at offset 0; size is the widest member.
            field.bit_offset = 0
            bit_size = max(bit_size, field.layout.bit_size)

        bit_size = self._align(bit_size, bit_alignment)
        return layouts.Layout(
            bit_size=bit_size,
            bit_alignment=bit_alignment,
            fields=fields,
        )

    def visit_c_array(self, array):
        """Layout of an array: element layout repeated `length` times."""
        layout = array.type_definition.accept(self)
        length = self.type_manager.evaluate(array.length)
        array.evaluated_length = length
        return layouts.ArrayLayout(
            bit_size=length * layout.bit_size,
            bit_alignment=layout.bit_alignment,
            length=length,
            member_layout=layout
        )

    def visit_c_pointer(self, pointer):
        _ = pointer
        return layouts.Layout(
            bit_size=self._pointer_bit_size(),
            bit_alignment=self._pointer_bit_alignment(),
            fields=[],
        )

    def visit_c_pointer_to_function(self, pointer_to_function):
        # Function pointers are laid out like data pointers here.
        return self.visit_c_pointer(pointer_to_function)

    def visit_c_simple_type(self, simple_type):
        return layouts.Layout(
            bit_size=simple_type.bit_size,
            bit_alignment=simple_type.bit_alignment,
            fields=[],
        )

    def visit_c_type_reference(self, type_reference):
        # Dereference the referred type.
        reference_ast = self.type_manager.get_type_ast(type_reference.name)

        # This is a circular reference which means it is not defined.
        if reference_ast == type_reference:
            raise c_ast.IrreducibleFunction(
                "Unable to resolve type name %s. Is it defined?",
                type_reference.name)

        # Visit it.
        return reference_ast.accept(self)

    def visit_c_type_definition(self, type_definition):
        return type_definition.type_definition.accept(self)

    def visit_c_typedef(self, typedef):
        """Layout of a typedef; an `aligned` attribute overrides alignment."""
        layout = typedef.type_definition.accept(self)
        for attribute in typedef.attributes:
            if parsers.attribute_name_match(attribute.name, 'aligned'):
                expression = attribute.parameters[0]
                byte_alignment = self.type_manager.evaluate(
                    expression)
                layout.bit_alignment = 8 * byte_alignment
        return layout

    def _get_results(self, elements):
        collected_layouts = []
        for element in elements:
            element_layouts = element.accept(self)
            collected_layouts.extend(element_layouts)
        return collected_layouts

    def _is_packed(self, attributes):
        """True if the attribute list contains __attribute__((packed))."""
        for attribute in attributes:
            if parsers.attribute_name_match(attribute.name, 'packed'):
                return True
        return False

    def _get_attributes_alignment(self, attributes):
        """Alignment (bits) implied by any `aligned(n)` attributes."""
        bit_alignment = self._base_alignment()
        for attribute in attributes:
            if parsers.attribute_name_match(attribute.name, 'aligned'):
                expression = attribute.parameters[0]
                byte_alignment = int(self.type_manager.evaluate(
                    expression))
                bit_alignment = self._lcm(bit_alignment, 8 * byte_alignment)
        return bit_alignment

    def _align_field(self, bit_offset, layout, type_definition, packed):
        """Returns the aligned offset where the field should be placed.

        A bit-field that still fits before the next alignment boundary is
        left at the current (unaligned) offset.
        """
        bit_alignment = self._get_field_alignment(
            layout, type_definition, packed)
        aligned = self._align(bit_offset, bit_alignment)
        if layout.bit_field and bit_offset + layout.bit_size <= aligned:
            return bit_offset
        return aligned

    def _get_field_alignment(self, layout, type_definition, packed):
        if packed and not self._is_alignment_overriden(type_definition):
            # Packed: bit-fields pack to the bit, others to a byte.
            if layout.bit_field:
                return 1
            else:
                return self._base_alignment()
        else:
            return layout.bit_alignment

    def _align(self, offset, alignment):
        # round up offset to the next multiplication of alignment
        return alignment * ((offset + alignment - 1) // alignment)

    def _is_alignment_overriden(self, type_definition):
        """True if the definition carries an explicit `aligned` attribute."""
        if hasattr(type_definition, 'attributes'):
            for attribute in type_definition.attributes:
                if parsers.attribute_name_match(attribute.name, 'aligned'):
                    return True
        return False

    def _base_alignment(self):
        # One byte, in bits.
        return 8

    def _pointer_bit_size(self):
        return self.type_manager.get_type_ast('long').bit_size

    def _pointer_bit_alignment(self):
        return self.type_manager.get_type_ast('long').bit_alignment

    def _lcm(self, a, b):
        """Least common multiple of a and b."""
        # Fix: fractions.gcd was deprecated in Python 3.5 and removed in 3.9;
        # prefer math.gcd and keep fractions.gcd only as a Python 2 fallback.
        try:
            from math import gcd
        except ImportError:  # Python 2
            from fractions import gcd
        return a * b // gcd(a, b)

    def visit_c_void_type(self, _):
        raise c_ast.IrreducibleFunction("Unable to layout Void expression.")
|
dsweet04/rekall
|
tools/layout_expert/layout_expert/visitors/layout_computing_visitor.py
|
Python
|
gpl-2.0
| 8,271
|
[
"VisIt"
] |
46b74e47901c35993fef849f5088fc379adb49fd63aaa4c3b81adc606a04637f
|
# -*- coding: utf-8 -*-
"""
=================
Plot multiple EMD
=================
Shows how to compute multiple EMD and Sinkhorn with two different
ground metrics and plot their values for different distributions.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
from ot.datasets import get_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100  # nb bins
n_target = 50  # nb target distributions
# bin positions
x = np.arange(n, dtype=np.float64)
lst_m = np.linspace(20, 90, n_target)
# Gaussian distributions: one fixed source, n_target sliding-mean targets
a = gauss(n, m=20, s=5)  # m= mean, s= std
B = np.zeros((n, n_target))
for i, m in enumerate(lst_m):
    B[:, i] = gauss(n, m=m, s=5)
# loss matrix and normalization (two ground metrics on the same bins)
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'euclidean')
M /= M.max()
M2 = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)), 'sqeuclidean')
M2 /= M2.max()
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1)
pl.subplot(2, 1, 1)
pl.plot(x, a, 'b', label='Source distribution')
pl.title('Source distribution')
pl.subplot(2, 1, 2)
pl.plot(x, B, label='Target distributions')
pl.title('Target distributions')
pl.tight_layout()
##############################################################################
# Compute EMD for the different losses
# ------------------------------------
#%% Compute and plot distributions and loss matrix
# emd2 with matrix B returns one EMD value per target column
d_emd = ot.emd2(a, B, M)  # direct computation of EMD
d_emd2 = ot.emd2(a, B, M2)  # direct computation of EMD with loss M2
pl.figure(2)
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.title('EMD distances')
pl.legend()
##############################################################################
# Compute Sinkhorn for the different losses
# -----------------------------------------
#%%
reg = 1e-2  # entropic regularization strength
d_sinkhorn = ot.sinkhorn2(a, B, M, reg)
d_sinkhorn2 = ot.sinkhorn2(a, B, M2, reg)
# figure 2 is cleared and redrawn with the Sinkhorn curves overlaid
pl.figure(2)
pl.clf()
pl.plot(d_emd, label='Euclidean EMD')
pl.plot(d_emd2, label='Squared Euclidean EMD')
pl.plot(d_sinkhorn, '+', label='Euclidean Sinkhorn')
pl.plot(d_sinkhorn2, '+', label='Squared Euclidean Sinkhorn')
pl.title('EMD distances')
pl.legend()
pl.show()
|
aje/POT
|
examples/plot_compute_emd.py
|
Python
|
mit
| 2,404
|
[
"Gaussian"
] |
72cac7c12226c0433abdcf1e901fb98efc611f2111315245deef31fd45fc0e77
|
# $Id$
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" Various bits and pieces for calculating Molecular descriptors
"""
from rdkit import RDConfig
from rdkit.ML.Descriptors import Descriptors
from rdkit.Chem import Descriptors as DescriptorsMod
from rdkit.RDLogger import logger
logger = logger()
import re
class MolecularDescriptorCalculator(Descriptors.DescriptorCalculator):
  """ used for calculating descriptors for molecules
  """
  def __init__(self, simpleList, *args, **kwargs):
    """ Constructor

      **Arguments**
        - simpleList: list of simple descriptors to be calculated
          (see below for format)

      **Note**
        - format of simpleList:
          a list of strings which are functions in the rdkit.Chem.Descriptors module
    """
    self.simpleList = tuple(simpleList)
    self.descriptorNames = tuple(self.simpleList)
    self.compoundList = None
    self._findVersions()

  def _findVersions(self):
    """ fills self.descriptorVersions with each descriptor function's
      `version` attribute ('N/A' when the function or attribute is missing)
    """
    self.descriptorVersions = []
    for nm in self.simpleList:
      vers = 'N/A'
      if hasattr(DescriptorsMod, nm):
        fn = getattr(DescriptorsMod, nm)
        if hasattr(fn, 'version'):
          vers = fn.version
      self.descriptorVersions.append(vers)

  def SaveState(self, fileName):
    """ Writes this calculator off to a file so that it can be easily loaded later

     **Arguments**
       - fileName: the name of the file to be written
    """
    # Fix: `import cPickle` is Python 2 only; fall back to pickle on Python 3.
    try:
      import cPickle as pickle
    except ImportError:
      import pickle
    try:
      f = open(fileName, 'wb+')
    except (IOError, OSError):
      logger.error('cannot open output file %s for writing' % (fileName))
      return
    # `with` guarantees the handle is closed even if dump raises.
    with f:
      pickle.dump(self, f)

  def CalcDescriptors(self, mol, *args, **kwargs):
    """ calculates all descriptors for a given molecule

      **Arguments**
        - mol: the molecule to be used

      **Returns**
        a tuple of all descriptor values; a descriptor that raises keeps the
        -666 placeholder (and the traceback is printed)
    """
    res = [-666] * len(self.simpleList)
    for i, nm in enumerate(self.simpleList):
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      try:
        res[i] = fn(mol)
      except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit escape.
        import traceback
        traceback.print_exc()
    return tuple(res)

  def GetDescriptorNames(self):
    """ returns a tuple of the names of the descriptors this calculator generates
    """
    return self.descriptorNames

  def GetDescriptorSummaries(self):
    """ returns a tuple of summaries for the descriptors this calculator generates
    """
    res = []
    for nm in self.simpleList:
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      if hasattr(fn, '__doc__') and fn.__doc__:
        doc = fn.__doc__.split('\n\n')[0].strip()
        # Raw string fixes the invalid '\ ' escape sequences (same pattern).
        doc = re.sub(r'\ *\n\ *', ' ', doc)
      else:
        doc = 'N/A'
      res.append(doc)
    return res

  def GetDescriptorFuncs(self):
    """ returns a tuple of the functions used to generate this calculator's descriptors
    """
    res = []
    for nm in self.simpleList:
      fn = getattr(DescriptorsMod, nm, lambda x: 777)
      res.append(fn)
    return tuple(res)

  def GetDescriptorVersions(self):
    """ returns a tuple of the versions of the descriptor calculators
    """
    return tuple(self.descriptorVersions)
|
rdkit/rdkit-orig
|
rdkit/ML/Descriptors/MoleculeDescriptors.py
|
Python
|
bsd-3-clause
| 3,247
|
[
"RDKit"
] |
15f6b3913d1ca15cc24341923906b0f54b880bd03110c3c278a5b487bf178b09
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow image utilities.
"""
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import py_image
# Helper for tensorflow training
def filts2imgs(filts, h, w):
    """Tile per-pixel KxK filters into one image for TensorBoard display.

    Reshapes to [-1, K, K, h, w], pads each kernel with a 1-pixel border,
    then lays the kernels out on an h x w grid, producing a single-channel
    image of shape [-1, h*(K+2), w*(K+2), 1].
    """
    K = tf.shape(filts)[1]
    ch = tf.shape(filts)[3]  # NOTE(review): unused -- presumably leftover
    filts = tf.reshape(filts, [-1, K, K, h, w])
    filts = tf.pad(filts, [[0,0],[1,1],[1,1],[0,0],[0,0]])
    filts = tf.transpose(filts, [0, 3, 1, 4, 2])
    filts = tf.reshape(filts, [-1, h*(K+2), w*(K+2), 1])
    return filts
def store_plot(plots, name, scalar, label=""):
    """Append a [label, scalar] entry to plots[name], creating the series on
    first use; returns the (mutated) plots dict for chaining."""
    series = plots.setdefault(name, [])
    series.append([label, scalar])
    return plots
def gen_plots(plots, g_index):
    """Build a merged summary op: one scalar summary per stored plot series.

    g_index is a scalar tensor selecting which entry of each series to emit;
    out-of-range indices fall back to entry 0.
    """
    summaries = []
    for name in plots:
        plot = plots[name]
        # plot.sort(key=lambda x : x[0])
        scalars = []
        i = 0
        for label, scalar in plot:
            scalars.append(scalar)
            # The summary name accumulates every index/label in the series.
            name += '_' + str(i) + '_' + label
            i += 1
        tensor = tf.reshape(tf.stack(scalars), [len(scalars)])
        scalar = tf.cond(g_index < len(scalars), lambda: tensor[g_index], lambda: tensor[0])
        summaries.append(tf.summary.scalar(name, scalar))
        print 'Generating plot with name', name
    return tf.summary.merge(summaries)
def run_summaries(sess, fdict, writers, summaries, g_index, step):
    """Evaluate `summaries` once per writer, feeding the writer index as g_index."""
    num_writers = len(writers)
    for i in range(num_writers):
        fdict[g_index] = i  # NOTE(review): mutates the caller's feed dict
        summaries_out, = sess.run([summaries], feed_dict=fdict)
        writers[i].add_summary(summaries_out, step)
# Basic
def batch_down2(img):
    """2x2 box-filter downsample of a batched image array/tensor (N, H, W, ...)."""
    top_left = img[:, ::2, ::2, ...]
    bottom_left = img[:, 1::2, ::2, ...]
    top_right = img[:, ::2, 1::2, ...]
    bottom_right = img[:, 1::2, 1::2, ...]
    return (top_left + bottom_left + top_right + bottom_right) / 4
def batch_down2rgb(img):
    # 2x2 downsample producing a trailing 3-channel axis: top-left site,
    # mean of the two off-diagonal sites, bottom-right site. Presumably a
    # Bayer (RGGB-like) demosaic-style reduction -- TODO confirm with callers.
    return tf.stack([img[:,::2,::2,...],(img[:,1::2,::2,...]+img[:,::2,1::2,...])/2,img[:,1::2,1::2,...]],axis=-1)
def down2(img):
    """2x2 box-filter downsample of a single (unbatched) image."""
    quads = (img[::2, ::2, ...], img[1::2, ::2, ...],
             img[::2, 1::2, ...], img[1::2, 1::2, ...])
    return (quads[0] + quads[1] + quads[2] + quads[3]) / 4
# Loss
def gradient(imgs):
    """Stack half forward-differences along the two image axes into a new trailing axis."""
    return tf.stack([.5*(imgs[...,1:,:-1]-imgs[...,:-1,:-1]), .5*(imgs[...,:-1,1:]-imgs[...,:-1,:-1])], axis=-1)
def gradient_loss(guess, truth):
    """Mean absolute difference between image gradients (L1 edge loss)."""
    return tf.reduce_mean(tf.abs(gradient(guess)-gradient(truth)))
def basic_img_loss(img, truth):
    """L2 pixel loss plus L1 gradient loss."""
    l2_pixel = tf.reduce_mean(tf.square(img - truth))
    l1_grad = gradient_loss(img, truth)
    return l2_pixel + l1_grad
# SSIM
def _tf_fspecial_gauss(size, sigma):
    """Function to mimic the 'fspecial' gaussian MATLAB function.

    Returns a normalized Gaussian kernel as a float32 tensor shaped
    [size, size, 1, 1], ready for use as a conv2d filter.
    """
    x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]

    x_data = np.expand_dims(x_data, axis=-1)
    x_data = np.expand_dims(x_data, axis=-1)

    y_data = np.expand_dims(y_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)

    x = tf.constant(x_data, dtype=tf.float32)
    y = tf.constant(y_data, dtype=tf.float32)

    g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
    return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5):
    """SSIM between two single-channel image batches.

    cs_map=True returns (ssim_map, contrast_structure_map) instead of just
    the SSIM map; mean_metric=True reduces the result to a scalar mean.
    """
    window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]
    K1 = 0.01
    K2 = 0.03
    L = 1  # depth of image (255 in case the image has a different scale)
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    mu1 = tf.nn.conv2d(img1, window, strides=[1,1,1,1], padding='VALID')
    mu2 = tf.nn.conv2d(img2, window, strides=[1,1,1,1],padding='VALID')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1,1,1,1],padding='VALID') - mu1_sq
    sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1,1,1,1],padding='VALID') - mu2_sq
    sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1,1,1,1],padding='VALID') - mu1_mu2
    if cs_map:
        value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)),
                (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
    else:
        value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2))

    if mean_metric:
        value = tf.reduce_mean(value)
    return value
# Eval stuff
def ckpt_num(ckpt):
    """Return the global-step number of a checkpoint path (or log dir), or -1."""
    if 'model.ckpt-' not in ckpt:
        ckpt = tf.train.latest_checkpoint(ckpt)
    if ckpt is not None:
        # Strip everything through 'model.ckpt-' (11 chars), leaving the step.
        ckpt = ckpt[ckpt.find('model.ckpt')+11:]
        ckpt = int(ckpt)
        return ckpt
    else:
        return -1
def print_keys_merge_simple(log_dir):
    """Restore a checkpoint's meta graph and print its 'output'/'filters'
    collection tensor names (only outputs whose name contains 'dnet')."""
    g = tf.Graph()
    with g.as_default():

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:

            ckpt_path = log_dir
            if 'model.ckpt' not in ckpt_path:
                ckpt_path = tf.train.latest_checkpoint(log_dir)

            if ckpt_path is not None:
                print 'Restoring from',ckpt_path
                # Only the graph structure is needed; weights are not restored.
                saver = tf.train.import_meta_graph(ckpt_path + '.meta')
                print 'Meta restored'
            else:
                print 'No checkpoint found in {}'.format(ckpt_path)
                return None

            var_col = tf.get_collection('inputs')
            noisy = var_col[0]
            dt = var_col[1]
            sig_read = var_col[2]
            output_ = tf.get_collection('output')
            output = []
            for out in output_:
                if 'dnet' in out.name:
                    output.append(out)
            filters_ = tf.get_collection('filters')
            filters = []
            for f in filters_:
                filters.append(f)

            print 'output keys'
            for k in output:
                print k
            print 'filter keys'
            for k in filters:
                print k
# def test_merge_simple(log_dir, noisy_in, truth_in, sig_in):
# g = tf.Graph()
# with g.as_default():
#
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
#
# ckpt_path = log_dir
# if 'model.ckpt' not in ckpt_path:
# ckpt_path = tf.train.latest_checkpoint(log_dir)
#
# if ckpt_path is not None:
# print 'Restoring from',ckpt_path
# saver = tf.train.import_meta_graph(ckpt_path + '.meta')
# print 'Meta restored'
# else:
# print 'No checkpoint found in {}'.format(ckpt_path)
# return None
#
# var_col = tf.get_collection('inputs')
# noisy = var_col[0]
# dt = var_col[1]
# sig_read = var_col[2]
# output_ = tf.get_collection('output')
# output = []
# for out in output_:
# if 'dnet' in out.name:
# output.append(out)
# filters_ = tf.get_collection('filters')
# filters = []
# for f in filters_:
# filters.append(f)
#
# saver.restore(sess, ckpt_path)
# print 'Weights restored'
#
# def output2dict(out_tf, out_np):
# ret = {}
# for i in range(len(out_tf)):
# ret[out_tf[i].name] = out_np[i]
# return ret
#
# def dict_combine(dict1, dict2):
# for d in dict2:
# if d not in dict1:
# dict1[d] = []
# dict1[d].append(dict2[d])
# return dict1
#
# if isinstance(noisy_in, list):
# ret_list = [{}, {}]
# for i in range(len(noisy_in)):
# print i,
# fdict = {noisy : noisy_in[i], dt : truth_in[i], sig_read : sig_in[i]}
# output_out, filters_out = sess.run([output, filters], fdict)
# ret_list[0] = dict_combine(ret_list[0], output2dict(output, output_out))
# if filters is not []:
# ret_list[1] = dict_combine(ret_list[1], output2dict(filters, filters_out))
# print 'Done'
#
# else:
# fdict = {noisy : noisy_in, dt : truth_in, sig_read : sig_in}
# output_out, filters_out = sess.run([output, filters], fdict)
# ret_list = output2dict(output, output_out), output2dict(filters, filters_out)
# return ret_list
def test_merge_simple_tt(log_dir, train_tensor, tt_mod=None, ret_filt=False, ret_grad=False):
    """Run a restored checkpoint over a train tensor, patch-wise.

    Splits train_tensor into padded patches sized to fit GPU memory (unless a
    pre-split list is supplied via tt_mod), feeds each batch through the
    restored graph, and reassembles per-output tensors. Channel layout of
    each patch is assumed to be [...,:8]=noisy, [...,8]=truth, [...,9:]=sigma
    -- TODO confirm against the training pipeline.

    Returns (outputs_dict, patch_list, filters_dict, grads_dict).
    """
    # First we split up the batch to make sure it's small enough to fit on a GTX 1080
    psize = 512
    bd = 64
    sh = train_tensor.shape
    if tt_mod is None:
        train_tensor = py_image.tensor2patches(train_tensor, psize, bd)
        print 'Traintensor resized from {} to {}'.format(sh, train_tensor.shape)
        pixlimit = (psize+2*bd)**2
        batchsize = (pixlimit-1) // np.prod(train_tensor.shape[1:3]) + 1
        numbatches = (train_tensor.shape[0]-1)//batchsize+1
        print 'With traintensor shape {}, using {} batches of length {} each'.format(
            train_tensor.shape, numbatches, batchsize)
        tt = []
        for i in range(numbatches):
            tt.append(train_tensor[i*batchsize:(i+1)*batchsize,...])
        tt_mod = tt
    else:
        tt = tt_mod
    noisy_in = [t[...,:8] for t in tt]
    truth_in = [t[...,8] for t in tt]
    sig_in = [t[...,9:] for t in tt]
    g = tf.Graph()
    with g.as_default():

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:

            ckpt_path = log_dir
            if 'model.ckpt' not in ckpt_path:
                ckpt_path = tf.train.latest_checkpoint(log_dir)

            if ckpt_path is not None:
                print 'Restoring from',ckpt_path
                saver = tf.train.import_meta_graph(ckpt_path + '.meta')
                print 'Meta restored'
                saver.restore(sess, ckpt_path)
                print 'Weights restored'
            else:
                print 'No checkpoint found in {}'.format(ckpt_path)
                return None

            # Graph I/O is recovered from named collections saved at training time.
            var_col = tf.get_collection('inputs')
            noisy = var_col[0]
            dt = var_col[1]
            sig_read = var_col[2]
            output_ = tf.get_collection('output')
            output = []
            for out in output_:
                if 'dnet' in out.name:
                    output.append(out)
            filters_ = tf.get_collection('filters')
            filters = []
            for f in filters_:
                filters.append(f)

            if ret_grad:
                grad_stuff = []
                # vals = tf.get_collection(tf.GraphKeys.LOSSES)
                # for v in vals:
                #     print v
                # total_loss = tf.reduce_sum(vals)
                # The first non-'noshow' output is treated as the real prediction.
                true_out = [out for out in output if 'noshow' not in out.name][0]
                print true_out.name
                total_loss = tf.reduce_mean(tf.square(true_out - dt))
                # total_loss = slim.losses.get_total_loss()
                loss_grad = tf.gradients(total_loss, noisy)[0]

            def output2dict(out_tf, out_np):
                # Map each tensor's name to its evaluated numpy value.
                ret = {}
                for i in range(len(out_tf)):
                    ret[out_tf[i].name] = out_np[i]
                return ret

            def dict_combine(dict1, dict2):
                # Accumulate dict2's values into per-key lists on dict1.
                for d in dict2:
                    if d not in dict1:
                        dict1[d] = []
                    dict1[d].append(dict2[d])
                return dict1

            ret_dict = {}
            filt_dict = {}
            grad_dict = {}
            to_run = {}
            to_run['output'] = output
            if ret_filt:
                to_run['filters'] = filters
            if ret_grad:
                to_run['grads'] = loss_grad
            for i in range(len(noisy_in)):
                print i,
                fdict = {noisy : noisy_in[i], dt : truth_in[i], sig_read : sig_in[i]}
                run_list = sess.run(to_run, fdict)
                output_out = run_list['output']
                ret_dict = dict_combine(ret_dict, output2dict(output, output_out))
                if ret_filt:
                    filters_out = run_list['filters']
                    filt_dict = dict_combine(filt_dict, output2dict(filters, filters_out))
                if ret_grad:
                    grad_out = run_list['grads']
                    grad_dict = dict_combine(grad_dict, {'grad' : grad_out})

            # Stitch per-batch patch results back into full-size tensors.
            ret_dict = {k: np.concatenate(ret_dict[k], axis=0) for k in ret_dict}
            ret_dict = {k: py_image.patches2tensor(ret_dict[k], sh, psize, bd) for k in ret_dict}
            if ret_filt and filt_dict is not {}:
                filt_dict = {k: np.concatenate(filt_dict[k], axis=0) for k in filt_dict}
                filt_dict = {k: py_image.patches2tensor(filt_dict[k], sh, psize, bd) for k in filt_dict}
            if ret_grad and grad_dict is not {}:
                # gg = grad_dict['grad']
                # print 'grad stuff'
                # for g in gg:
                #     print g.shape
                grad_dict = {k: np.concatenate(grad_dict[k], axis=0) for k in grad_dict}
                grad_dict = {k: py_image.patches2tensor(grad_dict[k], sh, psize, bd) for k in grad_dict}
            return ret_dict, tt_mod, filt_dict, grad_dict
# Conv stuff
def make_conv2d_vars(in_tensor, W, K, name, stddev=.01):
    """Create conv2d variables: a [K, K, in_ch, W] truncated-normal weight
    tensor and a zero-initialized bias of length W."""
    shape = [K, K, in_tensor.get_shape().as_list()[-1], W]
    initial = tf.truncated_normal(shape, stddev=stddev)
    filters = tf.Variable(initial, name=name + '_w')

    shape = [W]
    initial = tf.constant(0.0, shape=shape)
    bias = tf.Variable(initial, name=name+'_b')
    return filters, bias
# sres
def sres_upshape(x, n):
    """Pixel-shuffle upscale: fold an n*n sub-pixel grid into spatial dims,
    turning [B, H, W, n, n(, C)] into [B, H*n, W*n(, C)]."""
    ndims = len(x.get_shape().as_list())
    sh = tf.shape(x)
    if ndims==5:
        x = tf.reshape(x, [sh[0], sh[1], sh[2], n, n, sh[-1]])
        x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
        x = tf.reshape(x, [sh[0], sh[1]*n, sh[2]*n, sh[-1]])
    else:
        x = tf.reshape(x, [sh[0], sh[1], sh[2], n, n])
        x = tf.transpose(x, [0, 1, 3, 2, 4])
        x = tf.reshape(x, [sh[0], sh[1]*n, sh[2]*n])
    return x
def sres_downshape(x, n):
    """Inverse pixel-shuffle: unfold nxn spatial blocks into an n*n channel
    axis, turning [B, H, W(, C)] into [B, H//n, W//n, n*n(, C)]."""
    ndims = len(x.get_shape().as_list())
    sh = tf.shape(x)
    if ndims==4:
        x = tf.reshape(x, [sh[0], sh[1]//n, n, sh[2]//n, n, sh[-1]])
        x = tf.transpose(x, [0, 1, 3, 2, 4, 5])
        x = tf.reshape(x, [sh[0], sh[1]//n, sh[2]//n, n*n, sh[-1]])
    else:
        x = tf.reshape(x, [sh[0], sh[1]//n, n, sh[2]//n, n])
        x = tf.transpose(x, [0, 1, 3, 2, 4])
        x = tf.reshape(x, [sh[0], sh[1]//n, sh[2]//n, n*n])
    return x
# optimal convolve
def solve_convolve(noisy, truth, final_K, excl_edges=False):
    """Least-squares solve for the per-image KxK filter bank that best maps
    `noisy` to `truth`; returns filters shaped [B, K, K, in_ch, out_ch].

    excl_edges=True drops a K//2 border before solving so padded pixels do
    not bias the fit.
    """
    kpad = final_K//2
    ch = noisy.get_shape().as_list()[-1]
    ch1 = truth.get_shape().as_list()[-1]
    sh = tf.shape(noisy)
    h, w = sh[1], sh[2]
    img_stack = []
    noisy = tf.pad(noisy, [[0,0],[kpad,kpad],[kpad,kpad],[0,0]])
    # Build the per-pixel KxK neighborhood stack (im2col-style).
    for i in range(final_K):
        for j in range(final_K):
            img_stack.append(noisy[:, i:h+i, j:w+j, :])
    img_stack = tf.stack(img_stack, axis=-2)
    is0 = img_stack  # NOTE(review): unused -- presumably a debugging leftover
    if excl_edges:
        img_stack = img_stack[:, kpad:-kpad, kpad:-kpad, :]
        truth = truth[:, kpad:-kpad, kpad:-kpad]
        h = h - 2*kpad
        w = w - 2*kpad
    A = tf.reshape(img_stack, [tf.shape(img_stack)[0], h*w, final_K**2 * ch])
    b = tf.reshape(truth, [tf.shape(truth)[0], h*w, ch1])
    x_ = tf.matrix_solve_ls(A, b, fast=False)
    x = tf.reshape(x_, [tf.shape(truth)[0], final_K, final_K, ch, ch1])
    return x
def convolve(img_stack, filts, final_K, final_W):
    """Apply spatially-varying KxK filters: each output pixel is the dot
    product of its KxK*in_ch neighborhood with its own filter bank."""
    initial_W = img_stack.get_shape().as_list()[-1]

    fsh = tf.shape(filts)
    filts = tf.reshape(filts, [fsh[0], fsh[1], fsh[2], final_K ** 2 * initial_W, final_W])

    kpad = final_K//2
    imgs = tf.pad(img_stack, [[0,0],[kpad,kpad],[kpad,kpad],[0,0]])
    ish = tf.shape(img_stack)
    img_stack = []
    for i in range(final_K):
        for j in range(final_K):
            img_stack.append(imgs[:, i:tf.shape(imgs)[1]-2*kpad+i, j:tf.shape(imgs)[2]-2*kpad+j, :])
    img_stack = tf.stack(img_stack, axis=-2)
    img_stack = tf.reshape(img_stack, [ish[0], ish[1], ish[2], final_K**2 * initial_W, 1])
    img_net = tf.reduce_sum(img_stack * filts, axis=-2) # removes the final_K**2*initial_W dimension but keeps final_W
    return img_net
def optimal_convolve(noisy, truth, final_K, conv_stack=None):
    """Solve for the optimal KxK filters mapping noisy->truth, then apply them
    (to conv_stack if given, otherwise to noisy). Returns (result, filters)."""
    filts = solve_convolve(noisy, truth, final_K, True)
    fsh = tf.shape(filts)
    # Broadcast the per-image filters over all spatial positions.
    filts_ = tf.expand_dims(tf.expand_dims(filts, axis=1), axis=1)
    final_W = truth.get_shape().as_list()[-1]
    if conv_stack is None:
        conv_stack = noisy
    shift1 = convolve(conv_stack, filts_, final_K, final_W)
    return shift1, filts
# For separable stuff
def convolve_aniso(img_stack, filts, final_Kh, final_Kw, final_W, layerwise=False):
    """Anisotropic (Kh x Kw) variant of `convolve`, e.g. for separable filters.

    layerwise=True applies one filter per input channel (depthwise-style)
    instead of mixing channels into final_W outputs.
    """
    initial_W = img_stack.get_shape().as_list()[-1]

    fsh = tf.shape(filts)
    if layerwise:
        filts = tf.reshape(filts, [fsh[0], fsh[1], fsh[2], final_Kh * final_Kw, initial_W])
    else:
        filts = tf.reshape(filts, [fsh[0], fsh[1], fsh[2], final_Kh * final_Kw * initial_W, final_W])

    kpadh = final_Kh//2
    kpadw = final_Kw//2
    imgs = tf.pad(img_stack, [[0,0],[kpadh,kpadh],[kpadw,kpadw],[0,0]])
    ish = tf.shape(img_stack)
    img_stack = []
    for i in range(final_Kh):
        for j in range(final_Kw):
            img_stack.append(imgs[:, i:tf.shape(imgs)[1]-2*kpadh+i, j:tf.shape(imgs)[2]-2*kpadw+j, :])
    img_stack = tf.stack(img_stack, axis=-2)
    if layerwise:
        img_stack = tf.reshape(img_stack, [ish[0], ish[1], ish[2], final_Kh * final_Kw, initial_W])
    else:
        img_stack = tf.reshape(img_stack, [ish[0], ish[1], ish[2], final_Kh * final_Kw * initial_W, 1])
    img_net = tf.reduce_sum(img_stack * filts, axis=-2) # removes the final_K**2*initial_W dimension but keeps final_W
    return img_net
# Helper
def tf_fn_test(tf_fn):
    """Wrap a tensor-in/tensor-out function so it can be called directly on
    numpy arrays: builds a throwaway graph with one float32 placeholder per
    argument, runs a session, and returns the numpy result."""
    def ret_fn(*args):
        g = tf.Graph()
        with g.as_default():
            tf_args = []
            for arg in args:
                tf_args.append(tf.placeholder(tf.float32, shape=arg.shape))
            output = tf_fn(*tf_args)
            init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            with tf.Session(config=config) as sess:
                sess.run(init_op)
                # Fix: feed_dict must map placeholders -> numpy values. The
                # original zipped (args, tf_args), producing a dict keyed by
                # the numpy arrays instead of the placeholder tensors.
                fdict = {tf_arg : np_arg for tf_arg, np_arg in zip(tf_args, args)}
                output = sess.run(output, feed_dict=fdict)
        return output
    return ret_fn
# HDR Plus
def rcwindow(N):
    """NxN separable raised-cosine (Hann) window for blending overlapping tiles."""
    x = tf.linspace(0., N, N+1)[:-1]
    rcw = .5 - .5 * tf.cos(2.*np.pi * (x + .5) / N)
    rcw = tf.reshape(rcw,(N,1)) * tf.reshape(rcw,(1,N))
    return rcw
def roll_tf(x, shift, axis=0):
    """Circularly shift tensor x by `shift` along `axis` (like np.roll)."""
    sh = tf.shape(x)
    n = sh[axis]
    shift = shift % n
    # Split at the wrap point and swap the two slices.
    bl0 = tf.concat([sh[:axis], [n-shift], sh[axis+1:]], axis=0)
    bl1 = tf.concat([sh[:axis], [shift], sh[axis+1:]], axis=0)
    or0 = tf.concat([tf.zeros_like(sh[:axis]), [shift], tf.zeros_like(sh[axis+1:])], axis=0)
    or1 = tf.zeros_like(bl0)
    x0 = tf.slice(x, or0, bl0)
    x1 = tf.slice(x, or1, bl1)
    return tf.concat([x0, x1], axis=axis)
def hdrplus_merge(imgs, N, c, sig):
    """Frequency-domain burst merge of NxN tiles (HDR+-style).

    imgs is [batch, N, N, frames]; frame 0 is the reference. Each alternate
    frame is weighted down in the Fourier domain where it disagrees with the
    reference (Wiener-like weighting controlled by c and noise level sig).
    Returns the merged [batch, N, N] tiles, raised-cosine windowed.
    """
    ccast_tf = lambda x : tf.complex(x, tf.zeros_like(x))
    # imgs is [batch, h, w, ch]
    rcw = tf.expand_dims(rcwindow(N), axis=-1)
    imgs = imgs * rcw
    imgs = tf.transpose(imgs, [0, 3, 1, 2])
    imgs_f = tf.fft2d(ccast_tf(imgs))
    imgs_f = tf.transpose(imgs_f, [0, 2, 3, 1])
    Dz2 = tf.square(tf.abs(imgs_f[...,0:1] - imgs_f))
    Az = Dz2 / (Dz2 + c*sig**2)
    filt0 = 1 + tf.expand_dims(tf.reduce_sum(Az[...,1:], axis=-1), axis=-1)
    filts = tf.concat([filt0, 1 - Az[...,1:]], axis=-1)
    output_f = tf.reduce_mean(imgs_f * ccast_tf(filts), axis=-1)
    output_f = tf.real(tf.ifft2d(output_f))
    return output_f
def hdrplus_tiled(noisy, N, sig, c=10**2.25):
    """Full-frame HDR+ merge over overlapping N x N tiles.

    noisy : [batch, H, W, frames] burst; frame 0 is the reference.
        Assumes H and W are multiples of N (reshapes below require it) —
        TODO confirm with callers.
    N     : tile size; the four tilings are offset by 0 or N//2 per axis.
    sig, c: noise level and tuning constant forwarded to hdrplus_merge.

    Each of the four shifted tilings is merged independently and the
    results are summed; the raised-cosine window applied inside
    hdrplus_merge makes the overlapping contributions sum to a seamless
    [batch, H, W] image.
    """
    sh = tf.shape(noisy)[0:3]
    buffer = tf.zeros_like(noisy[...,0])
    allpics = []  # per-offset merged frames; collected but not returned
    for i in range(2):
        for j in range(2):
            # Shift the image so this tiling's grid aligns with tile edges.
            nrolled = roll_tf(roll_tf(noisy, shift=-N//2*i, axis=1), shift=-N//2*j, axis=2)
            # Cut into non-overlapping N x N patches:
            # -> [batch * H*W / N^2, N, N, frames]
            hpatches = (tf.transpose(tf.reshape(nrolled, [sh[0], sh[1]//N, N, sh[2]//N, N, -1]), [0,1,3,2,4,5]))
            hpatches = tf.reshape(hpatches, [sh[0]*sh[1]*sh[2]//N**2, N, N, -1])
            merged = hdrplus_merge(hpatches, N, c, sig)
            # Reassemble patches into a full frame and undo the shift.
            merged = tf.reshape(merged, [sh[0], sh[1]//N, sh[2]//N, N, N])
            merged = (tf.reshape(tf.transpose(merged, [0,1,3,2,4]), sh))
            merged = roll_tf(roll_tf(merged, axis=1, shift=N//2*i), axis=2, shift=N//2*j)
            buffer += merged
            allpics.append(merged)
    return buffer
|
google/burst-denoising
|
tf_image.py
|
Python
|
apache-2.0
| 20,387
|
[
"Gaussian"
] |
00e312a76547fbfe49ef0a25c94bccbdd80862330484947c46155be8a3811c47
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
'''
Nonlinear noise reduction
This is how the three of us got into this business. Since spectral
filters are problematic with chaotic, broad band signals, new
techniques were necessary. All the implementations here use phase
space projections for noise reduction. The programs nrlazy and
lazy use locally constant approximations of the dynamics. Rainers
nrlazy corrects the whole embedding vector, while Thomas' lazy
corrects only the center point. We haven't quite resolved yet which
is preferable. The two routines ghkss and project implement locally
linear projections (very similar). Finally, for testing purposes
you may want to add noise to data and compare the outcome of your
cleaning attempts with the true signal.
The introduction paper has a section on nonlinear noise reduction, too.
Ref:
http://www.mpipks-dresden.mpg.de/~tisean/TISEAN_2.1/docs/chaospaper/node22.html
'''
def makenoise():
    """Add noise to (possibly multivariate) data, or generate pure noise.

    With the -0 flag set, no data file is read and a zero-mean random
    series is produced (requires -r and -l); otherwise the requested
    amount of noise is added to the input data.

    Options (defaults in parentheses):
        -l#  number of points to use (whole file)
        -x#  number of lines to be ignored (0)
        -m#  number of columns to be used (1)
        -c#  column(s) to be read (1)
        -%#  noise level in percent (5)
        -r#  absolute noise level, or absolute variance if -g is set (not set)
        -g   add Gaussian noise instead of uniform (uniform)
        -I#  seed for the random number generator; if # = 0 the seed is
             taken from the time command, i.e. the number of seconds
             since Jan 1st, 1970 (some fixed value)
        -0   do not read a data file, just generate zero-mean random
             numbers; requires -r and -l (not set)

    Output: the noisy time series.
    """
    raise NotImplementedError
def addnoise():
    """Add Gaussian (default) or uniform white noise to data.

    Options:
        -r  absolute noise level
        -v  noise level as a fraction of the data's standard deviation
        -u  add uniform noise in [0:#] instead of Gaussian
        -0  do not read input, just issue random numbers
        -l  number of values to be read (all)
        -x  number of values to be skipped (0)
        -c  column to be read (1 or file,#)
        -o  output file name; bare -o means file_noisy
        -V  verbosity level (0 = only fatal errors)
        -h  show this message

    Adds Gaussian noise of rms amplitude given by -r; the amplitude can
    also be given via -v as a fraction of the data's rms amplitude.
    Either -r or -v must be present. With -0 no input files are read;
    instead, -l random numbers of magnitude -r are produced. Output is
    written to file_noisy.
    """
    raise NotImplementedError
def compare():
    """Print the rms distance between two columns of a file.

    Options:
        -l  number of values to be read (all)
        -x  number of values to be skipped (0)
        -c  columns to be read, comma separated (1,2)
        -V  verbosity level (0 = only fatal errors)
        -h  show this message
    """
    raise NotImplementedError
def nr_lazy():
    """Simple nonlinear noise reduction correcting whole embedding vectors.

    Each embedded point is replaced by the average vector computed over
    its neighbourhood of a given size. This differs from `lazy`, which
    corrects only the central component of each vector; empirically
    `lazy` tends to do better on map-like data and nr_lazy on flow-like
    data, so trying both is advisable.

    Options (defaults in parentheses):
        -l#  number of points to use (whole file)
        -x#  number of lines to be ignored (0)
        -c#  column to be read (1)
        -m#  embedding dimension (5)
        -d#  delay for the embedding (1)
        -i#  number of iterations (1)
        -r#  neighborhood size ((interval of the data) / 1000)
        -v#  neighborhood size in units of the std. dev. of the data;
             overwrites the -r option (not set)

    Output: each produced file holds the filtered time series as one
    column. At a suitable verbosity level a second column contains the
    number of neighbours found for each point; a count of 1 means no
    correction was applied for that point.
    """
    raise NotImplementedError
def lazy():
    """Simple nonlinear noise reduction with locally constant approximations.

    Corrects only the central component of each embedding vector; see
    nr_lazy for the variant that corrects the whole vector. Either -r or
    -v must be present. The cleaned sequence is written to file_lc[ccc]
    (one letter 'c' appended per iteration). With already fairly clean
    data, project or ghkss usually give superior results.

    Options:
        -m  embedding dimension
        -r  absolute radius of neighbourhoods
        -v  same as fraction of standard deviation
        -i  number of iterations (1)
        -l  number of values to be read (all)
        -x  number of values to be skipped (0)
        -c  column to be read (1 or file,#)
        -o  output file name; bare -o means file_lc, file_lcc (etc.)
        -V  verbosity level (0 = only fatal errors)
        -h  show this message

    Reference: T. Schreiber, "Extremely simple nonlinear noise reduction
    method", Phys. Rev. E 47, 2401 (1993).
    """
    raise NotImplementedError
def ghkss():
    """Nonlinear noise reduction by locally linear (orthogonal) projection.

    Performs the noise reduction proposed in Grassberger et al. [1]: an
    orthogonal projection onto a q-dimensional manifold using a special
    (tricky) metric. If the -2 flag is set, a Euclidean metric is used
    instead, as in Cawley et al. [2] and Sauer [3], which is sometimes
    useful for flow systems.

    References:
        [0] http://www.mpipks-dresden.mpg.de/~tisean/TISEAN_2.1/docs/chaospaper/node24.html
        [1] http://www.mpipks-dresden.mpg.de/~tisean/TISEAN_2.1/docs/chaospaper/citation.html#on
        [2] http://www.mpipks-dresden.mpg.de/~tisean/TISEAN_2.1/docs/chaospaper/citation.html#cawley
        [3] http://www.mpipks-dresden.mpg.de/~tisean/TISEAN_2.1/docs/chaospaper/citation.html#sauer

    Options (defaults in parentheses):
        -l#  number of points to use (whole file)
        -x#  number of lines to be ignored (0)
        -c#  column to be read (1)
        -m#  embedding dimension (5)
        -d#  delay for the embedding (1)
        -q#  dimension of the manifold to project to (3)
        -k#  minimal number of neighbours (30)
        -r#  minimal size of the neighbourhood ((interval of data) / 1000)
        -i#  number of iterations (1)
        -2   use Euclidean metric instead of the tricky one

    Output: each produced file contains the filtered time series as one
    column. The standard error device shows per-iteration statistics:
    (i) the number of vectors corrected up to the actual neighbourhood
    size, (ii) the average shift, (iii) the average correction, (iv) for
    how many points the correction was unreasonably large and (v) the
    file the corrected data was written to.
    """
    raise NotImplementedError
def project():
    """Nonlinear projective noise reduction (largely redundant with ghkss).

    The cleaned sequence is written to file_c[ccc] (one letter 'c' per
    iteration); a second output column holds the difference between the
    original and the cleaned sequence.

    Options:
        -m  embedding dimension
        -q  dimension of manifold
        -r  radius of neighbourhoods
        -k  minimal number of neighbours
        -i  number of iterations (1)
        -l  number of values to be read (all)
        -x  number of values to be skipped (0)
        -c  column to be read (1 or file,#)
        -o  output file name; bare -o means file_c, file_cc (etc.)
        -V  verbosity level (0 = only fatal errors)
        -h  show this message

    Reference: P. Grassberger, R. Hegger, H. Kantz, C. Schaffrath, and
    T. Schreiber, "On noise reduction methods for chaotic data",
    Chaos 3, 127 (1993); reprinted in: E. Ott, T. Sauer, and
    J. A. Yorke, eds., Coping With Chaos, Wiley, New York (1994).
    """
    raise NotImplementedError
|
sdia/tisane
|
nonlinear_noise_reduction.py
|
Python
|
gpl-3.0
| 9,629
|
[
"Gaussian"
] |
caac8c0d62815421ec444022b2d7840f2c9825eeed8b6478be51cbc3c468f471
|
from distutils.core import setup
import os
def version():
    """Return the CheckM version string stored in checkm/VERSION.

    The path is resolved relative to this setup.py so the command works
    from any working directory.
    """
    setup_dir = os.path.dirname(os.path.realpath(__file__))
    # Context manager fixes the original's leaked file handle (the file
    # object was opened but never closed).
    with open(os.path.join(setup_dir, 'checkm', 'VERSION')) as version_file:
        return version_file.read().strip()
# Package metadata consumed by `python setup.py ...`.
# NOTE(review): install_requires is a setuptools feature; distutils.core.setup
# silently ignores it — confirm whether setuptools should be imported instead.
setup(
    name='checkm-genome',
    version=version(),
    author='Donovan Parks, Michael Imelfort, Connor Skennerton',
    author_email='donovan.parks@gmail.com',
    packages=['checkm', 'checkm.plot', 'checkm.test', 'checkm.util'],
    scripts=['bin/checkm'],
    package_data={'checkm': ['VERSION', 'DATA_CONFIG']},
    url='http://pypi.python.org/pypi/checkm/',
    license='GPL3',
    description='Assess the quality of putative genome bins.',
    long_description=open('README.txt').read(),
    install_requires=[
        "numpy >= 1.8.0",
        "scipy >= 0.9.0",
        "matplotlib >= 1.3.1",
        "pysam >= 0.7.4, <= 0.7.7",
        "dendropy >= 4.0.0",
        "ScreamingBackpack >= 0.2.333"],
)
|
fw1121/CheckM
|
setup.py
|
Python
|
gpl-3.0
| 939
|
[
"pysam"
] |
08f386df14647908a457b501570a34fc7d96376f39377d57d419cb94f96eba0a
|
#!/usr/bin/python
# -*- coding: utf-8
'''
This file is part of VetApp.
VetApp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VetApp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VetApp. If not, see <http://www.gnu.org/licenses/>.
'''
from sqlalchemy import Column, Integer, String, Sequence, ForeignKey, DateTime, Table, Float, Boolean, Text
from sqlalchemy.orm import relationship
from models.translationtables import g_medicines_list
from models import Base
from models.vet import Vet
import datetime
'''
id
starttime
endtime
vet_id
owner_id
amanuensis
status
diagnosis
treatment
'''
class VisitAnimal(Base):
    """Per-animal record within a visit.

    Holds the free-text anamnesis/status/diagnosis/treatment fields plus
    the operations performed on the animal and the items (e.g. medicines)
    used directly during the visit.
    """
    __tablename__ = 'visitanimals'

    id = Column(Integer, Sequence('visitanimals_id_seq'), primary_key=True)
    animal_id = Column(Integer, ForeignKey('animals.id'), nullable=False)
    animal = relationship("Animal")
    anamnesis = Column(Text)
    status = Column(Text)
    diagnosis = Column(Text)
    treatment = Column(Text)
    operations = relationship("Operation", backref='visitanimals', cascade="all, delete-orphan")
    items = relationship("VisitItem", backref='visitanimals', cascade="all, delete-orphan")

    def __init__(self, animal, anamnesis='', status='', diagnosis='', treatment=''):
        self.animal = animal
        self.animal_id = animal.id
        self.anamnesis = anamnesis
        self.status = status
        self.diagnosis = diagnosis
        self.treatment = treatment
        self.operations = []
        self.items = []

    def stringList(self):
        """Summary row for list views: name, official name, specie, race."""
        return [self.animal.name, self.animal.official_name,
                self.animal.specie.name if self.animal.specie != None else '',
                self.animal.race.name if self.animal.race != None else '']

    def getAllItems(self):
        """Return every item used for this animal, both directly (self.items)
        and indirectly via the performed operations."""
        collected = []
        for visit_item in self.items:  # list holds VisitItem wrappers
            collected.append(visit_item.item)
        for oper in self.operations:
            collected.extend(oper.getItems())
        return collected

    def getAllMedicines(self):
        """Return a list of {"item": Medicine, "count": float} dicts for all
        medicines used for this animal."""
        # Imported here (not at module level) to avoid a circular import.
        from models.item import Medicine
        medicines = []
        for visit_item in self.items:  # list holds VisitItem wrappers
            if isinstance(visit_item.item, Medicine):
                medicines.append({"item": visit_item.item, "count": visit_item.count})
        for oper in self.operations:
            for item in oper.getItems():
                if isinstance(item, Medicine):
                    medicines.append({"item": item, "count": oper.count})
        return medicines

    def getOperations(self):
        """Return the operations performed on this animal during the visit."""
        return self.operations

    def getMedicineDict(self):
        """Return all medicine texts as {medicine name: customer text},
        restricted to descriptions matching this animal's specie."""
        texts = {}
        for item in self.getAllItems():
            if item.getType() in g_medicines_list:
                for desc in item.customer_descriptions:
                    # NOTE(review): identity comparison (`is`) on the specie
                    # relation assumes both sides are the same ORM instance
                    # from one session — confirm `==` is not intended.
                    if desc.specie is self.animal.specie:
                        texts[item.name] = desc.text
        return texts

    def getType(self=None):
        return 'VisitAnimal'

    def update(self, data):
        """Bulk-assign attributes from a dict; unknown keys are logged rather
        than raised."""
        for key, value in data.items():
            try:
                setattr(self, key, value)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                print("DEBUG ERROR VisitAnimal->update(): wrong variable name: " + str(key))
# Association table implementing the many-to-many link between visits and
# their per-animal records (VisitAnimal).
visit_animals_table = Table('visit_animals_table', Base.metadata,
                            Column('visitanimal_id', Integer, ForeignKey('visitanimals.id')),
                            Column('visit_id', Integer, ForeignKey('visits.id')))
class Visit(Base):
    """A single clinic visit: time span, vet, owner, reason, and the
    per-animal records (VisitAnimal) attached via visit_animals_table."""
    __tablename__ = 'visits'

    # Column definitions for the visits table.
    id = Column(Integer, Sequence('visits_id_seq'), primary_key=True)
    start_time = Column(DateTime)
    end_time = Column(DateTime)
    vet_id = Column(Integer, ForeignKey('vets.id'), nullable=False)
    vet = relationship("Vet")
    owner_id = Column(Integer, ForeignKey('owners.id'), nullable=False)
    owner = relationship("Owner")
    visit_reason = Column(Text, default="")
    visitanimals = relationship("VisitAnimal", secondary = visit_animals_table)
    archive = Column(Boolean, default=False)

    def __init__(self, start_time, owner, vet, reason, end_time=None, visitanimals=None):
        # `visitanimals=None` replaces the original mutable `[]` default,
        # which shared a single list object between all Visits constructed
        # without the argument.
        self.start_time = start_time
        self.owner = owner
        self.vet = vet
        self.end_time = end_time
        self.visitanimals = visitanimals if visitanimals is not None else []
        self.visit_reason = reason

    def getAnimalMedicines(self, animal):
        """Return [{"item": Medicine, "count": float}] for `animal`, or []
        (with an error log) if the animal is not part of this visit."""
        for anim in self.visitanimals:
            if anim.animal.id == animal.id:
                return anim.getAllMedicines()
        print("ERROR:Visit.getAnimalMedicines: did not found animal from visit")
        return []

    def getAnimalOperations(self, animal):
        """Return the operations recorded for `animal`, or [] (with an error
        log) if the animal is not part of this visit."""
        for anim in self.visitanimals:
            if anim.animal.id == animal.id:
                return anim.getOperations()
        print("ERROR:Visit.getAnimalOperations: did not found animal from visit")
        return []

    def getPriceDict(self):
        """Sum the price categories over every animal's operations and items."""
        price_dict = {}
        price_dict["operation_price"] = 0.0
        price_dict["accesories_price"] = 0.0
        price_dict["lab_price"] = 0.0
        price_dict["medicine_price"] = 0.0
        price_dict["diet_price"] = 0.0
        for visit_animal in self.visitanimals:
            for operation in visit_animal.operations:
                tmp = operation.getPriceDict()
                for key in tmp:
                    price_dict[key] += tmp[key]
            for visititem in visit_animal.items:
                tmp = visititem.getPriceDict()
                for key in tmp:
                    price_dict[key] += tmp[key]
        return price_dict

    def setCurrentTime(self):
        """Stamp the visit's end time with the current wall-clock time.

        Bug fix: the original assigned to `self.endtime`, which is not a
        mapped column (the column is `end_time`), so the end time was
        never actually stored.
        """
        self.end_time = datetime.datetime.now()

    def getType(self):
        return 'Visit'

    def update(self, data):
        """Bulk-assign attributes from a dict; unknown keys are logged rather
        than raised."""
        for key, value in data.items():
            try:
                setattr(self, key, value)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                print("DEBUG ERROR Visit->update(): wrong variable name: " + str(key))

    def stringList(self):
        """Summary row: id, reason, formatted start time, owner name."""
        return [str(self.id), self.visit_reason, self.start_time.strftime("%H:%M %d.%m.%Y"), str(self.owner.name)]
|
mape90/VetApp
|
models/visit.py
|
Python
|
gpl-3.0
| 6,757
|
[
"VisIt"
] |
2ee4876f2a44bbb0a2598e1148b05f39a87c7311db8268cf3c0003d6046700c7
|
#!/usr/bin/python
# _*_ coding:utf-8 _*_
import flickrapi
import json
import time
#Flickrapi documentation: https://stuvel.eu/flickrapi-doc/2-calling.html
#FIRST: get your own API-keys!
# --- Configuration -----------------------------------------------------------
api_key = u"YOUR_API_KEY_HERE" #Request your own key and place the key inside the quotes.
api_secret = u"YOUR_API_SECRET_HERE" #Request your own key and place the secret inside the quotes.
flickr_founded = "1076284800" #Unixtime; date flickr was founded (unused below)
timeframe = 43200 #Unixtime for 12 hours == 60seconds *60minutes *12hours; This will query the API for a certain time. Increase this number if there aren't any decent results...
one_week = 604800 #Unixtime for one week == 60seconds *60minutes *24hours* 7days (Scope is too large to be used - only a fraction of the data that is available gets returned)
#Need a Unix convertor?: http://www.unixtimestamp.com/
#Alternatively you can use the built in time module of Python.

# --- Output files and de-duplication history ---------------------------------
# NOTE(review): these handles are only closed at the very end; an exception
# mid-run leaves them unclosed (acceptable for a one-shot script).
raw_file = open("raw_data.csv", "a") #where your datapoints will be stored at
history = open("done_ids.txt", "r") #all photo_ID's that have been added in the past.
donepre = history.readlines() #Preventing adding the same photo twice.
history.close()
done = []
for item in donepre:
    item = item.strip()
    done.append(item)
donepids = open("done_ids.txt", "a")
print "Ready loading history."

# --- API client (browser-based read authentication, once per machine) --------
flickr = flickrapi.FlickrAPI(api_key, api_secret, format='json')
flickr.authenticate_via_browser(perms='read') #Requires read authentification: https://www.flickr.com/services/api/flickr.photos.getWithGeoData.html (Needs to be done once per Computer running this)
add_data = True #needed for the while loop
#################THESE ARE YOUR UPPER AND LOWER LIMITS - HARDCODED IN THE SCRIPT!###############
firstdate = 1462822400 #Bottom time limit, we shall call for all photo's that are uploaded after this timepoint.
finaldate = firstdate + timeframe #Upper time limit for our small call, the while loop will keep using this untill it reaches the enddate.)
curtime = time.time() #gets the current time in unixcode.
curtime = int(curtime)
#curtime = 1376284800 #overwrites curtime with a value set in the past. You can comment this line out of you wish to go from point X to now. Carefull however, as calling flickr too long on one end may cause connection termination.
#Still needs to run!! 29/3/2017
################City variables: Latitude, Longitude, radius(in KM) HARDCODED, replace according to the example and leave within quotes!##############
latitude = "51.215539"
longitude = "2.928629"
rad = "5"

# --- Main loop: walk the upload timeline in `timeframe`-sized windows --------
while add_data:
    page = 1
    startdate = str(firstdate)
    enddate = str(finaldate)
    # Probe query: only used to learn how many result pages this window has.
    shots = flickr.photos.search(page=str(page), has_geo="1", extras="geo, owner_name", privacy_filter="1", per_page="250", min_upload_date=startdate, max_upload_date=enddate, radius_units="km", radius=rad, lat=latitude, lon=longitude) #There's a max limit on per_page of 250!!
    parsed = json.loads(shots.decode('utf-8')) #returns a dictionary
    # NOTE(review): this loop re-reads the fixed "photos" key on every
    # top-level key of the response; a plain `parsed["photos"]["pages"]`
    # lookup would suffice.
    for key in parsed:
        part = parsed["photos"]
        total_pages = part["pages"]
    print "There are %s pages returned by flickr" %(total_pages)
    #print finaldate
    while page <= total_pages:
        shots = flickr.photos.search(page=str(page), has_geo="1", extras="geo, owner_name", privacy_filter="1", per_page="250", min_upload_date=startdate, max_upload_date=enddate, radius_units="km", radius=rad, lat=latitude, lon=longitude)
        parsed = json.loads(shots.decode('utf-8'))
        # Walk the response structure looking for the list of photo dicts.
        # NOTE(review): `key` is reused/shadowed by the three nested loops;
        # the innermost `for key in item` only causes repeat processing of
        # the same photo, which the `done` check then skips.
        for key in parsed:
            x = type(parsed[key])
            if str(x) == "<type 'dict'>":
                newdict = parsed[key]
                for key in newdict:
                    y = type(newdict[key])
                    if str(y) == "<type 'list'>":
                        for item in newdict[key]:
                            for key in item:
                                photo_id = str(item["id"].encode("utf-8"))
                                if photo_id not in done:
                                    done.append(photo_id)
                                    longt = str(item["longitude"])
                                    lat = str(item["latitude"])
                                    user_internal_id = str(item["owner"].encode("utf-8"))
                                    user_name = str(item["ownername"].encode("utf-8"))
                                    visit = "https://www.flickr.com/photos/" + user_internal_id + "/" + photo_id
                                    #print lat
                                    #print longt
                                    # Append one semicolon-separated CSV row and
                                    # record the id so reruns skip it.
                                    raw_file.write('"'+ photo_id + '";"' + user_internal_id + '";"' + user_name + '";"' + lat + '";"' + longt + '";"' + visit + '"\n')
                                    donepids.write(photo_id + "\n")
                                else:
                                    pass
                                    #print "double"
        print str(page) + " of " + str(total_pages) + " is done."
        page = page+1
        #print "page UP"
    #print "taking new data"
    # Advance the time window; stop once it passes the current time.
    firstdate = firstdate + timeframe
    finaldate = finaldate + timeframe
    #print firstdate
    print finaldate
    if curtime < firstdate:
        add_data = False
#
raw_file.close() #Closing the CSV file
donepids.close() #Closing the progress tracker file
print "Process complete"
ext = raw_input("Press enter to terminate the program")
|
Frederic-P/flickr-API-Scraper
|
City Scraper.py
|
Python
|
mit
| 5,867
|
[
"VisIt"
] |
38db483b70a6a04779954272ff3ca7e31b24d9227728f845bc9dfc95a25e330f
|
# Copyright (c) 2016 Robert Bosch LLC, USA.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# This source code is based on Neon
# https://github.com/NervanaSystems/neon/
# Copyright 2015 Nervana Systems Inc., licensed under the Apache-2.0 license,
# cf. 3rd-party-licenses.txt file in the root directory of this source tree.
# ----------------------------------------------------------------------------
"""Timing of stacked auto-encoders."""
import sys
from neon.backends import gen_backend
from neon.data import ArrayIterator
from neon.initializers import Gaussian, GlorotUniform
from neon.layers import GeneralizedCost, Affine
from neon.models import Model
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import Logistic, SumSquared, CrossEntropyMulti, Softmax
import numpy as np
import time
import pycuda.driver as drv
class Config(object):
    """Static benchmark configuration (edit in place; no CLI parsing)."""
    image_width = 28  # inputs are image_width x image_width (MNIST-sized)
    ydim = 10  # number of output classes
    batch_size = 64
    rng_seed = 23455
    backend = 'gpu'  # cpu or gpu
    num_warmup_iters = 200  # untimed batches run before measurement starts
    num_timing_iters = 200  # batches actually timed
    encoder_size = [400, 200, 100]  # the size for each auto-encoder
def measure_time(data, network, config, network_name='unknown',
                 pre_training=True, create_target=False):
    """Measure per-batch forward and gradient wall time for a network.

    data is an iterator yielding (x, t) batches. During pre-training we may
    need to generate the target t by passing x through the already-stacked
    encoder layers; this is controlled by create_target and pre_training.

    Timing uses CUDA events on the 'gpu' backend and time.time() on 'cpu';
    the first config.num_warmup_iters batches are excluded from the
    statistics, which are printed (mean +- std in ms) at the end.
    """
    if config.backend == 'gpu':
        start = drv.Event()
        end = drv.Event()
    num_iterations = config.num_warmup_iters + config.num_timing_iters
    forward_time = np.zeros(config.num_timing_iters)
    backward_time = np.zeros(config.num_timing_iters)
    n_iter = 0  # renamed from `iter`, which shadowed the builtin
    flag = True
    while flag:
        for (x, t) in data:
            n_iter += 1
            if n_iter > num_iterations:
                flag = False
                break
            if pre_training:
                if create_target:
                    # helper network is used to create target output
                    len_network = len(network.layers.layers)
                    t = x  # target x
                    # last 4 layers are the actual encoder and decoder of the
                    # auto-encoder
                    if len_network > 4:
                        for i in range(len_network - 4):
                            # pass through the encoders only to get the target
                            t = network.layers.layers[i].fprop(t)
                    else:
                        sys.exit("something wrong with the configuration")
                else:
                    t = x
            if n_iter > config.num_warmup_iters:  # time it
                if config.backend == 'cpu':
                    s = time.time() * 1000
                    x = network.fprop(x)
                    network.cost.get_cost(x, t)
                    e = time.time() * 1000  # in milliseconds
                    forward_time[n_iter - config.num_warmup_iters - 1] = e - s
                    s = time.time() * 1000
                    delta = network.cost.get_errors(x, t)
                    network.bprop(delta)
                    e = time.time() * 1000
                    backward_time[n_iter - config.num_warmup_iters - 1] = e - s
                else:
                    # CUDA events give device-side timing without relying on
                    # host-side clocks around asynchronous kernel launches.
                    start.synchronize()
                    start.record()
                    x = network.fprop(x)
                    network.cost.get_cost(x, t)
                    end.record()
                    end.synchronize()
                    forward_time[n_iter - config.num_warmup_iters - 1] \
                        = end.time_since(start)
                    start.synchronize()
                    start.record()
                    delta = network.cost.get_errors(x, t)
                    network.bprop(delta)
                    end.record()
                    end.synchronize()
                    backward_time[n_iter - config.num_warmup_iters - 1] \
                        = end.time_since(start)
            else:  # warm-up iterations
                x = network.fprop(x)
                delta = network.cost.get_errors(x, t)
                network.bprop(delta)
    print("time forward %s: %.4f +- %.4f ms, batch size: %d" %
          (network_name, np.mean(forward_time), np.std(forward_time),
           config.batch_size))
    # "gradient" time reported here is forward + backward per batch.
    print("time gradient %s: %.4f +- %.4f ms, batch size: %d" %
          (network_name, np.mean(forward_time + backward_time),
           np.std(forward_time + backward_time), config.batch_size))
config = Config()
image_size = config.image_width**2

# setup backend
be = gen_backend(backend=config.backend,
                 batch_size=config.batch_size,
                 rng_seed=config.rng_seed,
                 datatype=np.float32,
                 stochastic_round=False)

# setup optimizer (no need to do this for timing)
# optimizer_default = GradientDescentMomentum(0.1, momentum_coef=1.0,
#                                             stochastic_round=False)
# optimizer_helper = GradientDescentMomentum(0.0, momentum_coef=1.0,
#                                            stochastic_round=False)

# generate (random) data -- real data is unnecessary for timing
X = np.random.rand(config.batch_size, config.image_width**2)
y = np.random.randint(config.ydim, size=config.batch_size)

# setup a training set iterator
data = ArrayIterator(X, y, nclass=config.ydim, lshape=(1, config.image_width,
                                                       config.image_width))

# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
init_uni = GlorotUniform()

# encoder/decoder layer pairs for the three stacked auto-encoders, plus the
# final softmax classifier used in the fine-tuning MLP
encoder1 = Affine(nout=config.encoder_size[0], init=init_norm,
                  activation=Logistic(), name='encoder1')
decoder1 = Affine(nout=image_size, init=init_norm, activation=Logistic(),
                  name='decoder1')
encoder2 = Affine(nout=config.encoder_size[1], init=init_norm,
                  activation=Logistic(), name='encoder2')
decoder2 = Affine(nout=config.encoder_size[0], init=init_norm,
                  activation=Logistic(), name='decoder2')
encoder3 = Affine(nout=config.encoder_size[2], init=init_norm,
                  activation=Logistic(), name='encoder3')
decoder3 = Affine(nout=config.encoder_size[1], init=init_norm,
                  activation=Logistic(), name='decoder3')
classifier = Affine(nout=config.ydim, init=init_norm, activation=Softmax())
cost_reconst = GeneralizedCost(costfunc=SumSquared())
cost_classification = GeneralizedCost(costfunc=CrossEntropyMulti())

# Setting model layers for AE1
AE1 = Model([encoder1, decoder1])
AE1.cost = cost_reconst
AE1.initialize(data, cost_reconst)
# AE1.optimizer = optimizer_default
measure_time(data, AE1, config, 'AE1')

# Setting model layers for AE2
# It has an extra encoder layer compared to what AE should really be. This is
# done to avoid saving the outputs for each AE.
AE2_mimic = Model([encoder1, encoder2, decoder2])
AE2_mimic.cost = cost_reconst
AE2_mimic.initialize(data, cost_reconst)
# Learning rates for extra layers that should not be updated are set to zero.
# opt = MultiOptimizer({'default': optimizer_default,
#                       'encoder1': optimizer_helper})
# AE2_mimic.optimizer = opt
measure_time(data, AE2_mimic, config, 'AE2', create_target=True)

# Setting model layers for AE3
AE3_mimic = Model([encoder1, encoder2, encoder3, decoder3])
AE3_mimic.cost = cost_reconst
AE3_mimic.initialize(data, cost_reconst)
# opt = MultiOptimizer({'default': optimizer_default,
#                       'encoder1': optimizer_helper,
#                       'encoder2': optimizer_helper})
# AE3_mimic.optimizer = opt
measure_time(data, AE3_mimic, config, 'AE3', create_target=True)

# Setting model layers for fine-tuning step
mlp = Model([encoder1, encoder2, encoder3, classifier])
mlp.cost = cost_classification
mlp.initialize(data, cost_classification)
# mlp.optimizer = optimizer_default
measure_time(data, mlp, config, 'mlp', pre_training=False)
|
DL-Benchmarks/DL-Benchmarks
|
neon/stackedAE/sda.py
|
Python
|
mit
| 8,171
|
[
"Gaussian"
] |
3f21ef9753fd60d0bd2356293fc7653a75b6275cdbbd00370cee147637563499
|
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# https://www.lammps.org/ Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# ----------------------------------------------------------------------
# Contributing author: Nicholas Lubbers (LANL)
# -------------------------------------------------------------------------
import numpy as np
import torch
def calc_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    total = 0
    for param in model.parameters():
        total += param.numel()
    return total
class TorchWrapper(torch.nn.Module):
    """Adapter exposing a PyTorch model to the LAMMPS ML-IAP interface.

    forward() fills the caller-provided `beta` and `energy` numpy buffers
    in place (LAMMPS owns that memory) and returns nothing.
    """

    def __init__(self, model, n_descriptors, n_elements, n_params=None, device=None, dtype=torch.float64):
        """Wrap `model`; move it to `device` and convert it to `dtype`.

        n_params defaults to the model's total parameter count.
        """
        super().__init__()
        self.model = model
        self.device = device
        self.dtype = dtype
        # Put model on device and convert to dtype
        self.to(self.dtype)
        self.to(self.device)
        if n_params is None:
            n_params = calc_n_params(model)
        self.n_params = n_params
        self.n_descriptors = n_descriptors
        self.n_elements = n_elements

    def forward(self, elems, bispectrum, beta, energy):
        """Compute per-atom energies and descriptor gradients.

        elems, bispectrum: numpy arrays from LAMMPS (per-atom element types
            and descriptors).
        beta, energy: pre-allocated float64 numpy output buffers, written
            IN PLACE; beta receives d(energy)/d(descriptor).
        """
        # Descriptors need grad enabled so the energy can be differentiated
        # with respect to them below.
        bispectrum = torch.from_numpy(bispectrum).to(dtype=self.dtype, device=self.device).requires_grad_(True)
        # Shift element types to 0-based indices (LAMMPS types appear to be
        # 1-based — TODO confirm against the mliap caller).
        elems = torch.from_numpy(elems).to(dtype=torch.long, device=self.device) - 1
        with torch.autograd.enable_grad():
            energy_nn = self.model(bispectrum, elems)
            if energy_nn.ndim > 1:
                energy_nn = energy_nn.flatten()
            # beta = dE/d(descriptor), the quantity LAMMPS consumes.
            beta_nn = torch.autograd.grad(energy_nn.sum(), bispectrum)[0]
        beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)
        energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)
class IgnoreElems(torch.nn.Module):
    """Wrapper that discards the element-type argument and forwards only the
    descriptors to the wrapped sub-network."""

    def __init__(self, subnet):
        super().__init__()
        self.subnet = subnet

    def forward(self, bispectrum, elems):
        # `elems` is intentionally unused: the wrapped network is
        # element-agnostic.
        output = self.subnet(bispectrum)
        return output
|
akohlmey/lammps
|
python/lammps/mliap/pytorch.py
|
Python
|
gpl-2.0
| 2,354
|
[
"LAMMPS"
] |
aa5c4ee8edbd754f03968f1c50e166baf0d5cc13ba209031550476d5061d453f
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2015 Martin Sandve Alnæs
#
# This file is part of UFLACS.
#
# UFLACS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UFLACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with UFLACS. If not, see <http://www.gnu.org/licenses/>.
"""Assigning symbols to computational graph nodes."""
from ufl import product
from uflacs.datastructures.arrays import int_array, object_array
from uflacs.datastructures.crs import CRS, rows_to_crs
from uflacs.analysis.valuenumbering import ValueNumberer
from uflacs.analysis.expr_shapes import total_shape
def build_node_shapes(V):
    """Build total shapes for each node in a list representation of an
    expression graph.

    V is an array of ufl expressions, possibly nonscalar and with free
    indices. Returns a CRS where row i is the total shape of V[i].
    """
    num_nodes = len(V)
    # Collect shapes as tuples first, counting total entries for the CRS.
    shape_rows = object_array(num_nodes)
    num_entries = 0
    for row, expr in enumerate(V):
        tsh = total_shape(expr)
        shape_rows[row] = tsh
        num_entries += len(tsh)
    # Convert to the more memory efficient CRS representation.
    return rows_to_crs(shape_rows, num_nodes, num_entries, int)
def build_node_sizes(V_shapes):
    """Compute the value size (product of shape entries) for every shape row."""
    num_rows = len(V_shapes)
    sizes = int_array(num_rows)
    for row, shape in enumerate(V_shapes):
        sizes[row] = product(shape)
    return sizes
def build_node_symbols(V, e2i, V_shapes, V_sizes):
    """Tabulate scalar value numbering of all nodes in a list based
    representation of an expression graph.

    Returns:
        V_symbols - CRS of symbols (value numbers) for each component of
            each node in V.
        total_unique_symbols - number of symbol values assigned to unique
            scalar components of the nodes in V.
    """
    # Variable-row-length int matrix; capacity bounded by the number of
    # scalar subexpressions including repetitions.
    V_symbols = CRS(len(V), sum(V_sizes), int)
    # Run the value numbering algorithm over each node, pushing one row of
    # symbols per node into the CRS.
    numberer = ValueNumberer(e2i, V_sizes, V_symbols)
    for row, expr in enumerate(V):
        V_symbols.push_row(numberer(expr, row))
    # The numberer tracks how many distinct symbols it created.
    return V_symbols, numberer.symbol_count
def build_graph_symbols(V, e2i, DEBUG):
    """Tabulate scalar value numbering of all nodes in a list based representation of an expression graph.

    Returns:
    V_shapes - CRS of the total shapes of nodes in V.
    V_symbols - CRS of symbols (value numbers) of each component of each node in V.
    total_unique_symbols - The number of symbol values assigned to unique scalar components of the nodes in V.
    """
    # Total shape (value shape x index dimensions) per node, then the
    # scalar value size per node, then the value-number symbols.
    V_shapes = build_node_shapes(V)
    V_sizes = build_node_sizes(V_shapes)
    symbols, unique_count = build_node_symbols(V, e2i, V_shapes, V_sizes)
    return V_shapes, symbols, unique_count
|
FEniCS/uflacs
|
uflacs/analysis/graph_symbols.py
|
Python
|
gpl-3.0
| 3,926
|
[
"VisIt"
] |
55eb7b3afa9d714bfbb69feffe533678a8f300cde0848025d89f49f1c4af035a
|
import os
import unittest
import pysal
import numpy as np
class TestDistanceWeights(unittest.TestCase):
    """Tests for pysal's distance-based spatial weights: k-nearest-neighbour,
    kernel, continuous-threshold and DistanceBand weights.

    NOTE(review): uses `xrange`, so this module is Python 2 only.
    """
    def setUp(self):
        # Fixed seed so any randomised behaviour is reproducible.
        np.random.seed(1234)
        # Example shapefiles shipped with pysal.
        self.polyShp = pysal.examples.get_path('columbus.shp')
        self.arcShp = pysal.examples.get_path('stl_hom.shp')
        # Small hand-made planar point set used by kernel/threshold tests.
        self.points = [(
            10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
    def test_knnW(self):
        """kNN weights from a point array and from a polygon shapefile."""
        # NOTE(review): this first assignment is immediately overwritten
        # by the tuple unpacking on the next line (dead statement).
        x = np.indices((5, 5))
        x, y = np.indices((5, 5))
        x.shape = (25, 1)
        y.shape = (25, 1)
        data = np.hstack([x, y])
        wnn2 = pysal.knnW(data, k=2)
        wnn4 = pysal.knnW(data, k=4)
        # NOTE(review): no-op expression statement; result is unused.
        wnn4.neighbors[0]
        self.assertEqual(set(wnn4.neighbors[0]), set([1, 5, 6, 2]))
        self.assertEqual(set(wnn2.neighbors[5]), set([0, 6]))
        self.assertEqual(wnn2.pct_nonzero, 8.0)
        wnn3e = pysal.knnW(data, p=2, k=3)
        self.assertEqual(set(wnn3e.neighbors[0]), set([1, 5, 6]))
        # kNN weights built directly from polygon shapefile centroids.
        wc = pysal.knnW_from_shapefile(self.polyShp)
        self.assertEqual(wc.pct_nonzero, 4.081632653061225)
        self.assertEqual(set(wc.neighbors[0]), set([2, 1]))
        wc3 = pysal.knnW_from_shapefile(self.polyShp, k=3)
        self.assertEqual(wc3.weights[1], [1, 1, 1])
        self.assertEqual(set(wc3.neighbors[1]), set([0,3,7]))
    def test_knnW_arc(self):
        """kNN weights using arc (great-circle) distances on a KDTree."""
        pts = [x.centroid for x in pysal.open(self.arcShp)]
        dist = pysal.cg.sphere.arcdist  # default radius is Earth KM
        # NOTE(review): `full` is only used by the commented-out assertion
        # below; it is otherwise dead.
        full = np.matrix([[dist(pts[i], pts[j]) for j in xrange(
            len(pts))] for i in xrange(len(pts))])
        kd = pysal.cg.kdtree.KDTree(pts, distance_metric='Arc',
                                    radius=pysal.cg.sphere.RADIUS_EARTH_KM)
        w = pysal.knnW(kd, 4)
        self.assertEqual(set(w.neighbors[4]), set([1,3,9,12]))
        self.assertEqual(set(w.neighbors[40]), set([31,38,45,49]))
        #self.assertTrue((full.argsort()[:, 1:5] == np.array(
        #    [w.neighbors[x] for x in range(len(pts))])).all())
    def test_Kernel(self):
        """Kernel weights: default, fixed, per-point and adaptive bandwidths."""
        kw = pysal.Kernel(self.points)
        self.assertEqual(kw.weights[0], [1.0, 0.50000004999999503,
                                         0.44098306152674649])
        # Fixed scalar bandwidth applied to every point.
        kw15 = pysal.Kernel(self.points, bandwidth=15.0)
        self.assertEqual(kw15[0], {0: 1.0, 1: 0.33333333333333337,
                                   3: 0.2546440075000701})
        self.assertEqual(kw15.bandwidth[0], 15.)
        self.assertEqual(kw15.bandwidth[-1], 15.)
        # One bandwidth per point.
        bw = [25.0, 15.0, 25.0, 16.0, 14.5, 25.0]
        kwa = pysal.Kernel(self.points, bandwidth=bw)
        self.assertEqual(kwa.weights[0], [1.0, 0.59999999999999998,
                                          0.55278640450004202,
                                          0.10557280900008403])
        self.assertEqual(kwa.neighbors[0], [0, 1, 3, 4])
        self.assertEqual(kwa.bandwidth[0], 25.)
        self.assertEqual(kwa.bandwidth[1], 15.)
        self.assertEqual(kwa.bandwidth[2], 25.)
        self.assertEqual(kwa.bandwidth[3], 16.)
        self.assertEqual(kwa.bandwidth[4], 14.5)
        self.assertEqual(kwa.bandwidth[5], 25.)
        # Adaptive (data-driven) bandwidths.
        kwea = pysal.Kernel(self.points, fixed=False)
        self.assertEqual(kwea.weights[0], [1.0, 0.10557289844279438,
                                           9.9999990066379496e-08])
        l = kwea.bandwidth.tolist()
        self.assertEqual(l, [[11.180341005532938], [11.180341005532938],
                             [20.000002000000002], [11.180341005532938],
                             [14.142137037944515], [18.027758180095585]])
        # Adaptive bandwidths with a Gaussian kernel function.
        kweag = pysal.Kernel(self.points, fixed=False, function='gaussian')
        self.assertEqual(kweag.weights[0], [0.3989422804014327,
                                            0.26741902915776961,
                                            0.24197074871621341])
        l = kweag.bandwidth.tolist()
        self.assertEqual(l, [[11.180341005532938], [11.180341005532938],
                             [20.000002000000002], [11.180341005532938],
                             [14.142137037944515], [18.027758180095585]])
        # Kernel weights built from a shapefile with a user-supplied id.
        kw = pysal.kernelW_from_shapefile(self.polyShp, idVariable='POLYID')
        self.assertEqual(set(kw.weights[1]), set([0.0070787731484506233,
                                                  0.2052478782400463,
                                                  0.23051223027663237,
                                                  1.0
                                                  ]))
        kwa = pysal.adaptive_kernelW_from_shapefile(self.polyShp)
        self.assertEqual(kwa.weights[0], [1.0, 0.03178906767736345,
                                          9.9999990066379496e-08])
    def test_threshold(self):
        """Continuous threshold (inverse-distance) weights."""
        md = pysal.min_threshold_dist_from_shapefile(self.polyShp)
        self.assertEqual(md, 0.61886415807685413)
        wid = pysal.threshold_continuousW_from_array(self.points, 11.2)
        self.assertEqual(wid.weights[0], [0.10000000000000001,
                                          0.089442719099991588])
        # alpha=-2 -> inverse-squared distance weights.
        wid2 = pysal.threshold_continuousW_from_array(
            self.points, 11.2, alpha=-2.0)
        self.assertEqual(wid2.weights[0], [0.01, 0.0079999999999999984])
        w = pysal.threshold_continuousW_from_shapefile(
            self.polyShp, 0.62, idVariable="POLYID")
        self.assertEqual(w.weights[1], [1.6702346893743334,
                                        1.7250729841938093])
    def test_DistanceBand(self):
        """ see issue #126 """
        # DistanceBand on unit-lattice centroids must reproduce rook
        # contiguity on the same lattice.
        w = pysal.rook_from_shapefile(
            pysal.examples.get_path("lattice10x10.shp"))
        polygons = pysal.open(
            pysal.examples.get_path("lattice10x10.shp"), "r").read()
        points1 = [poly.centroid for poly in polygons]
        w1 = pysal.DistanceBand(points1, 1)
        for k in range(w.n):
            self.assertEqual(w[k], w1[k])
    def test_DistanceBand_ints(self):
        """ see issue #126 """
        # Same as test_DistanceBand but with integer coordinates.
        w = pysal.rook_from_shapefile(
            pysal.examples.get_path("lattice10x10.shp"))
        polygons = pysal.open(
            pysal.examples.get_path("lattice10x10.shp"), "r").read()
        points2 = [tuple(map(int, poly.vertices[0])) for poly in polygons]
        w2 = pysal.DistanceBand(points2, 1)
        for k in range(w.n):
            self.assertEqual(w[k], w2[k])
    def test_DistanceBand_arc(self):
        """Non-binary DistanceBand with arc distances covers all pairs."""
        pts = [x.centroid for x in pysal.open(self.arcShp)]
        dist = pysal.cg.sphere.arcdist  # default radius is Earth KM
        full = np.matrix([[dist(pts[i], pts[j]) for j in xrange(
            len(pts))] for i in xrange(len(pts))])
        kd = pysal.cg.kdtree.KDTree(pts, distance_metric='Arc',
                                    radius=pysal.cg.sphere.RADIUS_EARTH_KM)
        # Band radius = max pairwise distance, so the weight matrix must
        # equal the full distance matrix (alpha=1, non-binary).
        w = pysal.DistanceBand(kd, full.max(), binary=False, alpha=1.0)
        self.assertTrue((w.sparse.todense() == full).all())
# Module-level suite so test runners importing this module can reuse it.
suite = unittest.TestLoader().loadTestsFromTestCase(TestDistanceWeights)

if __name__ == '__main__':
    # Run the suite directly when invoked as a script.
    unittest.TextTestRunner().run(suite)
|
spreg-git/pysal
|
pysal/weights/tests/test_Distance.py
|
Python
|
bsd-3-clause
| 7,005
|
[
"COLUMBUS",
"Gaussian"
] |
2bf07b3422049e024fa2d2d1dd64882f9de4a81bb29520277bd1d7eb2ceb7683
|
'''
PathwayGenie (c) GeneGenie Bioinformatics Ltd. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
# pylint: disable=wrong-import-order
import math
import random
import re
from Bio.Seq import Seq
from synbiochem.utils import seq_utils
from parts_genie.nucl_acid_utils import NuclAcidCalcRunner
# Lookahead regex matching any candidate start codon (ATG/GTG/TTG/CTG)
# without consuming characters, so overlapping matches are found.
_START_CODON_PATT = r'(?=([ACGT]TG))'
# Effective RT term of the thermodynamic model used in get_dg/get_tir
# -- presumably kcal/mol; TODO confirm units.
_RT_EFF = 2.222
# Proportionality constant relating dG to translation initiation rate.
_K = 2500.0
class RbsCalculator():
    '''Calculates ribosome binding site (RBS) free energies and translation
    initiation rates for an mRNA, given a 16S rRNA sequence and a
    nucleic-acid folding calculator (NuclAcidCalcRunner).'''
    def __init__(self, r_rna, calc, temp=37.0):
        # r_rna: 16S rRNA sequence (stored upper-case).
        # calc: folding-calculator identifier forwarded to NuclAcidCalcRunner.
        # temp: temperature -- presumably degrees Celsius; TODO confirm.
        self.__r_rna = r_rna.upper()
        self.__runner = NuclAcidCalcRunner(calc, temp)
        # Optimal aligned spacing (nt) between SD site and start codon.
        self.__optimal_spacing = 5
        # Window (nt) considered up/downstream of a start codon when folding.
        self.__cutoff = 35
    def calc_dgs(self, m_rna, limit=float('inf')):
        '''Calculates each dg term in the free energy model and sums them to
        create dg_total.

        Returns a dict mapping start-codon position -> (dG, TIR); scanning
        stops after `limit` successfully scored start codons.'''
        m_rna = m_rna.upper()
        start_positions = []
        dgs_tirs = []
        count = 0
        for match in re.finditer(_START_CODON_PATT, m_rna):
            start_pos = match.start()
            try:
                d_g = self.__calc_dg(m_rna, start_pos)
                if not math.isinf(d_g):
                    start_positions.append(start_pos)
                    dgs_tirs.append((d_g, get_tir(d_g)))
                    count += 1
            except ValueError:
                # Occurs when start codon appears at start of sequence, and is
                # therefore leaderless. Take no action, as safe to ignore.
                continue
            if count == limit:
                break
        return dict(zip(start_positions, dgs_tirs))
    def calc_kinetic_score(self, m_rna, start_pos, dangles='none'):
        '''Gets kinetic score: the largest base-pair span in the mfe
        structure around start_pos, normalised by subsequence length.'''
        sub_m_rna = \
            m_rna[max(0, start_pos - self.__cutoff):min(len(m_rna),
                                                        start_pos +
                                                        self.__cutoff)]
        _, bp_xs, bp_ys = self.__runner.mfe([sub_m_rna], dangles=dangles)
        largest_range_helix = 0
        for (nt_x, nt_y) in zip(bp_xs[0], bp_ys[0]):
            # Only consider intramolecular pairs within the subsequence.
            if nt_x <= len(sub_m_rna) and nt_y <= len(sub_m_rna):
                val = nt_y - nt_x
                largest_range_helix = max(val, largest_range_helix)
        return float(largest_range_helix) / float(len(sub_m_rna))
    def get_initial_rbs(self, rbs_len, cds, tir_target_rel):
        '''Generates random initial condition for designing a synthetic rbs
        sequence, biased towards the target translation initiation rate.'''
        dg_target_rel = get_dg(tir_target_rel)
        # NOTE(review): `cds` is upper-cased here but never used afterwards.
        cds = cds.upper()
        # Normalise the target dG into [0, 1] over an empirical range.
        dg_range_high = 25.0
        dg_range_low = -18.0
        dg_target_rel = (dg_target_rel - dg_range_high) / \
            (dg_range_low - dg_range_high)
        # 0.0: Low expression
        # 1.0: High expression
        # NOTE(review): the first two branches below are identical.
        if dg_target_rel < 0.125:
            prob_shine_delgano = 0.50
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.250:
            prob_shine_delgano = 0.50
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.5:
            prob_shine_delgano = 0.75
            core_length = 4
            max_nonoptimal_spacing = 10
        elif dg_target_rel < 0.7:
            prob_shine_delgano = 0.75
            core_length = 4
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.8:
            prob_shine_delgano = 0.75
            core_length = 6
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.9:
            prob_shine_delgano = 0.90
            core_length = 6
            max_nonoptimal_spacing = 5
        elif dg_target_rel < 0.95:
            prob_shine_delgano = 0.90
            core_length = 8
            max_nonoptimal_spacing = 3
        else:
            prob_shine_delgano = 1.0
            core_length = 9
            max_nonoptimal_spacing = 2
        # Anti-SD sequence: reverse complement of the 16S rRNA tail.
        shine_delgano = Seq(self.__r_rna).reverse_complement()
        return self.__get_random_rbs(rbs_len, shine_delgano,
                                     prob_shine_delgano, core_length,
                                     max_nonoptimal_spacing)
    def __calc_dg(self, m_rna, start_pos):
        '''Calculates dG of translation initiation at start_pos.'''
        # Set dangles based on length between 5' end of m_rna and start codon:
        max_rbs_len = 35
        if start_pos > max_rbs_len:
            dangles = 'none'
        else:
            dangles = 'all'
        # Start codon energy:
        start_codon_energies = {'ATG': -1.194, 'GTG': -0.0748, 'TTG': -0.0435,
                                'CTG': -0.03406}
        dg_start = start_codon_energies[m_rna[start_pos:start_pos + 3]]
        # Energy of m_rna folding:
        [dg_m_rna, _, _] = \
            self.__calc_dg_m_rna(m_rna, start_pos, dangles)
        # Energy of m_rna:r_rna hybridization and folding:
        [dg_m_rna_r_rna, m_rna_subseq, bp_x, bp_y, energy_before] = \
            self.__calc_dg_m_rna_r_rna(m_rna, start_pos, dangles)
        # Standby site correction:
        dg_standby = self.__calc_dg_standby_site(m_rna_subseq, bp_x,
                                                 bp_y, energy_before,
                                                 dangles)
        # Total energy is m_rna:r_rna + start - r_rna - m_rna - standby_site:
        return dg_m_rna_r_rna + dg_start - dg_m_rna - dg_standby
    def __calc_dg_m_rna(self, m_rna, start_pos, dangles='all'):
        '''Calculates the dg_m_rna given the m_rna sequence.

        Returns (mfe energy, base-pair x list, base-pair y list) for the
        window of +/- cutoff nt around start_pos.'''
        m_rna_subseq = \
            m_rna[max(0, start_pos - self.__cutoff):min(len(m_rna),
                                                        start_pos +
                                                        self.__cutoff)]
        energies, bp_xs, bp_ys = self.__runner.mfe([m_rna_subseq],
                                                   dangles=dangles)
        return energies[0], bp_xs[0], bp_ys[0]
    def __calc_dg_m_rna_r_rna(self, m_rna, start_pos, dangles):
        '''Calculates the dg_m_rna_r_rna from the m_rna and r_rna sequence.
        Considers all feasible 16S r_rna binding sites and includes the effects
        of non-optimal spacing.'''
        energy_cutoff = 3.0
        # Footprint of the 30S complex that prevents formation of secondary
        # structures downstream of the start codon. Here, we assume that the
        # entire post-start RNA sequence does not form secondary structures
        # once the 30S complex has bound.
        footprint = 1000
        begin = max(0, start_pos - self.__cutoff)
        m_rna_len = min(len(m_rna), start_pos + self.__cutoff)
        start_pos_in_subsequence = min(start_pos, self.__cutoff)
        startpos_to_end_len = m_rna_len - start_pos_in_subsequence - begin
        # 1. identify a list of r_rna-binding sites. Binding sites are
        # hybridizations between the m_rna and r_rna and can include
        # mismatches, bulges, etc. Intra-molecular folding is also allowed
        # within the m_rna.
        # The subopt program is used to generate a list of optimal & suboptimal
        # binding sites.
        # Constraints: the entire r_rna-binding site must be upstream of the
        # start codon
        m_rna_subseq = m_rna[begin:start_pos]
        if not m_rna_subseq:
            raise ValueError('Warning: There is a leaderless start codon, ' +
                             'which is being ignored.')
        energies, bp_xs, bp_ys = self.__runner.subopt([m_rna_subseq,
                                                       self.__r_rna],
                                                      energy_cutoff,
                                                      dangles=dangles)
        if not bp_xs:
            raise ValueError(
                'Warning: The 16S r_rna has no predicted binding site. ' +
                'Start codon is considered as leaderless and ignored.')
        # 2. Calculate dg_spacing for each 16S r_rna binding site
        # Calculate the aligned spacing for each binding site in the list
        aligned_spacing = []
        for (bp_x, bp_y) in zip(bp_xs,
                                bp_ys):
            aligned_spacing.append(
                self.__calc_aligned_spacing(m_rna_subseq,
                                            start_pos_in_subsequence,
                                            bp_x, bp_y))
        dg_spacing_list = []
        dg_m_rna_r_rna = []
        dg_m_rna_r_rna_spacing = []
        # Calculate dg_spacing using aligned spacing value. Add it to
        # dg_m_rna_r_rna.
        for counter in range(len(bp_xs)):
            dg_m_rna_r_rna.append(energies[counter])
            val = self.__calc_dg_spacing(aligned_spacing[counter])
            dg_spacing_list.append(val)
            dg_m_rna_r_rna_spacing.append(
                val + energies[counter])
        # 3. Find 16S r_rna binding site that minimizes
        # dg_spacing+dg_m_rna_r_rna.
        index = dg_m_rna_r_rna_spacing.index(min(dg_m_rna_r_rna_spacing))
        dg_spacing_final = dg_spacing_list[index]
        # Check: Is the dg spacing large compared to the energy gap? If so,
        # this means the list of suboptimal 16S r_rna binding sites generated
        # by subopt is too short.
        # if dg_spacing_final > energy_cutoff:
        #    print 'Warning: The spacing penalty is greater than the ' + \
        #        'energy gap. dg (spacing) = ', dg_spacing_final
        # 4. Identify the 5' and 3' ends of the identified 16S r_rna binding
        # site. Create a base pair list.
        most_5p_m_rna = float('inf')
        most_3p_m_rna = -float('inf')
        # Generate a list of r_rna-m_rna base paired nucleotides
        bp_x_target = []
        bp_y_target = []
        bp_x = bp_xs[index]
        bp_y = bp_ys[index]
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            if nt_y > len(m_rna_subseq):  # nt is r_rna
                most_5p_m_rna = min(most_5p_m_rna, bp_x[bp_y.index(nt_y)])
                most_3p_m_rna = max(most_3p_m_rna, bp_x[bp_y.index(nt_y)])
                bp_x_target.append(nt_x)
                bp_y_target.append(nt_y)
        if most_5p_m_rna == float('inf'):
            raise ValueError(
                'Warning: The 16S r_rna has no predicted binding site. ' +
                'Start codon is considered as leaderless and ignored.')
        # The r_rna-binding site is between the nucleotides at positions
        # most_5p_m_rna and most_3p_m_rna
        # Now, fold the pre-sequence, r_rna-binding-sequence and post-sequence
        # separately. Take their base pairings and combine them together.
        # Calculate the total energy. For secondary structures, this splitting
        # operation is allowed.
        # We postulate that not all of the post-sequence can form secondary
        # structures. Once the 30S complex binds to the m_rna, it prevents the
        # formation of secondary structures that are mutually exclusive with
        # ribosome binding. We define self.footprint to be the length of the
        # 30S complex footprint. Here, we assume that the entire m_rna sequence
        # downstream of the 16S r_rna binding site can not form secondary
        # structures.
        m_rna_pre = m_rna[begin:begin + most_5p_m_rna - 1]
        post_window_end = m_rna_len + 1
        post_window_begin = min(
            start_pos + footprint, post_window_end)  # Footprint
        # NOTE(review): this reassignment is redundant -- post_window_end
        # already holds m_rna_len + 1 from three lines above.
        post_window_end = m_rna_len + 1
        m_rna_post = m_rna[post_window_begin:post_window_end]
        total_bp_x = []
        total_bp_y = []
        # Calculate pre-sequence folding
        if m_rna_pre:
            _, bp_xs, bp_ys = self.__runner.mfe([m_rna_pre], dangles=dangles)
            bp_x_pre = bp_xs[0]
            bp_y_pre = bp_ys[0]
        else:
            bp_x_pre = []
            bp_y_pre = []
        # Add pre-sequence base pairings to total base pairings
        offset = 0  # Begins at 0
        for (nt_x, nt_y) in zip(bp_x_pre, bp_y_pre):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + offset)
        # Add r_rna-binding site base pairings to total base pairings
        offset = 0  # Begins at zero
        # NOTE(review): both branches of this conditional are identical, so
        # the if/else is dead code; r_rna_offset is always
        # startpos_to_end_len.
        if startpos_to_end_len < self.__cutoff:
            r_rna_offset = startpos_to_end_len
        else:
            r_rna_offset = startpos_to_end_len
        for (nt_x, nt_y) in zip(bp_x_target, bp_y_target):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + r_rna_offset)
        # Calculate post-sequence folding
        if m_rna_post:
            _, bp_xs, bp_ys = self.__runner.mfe([m_rna_post], dangles=dangles)
            bp_x_post = bp_xs[0]
            bp_y_post = bp_ys[0]
        else:
            bp_x_post = []
            bp_y_post = []
        # Shift post-sequence pairings to their absolute window positions.
        offset = post_window_begin - begin
        for (nt_x, nt_y) in zip(bp_x_post, bp_y_post):
            total_bp_x.append(nt_x + offset)
            total_bp_y.append(nt_y + offset)
        # Evaluate the energy of the combined structure.
        m_rna_subseq = m_rna[begin:m_rna_len]
        total_energy = self.__runner.energy([m_rna_subseq, self.__r_rna],
                                            total_bp_x, total_bp_y,
                                            dangles=dangles)
        total_energy_withspacing = total_energy + dg_spacing_final
        return (total_energy_withspacing, m_rna_subseq, total_bp_x, total_bp_y,
                total_energy)
    def __calc_dg_spacing(self, aligned_spacing):
        '''Calculates the dG_spacing according to the value of the aligned
        spacing. This relationship was determined through experiments.'''
        d_s = aligned_spacing - self.__optimal_spacing
        if aligned_spacing < self.__optimal_spacing:
            # Sub-optimal (compressed) spacing: steep sigmoidal penalty.
            dg_spacing_penalty = 12.2 / \
                (1.0 + math.exp(2.5 * (d_s + 2.0))) ** 3.0
        else:
            # Super-optimal (stretched) spacing: quadratic penalty.
            dg_spacing_penalty = 0.048 * d_s * d_s + 0.24 * d_s
        return dg_spacing_penalty
    def __calc_dg_standby_site(self, m_rna, bp_x, bp_y, energy_before,
                               dangles):
        '''Calculates the dg of standby given the structure of the m_rna:r_rna
        complex.'''
        # To calculate the mfe structure while disallowing base pairing at the
        # standby site, we split the folded m_rna sequence into three parts:
        # (i) a pre-sequence (before the standby site) that can fold; (ii) the
        # standby site, which can not fold; (iii) the 16S r_rna binding site
        # and downstream sequence, which has been previously folded.
        standby_site_length = 4
        # Identify the most 5p m_rna nt that is bound to r_rna
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            # nt_x is m_rna, nt_y is r_rna, they are bound.
            if nt_x <= len(m_rna) and nt_y > len(m_rna):
                most_5p_m_rna = nt_x  # starts counting from 0
                break
        # Extract the base pairings that are 3' of the most_5p_m_rna base
        # pairing
        bp_x_3p = []
        bp_y_3p = []
        for (nt_x, nt_y) in zip(bp_x, bp_y):
            if nt_x >= most_5p_m_rna:
                bp_x_3p.append(nt_x)
                bp_y_3p.append(nt_y)
        # Create the m_rna subsequence
        m_rna_subsequence = m_rna[
            0:max(0, most_5p_m_rna - standby_site_length - 1)]
        # Fold it and extract the base pairings
        if m_rna_subsequence:
            _, bp_xs, bp_ys = self.__runner.mfe(
                [m_rna_subsequence], dangles=dangles)
            bp_x_5p = bp_xs[0]  # [0] added 12/13/07
            bp_y_5p = bp_ys[0]
        else:
            bp_x_5p = []
            bp_y_5p = []
        # Put the sets of base pairings together
        bp_x_after = []
        bp_y_after = []
        for (nt_x, nt_y) in zip(bp_x_5p, bp_y_5p):
            bp_x_after.append(nt_x)
            bp_y_after.append(nt_y)
        for (nt_x, nt_y) in zip(bp_x_3p, bp_y_3p):
            bp_x_after.append(nt_x)
            bp_y_after.append(nt_y)
        # Calculate its energy
        energy_after = self.__runner.energy([m_rna, self.__r_rna],
                                            bp_x_after, bp_y_after,
                                            dangles=dangles)
        # Standby correction is never positive.
        d_g = energy_before - energy_after
        if d_g > 0.0:
            d_g = 0.0
        return d_g
    def __get_random_rbs(self, rbs_len, shine_delgano, prob_shine_delgano,
                         core_length, max_nonoptimal_spacing):
        '''Generates a random rbs sequence tailored towards the target
        translation initiation rate.'''
        rbs = []
        # Choose core_length nucleotides.
        # Choose from the SD sequence with probability prob_shine_delgano
        # Choose from non-SD sequence with probability
        # (1 - prob_shine_delgano) / 3
        # The beginning/end of the core_length wrt to the SD sequence is
        # uniformly randomly determined.
        # core_length can't be greater then shine_delgano length:
        core_length = min(len(shine_delgano), core_length)
        diff = len(shine_delgano) - core_length
        begin = int(random.random() * diff)
        for i in range(core_length):
            if random.random() < prob_shine_delgano:
                rbs.append(shine_delgano[begin + i])
            else:
                # Pick any nucleotide except the SD one at this position.
                choices = list(seq_utils.NUCLEOTIDES)
                choices.remove(shine_delgano[begin + i])
                rbs.append(random.choice(choices))
        offset = diff - begin
        # Random spacer length within the allowed deviation from optimal.
        spacing = random.choice(range(max(
            0, offset + self.__optimal_spacing - max_nonoptimal_spacing),
            offset + self.__optimal_spacing + max_nonoptimal_spacing))
        rbs.extend([random.choice(seq_utils.NUCLEOTIDES)
                    for _ in range(spacing)])
        # if len(rbs) > MAX_RBS_LENGTH:
        #    rbs = rbs[len(rbs) - MAX_RBS_LENGTH:len(rbs) + 1]
        # Left-pad with random nucleotides up to the requested total length.
        return ''.join([random.choice(seq_utils.NUCLEOTIDES)
                        for _ in range(rbs_len - len(rbs))] + rbs)
    def __calc_aligned_spacing(self, m_rna, start_pos, bp_x, bp_y):
        '''Calculates the aligned spacing between the 16S r_rna binding site and
        the start codon.'''
        # r_rna is the concatenated at the end of the sequence in 5' to 3'
        # direction first: identify the farthest 3' nt in the r_rna that binds
        # to the mRNA and return its mRNA base pairer
        seq_len = len(m_rna) + len(self.__r_rna)
        for r_rna_nt in range(seq_len, seq_len - len(self.__r_rna), -1):
            if r_rna_nt in bp_y:
                r_rna_pos = bp_y.index(r_rna_nt)
                if bp_x[r_rna_pos] < start_pos:
                    farthest_3_prime_r_rna = r_rna_nt - len(m_rna)
                    m_rna_nt = bp_x[r_rna_pos]
                    # start_pos is counting starting from 0 (python)
                    distance_to_start = start_pos - m_rna_nt + 1
                    return distance_to_start - farthest_3_prime_r_rna
                # else:
                # First paired rRNA nt found whose mRNA partner is not
                # upstream of the start codon -> no valid spacing.
                break
        return float('inf')
def get_dg(tir):
    '''Gets dg from translation initiation rate (inverse of get_tir).'''
    return (math.log(_K) - math.log(tir)) * _RT_EFF
def get_tir(d_g):
    '''Gets translation initiation rate from dg (inverse of get_dg).'''
    return math.exp(-d_g / _RT_EFF) * _K
def _calc_longest_loop_bulge(m_rna, bp_x, bp_y, rbs=None):
''''Calculate the longest helical loop and bulge structure
(longest contiguous list of un-base paired nucleotides starting and
ending with a helix (loop -> same helix, bulge -> different helix)
in the secondary structure.'''
loop_length = 0
begin_helix = 1
bulge_loop_list = []
helical_loop_list = []
bulge_loop_start_end = []
helical_loop_start_end = []
if rbs is not None:
rbs_begin = m_rna.find(rbs)
rbs_end = rbs_begin + len(rbs)
nucleotide_range = range(rbs_begin, rbs_end + 1)
else:
nucleotide_range = range(1, len(m_rna) + 1)
# Find loops. Find bulges.
for nuc in nucleotide_range:
# nth nucleotide is not base-paired.
if bp_x.count(nuc) == 0 and bp_y.count(nuc) == 0:
# Determine if nearest neighbor nucleotides are base-paired
(x_1, x_2, y_1, y_2) = (bp_x.count(nuc - 1),
bp_x.count(nuc + 1),
bp_y.count(nuc - 1),
bp_y.count(nuc + 1))
# middle unpaired nt
if (x_1, x_2, y_1, y_2) == (0, 0, 0, 0):
loop_length += 1
# single mismatch -- loop
elif (x_1, x_2, y_1, y_2) == (1, 0, 0, 1) or \
(x_1, x_2, y_1, y_2) == (0, 1, 1, 0):
loop_length += 1
begin_helix = nuc - 1
end_helix = nuc + 1
# single mismatch -- bulge
elif (x_1, x_2, y_1, y_2) == (1, 1, 0, 0) or \
(x_1, x_2, y_1, y_2) == (0, 0, 1, 1):
loop_length += 1
begin_helix = nuc - 1
end_helix = nuc + 1
# starting unpaired nt
elif (x_1, x_2, y_1, y_2) == (1, 0, 0, 0) or \
(x_1, x_2, y_1, y_2) == (0, 0, 1, 0):
loop_length += 1
begin_helix = nuc - 1
# ending unpaired nt
elif (x_1, x_2, y_1, y_2) == (0, 1, 0, 0) or \
(x_1, x_2, y_1, y_2) == (0, 0, 0, 1):
loop_length += 1
end_helix = nuc + 1
# 1,0,1,0 is impossible w/o psuedoknots
# 0,1,0,1 is impossible w/o psuedoknots
# Also, all binary combinations with 3 or 4 true are impossible
# (nuc-1 or nuc+1 can not be in both bp_x and bp_y)
elif loop_length > 0:
# Bulge or loop?
# loop
if bp_x.count(begin_helix) > 0 and bp_y.count(end_helix) > 0 \
and bp_x.index(begin_helix) == bp_y.index(end_helix):
helical_loop_list.append(loop_length)
loop_length = 0
helical_loop_start_end.append((begin_helix, end_helix))
else:
bp_end = 0
bp_begin = 0
if bp_x.count(end_helix) > 0:
bp_begin = bp_y[bp_x.index(end_helix)]
if bp_y.count(end_helix) > 0:
bp_end = bp_x[bp_y.index(end_helix)]
if bp_x.count(begin_helix) > 0:
bp_end = bp_y[bp_x.index(begin_helix)]
if bp_y.count(begin_helix) > 0:
bp_begin = bp_x[bp_y.index(begin_helix)]
if bp_end > bp_begin:
bulge_loop_list.append(loop_length)
loop_length = 0
bulge_loop_start_end.append((begin_helix, end_helix))
else:
loop_length = 0
return helical_loop_list, bulge_loop_list, helical_loop_start_end, \
bulge_loop_start_end
|
neilswainston/PathwayGenie
|
parts_genie/rbs_calculator.py
|
Python
|
mit
| 23,199
|
[
"VisIt"
] |
e9018af414eb86fe66e501f5f7e408a232ad75fd0b461309d36704d3e546177b
|
import os
import pathlib
import platform
import tempfile
import meshio
import meshzoo
import numpy as np
import pytest
import meshplex
from ..helpers import assert_norms, is_near_equal, run
# Absolute directory of this test module.
this_dir = pathlib.Path(__file__).resolve().parent
def _compute_polygon_area(pts):
# shoelace formula
return (
np.abs(
np.dot(pts[0], np.roll(pts[1], -1)) - np.dot(np.roll(pts[0], -1), pts[1])
)
/ 2
)
# The dtype restriction is because of np.bincount.
# See <https://github.com/numpy/numpy/issues/17760> and
# <https://github.com/nschloe/meshplex/issues/90>.
cell_dtypes = [np.int32]
if platform.architecture()[0] == "64bit":
    # when numpy is fixed, np.uint32 can go to all arches;
    # np.uint64 depends on the numpy fix as well.
    cell_dtypes.extend([np.uint32, np.int64])
@pytest.mark.parametrize("cells_dtype", cell_dtypes)
def test_unit_triangle(cells_dtype):
    """Geometric quantities of a single unit right triangle, checked for
    every supported cell-index dtype."""
    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    cells = np.array([[0, 1, 2]], dtype=cells_dtype)
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-14
    # ce_ratios
    assert is_near_equal(mesh.ce_ratios.T, [0.0, 0.5, 0.5], tol)
    # control volumes
    assert is_near_equal(mesh.control_volumes, [0.25, 0.125, 0.125], tol)
    # cell volumes
    assert is_near_equal(mesh.cell_volumes, [0.5], tol)
    # circumcenters
    assert is_near_equal(mesh.cell_circumcenters, [0.5, 0.5], tol)
    # centroids
    assert is_near_equal(mesh.cell_centroids, [1.0 / 3.0, 1.0 / 3.0], tol)
    assert is_near_equal(mesh.cell_barycenters, [1.0 / 3.0, 1.0 / 3.0], tol)
    # control volume centroids
    print(mesh.control_volume_centroids)
    assert is_near_equal(
        mesh.control_volume_centroids,
        [[0.25, 0.25], [2.0 / 3.0, 1.0 / 6.0], [1.0 / 6.0, 2.0 / 3.0]],
        tol,
    )
    # incenter
    assert is_near_equal(
        mesh.cell_incenters, [[(2 - np.sqrt(2)) / 2, (2 - np.sqrt(2)) / 2]], tol
    )
    # circumcenter
    assert is_near_equal(mesh.cell_circumcenters, [[0.5, 0.5]], tol)
    assert mesh.num_delaunay_violations == 0
    assert mesh.genus == 0
    # Smoke-test the mask accessors (return values intentionally unused).
    mesh.get_cell_mask()
    mesh.get_edge_mask()
    mesh.get_vertex_mask()
    # dummy subdomain marker test
    class Subdomain:
        is_boundary_only = False
        def is_inside(self, X):
            return np.ones(X.shape[1:], dtype=bool)
    cell_mask = mesh.get_cell_mask(Subdomain())
    assert np.sum(cell_mask) == 1
    # save: round-trip through PNG and VTK writers, then clean up.
    _, filename = tempfile.mkstemp(suffix=".png")
    mesh.save(filename)
    os.remove(filename)
    _, filename = tempfile.mkstemp(suffix=".vtk")
    mesh.save(filename)
    os.remove(filename)
def test_regular_tri_additional_points():
    """A one-cell mesh with extra points not referenced by any cell:
    unused points must be flagged and get NaN control-volume centroids."""
    points = np.array(
        [
            [0.0, 3.4, 0.0],
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0],
            [3.3, 4.4, 0.0],
        ]
    )
    cells = np.array([[1, 2, 3]])
    mesh = meshplex.MeshTri(points, cells)
    # Points 0 and 4 are not part of the single cell.
    assert np.array_equal(mesh.is_point_used, [False, True, True, True, False])
    assert np.array_equal(mesh.is_boundary_point, [False, True, True, True, False])
    assert np.array_equal(mesh.is_interior_point, [False, False, False, False, False])
    tol = 1.0e-14
    assert np.array_equal(mesh.cells("points"), [[1, 2, 3]])
    mesh.create_facets()
    assert np.array_equal(mesh.cells("edges"), [[2, 1, 0]])
    assert np.array_equal(mesh.edges["points"], [[1, 2], [1, 3], [2, 3]])
    # ce_ratios
    assert is_near_equal(mesh.ce_ratios.T, [0.0, 0.5, 0.5], tol)
    # control volumes
    assert is_near_equal(mesh.control_volumes, [0.0, 0.25, 0.125, 0.125, 0.0], tol)
    # cell volumes
    assert is_near_equal(mesh.cell_volumes, [0.5], tol)
    # circumcenters
    assert is_near_equal(mesh.cell_circumcenters, [0.5, 0.5, 0.0], tol)
    # Centroids.
    # Nans appear here as the some points aren't part of any cell and hence have no
    # control volume.
    cvc = mesh.control_volume_centroids
    assert np.all(np.isnan(cvc[0]))
    assert np.all(np.isnan(cvc[4]))
    assert is_near_equal(
        cvc[1:4],
        [[0.25, 0.25, 0.0], [2.0 / 3.0, 1.0 / 6.0, 0.0], [1.0 / 6.0, 2.0 / 3.0, 0.0]],
        tol,
    )
    assert mesh.num_delaunay_violations == 0
def test_regular_tri_order():
    """Same unit triangle with a different vertex ordering: per-vertex
    quantities must follow the given ordering."""
    points = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    # Cell ordering must be preserved as given.
    assert all((mesh.cells("points") == [0, 1, 2]).flat)
    tol = 1.0e-14
    # ce_ratios
    assert is_near_equal(mesh.ce_ratios.T, [0.5, 0.0, 0.5], tol)
    # control volumes
    assert is_near_equal(mesh.control_volumes, [0.125, 0.25, 0.125], tol)
    # cell volumes
    assert is_near_equal(mesh.cell_volumes, [0.5], tol)
    # circumcenters
    assert is_near_equal(mesh.cell_circumcenters, [0.5, 0.5, 0.0], tol)
    # centroids
    assert is_near_equal(
        mesh.control_volume_centroids,
        [[1.0 / 6.0, 2.0 / 3.0, 0.0], [0.25, 0.25, 0.0], [2.0 / 3.0, 1.0 / 6.0, 0.0]],
        tol,
    )
    assert mesh.num_delaunay_violations == 0
@pytest.mark.parametrize("a", [1.0, 2.0])
def test_regular_tri2(a):
    """Equilateral triangle of side length `a` centred at the origin:
    symmetric ce_ratios/control volumes, circumcenter at the origin."""
    points = (
        np.array(
            [
                [-0.5, -0.5 * np.sqrt(3.0), 0],
                [-0.5, +0.5 * np.sqrt(3.0), 0],
                [1, 0, 0],
            ]
        )
        / np.sqrt(3)
        * a
    )
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-14
    # ce_ratios: identical for all three edges by symmetry.
    val = 0.5 / np.sqrt(3.0)
    assert is_near_equal(mesh.ce_ratios, [val, val, val], tol)
    # control volumes: one third of the cell area each.
    vol = np.sqrt(3.0) / 4 * a ** 2
    assert is_near_equal(mesh.control_volumes, [vol / 3.0, vol / 3.0, vol / 3.0], tol)
    # cell volumes
    assert is_near_equal(mesh.cell_volumes, [vol], tol)
    # circumcenters
    assert is_near_equal(mesh.cell_circumcenters, [0.0, 0.0, 0.0], tol)
# def test_degenerate_small0():
# h = 1.0e-3
# points = np.array([
# [0, 0, 0],
# [1, 0, 0],
# [0.5, h, 0.0],
# ])
# cells = np.array([[0, 1, 2]])
# mesh = meshplex.MeshTri(
# points,
# cells,
# allow_negative_volumes=True
# )
# tol = 1.0e-14
# # ce_ratios
# alpha = 0.5 * h - 1.0 / (8*h)
# beta = 1.0 / (4*h)
# assertAlmostEqual(mesh.get_ce_ratios_per_edge()[0], alpha, delta=tol)
# self.assertAlmostEqual(mesh.get_ce_ratios_per_edge()[1], beta, delta=tol)
# self.assertAlmostEqual(mesh.get_ce_ratios_per_edge()[2], beta, delta=tol)
# # control volumes
# alpha1 = 0.0625 * (3*h - 1.0/(4*h))
# alpha2 = 0.125 * (h + 1.0 / (4*h))
# assert is_near_equal(
# mesh.get_control_volumes(),
# [alpha1, alpha1, alpha2],
# tol
# )
# # cell volumes
# self.assertAlmostEqual(mesh.cell_volumes[0], 0.5 * h, delta=tol)
# # surface areas
# edge_length = np.sqrt(0.5**2 + h**2)
# # circumference = 1.0 + 2 * edge_length
# alpha = 0.5 * (1.0 + edge_length)
# self.assertAlmostEqual(mesh.surface_areas[0], alpha, delta=tol)
# self.assertAlmostEqual(mesh.surface_areas[1], alpha, delta=tol)
# self.assertAlmostEqual(mesh.surface_areas[2], edge_length, delta=tol)
# # centroids
# alpha = -41.666666669333345
# beta = 0.58333199998399976
# self.assertAlmostEqual(
# mesh.centroids[0][0],
# 0.416668000016,
# delta=tol
# )
# self.assertAlmostEqual(mesh.centroids[0][1], alpha, delta=tol)
# self.assertAlmostEqual(mesh.centroids[0][2], 0.0, delta=tol)
# self.assertAlmostEqual(mesh.centroids[1][0], beta, delta=tol)
# self.assertAlmostEqual(mesh.centroids[1][1], alpha, delta=tol)
# self.assertAlmostEqual(mesh.centroids[1][2], 0.0, delta=tol)
# self.assertAlmostEqual(mesh.centroids[2][0], 0.5, delta=tol)
# self.assertAlmostEqual(mesh.centroids[2][1], -41.666, delta=tol)
# self.assertAlmostEqual(mesh.centroids[2][2], 0.0, delta=tol)
# self.assertEqual(mesh.num_delaunay_violations, 0)
@pytest.mark.parametrize(
    "h",
    # TODO [1.0e0, 1.0e-1]
    [1.0e0],
)
def test_degenerate_small0b(h):
    """Isoceles triangle of height h over a unit base; analytic values for
    edge lengths, ce_ratios, control volumes and circumcenter."""
    points = np.array([[0, 0, 0], [1, 0, 0], [0.5, h, 0.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells, sort_cells=True)  # test sort_cells, too
    tol = 1.0e-14
    # edge lengths: two equal slanted edges plus the unit base.
    el = np.sqrt(0.5 ** 2 + h ** 2)
    assert is_near_equal(mesh.edge_lengths.T, [el, el, 1.0], tol)
    # ce_ratios
    ce0 = 0.5 / h * (h ** 2 - 0.25)
    ce12 = 0.25 / h
    assert is_near_equal(mesh.ce_ratios.T, [ce12, ce12, ce0], tol)
    # control volumes
    cv12 = 0.25 * (1.0 ** 2 * ce0 + (0.25 + h ** 2) * ce12)
    cv0 = 0.5 * (0.25 + h ** 2) * ce12
    assert is_near_equal(mesh.control_volumes, [cv12, cv12, cv0], tol)
    # cell volumes
    assert is_near_equal(mesh.cell_volumes, [0.5 * h], tol)
    # circumcenters
    assert is_near_equal(mesh.cell_circumcenters, [0.5, 0.375, 0.0], tol)
    assert mesh.num_delaunay_violations == 0
# # TODO parametrize with flat boundary correction
# def test_degenerate_small0b_fcc():
# h = 1.0e-3
# points = np.array([[0, 0, 0], [1, 0, 0], [0.5, h, 0.0]])
# cells = np.array([[0, 1, 2]])
# mesh = meshplex.MeshTri(points, cells)
#
# tol = 1.0e-14
#
# # edge lengths
# el = np.sqrt(0.5 ** 2 + h ** 2)
# assert is_near_equal(mesh.edge_lengths.T, [el, el, 1.0], tol)
#
# # ce_ratios
# ce = h
# assert is_near_equal(mesh.ce_ratios.T, [ce, ce, 0.0], tol)
#
# # control volumes
# cv = ce * el
# alpha = 0.25 * el * cv
# beta = 0.5 * h - 2 * alpha
# assert is_near_equal(mesh.control_volumes, [alpha, alpha, beta], tol)
#
# # cell volumes
# assert is_near_equal(mesh.cell_volumes, [0.5 * h], tol)
#
# # surface areas
# g = np.sqrt((0.5 * el) ** 2 + (ce * el) ** 2)
# alpha = 0.5 * el + g
# beta = el + (1.0 - 2 * g)
# assert is_near_equal(mesh.surface_areas, [alpha, alpha, beta], tol)
#
# # centroids
# centroids = mesh.control_volume_centroids
# alpha = 1.0 / 6000.0
# gamma = 0.00038888918518558031
# assert is_near_equal(centroids[0], [0.166667, alpha, 0.0], tol)
# assert is_near_equal(centroids[1], [0.833333, alpha, 0.0], tol)
# assert is_near_equal(centroids[2], [0.5, gamma, 0.0], tol)
# assert mesh.num_delaunay_violations == 0
@pytest.mark.parametrize("h, a", [(1.0e-3, 0.3)])
def test_degenerate_small1(h, a):
points = np.array([[0, 0, 0], [1, 0, 0], [a, h, 0.0]])
cells = np.array([[0, 1, 2]])
mesh = meshplex.MeshTri(points, cells)
tol = 1.0e-12
# edge lengths
el0 = np.sqrt((1.0 - a) ** 2 + h ** 2)
el1 = np.sqrt(a ** 2 + h ** 2)
el2 = 1.0
assert is_near_equal(mesh.edge_lengths.T, [[el0, el1, el2]], tol)
# ce_ratios
ce0 = 0.5 * a / h
ce1 = 0.5 * (1 - a) / h
ce2 = 0.5 * (h - (1 - a) * a / h) / el2
assert is_near_equal(mesh.ce_ratios[:, 0], [ce0, ce1, ce2], 1.0e-8)
# # control volumes
# cv1 = ce1 * el1
# alpha1 = 0.25 * el1 * cv1
# cv2 = ce2 * el2
# alpha2 = 0.25 * el2 * cv2
# beta = 0.5 * h - (alpha1 + alpha2)
# assert is_near_equal(mesh.control_volumes, [alpha1, alpha2, beta], tol)
# assert abs(sum(mesh.control_volumes) - 0.5 * h) < tol
# cell volumes
assert is_near_equal(mesh.cell_volumes, [0.5 * h], tol)
# # surface areas
# b1 = np.sqrt((0.5 * el1) ** 2 + cv1 ** 2)
# alpha0 = b1 + 0.5 * el1
# b2 = np.sqrt((0.5 * el2) ** 2 + cv2 ** 2)
# alpha1 = b2 + 0.5 * el2
# total = 1.0 + el1 + el2
# alpha2 = total - alpha0 - alpha1
# assert is_near_equal(mesh.surface_areas, [alpha0, alpha1, alpha2], tol)
assert mesh.num_delaunay_violations == 0
@pytest.mark.parametrize("h", [1.0e-2])
def test_degenerate_small2(h):
points = np.array([[0, 0, 0], [1, 0, 0], [0.5, h, 0.0], [0.5, -h, 0.0]])
cells = np.array([[0, 1, 2], [0, 1, 3]])
mesh = meshplex.MeshTri(points, cells)
tol = 1.0e-11
# ce_ratios
alpha = h - 1.0 / (4 * h)
beta = 1.0 / (4 * h)
assert is_near_equal(mesh.signed_circumcenter_distances, [alpha], tol)
alpha2 = (h - 1.0 / (4 * h)) / 2
assert is_near_equal(
mesh.ce_ratios, [[beta, beta], [beta, beta], [alpha2, alpha2]], tol
)
# control volumes
alpha1 = 0.125 * (3 * h - 1.0 / (4 * h))
alpha2 = 0.125 * (h + 1.0 / (4 * h))
assert is_near_equal(mesh.control_volumes, [alpha1, alpha1, alpha2, alpha2], tol)
# circumcenters
assert is_near_equal(
mesh.cell_circumcenters, [[0.5, -12.495, 0.0], [0.5, +12.495, 0.0]], tol
)
# cell volumes
assert is_near_equal(mesh.cell_volumes, [0.5 * h, 0.5 * h], tol)
assert mesh.num_delaunay_violations == 1
def test_rectanglesmall():
    """A 10x1 rectangle split along a diagonal into two triangles."""
    tol = 1.0e-14
    pts = np.array(
        [[0.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    )
    tri = np.array([[0, 1, 2], [0, 2, 3]])
    mesh = meshplex.MeshTri(pts, tri)
    # the shared diagonal sits exactly between the two circumcenters
    assert is_near_equal(mesh.signed_circumcenter_distances, [0.0], tol)
    expected_ce = [[5.0, 0.05], [0.0, 5.0], [0.05, 0.0]]
    assert is_near_equal(mesh.ce_ratios, expected_ce, tol)
    assert is_near_equal(mesh.control_volumes, [2.5] * 4, tol)
    assert is_near_equal(mesh.cell_volumes, [5.0] * 2, tol)
    assert mesh.num_delaunay_violations == 0
def test_pacman():
    """Regression test on the pacman mesh: compare aggregate norms
    (control volumes, ce-ratios, cell volumes) with stored reference values."""
    mesh = meshplex.read(this_dir / ".." / "meshes" / "pacman.vtu")
    run(
        mesh,
        54.312974717523744,
        [1.9213504740523146, 0.07954185111555329],
        [403.5307055719196, 0.5512267577002408],
        [1.3816992621175055, 0.0443755870238773],
    )
    assert mesh.num_delaunay_violations == 0
def test_shell():
    """Four congruent triangles forming the upper half of an octahedron
    (a genuinely non-planar surface mesh)."""
    points = np.array(
        [
            [+0.0, +0.0, +1.0],
            [+1.0, +0.0, +0.0],
            [+0.0, +1.0, +0.0],
            [-1.0, +0.0, +0.0],
            [+0.0, -1.0, +0.0],
        ]
    )
    cells = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 1, 4]])
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-14
    # all triangles are congruent, so every ce-ratio coincides
    ce_ratios = 0.5 / np.sqrt(3.0) * np.ones((4, 3))
    assert is_near_equal(mesh.ce_ratios.T, ce_ratios, tol)
    # the apex (point 0) belongs to all four cells, hence twice the share
    cv = np.array([2.0, 1.0, 1.0, 1.0, 1.0]) / np.sqrt(3.0)
    assert is_near_equal(mesh.control_volumes, cv, tol)
    cell_vols = np.sqrt(3.0) / 2.0 * np.ones(4)
    assert is_near_equal(mesh.cell_volumes, cell_vols, tol)
    assert mesh.num_delaunay_violations == 0
def test_sphere():
    """Regression test on an icosahedral sphere mesh generated by meshzoo."""
    points, cells = meshzoo.icosa_sphere(5)
    mesh = meshplex.Mesh(points, cells)
    run(
        mesh,
        12.413437988936916,
        [0.7864027242108207, 0.05524648209283611],
        [128.70115197256447, 0.3605511489598192],
        [0.5593675314375034, 0.02963260270642986],
    )
def test_update_point_coordinates():
    """Assigning new coordinates to an existing mesh must yield the same
    cell volumes as constructing a fresh mesh from those coordinates."""
    mesh = meshio.read(this_dir / ".." / "meshes" / "pacman.vtu")
    assert np.all(np.abs(mesh.points[:, 2]) < 1.0e-15)
    mesh1 = meshplex.MeshTri(mesh.points, mesh.get_cells_type("triangle"))
    # fixed seed for reproducible perturbation
    np.random.seed(123)
    X2 = mesh.points + 1.0e-2 * np.random.rand(*mesh.points.shape)
    mesh2 = meshplex.MeshTri(X2, mesh.get_cells_type("triangle"))
    mesh1.points = X2
    tol = 1.0e-12
    assert is_near_equal(mesh1.cell_volumes, mesh2.cell_volumes, tol)
def test_inradius():
    """Inradius of a 3-4-5 right triangle and of a 30-60-90 triangle."""
    tol = 1.0e-15
    # 3-4-5 right triangle: r = (3 + 4 - 5) / 2 = 1
    mesh = meshplex.MeshTri(
        np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [0.0, 4.0, 0.0]]),
        np.array([[0, 1, 2]]),
    )
    assert is_near_equal(mesh.cell_inradius, [1.0], tol)
    # 30-60-90 triangle
    a = 1.0
    coords = np.array(
        [[0.0, 0.0, 0.0], [a / 2, 0.0, 0.0], [0.0, a / 2 * np.sqrt(3.0), 0.0]]
    )
    mesh = meshplex.MeshTri(coords, np.array([[0, 1, 2]]))
    assert is_near_equal(mesh.cell_inradius, [a / 4 * (np.sqrt(3) - 1)], tol)
def test_circumradius():
    """Circumradius of a 3-4-5 right triangle and of a 30-60-90 triangle."""
    # 3-4-5 triangle: circumradius is half the hypotenuse
    points = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [0.0, 4.0, 0.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-15
    assert is_near_equal(mesh.cell_circumradius, [2.5], tol)
    # 30-60-90 triangle
    a = 1.0
    points = np.array(
        [[0.0, 0.0, 0.0], [a / 2, 0.0, 0.0], [0.0, a / 2 * np.sqrt(3.0), 0.0]]
    )
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    assert is_near_equal(mesh.cell_circumradius, [a / 2], tol)
def test_quality():
    """The radius-ratio quality measure must equal 2 * inradius / circumradius."""
    # 3-4-5 triangle
    points = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [0.0, 4.0, 0.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-15
    q = mesh.q_radius_ratio
    assert is_near_equal(q, 2 * mesh.cell_inradius / mesh.cell_circumradius, tol)
    # 30-60-90 triangle
    a = 1.0
    points = np.array(
        [[0.0, 0.0, 0.0], [a / 2, 0.0, 0.0], [0.0, a / 2 * np.sqrt(3.0), 0.0]]
    )
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    q = mesh.q_radius_ratio
    assert is_near_equal(q, 2 * mesh.cell_inradius / mesh.cell_circumradius, tol)
def test_angles():
    """Interior angles of a 3-4-5 right triangle and of a 30-60-90 triangle."""
    # 3-4-5 triangle
    points = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [0.0, 4.0, 0.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    tol = 1.0e-14
    assert is_near_equal(
        mesh.angles,
        [[np.pi / 2], [np.arcsin(4.0 / 5.0)], [np.arcsin(3.0 / 5.0)]],
        tol,
    )
    # 30-60-90 triangle
    a = 1.0
    points = np.array(
        [[0.0, 0.0, 0.0], [a / 2, 0.0, 0.0], [0.0, a / 2 * np.sqrt(3.0), 0.0]]
    )
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    # convert to degrees for readability of the expected values
    ic = mesh.angles / np.pi * 180
    assert is_near_equal(ic, [[90], [60], [30]], tol)
def test_flat_boundary():
    """Unit square fanned into four triangles around an interior point.

    Checks covolumes of the left cell, the interior control volume against
    an independently computed polygon area, and a full edge-by-edge
    reconstruction of all control volumes.
    """
    #
    # 3___________2
    # |\_ 2 _/|
    # | \_ _/ |
    # | 3 \4/ 1 |
    # | _/ \_ |
    # | _/ \_ |
    # |/ 0 \|
    # 0-----------1
    #
    x = 0.4
    y = 0.5
    X = np.array(
        [
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [1.0, 1.0, 0.0],
            [0.0, 1.0, 0.0],
            [x, y, 0.0],
        ]
    )
    cells = np.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]])
    mesh = meshplex.MeshTri(X, cells)
    # Inspect the covolumes in left cell.
    edge_length = np.sqrt(x ** 2 + y ** 2)
    ref = np.array([edge_length, edge_length, 1.0])
    assert np.all(np.abs(mesh.edge_lengths[:, 3] - ref) < 1.0e-12)
    #
    alpha = 0.5 / x * y * np.sqrt(y ** 2 + x ** 2)
    beta = 0.5 / x * (x ** 2 - y ** 2)
    ref = [alpha, alpha, beta]
    covolumes = mesh.ce_ratios[:, 3] * mesh.edge_lengths[:, 3]
    assert np.all(np.abs(covolumes - ref) < 1.0e-12)
    #
    beta = np.sqrt(alpha ** 2 + 0.2 ** 2 + 0.25 ** 2)
    # interior control volume is the polygon spanned by the circumcenters
    control_volume_corners = np.array(
        [
            mesh.cell_circumcenters[0][:2],
            mesh.cell_circumcenters[1][:2],
            mesh.cell_circumcenters[2][:2],
            mesh.cell_circumcenters[3][:2],
        ]
    )
    ref_area = _compute_polygon_area(control_volume_corners.T)
    assert np.abs(mesh.control_volumes[4] - ref_area) < 1.0e-12
    # cross-check: rebuild all control volumes edge by edge
    cv = np.zeros(X.shape[0])
    for edges, ce_ratios in zip(mesh.idx[1].T, mesh.ce_ratios.T):
        for i, ce in zip(edges, ce_ratios):
            ei = mesh.points[i[1]] - mesh.points[i[0]]
            cv[i] += 0.25 * ce * np.dot(ei, ei)
    assert np.all(np.abs(cv - mesh.control_volumes) < 1.0e-12 * cv)
def test_set_points():
    """Moving one point via set_points must match a mesh rebuilt from the
    updated point coordinates."""
    points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    cells = np.array([[0, 1, 2]])
    mesh = meshplex.MeshTri(points, cells)
    mesh.set_points([0.1, 0.1], [0])
    ref = mesh.cell_volumes.copy()
    mesh2 = meshplex.MeshTri(mesh.points, mesh.cells("points"))
    assert np.all(np.abs(ref - mesh2.cell_volumes) < 1.0e-10)
def test_reference_vals_pacman():
    """Pin down many mesh quantities on the pacman mesh via three stored
    norm values each (1-, 2-, and inf-norm, presumably — see assert_norms)."""
    mesh = meshplex.read(this_dir / ".." / "meshes" / "pacman.vtu")
    # flatten to 2D coordinates before rebuilding
    mesh = meshplex.MeshTri(mesh.points[:, :2], mesh.cells("points"))
    assert_norms(
        mesh.points,
        [3.0544932927920363e03, 8.8106015937625088e01, 4.2500000000000000e00],
        1.0e-15,
    )
    assert_norms(
        mesh.half_edge_coords,
        [1.6674048509514694e03, 1.9463913181988705e01, 3.4718650853971766e-01],
        1.0e-15,
    )
    assert_norms(
        mesh.ei_dot_ei,
        [3.7884391635599366e02, 5.5826366101867908e00, 1.2739502744897091e-01],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_partitions,
        [5.4312974717523744e01, 5.6678942198355931e-01, 8.5844237283573006e-03],
        1.0e-12,
    )
    assert_norms(
        mesh.cell_centroids,
        [5.5718766084264298e03, 1.1728553160741399e02, 4.1694621840309081e00],
        1.0e-15,
    )
    assert_norms(
        mesh.edge_lengths,
        [1.3250455158127431e03, 1.9463913181988705e01, 3.5692440018716975e-01],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_volumes,
        [5.4312974717523744e01, 1.3816992621175055e00, 4.4375587023877297e-02],
        1.0e-15,
    )
    assert_norms(
        mesh.ce_ratios,
        [1.3499477445918124e03, 2.0088073714816950e01, 5.5122675770024077e-01],
        1.0e-14,
    )
    assert_norms(
        mesh.control_volumes,
        [5.4312974717523744e01, 1.9213504740523146e00, 7.9541851115553286e-02],
        1.0e-15,
    )
    assert_norms(
        mesh.control_volume_centroids,
        [3.0478135855839828e03, 8.7829558499197603e01, 4.1869842124121526e00],
        1.0e-15,
    )
    assert_norms(
        mesh.signed_cell_volumes,
        [5.4312974717523744e01, 1.3816992621175055e00, 4.4375587023877297e-02],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_circumcenters,
        [5.5720855984960217e03, 1.1729075391802718e02, 4.1780370020570583e00],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_circumradius,
        [2.5571964535497142e02, 6.5009888666474742e00, 1.8757161840809547e-01],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_incenters,
        [5.5715778346847819e03, 1.1727887700257899e02, 4.1655515539293466e00],
        1.0e-15,
    )
    assert_norms(
        mesh.cell_inradius,
        [1.2685029853822100e02, 3.2249724655140719e00, 9.1724742697552028e-02],
        1.0e-15,
    )
    assert_norms(
        mesh.q_radius_ratio,
        [1.5359568026022387e03, 3.9044827334140905e01, 9.9999895608618172e-01],
        1.0e-15,
    )
|
nschloe/voropy
|
tests/mesh_tri/test_mesh_tri.py
|
Python
|
mit
| 22,491
|
[
"VTK"
] |
6bedeacb91a675c0badf254121c93d4243be81d634baa7af1f82a4220c15eef7
|
# Example: periodic G0W0 (KRGW) built on top of a slow k-point TDHF (TDRHF)
# reference for a two-atom carbon cell with a 2x1x1 k-point mesh.
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import KRHF
from pyscf.pbc.tdscf.krhf_slow import TDRHF
from pyscf.pbc.gw import KRGW
cell = Cell()
# two carbon atoms; coordinates are in Bohr (cell.unit = 'B' below)
cell.atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.67 1.68 1.69
'''
# minimal custom basis: one s shell and one p shell per carbon
cell.basis = {'C': [[0, (0.8, 1.0)],
[1, (1.0, 1.0)]]}
cell.pseudo = 'gth-pade'
# FCC-type lattice vectors, in Bohr
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 7
cell.build()
# mean-field reference: k-point restricted HF on a 2x1x1 mesh
model = KRHF(cell, cell.make_kpts([2, 1, 1]))
model.kernel()
# TDHF response on top of the mean field
model_td = TDRHF(model)
model_td.kernel()
# GW quasiparticle energies from the TDHF screening
model_gw = KRGW(model_td)
model_gw.kernel()
print(model_gw.mo_energy)
|
sunqm/pyscf
|
examples/gw/31-pbc_slow.py
|
Python
|
apache-2.0
| 696
|
[
"PySCF"
] |
fbed8b12376572eb985d3ed44af8ec81135186ed6be13baaf22e5049b0c6dcc1
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Utilities to manipulate FASTQ and Reads"""
import zlib
from itertools import islice
import gzip
import subprocess
from functools import wraps
from collections import Counter, defaultdict
from sequana.lazy import numpy as np
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana.tools import GZLineCounter
from easydev import Progress
import pysam
try:
from itertools import izip_longest
except:
from itertools import zip_longest as izip_longest
import colorlog
logger = colorlog.getLogger(__name__)
# for filter fastq files. see below in FastQ for the usage
# we want to take 4 lines at a time (assuming there is no empty lines)
def grouper(iterable):
    """Yield consecutive 4-tuples from *iterable*.

    A FASTQ record spans exactly four lines (identifier, sequence, ``+``
    separator, quality), so grouping by four walks the file record by
    record. The final tuple is padded with ``None`` if the input length
    is not a multiple of four.
    """
    # The same iterator is passed four times, so each tuple advances it
    # by four elements.
    it = iter(iterable)
    return izip_longest(it, it, it, it)
__all__ = ["Identifier", "FastQ", "FastQC", "is_fastq"]
def is_fastq(filename):
    """Return True if *filename* looks like an (uncompressed) FASTQ file.

    Only the first record is inspected: line 1 must start with ``@`` and
    line 3 must be a lone ``+`` separator. Any parsing problem (wrong
    marker, non-text content) yields False.

    :param str filename: path to the file to probe
    :return: bool
    """
    with open(filename, "r") as fin:
        try:
            line = fin.readline()
            assert line.startswith("@")
            line = fin.readline()  # sequence line (not validated)
            line = fin.readline()
            assert line.startswith("+") and len(line.strip()) == 1
            line = fin.readline()  # quality line (not validated)
            return True
        # narrowed from a bare ``except:``: only the failures this probe
        # can legitimately produce (failed assert, binary/non-UTF8 data)
        except (AssertionError, UnicodeDecodeError):  # pragma: no cover
            return False
class Identifier(object):
    """Class to interpret Read's identifier
    .. warning:: Implemented for Illumina 1.8+ and 1.4 . Other cases
    will simply stored the identifier without interpretation
    .. doctest::
    >>> from sequana import Identifier
    >>> ident = Identifier('@EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG')
    >>> ident.info['x_coordinate']
    '15343'
    Currently, the following identifiers will be recognised automatically:
    :Illumina_1_4: An example is ::
    @HWUSI-EAS100R:6:73:941:1973#0/1
    :Illumina_1_8: An example is::
    @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
    Other that could be implemented are NCBI ::
    @FSRRS4401BE7HA [length=395] [gc=36.46] [flows=800] [phred_min=0] \
    [phred_max=40] [trimmed_length=95]
    Information can also be found here http://support.illumina.com/help/SequencingAnalysisWorkflow/Content/Vault/Informatics/Sequencing_Analysis/CASAVA/swSEQ_mCA_FASTQFiles.htm
    """
    def __init__(self, identifier, version="unknown"):
        # keep a private copy of the raw identifier
        self.identifier = identifier[:]
        if version == "Illumina_1.8+":
            info = self._interpret_illumina_1_8()
        elif version == "Illumina_1.4+":
            info = self._interpret_illumina_1_4()
        else:
            # autodetect: try 1.8+ first, then 1.4; otherwise store the
            # identifier verbatim and report version "unknown"
            try:
                info = self._interpret_illumina_1_8()
                version = "Illumina_1.8+"
            except:
                try:
                    info = self._interpret_illumina_1_4()
                    version = "Illumina_1.4+"
                except:
                    info = self.identifier[:]
        self.info = info
        self.version = version
    def _interpret_illumina_1_8(self):
        """
        @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
        Note the space and : separators
        """
        # NOTE(review): this parser works on *bytes* (b"@", b":"), while
        # _interpret_illumina_1_4 below uses str operations. Mixed input
        # types are only reconciled by the try/except fallback in
        # __init__ — confirm which type callers actually pass.
        assert self.identifier.startswith(b"@")
        # skip @ character
        identifier = self.identifier[1:]
        # replace spaces by : character
        identifier = b" ".join(identifier.split())
        identifier = identifier.replace(b" ", b":")
        items = identifier.split(b":")
        if len(items) != 11:  # pragma: no cover
            raise ValueError("Number of items in the identifier should be 11")
        res = {}
        res["identifier"] = self.identifier[:]
        res["instrument"] = items[0]
        res["run_id"] = items[1]
        res["flowcell_id"] = items[2]
        res["flowcell_lane"] = items[3]
        res["tile_number"] = items[4]
        res["x_coordinate"] = items[5]
        res["y_coordinate"] = items[6]
        res["member_pair"] = items[7]
        res["filtered"] = items[8]
        res["control_bits"] = items[9]
        res["index_sequence"] = items[10]
        res["version"] = "Illumina_1.8+"
        return res
    def _interpret_illumina_1_4(self):
        # NOTE(review): str-based parser (see note in _interpret_illumina_1_8)
        # skip @ character
        identifier = self.identifier[1:]
        # normalize '#' and '/' separators to ':' before splitting
        identifier = identifier.replace("#", ":")
        identifier = identifier.replace("/", ":")
        items = identifier.split(":")
        # ['@HWUSI-EAS100R', '6', '73', '941', '1973#0/1']
        res = {}
        res["identifier"] = self.identifier[:]
        res["instrument_name"] = items[0]
        res["flowcell_lane"] = items[1]
        res["tile_number"] = items[2]
        res["x_coordinate"] = items[3]
        res["y_coordinate"] = items[4]
        res["index"] = "#" + items[5]
        res["member_pair"] = items[6]
        res["version"] = "Illumina_1.4+"
        return res
    def __str__(self):
        # human-readable dump of the parsed fields, sorted by key
        txt = ""
        for key in sorted(self.info.keys()):
            txt += "%s: %s\n" % (key, self.info[key])
        return txt
    def __repr__(self):
        return "Identifier (%s)" % self.version
class FastQ(object):
"""Class to handle FastQ files
Some of the methods are based on pysam but a few are also
original to sequana. In general, input can be zipped ot not and
output can be zipped or not (based on the extension).
An example is the :meth:`extract_head` method::
f = FastQ("input_file.fastq.gz")
f.extract_head(100000, output='test.fastq')
f.extract_head(100000, output='test.fastq.gz')
equivalent to::
zcat myreads.fastq.gz | head -100000 | gzip > test100k.fastq.gz
An efficient implementation to count the number of lines is also available::
f.count_lines()
or reads (assuming 4 lines per read)::
f.count_reads()
Operators available:
- equality ==
"""
"""
Features to implement::
- filter out short / long reads
- filter out reads with NNN
- filter out low quality end reads
- cut poly A/T tails
- dereplicate sequences
- split multiplex
- remove contaminants
- compact fastq
- convert to/from sff
"""
_N = 4
    def __init__(self, filename, verbose=False):
        """.. rubric:: Constructor

        :param str filename: a FASTQ file, possibly gzipped (``.gz``)
        :param bool verbose: verbosity flag (stored; not used in this method)
        """
        self.filename = filename
        self.verbose = verbose
        # caches for the (expensive) read/line counts; filled lazily
        self._count_reads = None
        self._count_lines = None
        # opens the file in read mode
        self.__enter__()
        # Can we identify the type of data ?
        try:
            self.identifier = Identifier(self.next()["identifier"])
            self.rewind()
            self.data_format = self.identifier.version
        except:
            # NOTE(review): bare except — any failure (including an empty
            # file) silently falls back to "unknown"
            self.data_format = "unknown"
    def get_lengths(self):
        """Return the length of every read (one entry per read)."""
        return [len(x["sequence"]) for x in self]
    def _get_count_reads(self):
        # lazily computed, then cached
        if self._count_reads is None:
            self._count_reads = self.count_reads()
        return self._count_reads
    n_reads = property(_get_count_reads, doc="return number of reads")
    def _get_count_lines(self):
        # lazily computed, then cached
        if self._count_lines is None:
            self._count_lines = self.count_lines()
        return self._count_lines
    n_lines = property(_get_count_lines, doc="return number of lines (should be 4 times number of reads)")
    def __len__(self):
        # length of a FastQ is its number of reads
        return self.n_reads
    def rewind(self):
        """Allows to iter from the beginning without openning the file or
        creating a new instance.
        """
        # preserve the cached read count across the re-open
        nreads = self._count_reads
        self._fileobj.close()
        self.__enter__()
        self._count_reads = nreads
    def _count_lines_gz(self, CHUNKSIZE=65536):
        # delegate to sequana's gzip-aware line counter
        ff = GZLineCounter(self.filename)
        return len(ff)
    def count_lines(self):
        """Return number of lines"""
        if self.filename.endswith("gz"):
            count = self._count_lines_gz()
        else:
            # NOTE: despite its name, _count_reads_buf counts *lines*
            count = self._count_reads_buf()
        return count
    def count_reads(self):
        """Return count_lines divided by 4"""
        nlines = self.count_lines()
        # a valid FASTQ must have a multiple of 4 lines
        if divmod(nlines, self._N)[1] != 0:
            print("WARNING. number of lines not multiple of 4.")
        return int(nlines / self._N)
def _count_reads_buf(self, block=1024 * 1024):
# 0.12 seconds to read 3.4M lines, faster than wc command
# on 2M reads, takes 0.1 seconds whereas wc takes 1.2 seconds
lines = 0
with open(self.filename, "rb") as f:
buf = f.read(block)
while buf:
lines += buf.count(b"\n")
buf = f.read(block)
return lines
    def extract_head(self, N, output_filename):
        """Extract the heads of a FastQ files
        :param int N:
        :param str output_filename: Based on the extension
        the output file is zipped or not (.gz extension only)
        This function is convenient since it takes into account
        the input file being compressed or not and the output file
        being compressed ot not. It is in general 2-3 times faster than the
        equivalent unix commands combined together but is 10 times
        slower for the case on uncompressed input and uncompressed output.
        .. warning:: this function extract the N first lines and does not check
        if there are empty lines in your FastQ/FastA files.
        """
        # dispatch on the *input* compression; output compression is
        # handled inside each helper
        if self.filename.endswith(".gz"):
            self._extract_head_gz(N, output_filename)
        else:
            self._extract_head(N, output_filename)
    def _extract_head(self, N, output_filename):
        # plain-text input: islice copies the first N lines verbatim
        with open(self.filename, "r") as fin:
            if output_filename.endswith("gz"):
                output_filename_nogz = output_filename.replace(".gz", "")
                with open(output_filename_nogz, "w") as fout:
                    fout.writelines(islice(fin, N))
                # compress the file
                self._gzip(output_filename_nogz)
            else:
                with open(output_filename, "w") as fout:
                    fout.writelines(islice(fin, N))
    def _gzip(self, filename):
        """Compress *filename* in place, preferring pigz over gzip."""
        try:
            s = subprocess.Popen(["pigz", "-f", filename])
            s.wait()
        except:  # pragma: no cover
            # pigz not installed: fall back on the ubiquitous gzip
            s = subprocess.Popen(["gzip", filename, "-f"])
            s.wait()
    def _extract_head_gz(self, N, output_filename="test.fastq.gz", level=6, CHUNKSIZE=65536):
        """
        If input is compressed:
        if output not compressed, this is 20% faster than
        "zcat file | head -1000000 > output.fastq
        If output is compressed, this is 3-4 times faster than :
        "zcat file | head -1000000 | gzip > output.fastq
        If input is compressed:
        if output not compressed, this is 10 times slower than
        "head -1000000 > output.fastq
        If output is compressed, this is 3-4 times faster than :
        "head -1000000 | gzip > output.fastq
        Tested with Python 3.5 , Linux box.
        """
        # make sure N is integer
        N = int(N)
        # as fast as zcat file.fastq.gz | head -200000 > out.fastq
        # this is to supress the header
        # (16 + MAX_WBITS tells zlib to expect/skip the gzip header)
        decoder = zlib.decompressobj(16 + zlib.MAX_WBITS)
        # will we gzip the output file ?
        output_filename, tozip = self._istozip(output_filename)
        with open(self.filename, "rb") as fin:
            buf = fin.read(CHUNKSIZE)
            count = 0  # number of lines written so far
            with open(output_filename, "wb") as fout:
                while buf:
                    outstr = decoder.decompress(buf)
                    if len(outstr) == 0:  # pragma: no cover
                        msg = (
                            "Error while decompressing the zip file. may need"
                            + "to dezip/rezip the data. known issue in extract_head"
                        )
                        logger.error(msg)
                        raise ValueError(msg)
                    this_count = outstr.count(b"\n")
                    if count + this_count > N:
                        # there will be too many lines, we need to select a subset
                        missing = N - count
                        # outstr = outstr.strip().split(b"\n")
                        # Fix https://github.com/sequana/sequana/issues/536
                        outstr = outstr.split(b"\n")
                        outstr = b"\n".join(outstr[0:missing]) + b"\n"
                        fout.write(outstr)
                        break
                    else:  # pragma: no cover
                        count += this_count
                        fout.write(outstr)  # pragma: no cover
                    buf = fin.read(CHUNKSIZE)  # pragma: no cover
        # compress the result if the caller asked for a .gz output
        if tozip is True:
            self._gzip(output_filename)
        return count
def _istozip(self, filename):
if filename.endswith(".gz"):
tozip = True
filename = filename.split(".gz", 1)[0]
else:
tozip = False
return filename, tozip
def select_reads(self, read_identifiers, output_filename=None, progress=True):
"""
identifiers must be the name of the read without starting @ sign and
without comments.
"""
fastq = pysam.FastxFile(self.filename)
if output_filename is None: # pragma: no cover
output_filename = os.path.basename(self.filename) + ".select"
thisN = len(self)
pb = Progress(thisN) # since we scan the entire file
with open(output_filename, "w") as fh:
for i, read in enumerate(fastq):
if read.name in read_identifiers:
fh.write(read.__str__() + "\n")
else:
pass
if progress:
pb.animate(i + 1)
    def select_random_reads(self, N=None, output_filename="random.fastq"):
        """Select random reads and save in a file
        :param int N: number of random unique reads to select
        should provide a number but a list can be used as well.
        You can select random reads for R1, and re-use the returned list as
        input for the R2 (since pairs must be kept)
        :param str output_filename:
        If you have a pair of files, the same reads must be selected in R1 and
        R2.::
        f1 = FastQ(file1)
        selection = f1.select_random_reads(N=1000)
        f2 = FastQ(file2)
        f2.select_random_reads(selection)
        .. versionchanged:: 0.9.8 use list instead of set to keep integrity of
        paired-data
        """
        thisN = len(self)
        if isinstance(N, int):
            # clamp to the number of available reads
            if N > thisN:
                N = thisN
            # create random set of reads to pick up
            cherries = list(range(thisN))
            np.random.shuffle(cherries)
            cherries = cherries[0:N]
            # changes v0.9.8 use list (not sets to keep same order in R2)
        elif isinstance(N, list):
            # reuse a previous selection (R1/R2 pairing)
            cherries = N
        fastq = pysam.FastxFile(self.filename)
        # set copy only for the O(1) membership test below; the returned
        # value keeps the original list order
        cherries_set = set(cherries)
        pb = Progress(thisN)  # since we scan the entire file
        with open(output_filename, "w") as fh:
            for i, read in enumerate(fastq):
                if i in cherries_set:
                    fh.write(read.__str__() + "\n")
                else:
                    pass
                pb.animate(i + 1)
        return cherries
    def split_lines(self, N=100000, gzip=True):
        """Split the file into chunks of *N* lines; return the output filenames.

        Dispatches on the input compression; *N* must be a multiple of 4 so
        reads are never cut in half.
        """
        if self.filename.endswith(".gz"):
            outputs = self._split_lines_gz(N, gzip=gzip)
        else:
            outputs = self._split_lines(N, gzip=gzip)
        return outputs
    def _split_lines_gz(self, N, gzip=True, CHUNKSIZE=65536):
        # split input in N files
        # There is a split function under Unix but (1) not under windows
        # and (2) split a gzip into N chunks or n lines will split the
        # reads in the middle. So, we want to unzip, select N lines (or chunks)
        # and zip each chunk.
        self._check_multiple(N)
        N_chunk, remainder = divmod(self.n_lines, N)
        if remainder > 0:
            N_chunk += 1
        # let prepare some data first. Let us build the filenames once for all
        outputs = []
        for i in range(0, N_chunk):
            # 1-based line bounds encoded in the output filenames
            lb = (i) * N + 1
            ub = (i + 1) * N
            if ub > self.n_lines:
                ub = self.n_lines
            if self.filename.endswith(".gz"):
                input_filename = self.filename.split(".gz")[0]
                output_filename = input_filename
            else:  # pragma: no cover
                input_filename = self.filename
                output_filename = self.filename
                output_filename.split(".", -1)
            left, right = input_filename.rsplit(".", 1)
            output_filename = left + "_%s_%s." % (lb, ub) + right
            outputs.append(output_filename)
        # 16 + MAX_WBITS: accept the gzip header
        d = zlib.decompressobj(16 + zlib.MAX_WBITS)
        with open(self.filename, "rb") as fin:
            # init buffer
            buf = fin.read(CHUNKSIZE)
            count = 0
            # open an output file handler
            current_file_counter = 0
            fout = open(outputs[0], "wb")
            while buf:
                outstr = d.decompress(buf)
                count += outstr.count(b"\n")
                if count > N:
                    # if too many lines were read, fill the current file
                    # and keep remaining data (skip the reading of new
                    # data for now)
                    missing = count - N
                    outstr = outstr.strip().split(b"\n")
                    NN = len(outstr)
                    # we need to split the buffer into the part to save
                    # in this file and the part to save in the next file later
                    # on (remaining)
                    # Note that there is no '\n' added here because we do not
                    # read lines that we may end up in the middle of a line
                    remaining = b"\n".join(outstr[NN - missing - 1 :])
                    # whereas here, we are at the end of a line
                    outstr = b"\n".join(outstr[0 : NN - missing - 1]) + b"\n"
                    # write and close that file
                    fout.write(outstr)
                    fout.close()
                    # and open the next one where we can already save the end of
                    # the buffer
                    current_file_counter += 1
                    fout = open(outputs[current_file_counter], "wb")
                    fout.write(remaining)
                    # we need to keep track of what has be written
                    count = remaining.count(b"\n")
                    # and finally we can now read a new chunk of data
                    buf = fin.read(CHUNKSIZE)
                else:
                    fout.write(outstr)
                    buf = fin.read(CHUNKSIZE)
        # optionally recompress every chunk
        if gzip is True:
            for output in outputs:
                self._gzip(output)
            outputs = [x + ".gz" for x in outputs]
        return outputs
# def _split_chunks(self, N=10):
# # split per chunks of size N
# pass
def _check_multiple(self, N, multiple=4):
if divmod(N, multiple)[1] != 0:
msg = "split_lines method expects a multiple of %s." % multiple
raise ValueError(msg)
# This could be part of easydev or other software
# we could also use a unix command but won't work on other platforms
    def _split_lines(self, N, gzip=True):
        # split input in N files
        # We will name them with reads number that is
        # filename.fastq gives for example:
        # --> filename_1_100000.fastq
        # --> filename_100001_151234.fastq
        self._check_multiple(N)
        assert type(N) == int
        # nothing to split when one chunk would hold everything
        if N >= self.n_lines:
            print("Nothing to do. Choose a lower N value")
            return
        outputs = []
        N_chunk, remainder = divmod(self.n_lines, N)
        with open(self.filename) as fin:
            for i in range(0, N_chunk):
                # 1-based line bounds encoded in the output filename
                lb = (i) * N + 1
                ub = (i + 1) * N
                output_filename = self.filename
                output_filename.split(".", -1)
                left, right = self.filename.rsplit(".", 1)
                output_filename = left + "_%s_%s." % (lb, ub) + right
                outputs.append(output_filename)
                # islice consumes exactly N lines from the shared handle
                with open(output_filename, "w") as fout:
                    fout.writelines(islice(fin, N))
            # last chunk is dealt with outside the loop
            lb = ub + 1
            ub = self.n_lines
            output_filename = left + "_%s_%s." % (lb, ub) + right
            if remainder != 0:
                outputs.append(output_filename)
                with open(output_filename, "w") as fout:
                    fout.writelines(islice(fin, remainder))
        # optionally compress every chunk
        if gzip is True:
            for output in outputs:
                self._gzip(output)
            outputs = [x + ".gz" for x in outputs]
        return outputs
    def split_chunks(self, N=10):  # pragma: no cover
        """Not implemented"""
        assert N <= 100, "you cannot split a file into more than 100 chunks"
        # split per chunks of size N
        # NOTE(review): the command string is built but never executed —
        # this method is a placeholder
        cmd = "split --number %s %s -d"
"""def random(self, N=10000, output_filename="test.fastq",
bp=50, quality=40):
# a completely random fastq
from .phred import quality
with open(output_filename, "wb") as fh:
count = 1
template = "@Insilico\n"
template += "%(sequence)\n"
template += "+\n"
template += "%s(quality)\n"
fh.writelines(template % {
'sequence': "".join(["ACGT"[random.randint(0,3)] for this in range(bp)]),
'quality': "".join()})
# quality could be q function for a distribution
"""
    def joining(self, pattern, output_filename):  # pragma: no cover
        """not implemented
        zcat Block*.fastq.gz | gzip > combined.fastq.gz
        """
        # placeholder: concatenation of gzipped FASTQ files is not implemented
        raise NotImplementedError
    def __iter__(self):
        # the instance is its own iterator (see __next__/next)
        return self
def __exit__(self, type, value, traceback): # pragma: no cover
try:
self._fileobj.close()
except AttributeError:
pass
finally:
self._fileobj.close()
    def __enter__(self):
        # always open in binary mode; wrap in GzipFile for .gz inputs so
        # the rest of the class can read transparently
        fh = open(self.filename, "rb")
        if self.filename.endswith(".gz"):
            self._fileobj = gzip.GzipFile(fileobj=fh)
        else:
            self._fileobj = fh
        return self
    def __next__(self):  # python 3
        # delegate to the python 2 style next()
        return self.next()
def next(self): # python 2
# reads 4 lines
d = {"quality": None, "sequence": None, "quality": None}
try:
"""data = islice(self._fileobj, 4)
d['identifier'] = next(data).strip()
d['sequence'] = next(data).strip()
skip = next(data)
d['quality'] = next(data).strip()
"""
# 15% faster than islice + next
d["identifier"] = self._fileobj.readline().strip()
d["sequence"] = self._fileobj.readline().strip()
temp = self._fileobj.readline()
d["quality"] = self._fileobj.readline().strip()
# can be faster but slower on average
"""d['identifier'] = self._fileobj.readlines(1)[0].strip()
d['sequence'] = self._fileobj.readlines(1)[0].strip()
self._fileobj.readlines(1)
d['quality'] = self._fileobj.readlines(1)[0].strip()
"""
# Somehow the readlines still return "" even if the end of file is
# reached
if temp == b"":
raise StopIteration
except KeyboardInterrupt: # pragma: no cover
# THis should allow developers to break an function that iterates
# through the read to run forever
self._fileobj.close()
self.__enter__()
except:
self.rewind()
raise StopIteration
return d
    def __getitem__(self, index):
        # NOTE(review): always returns the constant 1 regardless of *index*;
        # looks like a placeholder — confirm whether random access was ever
        # intended before relying on it.
        return 1
def to_fasta(self, output_filename="test.fasta"):
    """Convert the FastQ records into a FastA file.

    Slow but works for now in pure python with input compressed data.

    :param str output_filename: destination FastA file
    """
    with open(output_filename, "w") as out:
        for record in self:
            header = record["identifier"].decode()
            sequence = record["sequence"].decode()
            out.write("{}\n{}\n".format(header, sequence))
    return
def filter(self, identifiers_list=None, min_bp=None, max_bp=None, progressbar=True, output_filename="filtered.fastq"):
    """Save reads in a new file if they are not in *identifiers_list*.

    :param list identifiers_list: identifiers to exclude from the output;
        by default nothing is excluded
    :param int min_bp: ignore reads with length shorter than min_bp
    :param int max_bp: ignore reads with length above max_bp
    :param bool progressbar: animate a progress bar while scanning
    :param str output_filename: destination file (gzipped afterwards if the
        name ends in ``.gz``)
    """
    # BUG FIX: the default was a mutable `[]` (shared across calls);
    # use None as sentinel instead. Behaviour is unchanged.
    if identifiers_list is None:
        identifiers_list = []
    # 7 seconds without identifiers to scan the file
    # on a 750000 reads
    if min_bp is None:
        min_bp = 0
    if max_bp is None:
        max_bp = 1e9
    # make sure we are at the beginning
    self.rewind()
    output_filename, tozip = self._istozip(output_filename)
    with open(output_filename, "w") as fout:
        pb = Progress(self.n_reads)
        buf = ""
        filtered = 0
        saved = 0
        for count, lines in enumerate(grouper(self._fileobj)):
            # split the header once and reuse it (the original split the
            # same line twice)
            identifier = lines[0].split()[0]
            if identifier.decode() in identifiers_list:
                filtered += 1
            else:  # pragma: no cover
                N = len(lines[1])
                if N <= max_bp and N >= min_bp:
                    buf += "{}{}+\n{}".format(
                        lines[0].decode("utf-8"), lines[1].decode("utf-8"), lines[3].decode("utf-8")
                    )
                    saved += 1
                else:
                    filtered += 1
            if count % 100000 == 0:
                # flush regularly to keep the in-memory buffer bounded
                fout.write(buf)
                buf = ""
                if progressbar is True:
                    pb.animate(count + 1)
        fout.write(buf)
    if filtered < len(identifiers_list):  # pragma: no cover
        print("\nWARNING: not all identifiers were found in the fastq file to be filtered.")
    logger.info("\n{} reads were filtered out and {} saved in {}".format(filtered, saved, output_filename))
    if tozip is True:  # pragma: no cover
        logger.info("Compressing file")
        self._gzip(output_filename)
def to_kmer_content(self, k=7):
    """Return a Series with kmer count across all reads

    :param int k: (default to 7-mers)
    :return: Pandas Series with index as kmer and values as count.

    Takes about 30 seconds on a million reads.
    """
    # Counter is slow if we apply it on each read.
    # .count is slow as well
    from sequana.kmer import get_kmer

    counter = Counter()
    pb = Progress(len(self))
    buffer_ = []
    for i, this in enumerate(self):
        buffer_.extend(list(get_kmer(this["sequence"], k)))
        # Feeding the Counter in ~100k-kmer batches is much faster than
        # updating it once per read.
        if len(buffer_) > 100000:  # pragma: no cover
            counter += Counter(buffer_)
            buffer_ = []
        pb.animate(i)
    # flush whatever is left in the batch buffer
    counter += Counter(buffer_)
    ts = pd.Series(counter)
    ts.sort_values(inplace=True, ascending=False)
    return ts
def to_krona(self, k=7, output_filename="fastq.krona"):
    """Save Krona file with ACGT content within all k-mers

    :param int k: (default to 7-mers)

    Save results in file, which can then be translated into a HTML file
    using::

        ktImportText fastq.krona
        open text.krona.html
    """
    counts = self.to_kmer_content(k=k)
    with open(output_filename, "w") as out:
        for kmer, occurrences in counts.items():
            # one tab-separated letter per column, prefixed by the count
            columns = "\t".join(kmer.decode())
            out.write("%s\t" % occurrences + columns + "\n")
def stats(self):
    """Return basic read-length statistics.

    :return: dict with ``mean_read_length``, ``N`` (number of reads) and
        ``sum_read_length`` keys.
    """
    self.rewind()
    lengths = [len(entry["sequence"]) for entry in self]
    total = sum(lengths)
    count = float(len(lengths))
    return {
        "mean_read_length": total / count,
        "N": int(count),
        "sum_read_length": total,
    }
def __eq__(self, other):
    # Identity short-circuit: an object always equals itself.
    if id(other) == id(self):
        return True
    # Rewind both files, then compare read by read.
    self.rewind()
    other.rewind()
    # NOTE(review): if *other* contains more reads than *self*, the extra
    # reads are never inspected and the objects still compare equal —
    # confirm whether that asymmetry is intended.
    for this in self:
        if this != other.next():
            return False
    return True
# a simple decorator to check whether the data was computed or not.
# If not, compute it
def run_info(f):
    """Decorator: lazily compute the QC data before calling *f*.

    If the instance does not yet have a ``gc_content`` attribute, its
    ``_get_info()`` method is invoked first to populate all statistics.
    """
    @wraps(f)
    def wrapper(*args, **kargs):
        # args[0] is the self of the method
        try:
            args[0].gc_content
        except AttributeError:
            # BUG FIX: was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit and mask unrelated errors.
            args[0]._get_info()
        return f(*args, **kargs)

    return wrapper
class FastQC(object):
    """Simple QC diagnostic

    Similarly to some of the plots of FastQC tools, we scan the
    FastQ and generates some diagnostic plots. The interest
    is that we'll be able to create more advanced plots later on.

    Here is an example of the boxplot quality across all bases:

    .. plot::
        :include-source:

        from sequana import sequana_data
        from sequana import FastQC
        filename = sequana_data("test.fastq", "testing")
        qc = FastQC(filename)
        qc.boxplot_quality()

    .. warning:: some plots will work for Illumina reads only right now

    .. note:: Although all reads are parsed (e.g. to count the number of
        nucleotides), some information uses a limited number of reads (e.g.
        qualities), which is set to 500,000 by default.
    """

    def __init__(self, filename, max_sample=500000, verbose=True, skip_nrows=0):
        """.. rubric:: constructor

        :param filename: path of the FastQ file to analyse
        :param int max_sample: Large files will not fit in memory. We therefore
            restrict the numbers of reads to be used for some of the statistics
            to 500,000.  This also reduces the amount of time required to get a
            good feeling of the data quality. The entire input file is
            parsed though. This is required for instance to get the number of
            nucleotides.
        :param bool verbose: animate a progress bar while scanning
        :param int skip_nrows: number of leading reads to ignore
        """
        self.verbose = verbose
        self.filename = filename
        # Later we will use pysam to scan the fastq because
        # it iterate quickly while providing the quality already converted
        # However, the FastQ implementation in this module is faster at
        # computing the length by a factor 3
        self.fastq = FastQ(filename)
        self.N = len(self.fastq)
        # Use only max_sample in some of the computation
        self.max_sample = min(max_sample, self.N)
        # we may want to skip first rows
        self.skip_nrows = skip_nrows
        self.summary = {}
        self.fontsize = 16

    def _get_info(self):
        """Populates the data structures for plotting.

        Scans up to ``max_sample`` reads with pysam and accumulates read
        lengths, per-read GC content, qualities and nucleotide counts.
        """
        stats = {"A": 0, "C": 0, "G": 0, "T": 0, "N": 0}
        stats["qualities"] = []
        stats["mean_qualities"] = []
        stats["mean_length"] = 0
        stats["sequences"] = []
        # NOTE(review): minimum/maximum locals below are never updated or
        # read; the real min/max are computed from self.lengths at the end.
        minimum = 1e6
        maximum = 0
        # FIXME this self.N takes time in the constructor
        # do we need it ?
        self.lengths = []
        self.gc_list = []
        total_length = 0
        # histogram of every individual quality value seen
        C = defaultdict(int)
        if self.verbose:
            pb = Progress(self.N)
        sequences = []
        mean_qualities = []
        qualities = []
        ff = pysam.FastxFile(self.filename)
        for i, record in enumerate(ff):
            if i < self.skip_nrows:
                continue
            if i > self.max_sample + self.skip_nrows:
                break
            N = len(record.sequence)
            if N == 0:
                raise ValueError("Read {} has a length equal to zero. Clean your FastQ files".format(i))
            self.lengths.append(N)
            # we cannot store all qualities and sequences reads, so
            # just max_sample are stored:
            quality = record.get_quality_array()
            mean_qualities.append(sum(quality) / N)
            qualities.append(quality)
            sequences.append(record.sequence)
            # store count of all qualities
            for k in quality:
                C[k] += 1
            GG = record.sequence.count("G")
            CC = record.sequence.count("C")
            self.gc_list.append((GG + CC) / float(N) * 100)
            # not using a counter, or loop speed up the code
            stats["A"] += record.sequence.count("A")
            stats["C"] += CC
            stats["G"] += GG
            stats["T"] += record.sequence.count("T")
            stats["N"] += record.sequence.count("N")
            total_length += len(record.sequence)
            if self.verbose:
                pb.animate(i + 1)
        # other data
        self.qualities = qualities
        self.mean_qualities = mean_qualities
        self.lengths = np.array(self.lengths)
        self.minimum = int(self.lengths.min())
        self.maximum = int(self.lengths.max())
        self.sequences = sequences
        self.gc_content = np.mean(self.gc_list)
        # NOTE(review): mean_length divides by self.N (all reads) while
        # total_length only covers the sampled reads — confirm intent when
        # max_sample < N.
        stats["mean_length"] = total_length / float(self.N)
        stats["total_bp"] = stats["A"] + stats["C"] + stats["G"] + stats["T"] + stats["N"]
        stats["mean_quality"] = sum([k * v for k, v in C.items()]) / stats["total_bp"]
        self.stats = stats

    def _get_qualities(self):
        """Return the quality arrays of up to ``max_sample`` reads."""
        logger.info("Extracting qualities")
        qualities = []
        ff = pysam.FastxFile(self.filename)
        for i, rec in enumerate(ff):
            if i < self.skip_nrows:
                continue
            if i > self.max_sample + self.skip_nrows:
                break
            qualities.append(rec.get_quality_array())
        return qualities

    def boxplot_quality(self, hold=False, ax=None):
        """Boxplot quality

        Same plots as in FastQC that is average quality for all bases.
        In addition a 1 sigma error enveloppe is shown (yellow).

        Background separate zone of good, average and bad quality (arbitrary).
        """
        from sequana.viz import Boxplot

        qualities = self._get_qualities()
        df = pd.DataFrame(qualities)
        bx = Boxplot(df)
        try:
            bx.plot(ax=ax)
        except:  # pragma: no cover
            # fall back when the provided axis cannot be used
            bx.plot()

    @run_info
    def histogram_sequence_lengths(self, logy=True):
        """Histogram sequence lengths

        .. plot::
            :include-source:

            from sequana import sequana_data
            from sequana import FastQC
            filename = sequana_data("test.fastq", "testing")
            qc = FastQC(filename)
            qc.histogram_sequence_lengths()
        """
        data = [len(x) for x in self.sequences]
        bary, barx = np.histogram(data, bins=range(max(data) + 1))
        # get rid of zeros to avoid warnings
        bx = [x for x, y in zip(barx, bary) if y != 0]
        by = [y for x, y in zip(barx, bary) if y != 0]
        if logy:
            pylab.bar(bx, pylab.log10(by))
        else:
            pylab.bar(bx, by)
        pylab.xlim([1, max(data) + 1])
        pylab.grid(True)
        pylab.xlabel("position (bp)", fontsize=self.fontsize)
        pylab.ylabel("Count (log scale)", fontsize=self.fontsize)

    @run_info
    def histogram_gc_content(self):
        """Plot histogram of GC content

        .. plot::
            :include-source:

            from sequana import sequana_data
            from sequana import FastQC
            filename = sequana_data("test.fastq", "testing")
            qc = FastQC(filename)
            qc.histogram_gc_content()
        """
        pylab.hist(self.gc_list, bins=range(0, 100))
        pylab.grid()
        pylab.title("GC content distribution (per sequence)")
        pylab.xlabel(r"Mean GC content (%)", fontsize=self.fontsize)
        pylab.xlim([0, 100])

    @run_info
    def get_stats(self):
        """Return a one-row DataFrame summarising the scanned reads."""
        # FIXME the information should all be computed in _get_info
        # !!! sequences is limited to 500,000 if max_sample set to 500,000
        # full stats must be computed in run_info() method
        # so do not use .sequences here
        stats = self.stats.copy()
        stats["GC content"] = self.gc_content
        stats["n_reads"] = self.N
        stats["total bases"] = self.stats["total_bp"]
        stats["mean quality"] = np.mean(self.mean_qualities)
        stats["average read length"] = self.stats["mean_length"]
        stats["min read length"] = self.minimum
        stats["max read length"] = self.maximum
        # use DataFrame instead of Series to mix types (int/float)
        ts = pd.DataFrame([stats])
        cols = ["n_reads", "A", "C", "G", "T", "N", "total bases"]
        ts[cols] = ts[cols].astype(int)
        ts = ts[cols + ["GC content", "average read length", "mean quality"]]
        return ts

    @run_info
    def get_actg_content(self):
        """Return a DataFrame with per-position A/C/G/T(/N) frequencies."""
        # what is the longest string ?
        lengths = [len(x) for x in self.sequences]
        max_length = max(lengths)
        # count ACGTN in each columns for all sequences
        Nseq = len(self.sequences)
        data = []
        for pos in range(max_length):
            # we add empty strings to have all sequences with same lengths
            data.append(
                Counter([(self.sequences[i] + " " * (max_length - len(self.sequences[i])))[pos] for i in range(Nseq)])
            )
        # remove the empty strings to normalise the data
        df = pd.DataFrame.from_records(data)
        if " " in df.columns:
            df.drop(" ", axis=1, inplace=True)
        df.fillna(0, inplace=True)
        # normalise each row so columns sum to 1
        df = df.divide(df.sum(axis=1), axis=0)
        if "N" in df.columns:
            df = df[["A", "C", "G", "T", "N"]]
        else:
            df = df[["A", "C", "G", "T"]]
        return df

    def plot_acgt_content(self, stacked=False):
        """Plot histogram of GC content

        .. plot::
            :include-source:

            from sequana import sequana_data
            from sequana import FastQC
            filename = sequana_data("test.fastq", "testing")
            qc = FastQC(filename)
            qc.plot_acgt_content()
        """
        df = self.get_actg_content()
        if stacked is True:
            df.plot.bar(stacked=True)
        else:
            df.plot()
        pylab.grid(True)
        pylab.xlabel("position (bp)", fontsize=self.fontsize)
        pylab.ylabel("percent", fontsize=self.fontsize)
|
sequana/sequana
|
sequana/fastq.py
|
Python
|
bsd-3-clause
| 39,267
|
[
"pysam"
] |
d715a9f9745a07828d45c4e39949906d5834569a2911bbf4aa95865f67c878ac
|
# $Id$
#
# Copyright (C) 2000-2008 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" code for dealing with composite models
For a model to be useable here, it should support the following API:
- _ClassifyExample(example)_, returns a classification
Other compatibility notes:
1) To use _Composite.Grow_ there must be some kind of builder
functionality which returns a 2-tuple containing (model,percent accuracy).
2) The models should be pickleable
3) It is preferable that the models support the __cmp__ method, so that
the membership tests used to ensure model uniqueness work correctly.
"""
from __future__ import print_function
import numpy
from rdkit.six.moves import cPickle
from rdkit.ML.Data import DataUtils
class Composite(object):
    """a composite model

    **Notes**

    - adding a model which is already present just results in its count
      field being incremented and the errors being averaged.

    - typical usage:

      1) grow the composite with AddModel until happy with it

      2) call AverageErrors to calculate the average error values

      3) call SortModels to put things in order by either error or count

    - Composites can support individual models requiring either quantized or
      nonquantized data.  This is done by keeping a set of quantization bounds
      (_QuantBounds_) in the composite and quantizing data passed in when required.
      Quantization bounds can be set and interrogated using the
      _Get/SetQuantBounds()_ methods.  When models are added to the composite,
      it can be indicated whether or not they require quantization.

    - Composites are also capable of extracting relevant variables from longer lists.
      This is accessible using _SetDescriptorNames()_ to register the descriptors about
      which the composite cares and _SetInputOrder()_ to tell the composite what the
      ordering of input vectors will be.  **Note** there is a limitation on this: each
      model needs to take the same set of descriptors as inputs.  This could be changed.
    """

    def __init__(self):
        # the member models; the following three lists are parallel to it
        self.modelList = []
        # accumulated (later averaged) error of each model
        self.errList = []
        # how many times each model has been added
        self.countList = []
        # votes collected during the most recent classification
        self.modelVotes = []
        self.quantBounds = None
        self.nPossibleVals = None
        # per-model flags: does model i require quantized inputs?
        self.quantizationRequirements = []
        self._descNames = []
        self._mapOrder = None
        self.activityQuant = []

    def SetModelFilterData(self, modelFilterFrac=0.0, modelFilterVal=0.0):
        """ sets the fraction and threshold value used by _Grow()_ to filter
        the training data supplied to each member model """
        self._modelFilterFrac = modelFilterFrac
        self._modelFilterVal = modelFilterVal

    def SetDescriptorNames(self, names):
        """ registers the names of the descriptors this composite uses

        **Arguments**

          - names: a list of descriptor names (strings).

        **NOTE**

            the _names_ list is not
            copied, so if you modify it later, the composite itself will also be modified.
        """
        self._descNames = names

    def GetDescriptorNames(self):
        """ returns the names of the descriptors this composite uses """
        return self._descNames

    def SetQuantBounds(self, qBounds, nPossible=None):
        """ sets the quantization bounds that the composite will use

        **Arguments**

          - qBounds:  a list of quantization bounds, each quantbound is a
               list of boundaries

          - nPossible:  a list of integers indicating how many possible values
               each descriptor can take on.

        **NOTE**

           - if the two lists are of different lengths, this will assert out

           - neither list is copied, so if you modify it later, the composite
             itself will also be modified.
        """
        if nPossible is not None:
            assert len(qBounds) == len(nPossible), 'qBounds/nPossible mismatch'
        self.quantBounds = qBounds
        self.nPossibleVals = nPossible

    def GetQuantBounds(self):
        """ returns the quantization bounds

        **Returns**

          a 2-tuple consisting of:

            1) the list of quantization bounds

            2) the nPossibleVals list
        """
        return self.quantBounds, self.nPossibleVals

    def GetActivityQuantBounds(self):
        # backwards compatibility: composites unpickled from older versions
        # may lack the attribute entirely
        if not hasattr(self, 'activityQuant'):
            self.activityQuant = []
        return self.activityQuant

    def SetActivityQuantBounds(self, bounds):
        # the bounds list is not copied (same caveat as SetQuantBounds)
        self.activityQuant = bounds

    def QuantizeActivity(self, example, activityQuant=None, actCol=-1):
        """ quantizes the activity value (column _actCol_) of _example_
        using _activityQuant_ (the composite's own bounds by default);
        returns a copy when quantization is applied """
        if activityQuant is None:
            activityQuant = self.activityQuant
        if activityQuant:
            example = example[:]
            act = example[actCol]
            for box in range(len(activityQuant)):
                if act < activityQuant[box]:
                    act = box
                    break
            else:
                # larger than every bound: the top bin
                act = box + 1
            example[actCol] = act
        return example

    def QuantizeExample(self, example, quantBounds=None):
        """ quantizes an example

        **Arguments**

          - example: a data point (list, tuple or numpy array)

          - quantBounds:  a list of quantization bounds, each quantbound is a
               list of boundaries.  If this argument is not provided, the composite
               will use its own quantBounds

        **Returns**

          the quantized example as a list

        **Notes**

          - If _example_ is different in length from _quantBounds_, this will
            assert out.

          - This is primarily intended for internal use

        """
        if quantBounds is None:
            quantBounds = self.quantBounds
        assert len(example) == len(quantBounds), 'example/quantBounds mismatch'
        quantExample = [None] * len(example)
        for i in range(len(quantBounds)):
            bounds = quantBounds[i]
            p = example[i]
            if len(bounds):
                for box in range(len(bounds)):
                    if p < bounds[box]:
                        p = box
                        break
                else:
                    p = box + 1
            else:
                # no bounds for this column: pass through (cast to int,
                # except for column 0 which holds the label)
                if i != 0:
                    p = int(p)
            quantExample[i] = p
        return quantExample

    def MakeHistogram(self):
        """ creates a histogram of error/count pairs

        **Returns**

          the histogram as a series of (error, count) 2-tuples
        """
        nExamples = len(self.modelList)
        histo = []
        i = 1
        lastErr = self.errList[0]
        countHere = self.countList[0]
        eps = 0.001
        while i < nExamples:
            if self.errList[i] - lastErr > eps:
                histo.append((lastErr, countHere))
                lastErr = self.errList[i]
                countHere = self.countList[i]
            else:
                countHere = countHere + self.countList[i]
            i = i + 1
        # NOTE(review): the final (lastErr, countHere) pair is never
        # appended to histo — confirm whether that is intentional.
        return histo

    def CollectVotes(self, example, quantExample, appendExample=0, onlyModels=None):
        """ collects votes across every member of the composite for the given example

        **Arguments**

          - example: the example to be voted upon

          - quantExample: the quantized form of the example

          - appendExample: toggles saving the example on the models

          - onlyModels: if provided, this should be a sequence of model
            indices. Only the specified models will be used in the
            prediction.

        **Returns**

          a list with a vote from each member
        """
        if not onlyModels:
            onlyModels = list(range(len(self)))
        votes = [-1] * len(self)
        for i in onlyModels:
            # feed each model the representation (raw or quantized) it needs
            if self.quantizationRequirements[i]:
                votes[i] = int(
                    round(self.modelList[i].ClassifyExample(quantExample, appendExamples=appendExample)))
            else:
                votes[i] = int(
                    round(self.modelList[i].ClassifyExample(example, appendExamples=appendExample)))
        return votes

    def ClassifyExample(self, example, threshold=0, appendExample=0, onlyModels=None):
        """ classifies the given example using the entire composite

        **Arguments**

          - example: the data to be classified

          - threshold:  if this is a number greater than zero, then a
            classification will only be returned if the confidence is
            above _threshold_.  Anything lower is returned as -1.

          - appendExample: toggles saving the example on the models

          - onlyModels: if provided, this should be a sequence of model
            indices. Only the specified models will be used in the
            prediction.

        **Returns**

          a (result,confidence) tuple

        **FIX:**
          statistics sucks... I'm not seeing an obvious way to get
          the confidence intervals.  For that matter, I'm not seeing
          an unobvious way.

          For now, this is just treated as a voting problem with the confidence
          measure being the percent of models which voted for the winning result.
        """
        if self._mapOrder is not None:
            example = self._RemapInput(example)
        if self.GetActivityQuantBounds():
            example = self.QuantizeActivity(example)
        # only quantize once, and only when at least one model needs it
        if self.quantBounds is not None and 1 in self.quantizationRequirements:
            quantExample = self.QuantizeExample(example, self.quantBounds)
        else:
            quantExample = []
        if not onlyModels:
            onlyModels = list(range(len(self)))
        self.modelVotes = self.CollectVotes(example, quantExample, appendExample=appendExample,
                                            onlyModels=onlyModels)
        # weighted vote: each model contributes its count
        votes = [0] * self.nPossibleVals[-1]
        for i in onlyModels:
            res = self.modelVotes[i]
            votes[res] = votes[res] + self.countList[i]
        totVotes = sum(votes)
        res = numpy.argmax(votes)
        conf = float(votes[res]) / float(totVotes)
        if conf > threshold:
            return res, conf
        else:
            return -1, conf

    def GetVoteDetails(self):
        """ returns the votes from the last classification

          This will be _None_ if nothing has yet be classified
        """
        return self.modelVotes

    def _RemapInput(self, inputVect):
        """ remaps the input so that it matches the expected internal ordering

        **Arguments**

          - inputVect: the input to be reordered

        **Returns**

          - a list with the reordered (and possible shorter) data

        **Note**

          - you must call _SetDescriptorNames()_ and _SetInputOrder()_ for this to work

          - this is primarily intended for internal use

        """
        order = self._mapOrder
        if order is None:
            return inputVect
        remappedInput = [None] * len(order)
        for i in range(len(order) - 1):
            remappedInput[i] = inputVect[order[i]]
        # order[-1] == -1 marks "no activity column in the input"
        if order[-1] == -1:
            remappedInput[-1] = 0
        else:
            remappedInput[-1] = inputVect[order[-1]]
        return remappedInput

    def GetInputOrder(self):
        """ returns the input order (used in remapping inputs) """
        return self._mapOrder

    def SetInputOrder(self, colNames):
        """ sets the input order

        **Arguments**

          - colNames: a list of the names of the data columns that will be passed in

        **Note**

          - you must call _SetDescriptorNames()_ first for this to work

          - if the local descriptor names do not appear in _colNames_, this will
            raise an _IndexError_ exception.
        """
        if type(colNames) != list:
            colNames = list(colNames)
        descs = [x.upper() for x in self.GetDescriptorNames()]
        self._mapOrder = [None] * len(descs)
        colNames = [x.upper() for x in colNames]
        # FIX: I believe that we're safe assuming that field 0
        #  is always the label, and therefore safe to ignore errors,
        #  but this may not be the case
        try:
            self._mapOrder[0] = colNames.index(descs[0])
        except ValueError:
            self._mapOrder[0] = 0
        for i in range(1, len(descs) - 1):
            try:
                self._mapOrder[i] = colNames.index(descs[i])
            except ValueError:
                raise ValueError('cannot find descriptor name: %s in set %s' %
                                 (repr(descs[i]), repr(colNames)))
        try:
            self._mapOrder[-1] = colNames.index(descs[-1])
        except ValueError:
            # ok, there's no obvious match for the final column (activity)
            #  We'll take the last one:
            # self._mapOrder[-1] = len(descs)-1
            self._mapOrder[-1] = -1

    def Grow(self, examples, attrs, nPossibleVals, buildDriver, pruner=None, nTries=10, pruneIt=0,
             needsQuantization=1, progressCallback=None, **buildArgs):
        """ Grows the composite

        **Arguments**

         - examples: a list of examples to be used in training

         - attrs: a list of the variables to be used in training

         - nPossibleVals: this is used to provide a list of the number
            of possible values for each variable.  It is used if the
            local quantBounds have not been set (for example for when you
            are working with data which is already quantized).

         - buildDriver: the function to call to build the new models

         - pruner: a function used to "prune" (reduce the complexity of)
            the resulting model.

         - nTries: the number of new models to add

         - pruneIt: toggles whether or not pruning is done

         - needsQuantization: used to indicate whether or not this type of model
            requires quantized data

         - **buildArgs: all other keyword args are passed to _buildDriver_

        **Note**

          - new models are *added* to the existing ones

        """
        silent = buildArgs.get('silent', 0)
        buildArgs['silent'] = 1
        buildArgs['calcTotalError'] = 1
        if self._mapOrder is not None:
            # NOTE(review): under Python 3 `map` returns an iterator but
            # `examples` is indexed below — confirm this path is exercised
            # only with list semantics.
            examples = map(self._RemapInput, examples)
        if self.GetActivityQuantBounds():
            for i in range(len(examples)):
                examples[i] = self.QuantizeActivity(examples[i])
                nPossibleVals[-1] = len(self.GetActivityQuantBounds()) + 1
        if self.nPossibleVals is None:
            self.nPossibleVals = nPossibleVals[:]
        if needsQuantization:
            trainExamples = [None] * len(examples)
            nPossibleVals = self.nPossibleVals
            for i in range(len(examples)):
                trainExamples[i] = self.QuantizeExample(examples[i], self.quantBounds)
        else:
            trainExamples = examples
        for i in range(nTries):
            trainSet = None
            if (hasattr(self, '_modelFilterFrac')) and (self._modelFilterFrac != 0):
                # take a filtered subset of the training data for this model
                trainIdx, _ = DataUtils.FilterData(trainExamples, self._modelFilterVal,
                                                   self._modelFilterFrac, -1, indicesOnly=1)
                trainSet = [trainExamples[x] for x in trainIdx]
            else:
                trainSet = trainExamples
            # print("Training model %i with %i out of %i examples"%(i, len(trainSet), len(trainExamples)))
            model, frac = buildDriver(*(trainSet, attrs, nPossibleVals), **buildArgs)
            if pruneIt:
                model, frac2 = pruner(model, model.GetTrainingExamples(), model.GetTestExamples(),
                                      minimizeTestErrorOnly=0)
                frac = frac2
            if (hasattr(self, '_modelFilterFrac') and self._modelFilterFrac != 0 and
                    hasattr(model, '_trainIndices')):
                # correct the model's training indices:
                trainIndices = [trainIdx[x] for x in model._trainIndices]
                model._trainIndices = trainIndices
            self.AddModel(model, frac, needsQuantization)
            if not silent and (nTries < 10 or i % (nTries / 10) == 0):
                print('Cycle: % 4d' % (i))
            if progressCallback is not None:
                progressCallback(i)

    def ClearModelExamples(self):
        # drop the stored examples of every member (models without
        # ClearExamples support are silently skipped)
        for i in range(len(self)):
            m = self.GetModel(i)
            try:
                m.ClearExamples()
            except AttributeError:
                pass

    def Pickle(self, fileName='foo.pkl', saveExamples=0):
        """ Writes this composite off to a file so that it can be easily loaded later

        **Arguments**

          - fileName: the name of the file to be written

          - saveExamples: if this is zero, the individual models will have
            their stored examples cleared.

        """
        if not saveExamples:
            self.ClearModelExamples()
        pFile = open(fileName, 'wb+')
        cPickle.dump(self, pFile, 1)
        pFile.close()

    def AddModel(self, model, error, needsQuantization=1):
        """ Adds a model to the composite

        **Arguments**

          - model: the model to be added

          - error: the model's error

          - needsQuantization: a toggle to indicate whether or not this model
            requires quantized inputs

        **NOTE**

            - this can be used as an alternative to _Grow()_ if you already have
              some models constructed

            - the errList is run as an accumulator,
              you probably want to call _AverageErrors_ after finishing the forest

        """
        if model in self.modelList:
            try:
                idx = self.modelList.index(model)
            except ValueError:
                # FIX: we should never get here, but sometimes we do anyway
                self.modelList.append(model)
                self.errList.append(error)
                self.countList.append(1)
                self.quantizationRequirements.append(needsQuantization)
            else:
                # model already present: accumulate its error and count
                self.errList[idx] = self.errList[idx] + error
                self.countList[idx] = self.countList[idx] + 1
        else:
            self.modelList.append(model)
            self.errList.append(error)
            self.countList.append(1)
            self.quantizationRequirements.append(needsQuantization)

    def AverageErrors(self):
        """ convert local summed error to average error """
        self.errList = list(map(lambda x, y: x / y, self.errList, self.countList))

    def SortModels(self, sortOnError=True):
        """ sorts the list of models

        **Arguments**

          sortOnError: toggles sorting on the models' errors rather than their counts

        """
        if sortOnError:
            order = numpy.argsort(self.errList)
        else:
            order = numpy.argsort(self.countList)
        # these elaborate contortions are required because, at the time this
        #  code was written, Numeric arrays didn't unpickle so well...
        # print(order,sortOnError,self.errList,self.countList)
        self.modelList = [self.modelList[x] for x in order]
        self.countList = [self.countList[x] for x in order]
        self.errList = [self.errList[x] for x in order]

    def GetModel(self, i):
        """ returns a particular model """
        return self.modelList[i]

    def SetModel(self, i, val):
        """ replaces a particular model

        **Note**

          This is included for the sake of completeness, but you need to be
          *very* careful when you use it.

        """
        self.modelList[i] = val

    def GetCount(self, i):
        """ returns the count of the _i_th model """
        return self.countList[i]

    def SetCount(self, i, val):
        """ sets the count of the _i_th model """
        self.countList[i] = val

    def GetError(self, i):
        """ returns the error of the _i_th model """
        return self.errList[i]

    def SetError(self, i, val):
        """ sets the error of the _i_th model """
        self.errList[i] = val

    def GetDataTuple(self, i):
        """ returns all relevant data about a particular model

        **Arguments**

          i: an integer indicating which model should be returned

        **Returns**

          a 3-tuple consisting of:

            1) the model

            2) its count

            3) its error

        """
        return (self.modelList[i], self.countList[i], self.errList[i])

    def SetDataTuple(self, i, tup):
        """ sets all relevant data for a particular tree in the forest

        **Arguments**

          - i: an integer indicating which model should be returned

          - tup: a 3-tuple consisting of:

            1) the model

            2) its count

            3) its error

        **Note**

          This is included for the sake of completeness, but you need to be
          *very* careful when you use it.

        """
        self.modelList[i], self.countList[i], self.errList[i] = tup

    def GetAllData(self):
        """ Returns everything we know

        **Returns**

          a 3-tuple consisting of:

            1) our list of models

            2) our list of model counts

            3) our list of model errors

        """
        return (self.modelList, self.countList, self.errList)

    def __len__(self):
        """ allows len(composite) to work """
        return len(self.modelList)

    def __getitem__(self, which):
        """ allows composite[i] to work, returns the data tuple """
        return self.GetDataTuple(which)

    def __str__(self):
        """ returns a string representation of the composite """
        outStr = 'Composite\n'
        for i in range(len(self.modelList)):
            outStr = (outStr + '  Model %4d:  %5d occurances  %%%5.2f average error\n' %
                      (i, self.countList[i], 100. * self.errList[i]))
        return outStr
if __name__ == '__main__':  # pragma: nocover
    # Manual smoke-test / demo code.  The `if 0:` guard keeps it disabled;
    # flip to `if 1:` to exercise Composite and QuantizeExample by hand.
    if 0:
        from rdkit.ML.DecTree import DecTree
        c = Composite()
        n = DecTree.DecTreeNode(None, 'foo')
        c.AddModel(n, 0.5)
        c.AddModel(n, 0.5)
        c.AverageErrors()
        c.SortModels()
        print(c)

        qB = [[], [.5, 1, 1.5]]
        exs = [['foo', 0], ['foo', .4], ['foo', .6], ['foo', 1.1], ['foo', 2.0]]
        print('quantBounds:', qB)
        for ex in exs:
            q = c.QuantizeExample(ex, qB)
            print(ex, q)
    else:
        pass
|
rvianello/rdkit
|
rdkit/ML/Composite/Composite.py
|
Python
|
bsd-3-clause
| 20,637
|
[
"RDKit"
] |
fea1a71d2013021e4a63ca5a94e5addf9822a0ce716d713d016a318d2cef74bb
|
"""Tests for the thumbs module"""
from workbench import scenarios
from workbench.test.selenium_test import SeleniumTest
class ThreeThumbsTest(SeleniumTest):
    """Test the functionalities of the three thumbs test XBlock."""

    def setUp(self):
        """Register the three-thumbs scenario and open the workbench home."""
        super(ThreeThumbsTest, self).setUp()
        scenarios.add_xml_scenario(
            "test_three_thumbs", "three thumbs test",
            """<vertical_demo><thumbs/><thumbs/><thumbs/></vertical_demo>"""
        )
        self.addCleanup(scenarios.remove_scenario, "test_three_thumbs")
        # Suzy opens the browser to visit the workbench
        self.browser.get(self.live_server_url)
        # She knows it's the site by the header
        banner = self.browser.find_element_by_css_selector('h1')
        self.assertEqual(banner.text, 'XBlock scenarios')

    def _open_scenario(self):
        """Follow the scenario link from the workbench home page."""
        self.browser.find_element_by_link_text('three thumbs test').click()

    def _get_thumbs(self):
        """Return the three thumbs sub-blocks inside the vertical container.

        find_element_by_css_selector raises NoSuchElementException when the
        vertical container is missing, which fails the test as intended.
        """
        vertical = self.browser.find_element_by_css_selector(
            'div.student_view > div.xblock > div.vertical')
        return vertical.find_elements_by_css_selector(
            'div.xblock[data-block-type="thumbs"]')

    def test_three_thumbs_initial_state(self):
        """A freshly loaded scenario shows three thumbs blocks, all at zero."""
        # She clicks on the three thumbs at once scenario
        self._open_scenario()
        # The header reflects the XBlock
        banner = self.browser.find_element_by_css_selector('h1')
        self.assertEqual(banner.text, 'XBlock: three thumbs test')
        # She sees that there are 3 sets of thumbs
        blocks = self._get_thumbs()
        self.assertEqual(3, len(blocks))
        # Make sure they all have 0 for upvote and downvote counts
        for block in blocks:
            up = block.find_element_by_css_selector('span.upvote span.count')
            down = block.find_element_by_css_selector('span.downvote span.count')
            self.assertEqual('0', up.text)
            self.assertEqual('0', down.text)

    def test_three_upvoting(self):
        """Upvoting the first block increments only its own up-count."""
        self._open_scenario()
        blocks = self._get_thumbs()
        # Up vote for the first thumb
        blocks[0].find_element_by_css_selector('span.upvote').click()
        # Only the first thumb's upcount should increase
        for block, expected in zip(blocks, ['1', '0', '0']):
            up = block.find_element_by_css_selector('span.upvote span.count')
            self.assertEqual(expected, up.text)
        # Down counts should all still be zero
        for block in blocks:
            down = block.find_element_by_css_selector('span.downvote span.count')
            self.assertEqual('0', down.text)

    def test_three_downvoting(self):
        """Downvoting the first block increments only its own down-count."""
        self._open_scenario()
        blocks = self._get_thumbs()
        # Down vote for the first thumb
        blocks[0].find_element_by_css_selector('span.downvote').click()
        # Only the first thumb's downcount should increase
        for block, expected in zip(blocks, ['1', '0', '0']):
            down = block.find_element_by_css_selector('span.downvote span.count')
            self.assertEqual(expected, down.text)
        # Up counts should all still be zero (the original misleadingly
        # named this local `down_count` while reading the up-count selector)
        for block in blocks:
            up = block.find_element_by_css_selector('span.upvote span.count')
            self.assertEqual('0', up.text)
|
dcadams/xblock-sdk
|
workbench/test/test_thumbs.py
|
Python
|
agpl-3.0
| 4,791
|
[
"VisIt"
] |
9e23d284649ce88bc349f7be16253e3ea40da5f7e834b9f8e1d97fc4b55a0f67
|
#!/usr/bin/env python
# generate figures in Getting Started section of User's Manual
# usage:
# $ python basemapfigs.py FILEROOT [FIELD] [DPI]
# where
# FILEROOT root of NetCDF filename and output .png figures
# FIELD optional: one of {velbase_mag, [velsurf_mag], mask, usurf} (edit script to add more)
# DPI optional: resolution in dots per inch [200]
#
# equivalent usages:
# $ python basemapfigs.py g20km_10ka_hy velsurf_mag 200
# $ python basemapfigs.py g20km_10ka_hy velsurf_mag
# $ python basemapfigs.py g20km_10ka_hy
#
# generate figs like those in Getting Started section of User's Manual:
# $ for FLD in velsurf_mag usurf velbase_mag mask; do ./basemapfigs.py g20km_10ka_hy ${FLD}; done
#
# crop out western Greenland with command like this (uses ImageMagick):
# $ ./basemapfigs.py g20km_10ka_hy velsurf_mag 500
# $ convert -crop 600x800+400+800 +repage g20km_10ka_hy-velsurf_mag.png g20km-detail.png
#
# batch generate figures from a parameter study like this:
# $ for QQ in 0.1 0.5 1.0; do for EE in 1 3 6; do ../basemapfigs.py p10km_q${QQ}_e${EE} velsurf_mag 100; done; done
# $ for QQ in 0.1 0.5 1.0; do for EE in 1 3 6; do convert -crop 274x486+50+6 +repage p10km_q${QQ}_e${EE}-velsurf_mag.png p10km-${QQ}-${EE}-csurf.png; done; done
from mpl_toolkits.basemap import Basemap
try:
from netCDF4 import Dataset as NC
except:
print "netCDF4 is not installed!"
sys.exit(1)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import sys
if len(sys.argv) < 2:
print "ERROR: first argument must be root of filename ..."
sys.exit(1)
rootname = sys.argv[1]
try:
nc = NC(rootname + '.nc', 'r')
except:
print "ERROR: can't read from file %s.nc ..." % rootname
sys.exit(2)
if len(sys.argv) >= 3:
field = sys.argv[2]
else:
field = 'velsurf_mag'
if len(sys.argv) >= 4:
mydpi = float(sys.argv[3])
else:
mydpi = 200
bluemarble = False # if True, use Blue Marble background
if (field == 'velsurf_mag') | (field == 'velbase_mag'):
fill = nc.variables[field]._FillValue
logscale = True
contour100 = True
myvmin = 1.0
myvmax = 6.0e3
ticklist = [2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
elif field == 'surfvelmag':
fill = 0.0
logscale = True
contour100 = True
myvmin = 1.0
myvmax = 6.0e3
ticklist = [2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000]
elif field == 'usurf':
fill = 0.0
logscale = False
contour100 = False
myvmin = 1.0
myvmax = 3500.0
ticklist = [100, 500, 1000, 1500, 2000, 2500, 3000, 3500]
elif field == 'mask':
fill = -1.0
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 4.0
ticklist = [0, 1, 2, 3, 4]
elif field == 'bmelt':
fill = -2.0e+09
logscale = True
contour100 = False
myvmin = 0.9e-4
myvmax = 1.1
ticklist = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif field == 'tillwat':
fill = -2.0e+09
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 2.0
ticklist = [0.0, 0.5, 1.0, 1.5, 2.0]
elif field == 'bwat':
fill = -2.0e+09
logscale = True
contour100 = False
myvmin = 0.9e-4
myvmax = 1.1
ticklist = [0.0001, 0.001, 0.01, 0.1, 1.0]
elif field == 'bwprel':
fill = -2.0e+09
logscale = False
contour100 = False
myvmin = 0.0
myvmax = 1.0
ticklist = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
else:
print 'invalid choice for FIELD option'
sys.exit(3)
# we need to know longitudes and latitudes corresponding to grid
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
if field == 'surfvelmag':
lon = np.squeeze(lon).transpose()
lat = np.squeeze(lat).transpose()
# x and y *in the dataset* are only used to determine plotting domain
# dimensions
if field == 'surfvelmag':
x = nc.variables['x1'][:]
y = nc.variables['y1'][:]
else:
x = nc.variables['x'][:]
y = nc.variables['y'][:]
width = x.max() - x.min()
height = y.max() - y.min()
# load data
if field == 'bwprel':
thkvar = np.squeeze(nc.variables['thk'][:])
myvar = np.squeeze(nc.variables['bwp'][:])
myvar = np.ma.array(myvar, mask=(thkvar == 0.0))
thkvar = np.ma.array(thkvar, mask=(thkvar == 0.0))
myvar = myvar / (910.0 * 9.81 * thkvar)
else:
myvar = np.squeeze(nc.variables[field][:])
# mask out ice free etc.; note 'mask' does not get masked
if (field == 'surfvelmag'):
myvar = myvar.transpose()
thkvar = np.squeeze(nc.variables['thk'][:]).transpose()
myvar = np.ma.array(myvar, mask=(thkvar == 0.0))
elif (field != 'mask'):
maskvar = np.squeeze(nc.variables['mask'][:])
if (field == 'bmelt') | (field == 'bwat'):
myvar[myvar < myvmin] = myvmin
if (field == 'usurf'):
myvar = np.ma.array(myvar, mask=(maskvar == 4))
else:
myvar = np.ma.array(myvar, mask=(maskvar != 2))
m = Basemap(width=1.1 * width, # width in projection coordinates, in meters
height=1.05 * height, # height
resolution='l', # coastline resolution, can be 'l' (low), 'h'
# (high) and 'f' (full)
projection='stere', # stereographic projection
lat_ts=71, # latitude of true scale
lon_0=-41, # longitude of the plotting domain center
lat_0=72) # latitude of the plotting domain center
# m.drawcoastlines()
# draw the Blue Marble background (requires PIL, the Python Imaging Library)
if bluemarble: # seems to reverse N and S
m.bluemarble()
# convert longitudes and latitudes to x and y:
xx, yy = m(lon, lat)
if contour100:
# mark 100 m/a contour in black:
m.contour(xx, yy, myvar, [100], colors="black")
# plot log color scale or not
if logscale:
m.pcolormesh(xx, yy, myvar,
norm=colors.LogNorm(vmin=myvmin, vmax=myvmax))
else:
m.pcolormesh(xx, yy, myvar, vmin=myvmin, vmax=myvmax)
# add a colorbar:
plt.colorbar(extend='both',
ticks=ticklist,
format="%d")
# draw parallels and meridians
# labels kwarg is where to draw ticks: [left, right, top, bottom]
m.drawparallels(np.arange(-55., 90., 5.), labels=[1, 0, 0, 0])
m.drawmeridians(np.arange(-120., 30., 10.), labels=[0, 0, 0, 1])
outname = rootname + '-' + field + '.png'
print "saving image to file %s ..." % outname
plt.savefig(outname, dpi=mydpi, bbox_inches='tight')
|
citibeth/twoway
|
pism/std-greenland/basemapfigs.py
|
Python
|
gpl-3.0
| 6,416
|
[
"NetCDF"
] |
925e57b55ca2a21d51be2bc10aecf262fe043f17159fc46d2304140b5e0d1a7f
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 Eric Pascual
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
"""
An assortment of classes modeling specific features of the EV3 brick.
"""
from collections import OrderedDict
# Motor (output) port addresses on the EV3 brick.
OUTPUT_A = 'ev3-ports:outA'
OUTPUT_B = 'ev3-ports:outB'
OUTPUT_C = 'ev3-ports:outC'
OUTPUT_D = 'ev3-ports:outD'

# Sensor (input) port addresses on the EV3 brick.
INPUT_1 = 'ev3-ports:in1'
INPUT_2 = 'ev3-ports:in2'
INPUT_3 = 'ev3-ports:in3'
INPUT_4 = 'ev3-ports:in4'

# Device file and evdev name for the brick's buttons.
BUTTONS_FILENAME = '/dev/input/by-path/platform-gpio_keys-event'
EVDEV_DEVICE_NAME = 'EV3 Brick Buttons'

# LED device names keyed by position/colour; OrderedDict preserves the
# declaration order for callers that iterate over the entries.
LEDS = OrderedDict([
    ('red_left', 'led0:red:brick-status'),
    ('red_right', 'led1:red:brick-status'),
    ('green_left', 'led0:green:brick-status'),
    ('green_right', 'led1:green:brick-status'),
])

# Each physical LED group combines one red and one green element.
LED_GROUPS = OrderedDict([
    ('LEFT', ('red_left', 'green_left')),
    ('RIGHT', ('red_right', 'green_right')),
])

# Colours expressed as (red, green) brightness fractions.
LED_COLORS = OrderedDict([
    ('BLACK', (0, 0)),
    ('RED', (1, 0)),
    ('GREEN', (0, 1)),
    ('AMBER', (1, 1)),
    ('ORANGE', (1, 0.5)),
    ('YELLOW', (0.1, 1)),
])

LED_DEFAULT_COLOR = 'GREEN'
|
dwalton76/ev3dev-lang-python
|
ev3dev2/_platform/ev3.py
|
Python
|
mit
| 2,283
|
[
"Amber"
] |
848a1a917ae2e475c76645d73c230a8a85031fa94e6f0acdea6a6cd6037ba587
|
# lintory - keep track of computers and licenses
# Copyright (C) 2008-2009 Brian May
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext, loader
from django.http import HttpResponseRedirect, Http404
from django.db.models import get_model
import django.forms.util as util
from django.utils.translation import ugettext as _
from lintory import models, helpers, forms, tables, filters, webs
def root(request):
    """Site landing page: just the 'Home' breadcrumb and the index template."""
    crumbs = [webs.breadcrumb(reverse("root"), _("Home"))]
    return render_to_response('lintory/index.html',
                              {'breadcrumbs': crumbs},
                              context_instance=RequestContext(request))


def get_object_by_string(type_id, object_id):
    """Resolve a (model name, primary key) pair to an instance, or 404."""
    model = get_model("lintory", type_id)
    if model is None:
        raise Http404("Bad model type '%s'" % (type_id))
    return get_object_or_404(model, pk=object_id)
###########
# HISTORY #
###########
def history_item_add(request, type_id, object_id):
    """Attach a new history item to any object identified by (type, pk)."""
    target = get_object_by_string(type_id, object_id)
    web = webs.history_item_web()
    web.initial_object = target
    return web.object_add(request, kwargs={'object': target})


def history_item_edit(request, history_item_id):
    """Edit an existing history item."""
    web = webs.history_item_web()
    web.initial_object = None
    item = get_object_or_404(models.history_item, pk=history_item_id)
    return web.object_edit(request, item)


def history_item_delete(request, history_item_id):
    """Delete an existing history item."""
    web = webs.history_item_web()
    web.initial_object = None
    item = get_object_or_404(models.history_item, pk=history_item_id)
    return web.object_delete(request, item)
#########
# PARTY #
#########
def party_list(request):
    """Filterable, sortable list of parties."""
    web = webs.party_web()
    flt = filters.party(request.GET or None)
    tbl = tables.party(request, web, flt.qs,
                       order_by=request.GET.get('sort'))
    return web.object_list(request, flt.form, tbl)


def party_detail(request, object_id):
    """Show one party; the reserved id "none" maps to the Nobody pseudo-party."""
    if object_id != "none":
        party = get_object_or_404(models.party, pk=object_id)
    else:
        party = models.Nobody()
    return webs.party_web().object_view(request, party)


def party_add(request):
    """Create a party."""
    return webs.party_web().object_add(request)


def party_edit(request, object_id):
    """Edit a party."""
    party = get_object_or_404(models.party, pk=object_id)
    return webs.party_web().object_edit(request, party)


def party_delete(request, object_id):
    """Delete a party."""
    party = get_object_or_404(models.party, pk=object_id)
    return webs.party_web().object_delete(request, party)


def party_software_list(request, object_id):
    """List the software associated with a party (or with Nobody)."""
    if object_id != "none":
        party = get_object_or_404(models.party, pk=object_id)
    else:
        party = models.Nobody()
    web = webs.party_web()
    crumbs = web.get_view_breadcrumbs(party)
    crumbs.append(webs.breadcrumb(web.get_software_list_url(party),
                                  "software list"))
    return render_to_response('lintory/party_software_list.html', {
        'object': party,
        'breadcrumbs': crumbs,
    }, context_instance=RequestContext(request))


def party_software_detail(request, object_id, software_id):
    """Show one software item in the context of a party (or Nobody)."""
    if object_id != "none":
        party = get_object_or_404(models.party, pk=object_id)
    else:
        party = models.Nobody()
    software = get_object_or_404(models.software, pk=software_id)
    web = webs.party_web()
    crumbs = web.get_view_breadcrumbs(party)
    crumbs.append(webs.breadcrumb(web.get_software_list_url(party),
                                  "software list"))
    crumbs.append(webs.breadcrumb(web.get_software_view_url(party, software),
                                  software))
    return render_to_response('lintory/party_software_detail.html', {
        'party': party,
        'software': software,
        'software_web': webs.software_web(),
        'breadcrumbs': crumbs,
    }, context_instance=RequestContext(request))
##########
# VENDOR #
##########
def vendor_list(request):
    """Filterable, sortable list of vendors."""
    web = webs.vendor_web()
    flt = filters.vendor(request.GET or None)
    tbl = tables.vendor(request, web, flt.qs,
                        order_by=request.GET.get('sort'))
    return web.object_list(request, flt.form, tbl)


def vendor_detail(request, object_id):
    """Show one vendor."""
    vendor = get_object_or_404(models.vendor, pk=object_id)
    return webs.vendor_web().object_view(request, vendor)


def vendor_add(request):
    """Create a vendor."""
    return webs.vendor_web().object_add(request)


def vendor_edit(request, object_id):
    """Edit a vendor."""
    vendor = get_object_or_404(models.vendor, pk=object_id)
    return webs.vendor_web().object_edit(request, vendor)


def vendor_delete(request, object_id):
    """Delete a vendor."""
    vendor = get_object_or_404(models.vendor, pk=object_id)
    return webs.vendor_web().object_delete(request, vendor)
########
# TASK #
########
def task_list(request):
    """Filterable, sortable list of tasks."""
    web = webs.task_web()
    flt = filters.task(request.GET or None)
    tbl = tables.task(request, web, flt.qs,
                      order_by=request.GET.get('sort'))
    return web.object_list(request, flt.form, tbl)


def task_detail(request, object_id):
    """Show one task."""
    task = get_object_or_404(models.task, pk=object_id)
    return webs.task_web().object_view(request, task)


def task_add(request):
    """Create a task."""
    return webs.task_web().object_add(request)


def task_edit(request, object_id):
    """Edit a task."""
    task = get_object_or_404(models.task, pk=object_id)
    return webs.task_web().object_edit(request, task)


def task_delete(request, object_id):
    """Delete a task."""
    task = get_object_or_404(models.task, pk=object_id)
    return webs.task_web().object_delete(request, task)
#################
# HARDWARE_TASK #
#################
def task_add_hardware(request, object_id):
    """Attach hardware to an existing task."""
    task = get_object_or_404(models.task, pk=object_id)
    return webs.hardware_task_web().object_add(request, kwargs={'task': task})


def hardware_task_edit(request, object_id):
    """Edit a hardware/task association."""
    assoc = get_object_or_404(models.hardware_task, pk=object_id)
    return webs.hardware_task_web().object_edit(request, assoc)


def hardware_task_delete(request, object_id):
    """Delete a hardware/task association."""
    assoc = get_object_or_404(models.hardware_task, pk=object_id)
    return webs.hardware_task_web().object_delete(request, assoc)
############
# LOCATION #
############
def location_detail(request, object_id):
    """Show a single location."""
    loc = get_object_or_404(models.location, pk=object_id)
    return webs.location_web().object_view(request, loc)


def location_task_list(request, object_id):
    """List outstanding hardware tasks for a location and its children."""
    web = webs.location_web()
    loc = get_object_or_404(models.location, pk=object_id)
    crumbs = web.get_view_breadcrumbs(loc)
    crumbs.append(webs.breadcrumb(
        reverse('location_task_list', kwargs={'object_id': object_id}),
        "tasks"))
    pending = models.hardware_task.objects.filter(
        hardware__in=loc.get_self_or_children_hardware(),
        date_complete__isnull=True)
    return render_to_response('lintory/location_tasks.html', {
        'object': loc,
        'breadcrumbs': crumbs,
        'todo_hardware_tasks': pending,
    }, context_instance=RequestContext(request))


def location_task(request, object_id, task_id):
    """List outstanding hardware tasks for a single task at a location."""
    web = webs.location_web()
    loc = get_object_or_404(models.location, pk=object_id)
    task = get_object_or_404(models.task, pk=task_id)
    crumbs = web.get_view_breadcrumbs(loc)
    crumbs.append(webs.breadcrumb(
        reverse('location_task_list', kwargs={'object_id': object_id}),
        "tasks"))
    crumbs.append(webs.breadcrumb(
        reverse('location_task',
                kwargs={'object_id': object_id, 'task_id': task_id}),
        task))
    pending = models.hardware_task.objects.filter(
        hardware__in=loc.get_self_or_children_hardware(),
        date_complete__isnull=True, task=task)
    return render_to_response('lintory/location_tasks.html', {
        'object': loc,
        'task': task,
        'breadcrumbs': crumbs,
        'todo_hardware_tasks': pending,
    }, context_instance=RequestContext(request))


def location_redirect(request, object_id):
    """Redirect to a location's canonical view URL."""
    loc = get_object_or_404(models.location, pk=object_id)
    return HttpResponseRedirect(loc.get_view_url())


def location_add(request, object_id):
    """Add a child location under the given parent location."""
    parent = get_object_or_404(models.location, pk=object_id)
    return webs.location_web().object_add(request, kwargs={'parent': parent})


def location_edit(request, object_id):
    """Edit a location."""
    loc = get_object_or_404(models.location, pk=object_id)
    return webs.location_web().object_edit(request, loc)


def location_delete(request, object_id):
    """Delete a location."""
    loc = get_object_or_404(models.location, pk=object_id)
    return webs.location_web().object_delete(request, loc)
class location_hardware_lookup:
    """Template-side proxy around a location.

    Exposes computed strings (comma-separated computer lists) plus
    dictionary-style access to any attribute or no-argument method of
    the wrapped location.  Used from the per-location SVG templates.
    """

    def __init__(self, location):
        self.location = location

    def computers(self):
        # Comma-separated names of non-disposed computers at this location.
        list = models.computer.objects.filter(
            location=self.location,
            date_of_disposal__isnull=True)
        list = [ smart_unicode(i) for i in list ]
        return ",".join(list)

    def self_or_children_computers(self):
        # Same, but including computers in all child locations too.
        location_list = self.location.get_self_or_children()
        list = models.computer.objects.filter(
            location__in=location_list,
            date_of_disposal__isnull=True)
        list = [ smart_unicode(i) for i in list ]
        return ",".join(list)

    # Short cut
    def url(self):
        web = webs.location_web()
        return web.get_view_url(self.location)

    def __getitem__(self, key):
        # Dictionary access falls through to the wrapped location.
        # Callables are invoked (no-argument only); methods flagged as
        # altering data are refused outright.
        value = getattr(self.location, key)
        if callable(value):
            if getattr(value, 'alters_data', False):
                raise IndexError("Method '%s' alters data"%(key))
            else:
                try: # method call (assuming no args required)
                    value = value()
                except TypeError: # arguments *were* required
                    # GOTCHA: This will also catch any TypeError
                    # raised in the function itself.
                    raise IndexError("Method '%s' raised TypeError"%(key))
        return value


class location_lookup:
    """Template-side mapping from a location pk to its hardware lookup."""

    def __getitem__(self, key):
        try:
            location = models.location.objects.get(pk=key)
        except models.location.DoesNotExist, e:
            raise IndexError("Location %d not found"%(key))
        return location_hardware_lookup(location)


def location_svg(request, object_id):
    # Render the per-location SVG map template, if one exists for this pk.
    object = get_object_or_404(models.location, pk=object_id)
    web = webs.location_web()
    if not web.has_svg_file(object):
        raise Http404
    return render_to_response('lintory/locations/%i.svg'%object.pk,
        mimetype= "image/svg+xml",
        context_instance=RequestContext(request,{
            'location': location_lookup()
        }))
############
# HARDWARE #
############
# HARDWARE TYPE DATA
class type_data:
    """Pairs a hardware type's web class with its model class."""

    def __init__(self, web, type_class):
        self.web = web
        self.type_class = type_class


# Registry mapping hardware type_id strings to their web/model classes;
# consulted by hardware_edit and hardware_type_add below.
type_dict = {
    'motherboard': type_data(
        web = webs.motherboard_web,
        type_class = models.motherboard,
    ),
    'processor': type_data(
        web = webs.processor_web,
        type_class = models.processor,
    ),
    'video_controller': type_data(
        web = webs.video_controller_web,
        type_class = models.video_controller,
    ),
    'network_adaptor': type_data(
        web = webs.network_adaptor_web,
        type_class = models.network_adaptor,
    ),
    'storage': type_data(
        web = webs.storage_web,
        type_class = models.storage,
    ),
    'computer': type_data(
        web = webs.computer_web,
        type_class = models.computer,
    ),
    'power_supply': type_data(
        web = webs.power_supply_web,
        type_class = models.power_supply,
    ),
    'monitor': type_data(
        web = webs.monitor_web,
        type_class = models.monitor,
    ),
    'multifunction': type_data(
        web = webs.multifunction_web,
        type_class = models.multifunction,
    ),
    'printer': type_data(
        web = webs.printer_web,
        type_class = models.printer,
    ),
    'scanner': type_data(
        web = webs.scanner_web,
        type_class = models.scanner,
    ),
    'docking_station': type_data(
        web = webs.docking_station_web,
        type_class = models.docking_station,
    ),
    'camera': type_data(
        web = webs.camera_web,
        type_class = models.camera,
    ),
}
# HARDWARE OBJECTS
def hardware_list(request):
    """Filterable, sortable list of all hardware."""
    web = webs.hardware_web()
    filter = filters.hardware(request.GET or None)
    table = tables.hardware(request, web, filter.qs, order_by=request.GET.get('sort'))
    return web.object_list(request, filter.form, table)


def hardware_detail(request, object_id):
    """Show one hardware item via the web class of its concrete subtype."""
    object = get_object_or_404(models.hardware, pk=object_id)
    # Downcast to the concrete subtype (computer, monitor, ...).
    object = object.get_object()
    web = webs.get_web_from_object(object)
    return web.object_view(request, object)


def hardware_add(request, type_id=None, object_id=None):
    """Ask which hardware type to add, then redirect to that type's add page.

    When object_id is given, breadcrumbs are built relative to that
    existing hardware item (the new hardware will be installed on it).
    """
    if object_id is None:
        web = webs.hardware_web()
        breadcrumbs = web.get_add_breadcrumbs()
    else:
        object = get_object_or_404(models.hardware, pk=object_id)
        object = object.get_object()
        web = webs.get_web_from_object(object)
        breadcrumbs = web.get_view_breadcrumbs(object)
        breadcrumbs.append(webs.breadcrumb(web.get_add_to_instance_url(object,type_id),"add hardware"))
    if request.method == 'POST':
        form = forms.hardware_type_form(request.POST, request.FILES)
        if form.is_valid():
            new_type = form.cleaned_data['type']
            url = web.get_add_url(new_type)
            # honour a ?next=... override if supplied
            url = request.GET.get("next",url)
            return HttpResponseRedirect(url)
    else:
        form = forms.hardware_type_form()
    return render_to_response("lintory/hardware_type.html", {
        'breadcrumbs': breadcrumbs,
        'form' : form,
        'media' : form.media,
    },context_instance=RequestContext(request))


def hardware_edit(request, object_id):
    """Edit a hardware item via its concrete subtype's web class."""
    object = get_object_or_404(models.hardware, pk=object_id)
    type_id = object.type_id
    if type_id not in type_dict:
        raise Http404(u"Hardware type '%s' not found"%(type_id))
    object = object.get_object()
    web = webs.get_web_from_object(object)
    return web.object_edit(request, object)


def hardware_install(request, object_id):
    """Bulk-install selected hardware items onto the given hardware item.

    POSTed 'pk' values are installed unless already installed on some
    other hardware, in which case an error message is collected and shown.
    """
    object = get_object_or_404(models.hardware, pk=object_id)
    error_list = [ ]
    pks = []
    if request.method == 'POST':
        pks = request.POST.getlist('pk')
        for pk in pks:
            requested_object = get_object_or_404(models.hardware, pk=pk)
            if requested_object.installed_on is not None:
                if requested_object.installed_on.pk != object.pk:
                    error_list.append(u"Cannot install '%s' as it is already installed on another computer"%(requested_object))
            else:
                requested_object.installed_on = object
                requested_object.save()
    web = webs.hardware_web()
    # default filter shows only uninstalled hardware ('is_installed': '3')
    filter = filters.hardware(request.GET or {'is_installed': '3'})
    table = tables.hardware_list_form(pks, request, web, filter.qs, order_by=request.GET.get('sort'))
    return web.object_list(request, filter.form, table, template="lintory/hardware_list_form.html",
        context={ 'object': object, 'error_list': error_list })


def hardware_delete(request, object_id):
    """Delete a hardware item via its concrete subtype's web class."""
    object = get_object_or_404(models.hardware, pk=object_id)
    object = object.get_object()
    web = webs.get_web_from_object(object)
    return web.object_delete(request, object)


def hardware_type_add(request, type_id, object_id=None):
    """Add hardware of a specific type, optionally installed on object_id."""
    if type_id not in type_dict:
        raise Http404(u"Hardware type '%s' not found"%(type_id))
    web = type_dict[type_id].web()
    web.initial_model_class = type_dict[type_id].type_class
    if object_id is not None:
        web.initial_installed_on = get_object_or_404(models.hardware, pk=object_id)
    else:
        web.initial_installed_on = None
    return web.object_add(request)
############
# SOFTWARE #
############
def software_list(request):
    """Filterable, sortable list of software."""
    web = webs.software_web()
    flt = filters.software(request.GET or None)
    tbl = tables.software(request, web, flt.qs,
                          order_by=request.GET.get('sort'))
    return web.object_list(request, flt.form, tbl)


def software_detail(request, object_id):
    """Show one software item."""
    sw = get_object_or_404(models.software, pk=object_id)
    return webs.software_web().object_view(request, sw)


def software_add(request):
    """Create a software item."""
    return webs.software_web().object_add(request)


def software_edit(request, object_id):
    """Edit a software item."""
    sw = get_object_or_404(models.software, pk=object_id)
    return webs.software_web().object_edit(request, sw)


def software_delete(request, object_id):
    """Delete a software item."""
    sw = get_object_or_404(models.software, pk=object_id)
    return webs.software_web().object_delete(request, sw)
###########
# LICENSE #
###########
def license_list(request):
    """Filterable, sortable list of licenses."""
    web = webs.license_web()
    flt = filters.license(request.GET or None)
    tbl = tables.license(request, web, flt.qs,
                         order_by=request.GET.get('sort'))
    return web.object_list(request, flt.form, tbl)


def license_detail(request, object_id):
    """Show one license."""
    lic = get_object_or_404(models.license, pk=object_id)
    return webs.license_web().object_view(request, lic)


def license_add(request):
    """Create a license."""
    return webs.license_web().object_add(request)


def license_edit(request, object_id):
    """Edit a license."""
    lic = get_object_or_404(models.license, pk=object_id)
    return webs.license_web().object_edit(request, lic)
def software_add_license(request,object_id):
    """Create a license plus a (new or reused) license key for a software item.

    The license key is resolved first so that nothing is created when the
    key cannot be reused or added; only then is the license record saved
    and linked to the key.
    """
    object = get_object_or_404(models.software, pk=object_id)
    web = webs.software_web()
    breadcrumbs = web.get_view_breadcrumbs(object)
    breadcrumbs.append(webs.breadcrumb(web.get_add_license_url(object),"add software license"))
    l_web = webs.license_web()
    # Permission check renders an error response when adding is not allowed.
    error = l_web.check_add_perms(request, breadcrumbs)
    if error is not None:
        return error
    if request.method == 'POST':
        form = forms.license_add_form(request.POST, request.FILES)
        if form.is_valid():
            valid = True
            # we try to get license_key first, in case something goes wrong.
            # if something goes wrong, no license will be created.
            key = form.cleaned_data['key'].strip()
            lk_web = webs.license_key_web()
            try:
                # try to find existing license for key
                if lk_web.has_edit_perms(request.user):
                    license_key = models.license_key.objects.get(key=key,software=object)
                else:
                    msg = u"License key exists and no permission to modify"
                    form._errors["key"] = util.ErrorList([msg])
                    valid = False
            except models.license_key.DoesNotExist, e:
                # no license found, we have to create one
                if lk_web.has_add_perms(request.user):
                    license_key = models.license_key()
                    license_key.software = object
                    license_key.key = key
                else:
                    msg = u"License key doesn't exist and no permission to add one"
                    form._errors["key"] = util.ErrorList([msg])
                    valid = False
            # Can we continue?
            if valid:
                # we need to create the license license
                license = models.license()
                license.vendor_tag = form.cleaned_data['vendor_tag']
                license.installations_max = form.cleaned_data['installations_max']
                license.version = form.cleaned_data['version']
                license.expires = form.cleaned_data['expires']
                license.owner = form.cleaned_data['owner']
                license.save()
                # Update license_key with license we just got
                license_key.license = license
                license_key.save()
                # we finished
                url = l_web.get_view_url(license)
                url = request.GET.get("next",url)
                return HttpResponseRedirect(url)
    else:
        form = forms.license_add_form()
    return render_to_response('django_webs/object_edit.html', {
        'object': None, 'type': 'software license',
        'breadcrumbs': breadcrumbs,
        'form' : form,
        'media' : form.media,
    },context_instance=RequestContext(request))
def license_delete(request, object_id):
    """Delete a license."""
    lic = get_object_or_404(models.license, pk=object_id)
    return webs.license_web().object_delete(request, lic)
###############
# LICENSE KEY #
###############
def license_key_detail(request, object_id):
    """Show one license key."""
    key = get_object_or_404(models.license_key, pk=object_id)
    return webs.license_key_web().object_view(request, key)


def license_add_license_key(request, object_id):
    """Attach a new license key to a license."""
    license = get_object_or_404(models.license, pk=object_id)
    return webs.license_key_web().object_add(request,
                                             kwargs={'license': license})


def license_key_edit(request, object_id):
    """Edit a license key."""
    key = get_object_or_404(models.license_key, pk=object_id)
    return webs.license_key_web().object_edit(request, key)


def license_key_delete(request, object_id):
    """Delete a license key."""
    key = get_object_or_404(models.license_key, pk=object_id)
    return webs.license_key_web().object_delete(request, key)
#########################
# SOFTWARE INSTALLATION #
#########################
def software_add_software_installation(request, object_id):
    """Record a new installation of the given software."""
    software = get_object_or_404(models.software, pk=object_id)
    return webs.software_installation_web().object_add(
        request, kwargs={'software': software})
def software_installation_edit_license_key(request, object_id):
    """Change (or clear) the license key attached to a software installation.

    GET shows a selection form pre-filled with the current key; POST stores
    the chosen key (empty string clears it) and redirects back to the
    software detail page (or to ?next=... when supplied).
    """
    # FIX: the original instantiated webs.software_installation_web() twice;
    # the second, redundant assignment has been removed.
    web = webs.software_installation_web()
    software_web = webs.software_web()
    object = get_object_or_404(models.software_installation, pk=object_id)
    breadcrumbs = software_web.get_view_breadcrumbs(object.software)
    breadcrumbs.append(webs.breadcrumb(web.get_edit_license_key_url(object),
                                       "edit license key"))
    # Permission check renders an error response when editing is not allowed.
    error = web.check_edit_perms(request, breadcrumbs)
    if error is not None:
        return error
    if request.method == 'POST':
        form = forms.license_key_select_form(object.software, request.POST,
                                             request.FILES)
        if form.is_valid():
            if form.cleaned_data['key'] == "":
                license_key = None
            else:
                license_key = get_object_or_404(
                    models.license_key, pk=form.cleaned_data['key'])
            object.license_key = license_key
            object.save()
            url = software_web.get_view_url(object.software)
            url = request.GET.get("next", url)
            return HttpResponseRedirect(url)
    else:
        # Pre-select the installation's current key ("" means no key).
        if object.license_key is None:
            key = ""
        else:
            key = object.license_key.pk
        form = forms.license_key_select_form(object.software, {'key': key})
    # fix me, choice may be null
    return render_to_response('django_webs/object_edit.html', {
        'object': object,
        'breadcrumbs': breadcrumbs,
        'form': form,
        'media': form.media,
    }, context_instance=RequestContext(request))
def software_installation_edit(request, object_id):
    """Edit a software installation."""
    inst = get_object_or_404(models.software_installation, pk=object_id)
    return webs.software_installation_web().object_edit(request, inst)


def software_installation_delete(request, object_id):
    """Delete a software installation."""
    inst = get_object_or_404(models.software_installation, pk=object_id)
    return webs.software_installation_web().object_delete(request, inst)
######
# OS #
######
def os_detail(request, object_id):
    """Show one operating system record."""
    record = get_object_or_404(models.os, pk=object_id)
    return webs.os_web().object_view(request, record)


def os_add(request, object_id):
    """Add an OS installed on the given storage device."""
    storage = get_object_or_404(models.storage, pk=object_id)
    return webs.os_web().object_add(request, kwargs={'storage': storage})


def os_edit(request, object_id):
    """Edit an operating system record."""
    record = get_object_or_404(models.os, pk=object_id)
    return webs.os_web().object_edit(request, record)


def os_delete(request, object_id):
    """Delete an operating system record."""
    record = get_object_or_404(models.os, pk=object_id)
    return webs.os_web().object_delete(request, record)
########
# DATA #
########
def data_list(request):
web = webs.data_web()
filter = filters.data(request.GET or None)
table = tables.data(request, web, filter.qs, order_by=request.GET.get('sort'))
return web.object_list(request, filter.form, table)
def data_detail(request, object_id):
    """Display the detail page for a data object (404 if absent)."""
    web = webs.data_web()
    # 'obj' instead of 'object' to avoid shadowing the builtin
    obj = get_object_or_404(models.data, pk=object_id)
    return web.object_view(request, obj)
def data_add(request):
    """Show the add form for a data object, using the file-edit template."""
    web = webs.data_web()
    return web.object_add(request, template='lintory/object_file_edit.html')
def data_edit(request, object_id):
    """Edit a data object using the file-edit template (404 if absent)."""
    web = webs.data_web()
    template = 'lintory/object_file_edit.html'
    # 'obj' instead of 'object' to avoid shadowing the builtin
    obj = get_object_or_404(models.data, pk=object_id)
    return web.object_edit(request, obj, template=template)
def data_delete(request, object_id):
    """Confirm/process deletion of a data object (404 if absent)."""
    web = webs.data_web()
    # 'obj' instead of 'object' to avoid shadowing the builtin
    obj = get_object_or_404(models.data, pk=object_id)
    return web.object_delete(request, obj)
|
VPAC/lintory
|
lintory/views.py
|
Python
|
gpl-3.0
| 26,596
|
[
"Brian"
] |
a8096736c4772f72fbe4452359a7c7473af799a50cf9d88222e5ce857e20357c
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
import urllib
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.bibauthorid_webapi import create_new_person
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS, BIBAUTHORID_CFG_SITE_NAME, CFG_BIBAUTHORID_ENABLED
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name, clean_string
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
from invenio.bibsched import bibsched_task_finished_successfully, \
bibsched_task_finished_with_error, bibsched_task_running, bibsched_task_waiting, \
UnknownBibschedStatus
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author, \
get_free_author_id
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author, \
get_author_by_canonical_name, get_token, set_token, remove_rtid_from_ticket
from invenio.orcidutils import get_dois_from_orcid, get_dois_from_orcid_using_pid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
from invenio.bibauthorid_general_utils import get_inspire_record_url
from invenio.bibcatalog import BIBCATALOG_SYSTEM
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
from invenio.bibauthorid_name_utils import split_name_parts
from invenio.orcidutils import push_orcid_papers
# Shared bibauthorid template instance used by all page handlers in this module.
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
    '''
    Handles /author/claim pages and AJAX requests.

    Supplies the methods:
        /author/claim/<string>
        /author/claim/action
        /author/claim/claimstub
        /author/claim/export
        /author/claim/merge_profiles_ajax
        /author/claim/search_box_ajax
        /author/claim/tickets_admin
        /author/claim/search
    '''
    # URL components served directly under /author/claim/.  '' serves the
    # index page; anything not listed here is resolved dynamically by
    # _lookup() below (treated as an author identifier).
    _exports = ['',
                'action',
                'claimstub',
                'export',
                'merge_profiles_ajax',
                'search_box_ajax',
                'tickets_admin'
                ]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
    '''
    Constructor of the web interface.

    Probes *identifier* in order: canonical id, numeric author id,
    bibrefrec.  The first successful interpretation wins; on failure
    self.person_id stays -1 (not a valid author identifier).

    @param identifier: identifier of an author. Can be one of:
        - an author id: e.g. "14"
        - a canonical id: e.g. "J.R.Ellis.1"
        - a bibrefrec: e.g. "100:1442,155"
    @type identifier: str
    '''
    self.person_id = -1  # -1 is a non valid author identifier

    # NOTE(review): isinstance(identifier, str) rejects non-str inputs;
    # on Python 2 this also rejects unicode strings — confirm intended.
    if identifier is None or not isinstance(identifier, str):
        return

    # check if it's a canonical id: e.g. "J.R.Ellis.1"
    pid = int(webapi.get_person_id_from_canonical_id(identifier))
    if pid >= 0:
        self.person_id = pid
        return

    # check if it's an author id: e.g. "14"
    try:
        self.person_id = int(identifier)
        return
    except ValueError:
        pass

    # check if it's a bibrefrec: e.g. "100:1442,155"
    if webapi.is_valid_bibref(identifier):
        pid = int(webapi.get_person_id_from_paper(identifier))
        if pid >= 0:
            self.person_id = pid
            return
def __call__(self, req, form):
    '''
    Serve the main person page.

    Will use the object's person id to get a person's information.

    @param req: apache request object
    @type req: apache request object
    @param form: POST/GET variables of the request
    @type form: dict

    @return: a full page formatted in HTML
    @rtype: str
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']

    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'open_claim': (str, None),
                               'ticketid': (int, -1),
                               'verbose': (int, 0)})

    debug = "verbose" in argd and argd["verbose"] > 0
    ln = argd['ln']
    req.argd = argd  # needed for perform_req_search

    # Unknown author identifier: bounce to the search page.
    if self.person_id < 0:
        return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))

    no_access = self._page_access_permission_wall(req, [self.person_id])
    if no_access:
        return no_access

    pinfo['claim_in_process'] = True

    user_info = collect_user_info(req)
    user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
    session.dirty = True

    if self.person_id != -1:
        pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id

    rt_ticket_id = argd['ticketid']
    if rt_ticket_id != -1:
        pinfo["admin_requested_ticket_id"] = rt_ticket_id

    session.dirty = True

    # Create menu and page using templates
    cname = webapi.get_canonical_id_from_person_id(self.person_id)
    menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))

    profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
    profile_page.add_profile_menu(menu)

    full_name = webapi.get_longest_name_from_pid(self.person_id)
    page_title = '%s - Publications Management' % full_name

    # Guest prompt is shown at most once per session, and only on INSPIRE.
    guest_prompt = 'true'
    if not CFG_INSPIRE_SITE:
        guest_prompt = 'false'
    if 'prompt_shown' not in session:
        session['prompt_shown'] = False
    if session['prompt_shown']:
        guest_prompt = 'false'
    else:
        session['prompt_shown'] = True
        session.dirty = True

    # Seed the client-side ticketbox app with the user's pending operations.
    profile_page.add_bootstrapped_data(json.dumps({
        "backbone": """
        (function(ticketbox) {
            var app = ticketbox.app;
            app.userops.set(%s);
            app.bodyModel.set({userLevel: "%s", guestPrompt: %s});
        })(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel, guest_prompt)
    }))

    if debug:
        profile_page.add_debug_info(session)

    # body = self._generate_optional_menu(ulevel, req, form)
    content = self._generate_tabs(ulevel, req)
    content += self._generate_footer(ulevel)
    content = content.decode('utf-8', 'strict')

    webapi.history_log_visit(req, 'claim', pid=self.person_id)
    return page(title=page_title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=profile_page.get_wrapped_body("generic", {'html': content}).encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
    '''
    Display an error page if user not authorized to use the interface.

    Returns the rendered "not authorized" page when access is denied and
    the empty string when access is granted.

    @param req: Apache Request Object for session management
    @type req: Apache Request Object
    @param req_pid: Requested person id
    @type req_pid: int
    @param req_level: Request level required for the page
    @type req_level: string
    '''
    session = get_session(req)
    uid = getUid(req)
    pinfo = session["personinfo"]
    uinfo = collect_user_info(req)

    if 'ln' in pinfo:
        ln = pinfo["ln"]
    else:
        ln = CFG_SITE_LANG
    _ = gettext_set_language(ln)

    is_authorized = True
    pids_to_check = []

    # Hard gates first: feature disabled, or wrong user level for the page.
    if not AID_ENABLED:
        return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))
    if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
        return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))

    # Normalize the requested pid(s) into a list.
    if req_pid and not isinstance(req_pid, list):
        pids_to_check = [req_pid]
    elif req_pid and isinstance(req_pid, list):
        pids_to_check = req_pid

    # Non-admins need at least one of the precached claim/attribution rights.
    if (not (uinfo['precached_usepaperclaim']
             or uinfo['precached_usepaperattribution'])
            and 'ulevel' in pinfo
            and not pinfo["ulevel"] == "admin"):
        is_authorized = False

    if is_authorized and not webapi.user_can_view_CMP(uid):
        is_authorized = False

    # Also check any person ids referenced by pending session tickets.
    if is_authorized and 'ticket' in pinfo:
        for tic in pinfo["ticket"]:
            if 'pid' in tic:
                pids_to_check.append(tic['pid'])

    if pids_to_check and is_authorized:
        user_pid = webapi.get_pid_from_uid(uid)
        if not uinfo['precached_usepaperattribution']:
            # Without attribution rights a non-admin may only touch their
            # own person id.
            if (not user_pid in pids_to_check
                    and 'ulevel' in pinfo
                    and not pinfo["ulevel"] == "admin"):
                is_authorized = False
        elif (user_pid in pids_to_check
              and 'ulevel' in pinfo
              and not pinfo["ulevel"] == "admin"):
            # Drop ticket entries that target other people; iterate a copy
            # because entries are removed while looping.
            for tic in list(pinfo["ticket"]):
                if not tic["pid"] == user_pid:
                    pinfo['ticket'].remove(tic)

    if not is_authorized:
        return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
    else:
        return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(
webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_tabs(self, ulevel, req):
    '''
    Generates the tabs content for the specified user permission level.

    Dispatches to a per-level nested generator; guest and user levels are
    thin wrappers around the admin generator with restricted tab/link sets.

    @param ulevel: user permission level
    @type ulevel: str
    @param req: apache request object
    @type req: apache request object

    @return: tabs content
    @rtype: str
    '''
    from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
    from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict

    def generate_tabs_guest(req):
        # Guests see a reduced tab set and no ticket links.
        links = list()  # ['delete', 'commit','del_entry','commit_entry']
        tabs = ['records', 'repealed', 'review']
        return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
                                   open_tickets=list(),
                                   verbiage_dict=tmpl_verbiage_dict['guest'],
                                   buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
                                   show_reset_button=False)

    def generate_tabs_user(req):
        links = ['delete', 'del_entry']
        tabs = ['records', 'repealed', 'review', 'tickets']
        session = get_session(req)
        pinfo = session['personinfo']
        uid = getUid(req)
        user_is_owner = 'not_owner'
        if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
            user_is_owner = 'owner'
        # Only show the tickets this user opened: a ticket row of the form
        # ('uid-ip', '<uid>||<ip>') marks ownership.
        open_tickets = webapi.get_person_request_ticket(self.person_id)
        tickets = list()
        for t in open_tickets:
            owns = False
            for row in t[0]:
                if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
                    owns = True
            if owns:
                tickets.append(t)
        return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
                                   open_tickets=tickets,
                                   verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
                                   buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])

    # NOTE: the mutable defaults below are recreated on every call to
    # _generate_tabs (the def re-executes), so the usual shared-default
    # pitfall does not apply here; show_tabs.remove() below does mutate
    # any list the caller passes in.
    def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
                            ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
                            verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
        session = get_session(req)
        personinfo = dict()

        try:
            personinfo = session["personinfo"]
        except KeyError:
            return ""

        if 'ln' in personinfo:
            ln = personinfo["ln"]
        else:
            ln = CFG_SITE_LANG

        all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
        records = [{'recid': paper[0],
                    'bibref': paper[1],
                    'flag': paper[2],
                    'authorname': paper[3],
                    'authoraffiliation': paper[4],
                    'paperdate': paper[5],
                    'rt_status': paper[6],
                    'paperexperiment': paper[7]} for paper in all_papers]
        # flag < -1 means the paper claim was rejected/repealed.
        rejected_papers = [row for row in records if row['flag'] < -1]
        rest_of_papers = [row for row in records if row['flag'] >= -1]
        review_needed = webapi.get_review_needing_records(self.person_id)

        # Hide tabs that would be empty.
        if len(review_needed) < 1:
            if 'review' in show_tabs:
                show_tabs.remove('review')

        if open_tickets is None:
            open_tickets = webapi.get_person_request_ticket(self.person_id)
        else:
            if len(open_tickets) < 1 and 'tickets' in show_tabs:
                show_tabs.remove('tickets')

        rt_tickets = None
        if "admin_requested_ticket_id" in personinfo:
            rt_tickets = personinfo["admin_requested_ticket_id"]

        if verbiage_dict is None:
            verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
        if buttons_verbiage_dict is None:
            buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)

        # send data to the template function
        tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
                                        rejected_papers=rejected_papers,
                                        rest_of_papers=rest_of_papers,
                                        review_needed=review_needed,
                                        rt_tickets=rt_tickets,
                                        open_rt_tickets=open_tickets,
                                        show_tabs=show_tabs,
                                        ticket_links=ticket_links,
                                        verbiage_dict=verbiage_dict,
                                        buttons_verbiage_dict=buttons_verbiage_dict,
                                        show_reset_button=show_reset_button)
        return tabs

    def translate_dict_values(dictionary, ln):
        # Recursively apply the translation function to every string value.
        def translate_str_values(dictionary, f=lambda x: x):
            translated_dict = dict()
            for key, value in dictionary.iteritems():
                if isinstance(value, str):
                    translated_dict[key] = f(value)
                elif isinstance(value, dict):
                    translated_dict[key] = translate_str_values(value, f)
                else:
                    raise TypeError("Value should be either string or dictionary.")
            return translated_dict
        return translate_str_values(dictionary, f=gettext_set_language(ln))

    generate_tabs = {'guest': generate_tabs_guest,
                     'user': generate_tabs_user,
                     'admin': generate_tabs_admin}

    return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
    '''
    Generates the footer for the specified user permission level.

    @param ulevel: user permission level ('guest', 'user' or 'admin')
    @type ulevel: str

    @return: footer
    @rtype: str
    '''
    # All three levels currently share the same footer widget; unknown
    # levels raise KeyError, matching the original dispatch-table behavior.
    if ulevel not in ('guest', 'user', 'admin'):
        raise KeyError(ulevel)
    return TEMPLATE.tmpl_invenio_search_box()
def _ticket_dispatch_end(self, req):
    '''
    The ticket dispatch is finished: redirect to the original page of
    origin, to the last viewed person page, or return to the caller so the
    autoclaim box can repopulate its data.

    @param req: apache request object
    @type req: apache request object
    '''
    session = get_session(req)
    pinfo = session["personinfo"]
    webapi.session_bareinit(req)

    if 'claim_in_process' in pinfo:
        pinfo['claim_in_process'] = False
    if "merge_ticket" in pinfo and pinfo['merge_ticket']:
        pinfo['merge_ticket'] = []

    user_info = collect_user_info(req)
    user_info['precached_viewclaimlink'] = True
    session.dirty = True

    # A stored referer wins: go back to where the user came from.
    if "referer" in pinfo and pinfo["referer"]:
        referer = pinfo["referer"]
        del(pinfo["referer"])
        session.dirty = True
        return redirect_to_url(req, referer)

    # If we are coming from the autoclaim box we should not redirect and
    # just return to the caller function.
    if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
        pinfo['autoclaim']['review_failed'] = False
        pinfo['autoclaim']['begin_autoclaim'] = False
        session.dirty = True
    else:
        redirect_page = webapi.history_get_last_visited_url(
            pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
        if not redirect_page:
            redirect_page = webapi.get_fallback_redirect_link(req)
        if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] and pinfo['autoclaim']['checkout']:
            redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
            pinfo['autoclaim']['checkout'] = False
            session.dirty = True
        elif not 'manage_profile' in redirect_page:
            pinfo['autoclaim']['review_failed'] = False
            # BUGFIX: was '== False' (a no-op comparison); assignment intended.
            pinfo['autoclaim']['begin_autoclaim'] = False
            pinfo['autoclaim']['checkout'] = True
            session.dirty = True
            redirect_page = '%s/author/claim/%s?open_claim=True' % (
                CFG_SITE_URL,
                webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
        else:
            pinfo['autoclaim']['review_failed'] = False
            # BUGFIX: was '== False' (a no-op comparison); assignment intended.
            pinfo['autoclaim']['begin_autoclaim'] = False
            pinfo['autoclaim']['checkout'] = True
            session.dirty = True
        return redirect_to_url(req, redirect_page)
def _check_user_fields(self, req, form):
    '''
    Validate and store the checkout form's user fields (first/last name,
    email, comments) into the session; invalid fields are recorded in
    pinfo["checkout_faulty_fields"].  System-provided values
    ("*_sys" keys in pinfo) are never overwritten.

    @param req: apache request object
    @param form: POST/GET variables of the request
    '''
    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'user_first_name': (str, None),
         'user_last_name': (str, None),
         'user_email': (str, None),
         'user_comments': (str, None)})
    session = get_session(req)
    pinfo = session["personinfo"]
    ulevel = pinfo["ulevel"]

    # Logged-in users/admins are not penalized for empty optional fields.
    skip_checkout_faulty_fields = False
    if ulevel in ['user', 'admin']:
        skip_checkout_faulty_fields = True

    if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
        if "user_first_name" in argd and argd['user_first_name']:
            # NOTE(review): this branch is unreachable — the outer 'if'
            # already requires argd['user_first_name'] to be truthy.
            if not argd["user_first_name"] and not skip_checkout_faulty_fields:
                pinfo["checkout_faulty_fields"].append("user_first_name")
            else:
                pinfo["user_first_name"] = escape(argd["user_first_name"])

    if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
        if "user_last_name" in argd and argd['user_last_name']:
            # NOTE(review): unreachable for the same reason as above.
            if not argd["user_last_name"] and not skip_checkout_faulty_fields:
                pinfo["checkout_faulty_fields"].append("user_last_name")
            else:
                pinfo["user_last_name"] = escape(argd["user_last_name"])

    if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
        if "user_email" in argd and argd['user_email']:
            if not email_valid_p(argd["user_email"]):
                pinfo["checkout_faulty_fields"].append("user_email")
            else:
                pinfo["user_email"] = escape(argd["user_email"])
            # Guests may not reuse an email already registered to an account.
            if (ulevel == "guest"
                    and emailUnique(argd["user_email"]) > 0):
                pinfo["checkout_faulty_fields"].append("user_email_taken")
        else:
            pinfo["checkout_faulty_fields"].append("user_email")

    if "user_comments" in argd:
        if argd["user_comments"]:
            pinfo["user_ticket_comments"] = escape(argd["user_comments"])
        else:
            pinfo["user_ticket_comments"] = ""

    session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review': (str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile': (str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
'rtid': (int, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'close_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'email': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'close_rt_ticket',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# one action (the most) is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
# incomplete papers (incomplete paper info or other problems) trigger action function without user's interference
# in order to fix those problems and claim papers or remove them from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system']:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id']:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def set_uid():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
def associate_profile():
'''
associates the user with user id to the person profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s'
% (CFG_SITE_URL, urllib.quote(str(redirect_pid))))
# if someone have already claimed this profile it redirects to choose_profile with an error message
else:
param = ''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + urllib.quote(argd['search_param'])
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(primary_cname))
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(str(pid))))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any papers selected. " + \
"Please go back and select which papers would you like to claim.")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))
        def claim_to_other_person():
            # open a person search so the selected papers can be assigned
            # to a different person
            if argd['selection'] is not None:
                bibrefrecs = argd['selection']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot create ticket without any papers selected. " + \
                                        "Please go back and select which papers would you like to claim.")

            return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)

        def commit_rt_ticket():
            # commit the first selected RT request ticket for the given person
            if argd['selection'] is not None:
                tid = argd['selection'][0]
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot cancel unknown ticket")

            if argd['pid'] > -1:
                pid = argd['pid']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot cancel unknown ticket")

            return self._commit_rt_ticket(req, tid, pid)
        def confirm_repeal_reset():
            '''
            Performs confirm/repeal/reset on the selected papers by delegating
            each operation to the author ticket handling interface.
            '''
            if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
                pid = argd['pid']
                cancel_search_ticket(without_return=True)
            else:
                # no usable pid: fall back to assigning to another person
                return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
                # return self._error_page(req, ln, "Fatal: cannot create ticket without a
                # person id! (crr %s)" %repr(argd))

            bibrefrecs = argd['selection']

            if argd['confirm']:
                action = 'assign'
                if pid == CREATE_NEW_PERSON:
                    # create a fresh author profile to attach the papers to
                    pid = create_new_person(getUid(req))
            elif argd['repeal']:
                action = 'reject'
            elif argd['reset']:
                action = 'reset'
            else:
                return self._error_page(req, ln, "Fatal: not existent action!")

            for bibrefrec in bibrefrecs:
                # each paper becomes one operation on the author ticket handler
                form['jsondata'] = json.dumps({'pid': str(pid),
                                               'action': action,
                                               'bibrefrec': bibrefrec,
                                               'on': 'user'})
                t = WebInterfaceAuthorTicketHandling()
                t.add_operation(req, form)

            return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))

        def close_rt_ticket():
            # mark the RT ticket resolved and detach it from the person
            BIBCATALOG_SYSTEM.ticket_set_attribute(0, argd['rtid'], 'status', 'resolved')
            remove_rtid_from_ticket(argd['rtid'], argd['pid'])
            return redirect_to_url(req, "%s/author/claim/%s#tabTickets" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(argd['pid']))))
        def delete_external_ids():
            '''
            Deletes the association between the person with pid and the
            selected external ids.
            '''
            if argd['pid'] > -1:
                pid = argd['pid']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot delete external ids from an unknown person")

            if argd['existing_ext_ids'] is not None:
                existing_ext_ids = argd['existing_ext_ids']
            else:
                return self._error_page(req, ln,
                                        "Fatal: you must select at least one external id in order to delete it")

            # record who performed the deletion for the audit trail
            userinfo = "%s||%s" % (uid, req.remote_ip)
            webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)

            return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid))))

        def none_action():
            # fallback handler when no action was supplied in the request
            return self._error_page(req, ln,
                                    "Fatal: cannot create ticket if no action selected.")
        def merge():
            '''
            Performs a merge, if allowed, on the profiles that the user chose.
            Admins merge immediately; other users only file a merge request
            that is mailed to the admins.
            '''
            if argd['primary_profile']:
                primary_cname = argd['primary_profile']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot perform a merge without a primary profile!")

            if argd['selection']:
                profiles_to_merge = argd['selection']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot perform a merge without any profiles selected!")

            webapi.session_bareinit(req)
            session = get_session(req)
            pinfo = session['personinfo']
            uid = getUid(req)

            # resolve canonical names to person ids
            primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
            pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]

            is_admin = False
            if pinfo['ulevel'] == 'admin':
                is_admin = True

            # checking if there are restrictions regarding this merge
            can_perform_merge, preventing_pid, error_message = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)

            if not can_perform_merge:
                # when redirected back to the merge profiles page display an error message
                # about the currently attempted merge
                session.dirty = True

                req.status = apache.HTTP_CONFLICT
                c_name = webapi.get_canonical_id_from_person_id(preventing_pid)
                return 'Cannot merge profile: %s Reason: %s' % (c_name,
                                                                error_message)
            if is_admin:
                webapi.merge_profiles(primary_pid, pids_to_merge)
            else:
                # non-admins can only request a merge; collect contact data
                # from the session (or the form) for the notification
                name = ''
                if 'user_last_name' in pinfo:
                    name = pinfo['user_last_name']
                if 'user_first_name' in pinfo:
                    name += pinfo['user_first_name']
                email = ''
                if 'user_email' in pinfo:
                    email = pinfo['user_email']
                elif 'email' in argd:
                    # the email was submitted in form
                    email = argd['email']
                    pinfo['form_email'] = email

                selection_str = "&selection=".join(profiles_to_merge)
                userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
                            'name': name,
                            'email': email,
                            'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str),
                            'uid': uid}
                # a message is sent to the admin with info regarding the currently attempted merge
                webapi.create_request_message(userinfo, subj=('Merge profiles request: %s' % primary_cname))

            # when redirected back to the manage profile page display a message about the merge
            pinfo['merge_info_message'] = ("success", "confirm_operation")

            pinfo['merge_profiles'] = list()
            session.dirty = True

            redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(primary_cname))
            return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
# pp = pprint.PrettyPrinter(indent=4)
# session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
if 'E-mail'in form:
if not email:
email = form['E-mail']
elif name != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed,
'uid': uid}
webapi.create_request_message(userinfo)
        def set_canonical_name():
            # set (or swap to) a custom canonical name for a person
            if argd['pid'] > -1:
                pid = argd['pid']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot set canonical name to unknown person")

            if argd['canonical_name'] is not None:
                cname = argd['canonical_name']
            else:
                return self._error_page(req, ln,
                                        "Fatal: cannot set a custom canonical name without a suggestion")

            # record who performed the change for the audit trail
            userinfo = "%s||%s" % (uid, req.remote_ip)
            if webapi.is_valid_canonical_id(cname):
                # cname already exists as a canonical id: swap the two persons' ids
                webapi.swap_person_canonical_name(pid, cname, userinfo)
            else:
                webapi.update_person_canonical_name(pid, cname, userinfo)

            return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(pid)), '#tabData'))
        # dispatch table: maps the washed 'action' request parameter to the
        # closure that handles it; several actions share one handler
        # (assign/reject -> claim, confirm/repeal/reset -> confirm_repeal_reset)
        action_functions = {'add_external_id': add_external_id,
                            'set_uid': set_uid,
                            'add_missing_external_ids': add_missing_external_ids,
                            'associate_profile': associate_profile,
                            'bibref_check_submit': bibref_check_submit,
                            'cancel': cancel,
                            'cancel_merging': cancel_merging,
                            'cancel_rt_ticket': cancel_rt_ticket,
                            'cancel_search_ticket': cancel_search_ticket,
                            'cancel_stage': cancel_stage,
                            'checkout': checkout,
                            'checkout_continue_claiming': checkout_continue_claiming,
                            'checkout_remove_transaction': checkout_remove_transaction,
                            'checkout_submit': checkout_submit,
                            'assign': claim,
                            'commit_rt_ticket': commit_rt_ticket,
                            'close_rt_ticket': close_rt_ticket,
                            'confirm': confirm_repeal_reset,
                            'delete_external_ids': delete_external_ids,
                            'merge': merge,
                            'reject': claim,
                            'repeal': confirm_repeal_reset,
                            'reset': confirm_repeal_reset,
                            'send_message': send_message,
                            'set_canonical_name': set_canonical_name,
                            'to_other_person': claim_to_other_person,
                            None: none_action}

        return action_functions[action]()
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, urllib.quote(webapi.get_person_redirect_link(str(pid)))))
    def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
        '''
        Deletes a single (action, bibref) transaction from the RT request
        ticket *tid* of person *pid*.

        @param tid: id of the RT request ticket
        @param pid: person id the ticket belongs to
        @param action: the transaction's action (e.g. assign/reject)
        @param bibref: bibref the transaction refers to
        '''
        webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
    def _commit_rt_ticket(self, req, tid, pid):
        '''
        Commit of an rt ticket: creates a real ticket and commits.

        Copies every (action, bibrefrec) operation of the RT request ticket
        *tid* onto the user's session ticket, deletes the RT ticket and
        redirects to the person's claim page.

        @param req: Apache request object
        @type req: Apache request object
        @param tid: id of the RT request ticket to commit
        @type tid: int
        @param pid: person id the ticket belongs to
        @type pid: int
        '''
        session = get_session(req)
        pinfo = session["personinfo"]
        ticket = pinfo["ticket"]
        uid = getUid(req)
        tid = int(tid)
        try:
            # the ticket may have been removed in the meantime; the lookup
            # then yields an empty list
            rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
        except IndexError:
            msg = """This ticket with the tid: %s has already been
            removed.""" % tid
            return self._error_page(req, message=msg)
        for action, bibrefrec in rt_ticket['operations']:
            operation_parts = {'pid': pid,
                               'action': action,
                               'bibrefrec': bibrefrec}
            # NOTE(review): construct_operation can return None (see the claim
            # handler, which guards against it); no guard here -- confirm
            # add_operation_to_ticket tolerates a None operation
            operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
            webapi.add_operation_to_ticket(operation_to_be_added, ticket)
        session.dirty = True
        webapi.delete_request_ticket(pid, tid)
        redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, urllib.quote(str(pid))))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
collecting the persons' data that the search function returned
@param req: Apache request object
@type req: Apache request object
@param query: the query string
@type query: string
@param shown_element_functions: contains the functions that will tell to the template which columns to show and what buttons to print
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
Function used for searching a person based on a name with which the
function is queried.
@param req: Apache Request Object
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
try:
int(cname)
except ValueError:
is_owner = False
else:
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_bootstrapped_data(json.dumps({
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body("generic", {'html': content})
parameter = None
if query:
parameter = '?search_param=%s' + query
webapi.history_log_visit(req, 'search', params=parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
    def merge_profiles(self, req, form):
        '''
        Beginning of the process that performs the merge over multiple
        person profiles.

        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request parameters
        @type form: dict
        @return: a full page formatted in HTML
        @rtype: string
        '''
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                                   'primary_profile': (str, None),
                                   'search_param': (str, ''),
                                   'selection': (list, None),
                                   'verbose': (int, 0)})
        ln = argd['ln']
        primary_cname = argd['primary_profile']
        search_param = argd['search_param']
        selection = argd['selection']
        debug = 'verbose' in argd and argd['verbose'] > 0

        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        profiles_to_merge = pinfo['merge_profiles']
        _ = gettext_set_language(ln)

        if not primary_cname:
            return page_not_authorized(req, text=_('This page is not accessible directly.'))

        no_access = self._page_access_permission_wall(req)
        if no_access:
            return no_access

        if selection is not None:
            # add newly selected profiles (with their availability flag)
            # to the merge list kept in the session
            profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]

            for profile in selection:
                if profile not in profiles_to_merge_session:
                    pid = webapi.get_person_id_from_canonical_id(profile)
                    is_available = webapi.is_profile_available(pid)
                    pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])

            session.dirty = True

        primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
        is_available = webapi.is_profile_available(primary_pid)

        # remember the primary profile on first visit
        if not session['personinfo']['merge_primary_profile']:
            session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
            session.dirty = True

        body = ''
        cname = ''
        is_owner = False
        last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
        if last_visited_pid is not None:
            cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
            is_owner = self._is_profile_owner(last_visited_pid)

        title = 'Merge Profiles'
        menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
        merge_page = WebProfilePage("merge_profile", title, no_cache=True)
        merge_page.add_profile_menu(menu)

        if debug:
            merge_page.add_debug_info(pinfo)

        # display status for any previously attempted merge
        if pinfo['merge_info_message']:
            teaser_key, message = pinfo['merge_info_message']
            body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
            pinfo['merge_info_message'] = None

            session.dirty = True

        body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)

        shown_element_functions = dict()
        shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
        shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
        shown_element_functions['pass_status'] = 'True'

        gFormEmail = ""
        if 'form_email' in pinfo:
            gFormEmail = pinfo['form_email']

        # bootstrap the client-side merge widget with the current state
        merge_page.add_bootstrapped_data(json.dumps({
            "other": ("var gMergeProfile = %s; var gMergeList = %s;" +
                      "var gUserLevel = '%s'; var gFormEmail = '%s';") %
            ([primary_cname, '1' if is_available else '0'],
             profiles_to_merge, pinfo['ulevel'], gFormEmail)
        }))

        body += self.search_box(search_param, shown_element_functions)
        body = merge_page.get_wrapped_body("generic", {'html': body})

        return page(title=title,
                    metaheaderadd=merge_page.get_head().encode('utf-8'),
                    body=body.encode('utf-8'),
                    req=req,
                    language=ln,
                    show_title_p=False)
def _perform_search(self, search_param):
'''
calls the search function on the search_param and returns the results
@param search_param: query string
@type search_param: String
@return: list of pids that the search found they match with the search query
@return: list
'''
pid_canditates_list = []
nquery = None
if search_param:
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
pid_canditates_list.append(result[0])
return pid_canditates_list
    def merge_profiles_ajax(self, req, form):
        '''
        Function used for handling Ajax requests used in order to add/remove profiles
        in/from the merging profiles list, which is saved in the session.

        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: Parameters sent via Ajax request
        @type form: dict
        @return: json data
        '''
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            print "Json not configurable"

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        # REcent papers request
        if 'jsondata' in form:
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {'resultCode': 0}

        # Handle request.
        if ajax_request:
            req_type = json_data['requestType']
            if req_type == 'addProfile':
                if 'profile' in json_data:
                    profile = json_data['profile']
                    person_id = webapi.get_person_id_from_canonical_id(profile)
                    if person_id != -1:
                        webapi.session_bareinit(req)
                        session = get_session(req)
                        profiles_to_merge = session["personinfo"]["merge_profiles"]
                        # availability is serialized as "1"/"0" for the JS client
                        profile_availability = webapi.is_profile_available(person_id)
                        if profile_availability:
                            profile_availability = "1"
                        else:
                            profile_availability = "0"
                        if profile not in [el[0] for el in profiles_to_merge]:
                            profiles_to_merge.append([profile, profile_availability])
                            session.dirty = True
                            # TODO check access rights and get profile from db
                            json_response.update({'resultCode': 1})
                            # NOTE(review): 'addedPofile' keys are misspelled but the
                            # client-side JS expects exactly these names -- do not rename
                            json_response.update({'addedPofile': profile})
                            json_response.update({'addedPofileAvailability': profile_availability})
                        else:
                            # NOTE(review): error messages of this branch and the next
                            # look swapped ("does not exist" vs "already in the list")
                            json_response.update({'result': 'Error: Profile does not exist'})
                    else:
                        json_response.update({'result': 'Error: Profile was already in the list'})
                else:
                    json_response.update({'result': 'Error: Missing profile'})
            elif req_type == 'removeProfile':
                if 'profile' in json_data:
                    profile = json_data['profile']
                    if webapi.get_person_id_from_canonical_id(profile) != -1:
                        webapi.session_bareinit(req)
                        session = get_session(req)
                        profiles_to_merge = session["personinfo"]["merge_profiles"]
                        # print (str(profiles_to_merge))
                        if profile in [el[0] for el in profiles_to_merge]:
                            for prof in list(profiles_to_merge):
                                if prof[0] == profile:
                                    profiles_to_merge.remove(prof)
                            session.dirty = True
                            # TODO check access rights and get profile from db
                            json_response.update({'resultCode': 1})
                            json_response.update({'removedProfile': profile})
                        else:
                            json_response.update({'result': 'Error: Profile was missing already from the list'})
                    else:
                        json_response.update({'result': 'Error: Profile does not exist'})
                else:
                    json_response.update({'result': 'Error: Missing profile'})
            elif req_type == 'setPrimaryProfile':
                if 'profile' in json_data:
                    profile = json_data['profile']
                    profile_id = webapi.get_person_id_from_canonical_id(profile)
                    if profile_id != -1:
                        webapi.session_bareinit(req)
                        session = get_session(req)
                        profile_availability = webapi.is_profile_available(profile_id)
                        if profile_availability:
                            profile_availability = "1"
                        else:
                            profile_availability = "0"
                        profiles_to_merge = session["personinfo"]["merge_profiles"]
                        # if the new primary was in the merge list, drop it from there
                        if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
                            for prof in list(profiles_to_merge):
                                if prof[0] == profile:
                                    profiles_to_merge.remove(prof)
                        primary_profile = session["personinfo"]["merge_primary_profile"]
                        # the previous primary goes back into the merge list
                        if primary_profile and primary_profile not in profiles_to_merge:
                            profiles_to_merge.append(primary_profile)
                        session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
                        session.dirty = True
                        json_response.update({'resultCode': 1})
                        json_response.update({'primaryProfile': profile})
                        json_response.update({'primaryPofileAvailability': profile_availability})
                    else:
                        json_response.update({'result': 'Error: Profile was already in the list'})
                else:
                    json_response.update({'result': 'Error: Missing profile'})
            else:
                json_response.update({'result': 'Error: Wrong request type'})
            return json.dumps(json_response)
    def search_box_ajax(self, req, form):
        '''
        Function used for handling Ajax requests used in the search box.

        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: Parameters sent via Ajax request
        @type form: dict
        @return: json data
        '''
        # Abort if the simplejson module isn't available
        if not CFG_JSON_AVAILABLE:
            print "Json not configurable"

        # If it is an Ajax request, extract any JSON data.
        ajax_request = False
        # REcent papers request
        if 'jsondata' in form:
            json_data = json.loads(str(form['jsondata']))
            # Deunicode all strings (Invenio doesn't have unicode
            # support).
            json_data = json_unicode_to_utf8(json_data)
            ajax_request = True
            json_response = {'resultCode': 0}

        # Handle request.
        if ajax_request:
            req_type = json_data['requestType']
            if req_type == 'getPapers':
                if 'personId' in json_data:
                    pId = json_data['personId']
                    papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
                                    key=itemgetter(0))
                    # only the first MAX_NUM_SHOW_PAPERS papers are rendered
                    papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
                    json_response.update({'result': "\n".join(papers_html)})
                    json_response.update({'totalPapers': len(papers)})
                    json_response.update({'resultCode': 1})
                    json_response.update({'pid': str(pId)})
                else:
                    json_response.update({'result': 'Error: Missing person id'})
            elif req_type == 'getNames':
                # NOTE(review): unlike getPapers, the branches below silently
                # ignore a missing 'personId' -- confirm intended
                if 'personId' in json_data:
                    pId = json_data['personId']
                    names = webapi.get_person_names_from_id(int(pId))
                    names_html = TEMPLATE.tmpl_gen_names(names)
                    json_response.update({'result': "\n".join(names_html)})
                    json_response.update({'resultCode': 1})
                    json_response.update({'pid': str(pId)})
            elif req_type == 'getIDs':
                if 'personId' in json_data:
                    pId = json_data['personId']
                    ids = webapi.get_external_ids_from_person_id(int(pId))
                    ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
                    json_response.update({'result': "\n".join(ids_html)})
                    json_response.update({'resultCode': 1})
                    json_response.update({'pid': str(pId)})
            elif req_type == 'isProfileClaimed':
                if 'personId' in json_data:
                    pId = json_data['personId']
                    # get_uid_from_personid returns -1 for unclaimed profiles
                    isClaimed = webapi.get_uid_from_personid(pId)
                    if isClaimed != -1:
                        json_response.update({'resultCode': 1})
                        json_response.update({'pid': str(pId)})
            else:
                json_response.update({'result': 'Error: Wrong request type'})
            return json.dumps(json_response)
    def choose_profile(self, req, form):
        '''
        Generate SSO landing/choose_profile page.

        @param req: Apache request object
        @type req: Apache request object
        @param form: GET/POST request params
        @type form: dict
        '''
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                                   'search_param': (str, None),
                                   'failed': (str, None),
                                   'verbose': (int, 0)})
        ln = argd['ln']
        debug = "verbose" in argd and argd["verbose"] > 0
        req.argd = argd  # needed for perform_req_search

        search_param = argd['search_param']
        webapi.session_bareinit(req)
        session = get_session(req)
        uid = getUid(req)
        pinfo = session['personinfo']

        failed = True
        if not argd['failed']:
            failed = False

        _ = gettext_set_language(ln)

        # this page only exists on INSPIRE installations
        if not CFG_INSPIRE_SITE:
            return page_not_authorized(req, text=_("This page is not accessible directly."))

        params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
        login_info = webapi.get_login_info(uid, params)

        # the page is reachable only right after an arXiv SSO login
        if 'arXiv' not in login_info['logged_in_to_remote_systems']:
            return page_not_authorized(req, text=_("This page is not accessible directly."))

        pid = webapi.get_user_pid(login_info['uid'])

        # Create Wrapper Page Markup
        is_owner = False
        menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
        choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
        choose_page.add_profile_menu(menu)

        if debug:
            choose_page.add_debug_info(pinfo)

        content = TEMPLATE.tmpl_choose_profile(failed)
        body = choose_page.get_wrapped_body("generic", {'html': content})

        # In any case, when we step by here, an autoclaim should be performed right after!
        pinfo = session["personinfo"]
        pinfo['should_check_to_autoclaim'] = True
        session.dirty = True

        last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])

        # if already logged in then redirect the user to the page he was viewing
        if pid != -1:
            redirect_pid = pid
            if last_visited_pid:
                redirect_pid = last_visited_pid
            redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, urllib.quote(str(redirect_pid))))
        else:
            # get name strings and email addresses from SSO/Oauth logins:
            # {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla',
            # 'pants_size':20}}
            remote_login_systems_info = webapi.get_remote_login_systems_info(
                req, login_info['logged_in_to_remote_systems'])
            # get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
            recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
            # this is the profile with the biggest intersection of papers so it's
            # more probable that this is the profile the user seeks
            probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)

            # if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
            #     try to assign the user to the profile he chose. If for some reason
            #     the profile is not available we assign him to an empty profile
            #     redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
            #     if profile_claimed:
            #         redirect_to_url(req,
            #                         '%s/author/claim/action?associate_profile=True&redirect_pid=%s' %
            #                         (CFG_SITE_URL, str(redirect_pid)))
            probable_profile_suggestion_info = None
            last_viewed_profile_suggestion_info = None

            if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
                # get information about the last viewed profile and show it to the user
                last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)

            if probable_pid > -1 and webapi.is_profile_available(probable_pid):
                # get information about the most probable profile and show it to the user
                probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids)

            if not search_param:
                # we prefill the search with the most relevant among the names
                # that we get from the external systems
                name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
                search_param = most_relevant_name(name_variants)

            body = body + TEMPLATE.tmpl_probable_profile_suggestion(
                probable_profile_suggestion_info,
                last_viewed_profile_suggestion_info,
                search_param)
            free_id = get_free_author_id()

            shown_element_functions = dict()
            shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
            shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator(free_id)
            shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
            # show in the templates the column status (if profile is bound to a user or not)
            shown_element_functions['show_status'] = True
            # pass to the templates the data of the status column (if profile is
            # bound to a user or not); we might need the data without showing the
            # column (cf. merge_profiles)
            shown_element_functions['pass_status'] = True
            # show search results to the user
            body = body + self.search_box(search_param, shown_element_functions)
            body = body + TEMPLATE.tmpl_choose_profile_footer()

        title = _(' ')
        return page(title=title,
                    metaheaderadd=choose_page.get_head().encode('utf-8'),
                    body=body,
                    req=req,
                    language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
    '''
    Process and collect data for the arXiv box.

    @param req: Apache request object
    @type req: Apache request object
    @param login_info: status of login in the following format:
        {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems': ['Arxiv', ...]}
    @type login_info: dict
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int

    @return: data required to built the arXiv box
    @rtype: dict
    '''
    # The session lookup of the original implementation was dead code
    # (its result was never read), so it has been dropped; callers run
    # webapi.session_bareinit(req) before reaching this box.
    arxiv_data = dict()
    # if the user is not a guest and he is connected through arXiv
    arxiv_data['login'] = login_info['logged_in']
    arxiv_data['user_pid'] = user_pid
    arxiv_data['user_has_pid'] = user_pid != -1
    # True when the profile page being viewed belongs to the logged-in user.
    # (The original assigned this twice with the same expression.)
    arxiv_data['view_own_profile'] = user_pid == person_id

    return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
    '''
    Process and collect data for the ORCID box.

    @param arxiv_logged_in: shows if the user is logged in through arXiv or not
    @type arxiv_logged_in: boolean
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string

    @return: data required to built the orcid box
    @rtype: dict
    '''
    orcid_data = {'arxiv_login': arxiv_logged_in,
                  'orcids': None,
                  'add_power': False,
                  'pid': person_id,
                  # Works are pushed only while no ORCID token is stored yet.
                  'push': not get_token(person_id),
                  # True when the user views his own profile page.
                  'own_profile': person_id == user_pid}
    # Admins may attach an existing ORCID to the profile.
    if ulevel == "admin":
        orcid_data['add_power'] = True

    known_orcids = webapi.get_orcids_by_pid(person_id)
    if known_orcids:
        orcid_data['orcids'] = known_orcids

    return orcid_data
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
    '''
    Process and collect data for the autoclaim box.

    @param req: Apache request object
    @type req: Apache request object
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param remote_logged_in_systems: the remote logged in systems
    @type remote_logged_in_systems: list

    @return: data required to built the autoclaim box
    @rtype: dict
    '''
    # Hidden by default: the box is only shown on the user's own profile.
    autoclaim_data = {'hidden': True, 'person_id': person_id}

    if person_id == user_pid:
        candidate_recids = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
        autoclaim_data['hidden'] = False
        autoclaim_data['num_of_claims'] = len(candidate_recids)

    return autoclaim_data
@staticmethod
def get_params_to_check_login_info(session):
    '''
    Collect, per remote system, the parameters that are needed to check
    the login status of the current user.

    @param session: the current session
    @type session: session object
    @return: mapping of remote system name to its login-check parameters
    @rtype: dict
    '''
    def get_params_to_check_login_info_of_arxiv(session):
        # 'user_info' is present only once the user has logged in.
        try:
            return session['user_info']
        except KeyError:
            return None

    def get_params_to_check_login_info_of_orcid(session):
        pinfo = session['personinfo']
        # The author is considered to have a usable ORCID id only when one
        # is stored for the profile AND an import of pubs was requested.
        try:
            pinfo['orcid']['has_orcid_id'] = bool(
                get_orcid_id_of_author(pinfo['pid'])[0][0] and pinfo['orcid']['import_pubs'])
        except Exception:
            # Missing/empty ORCID record: fall back to "no ORCID id".
            # (Was a bare ``except:``, which would also have swallowed
            # KeyboardInterrupt and SystemExit.)
            pinfo['orcid']['has_orcid_id'] = False
        session.dirty = True
        return pinfo['orcid']

    get_params_for_remote_system = {'arXiv': get_params_to_check_login_info_of_arxiv,
                                    'orcid': get_params_to_check_login_info_of_orcid}

    params = dict()
    for system, get_params in get_params_for_remote_system.iteritems():
        params[system] = get_params(session)

    return params
@staticmethod
def _claim_paper_box(person_id):
    '''
    Process and collect data for the claim paper box.

    @param person_id: person id of the current page's profile
    @type person_id: int

    @return: data required to built the claim paper box
    @rtype: dict
    '''
    canonical = webapi.get_canonical_id_from_person_id(person_id)
    return {'canonical_id': str(canonical)}
@staticmethod
def _support_box():
'''
Proccess and collect data for support box
@return: data required to built the support box
@rtype: dict
'''
support_data = dict()
return support_data
@staticmethod
def _merge_box(person_id):
    '''
    Process and collect data for the merge box.

    @param person_id: person id of the current page's profile
    @type person_id: int

    @return: data required to built the merge box
    @rtype: dict
    '''
    canonical_id = webapi.get_canonical_id_from_person_id(person_id)
    # Prefill the merge search with the author's most relevant surname
    # when one is known, otherwise fall back to the canonical id.
    variants = [row[0] for row in webapi.get_person_names_from_id(person_id)]
    best_name = most_relevant_name(variants)
    search_param = canonical_id
    if best_name:
        search_param = best_name.split(",")[0]

    return {'search_param': search_param,
            'canonical_id': canonical_id}
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
    '''
    Process and collect data for the internal ids box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string

    @return: data required to built the internal ids box
    @rtype: dict
    '''
    # NOTE(review): the local is misnamed (it carries *internal* ids data);
    # harmless, as it is only the returned dict.
    external_ids_data = dict()
    # uid: internal user id currently bound to this profile;
    # old_uids: internal user ids that were bound to it in the past.
    external_ids_data['uid'], external_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
    external_ids_data['person_id'] = person_id
    external_ids_data['user_pid'] = user_pid
    external_ids_data['ulevel'] = ulevel

    return external_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
    '''
    Process and collect data for the external ids box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string

    @return: data required to built the external ids box
    @rtype: dict
    '''
    # Local renamed from the misleading 'internal_ids_data' of the
    # original implementation; the keys and values are unchanged.
    external_ids_data = dict()
    external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
    external_ids_data['person_id'] = person_id
    external_ids_data['user_pid'] = user_pid
    external_ids_data['ulevel'] = ulevel

    return external_ids_data
@staticmethod
def _hepnames_box(person_id):
    '''
    Collect the HepNames data for the given profile.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @return: HepNames data (callers treat None as "not available")
    @rtype: dict or None
    '''
    return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
    '''
    Generate the open RT tickets overview page (admin only).

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    webapi.session_bareinit(req)
    # Admin-only page: bail out early when the wall returns a page.
    no_access = self._page_access_permission_wall(req, req_level='admin')
    if no_access:
        return no_access

    session = get_session(req)
    pinfo = session['personinfo']
    cname = ''
    is_owner = False
    last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_visited_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
        is_owner = self._is_profile_owner(last_visited_pid)

    menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
    title = "Open RT tickets"
    profile_page = WebProfilePage("help", title, no_cache=True)
    profile_page.add_profile_menu(menu)

    # Present each ticket as [author name, redirect link, pid, ticket].
    # (Replaces the previous quadratic remove()/append() rewrite loop
    # with an equivalent, order-preserving comprehension.)
    tickets = [[clean_string(webapi.get_most_frequent_name_from_pid(int(t[0]))),
                webapi.get_person_redirect_link(t[0]), t[0], t[1]]
               for t in webapi.get_persons_with_open_tickets_list()]

    content = TEMPLATE.tmpl_tickets_admin(tickets)
    content = TEMPLATE.tmpl_person_detail_layout(content)

    body = profile_page.get_wrapped_body("generic", {'html': content})

    return page(title=title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def help(self, req, form):
    '''
    Render the Help Center page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    _ = gettext_set_language(ln)
    if not CFG_BIBAUTHORID_ENABLED:
        return page_not_authorized(req, text=_("This page is not accessible directly."))

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']

    # Resolve the profile visited last, mirroring the other pages.
    # (The values are currently not used further by this handler.)
    cname = ''
    is_owner = False
    last_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_pid)
        is_owner = self._is_profile_owner(last_pid)

    title = "Help Center"
    profile_page = WebProfilePage("help", title, no_cache=True)
    body = profile_page.get_wrapped_body("help", {'base_url': CFG_BASE_URL})

    return page(title=title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def export(self, req, form):
    '''
    Generate JSONized export of Person data.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'request': (str, None),
         'userid': (str, None)})

    if not CFG_JSON_AVAILABLE:
        return "500_json_not_found__install_package"

    # session = get_session(req)
    request = None
    userid = None

    if "userid" in argd and argd['userid']:
        userid = argd['userid']
    else:
        return "404_user_not_found"

    if "request" in argd and argd['request']:
        request = argd["request"]

    # find user from ID
    user_email = get_email_from_username(userid)
    # get_email_from_username echoes its argument when no account matches,
    # so equality means the user does not exist — TODO confirm.
    if user_email == userid:
        return "404_user_not_found"

    uid = get_uid_from_email(user_email)
    uinfo = collect_user_info(uid)
    # find person by uid
    pid = webapi.get_pid_from_uid(uid)
    # find papers by pid that are confirmed through a human.
    papers = webapi.get_papers_by_person_id(pid, 2)
    # filter by request param, e.g. arxiv
    if not request:
        return "404__no_filter_selected"

    # idiomatic membership test (was: "not request in ...")
    if request not in VALID_EXPORT_FILTERS:
        return "500_filter_invalid"

    if request == "arxiv":
        query = "(recid:"
        query += " OR recid:".join(papers)
        query += ") AND 037:arxiv"
        db_docs = perform_request_search(p=query, rg=0)
        nickmail = ""
        nickname = ""
        db_arxiv_ids = []

        try:
            nickname = uinfo["nickname"]
        except KeyError:
            pass

        if not nickname:
            try:
                nickmail = uinfo["email"]
            except KeyError:
                nickmail = user_email
            nickname = nickmail

        db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
        construct = {"nickname": nickname,
                     "claims": ";".join(db_arxiv_ids)}
        jsondmp = json.dumps(construct)
        # The signed digest lets arXiv verify the claims payload.
        signature = webapi.sign_assertion("arXiv", jsondmp)
        construct["digest"] = signature

        return json.dumps(construct)
index = __call__  # expose __call__ as the directory's default ("index") handler
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
    '''
    Web interface for the author "manage profile" page and its helper
    endpoints (ORCID import/push, HepNames association, ORCID suggestion).
    '''
    _exports = ['',
                'import_orcid_pubs',
                'push_orcid_pubs',
                'connect_author_with_hepname',
                'connect_author_with_hepname_ajax',
                'suggest_orcid',
                'suggest_orcid_ajax']
def _lookup(self, component, path):
    '''
    This handler parses dynamic URLs:
    - /author/profile/1332 shows the page of author with id: 1332
    - /author/profile/100:5522,1431 shows the page of the author
      identified by the bibrefrec: '100:5522,1431'
    '''
    # Unknown components are treated as author identifiers; exported
    # components fall through (implicit None) to their own handlers.
    if not component in self._exports:
        return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
    # True when the viewed profile belongs to the requesting user.
    return self.person_id == int(pid)
def _is_admin(self, pinfo):
    # True when the session user has the 'admin' user level.
    return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
    '''
    Constructor of the web interface.

    @param identifier: identifier of an author. Can be one of:
        - an author id: e.g. "14"
        - a canonical id: e.g. "J.R.Ellis.1"
        - a bibrefrec: e.g. "100:1442,155"
    @type identifier: str
    '''
    self.person_id = -1   # -1 is a non valid author identifier

    if identifier is None or not isinstance(identifier, str):
        self.original_identifier = str()
        return
    else:
        self.original_identifier = identifier

    # check if it's a canonical id: e.g. "J.R.Ellis.1"
    try:
        pid = int(identifier)
    except ValueError:
        pid = int(webapi.get_person_id_from_canonical_id(identifier))
    if pid >= 0:
        self.person_id = pid
        return

    # check if it's an author id: e.g. "14"
    # NOTE(review): a plain non-negative numeric identifier already
    # returned above, so this branch only ever sees negative numbers
    # and is effectively a safety net.
    try:
        pid = int(identifier)
        if webapi.author_has_papers(pid):
            self.person_id = pid
            return
    except ValueError:
        pass

    # check if it's a bibrefrec: e.g. "100:1442,155"
    if webapi.is_valid_bibref(identifier):
        pid = int(webapi.get_person_id_from_paper(identifier))
        if pid >= 0:
            self.person_id = pid
            return
def _get_orcid_token(self, session, pinfo):
if 'oauth2_access_token' not in session:
return None
token = session['oauth2_access_token']
if token != '':
return token
return None
def __call__(self, req, form):
    '''
    Generate SSO landing/author management page

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']
    person_id = self.person_id
    uid = getUid(req)
    pinfo['claim_in_process'] = True

    argd = wash_urlargd(form, {
        'ln': (str, CFG_SITE_LANG),
        'verbose': (int, 0)})

    debug = "verbose" in argd and argd["verbose"] > 0
    ln = argd['ln']
    _ = gettext_set_language(ln)

    if not CFG_BIBAUTHORID_ENABLED or self.person_id is None:
        return page_not_authorized(req, text=_("This page is not accessible directly."))

    if person_id < 0:
        return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))

    # log the visit
    webapi.history_log_visit(req, 'manage_profile', pid=person_id)

    # store the arxiv papers the user owns
    if uid > 0 and not pinfo['arxiv_status']:
        uinfo = collect_user_info(req)
        arxiv_papers = list()

        if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
            arxiv_papers = uinfo['external_arxivids'].split(';')

        if arxiv_papers:
            webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)

        # remember that the arXiv papers were already processed
        pinfo['arxiv_status'] = True

    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_info = webapi.get_login_info(uid, params)

    # Create Wrapper Page Markup
    cname = webapi.get_canonical_id_from_person_id(self.person_id)
    long_name = webapi.get_longest_name_from_pid(self.person_id)

    # TODO: Replace dash with &mdash;
    page_title = "%s - %s" % (long_name, _('Manage Profile'))
    menu = WebProfileMenu(
        str(cname),
        "manage_profile",
        ln,
        self._is_profile_owner(pinfo['pid']),
        self._is_admin(pinfo))

    profile_page = WebProfilePage("manage_profile", long_name, no_cache=True)
    profile_page.add_profile_menu(menu)

    # Bootstrap the client-side ticketbox app with the user's operations.
    profile_page.add_bootstrapped_data(json.dumps({
        "backbone": """
        (function(ticketbox) {
        var app = ticketbox.app;
        app.userops.set(%s);
        app.bodyModel.set({userLevel: "%s"});
        })(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
    }))

    if debug:
        profile_page.add_debug_info(pinfo)

    user_pid = webapi.get_user_pid(login_info['uid'])
    # NOTE(review): person_data is currently unused below.
    person_data = webapi.get_person_info_by_pid(person_id)

    # Collect the data for the boxes shown on the page.
    arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)

    orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
    orcid_data['token'] = self._get_orcid_token(session, pinfo)

    claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)

    support_data = WebInterfaceBibAuthorIDClaimPages._support_box()

    ids_box_html = None
    if ulevel == 'admin':
        # Only admins get the internal/external ids box.
        ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
        int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
        ids_box_html = TEMPLATE.tmpl_ext_ids_box(
            person_id,
            int_ids_data,
            ext_ids_data,
            ln,
            add_box=False,
            loading=False)

    autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(
        req, person_id, user_pid, login_info['logged_in_to_remote_systems'])

    merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)

    hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)

    content = ''
    # display status for any previously attempted merge
    # NOTE(review): 'content' is built here but not rendered any more
    # (see the commented-out "generic" wrapping near the end).
    if pinfo['merge_info_message']:
        teaser_key, message = pinfo['merge_info_message']
        content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
        pinfo['merge_info_message'] = None
        session.dirty = True

    modal = ''
    if 'orcid_info' in session:
        orcid_info = session['orcid_info']['status']
    else:
        orcid_info = ''

    if CFG_INSPIRE_SITE:
        html_arxiv = TEMPLATE.tmpl_arxiv_box(arxiv_data, ln, add_box=False, loading=False)
        html_orcid, modal = TEMPLATE.tmpl_orcid_box(orcid_data, ln, orcid_info, add_box=False, loading=False)
        if hepnames_data is not None:
            hepnames_data.update({
                'cname': webapi.get_canonical_id_from_person_id(person_id),
                'link_to_record': ulevel == "admin",
                'hepnames_link': "%s/%s/" % (CFG_BASE_URL, "record"),
                'new_record_link': 'https://labs.inspirehep.net/author/new',
                'update_link': "http://labs.inspirehep.net/author/update?recid=",
                'profile_link': "%s/%s" % (CFG_BASE_URL, "author/profile/")
            })
            html_hepnames = WebProfilePage.render_template('personal_details_box', hepnames_data)
        else:
            html_hepnames = "Loading.."

        html_support = TEMPLATE.tmpl_support_box(support_data, ln, add_box=False, loading=False)

    if autoclaim_data['hidden']:
        # Box hidden: nothing to autoclaim (not the user's own profile).
        autoclaim_successful_recs = None
        autoclaim_unsuccessful_recs = None
    else:
        if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
            # Reuse the cached autoclaim result from a previous run.
            autoclaim_data = pinfo['autoclaim']['res']
            autoclaim_successful_recs = autoclaim_data['successful_recids']
            autoclaim_unsuccessful_recs = autoclaim_data['unsuccessful_recids']
        else:
            # Run the autoclaim: gather papers from the remote systems and
            # try to assign each of them to this author via a ticket.
            login_status = webapi.get_login_info(uid, params)
            autoclaim_ticket = pinfo['autoclaim']['ticket']
            external_pubs_association = pinfo['autoclaim']['external_pubs_association']
            remote_systems = login_status['logged_in_to_remote_systems']
            papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems,
                                                                            params,
                                                                            external_pubs_association))
            for paper in papers_to_autoclaim:
                operation_parts = {'pid': person_id,
                                   'action': 'assign',
                                   'bibrefrec': str(paper)}

                operation_to_be_added = webapi.construct_operation(operation_parts,
                                                                   pinfo,
                                                                   uid)

                if operation_to_be_added is None:
                    # In case the operation could not be created (because of an
                    # erroneous bibrefrec) ignore it and continue with the rest
                    continue

                webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)

            additional_info = {'first_name': '', 'last_name': '', 'email': '',
                               'comments': 'Assigned automatically when autoclaim was triggered.'}
            userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
            if 'email' in session:
                userinfo['email'] = session['email']
            elif 'email' not in userinfo:
                userinfo['email'] = None
            webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)

            # Papers already claimed by the author count as successes too.
            already_claimed_recids = set(
                [rec for _, _, rec in get_claimed_papers_of_author(person_id)]) & papers_to_autoclaim

            successful_recids = set([op['rec'] for op in webapi.get_ticket_status(
                autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
            webapi.clean_ticket(autoclaim_ticket)

            unsuccessful_recids = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]

            # Map each record id to the title shown in the result lists.
            autoclaim_data['recids_to_external_ids'] = dict()
            for key, value in external_pubs_association.iteritems():
                ext_system, ext_id = key
                rec = value
                title = get_title_of_paper(rec)
                autoclaim_data['recids_to_external_ids'][rec] = title

            autoclaim_successful_recs = [(
                autoclaim_data['recids_to_external_ids'][recid],
                get_inspire_record_url(recid),
                recid) for recid in successful_recids]
            autoclaim_unsuccessful_recs = [(
                autoclaim_data['recids_to_external_ids'][recid],
                get_inspire_record_url(recid),
                recid) for recid in unsuccessful_recids]

            # cache the result in the session
            autoclaim_data['successful_recids'] = autoclaim_successful_recs
            autoclaim_data['unsuccessful_recids'] = autoclaim_unsuccessful_recs
            pinfo['autoclaim']['res'] = autoclaim_data
            if pinfo['orcid']['import_pubs']:
                pinfo['orcid']['import_pubs'] = False
            session.dirty = True

    template_parameters = {
        "autoclaim_successful_recids": autoclaim_successful_recs,
        "autoclaim_unsuccessful_recids": autoclaim_unsuccessful_recs,
        "review_autoclaim_link": "%s/author/ticket/review_autoclaim" % CFG_SITE_URL,
        "merge": TEMPLATE.tmpl_merge_box(merge_data, ln, add_box=False, loading=False),
        "external_ids_box_html": ids_box_html,
        "user_level": ulevel,
        "base_url": CFG_BASE_URL,
        "inspire" : CFG_INSPIRE_SITE,
        "orcid_message" : self._generate_orcid_message(req, ln)
    }

    # The ORCID status message is shown once, then dropped.
    if 'orcid_info' in session:
        session.pop('orcid_info', None)
        session.dirty = True

    # Inspire specific endpoints.
    if CFG_INSPIRE_SITE:
        template_parameters["hepnames"] = html_hepnames
        template_parameters["arxiv"] = html_arxiv
        template_parameters["orcid"] = html_orcid
        template_parameters["contact"] = html_support
        template_parameters["modal"] = modal

    body = profile_page.get_wrapped_body("manage_profile", template_parameters)
    # body = profile_page.get_wrapped_body("generic", {'html': content})

    return page(title=page_title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def _generate_orcid_message(self, req, ln):
    '''
    Generate the box which informs the user about running ORCID push.

    @param req: Apache request object
    @type req: Apache request object
    @param ln: language
    @type ln: string
    '''
    session = get_session(req)
    status = None
    if 'orcid_info' in session:
        status = session['orcid_info']['status']
    # No stored status (or an empty one) means nothing to report.
    if not status:
        return ''
    return TEMPLATE.tmpl_orcid_message(status, ln)
def import_orcid_pubs(self, req, form):
    '''
    Fetch the DOIs stored in the author's ORCID record, flag them for
    import and redirect back to the profile management page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    orcid_info = pinfo['orcid']

    orcid_id, orcid_dois = get_dois_from_orcid_using_pid(pinfo['pid'])

    # TODO: what to do in case some ORCID server error occurs?
    if orcid_id is None or orcid_dois is None:
        # redirect_to_url presumably ends the request here, so no explicit
        # return is needed — TODO confirm against the web framework.
        redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, urllib.quote(str(pinfo['pid']))))

    # TODO: it would be smarter if:
    # 1. we save in the db the orcid_dois
    # 2. to expire only the external pubs box in the profile page
    webauthorapi.expire_all_cache_for_personid(pinfo['pid'])

    orcid_info['imported_pubs'] = orcid_dois
    orcid_info['import_pubs'] = True
    session.dirty = True

    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, urllib.quote(str(pinfo['pid']))))
def _get_identifier_from_path(self, path):
'''Return identifier from path to manage_profile page.
Example: localhost:4000/author/manage_profile/273672/wowow -> 273672
'''
tokens = path.split('/')
return tokens[tokens.index('manage_profile') + 1]
def push_orcid_pubs(self, req, form):
    '''Push all claimed papers to ORCID database.

    Doesn't push papers which were there earlier. Needs user authentication.

    When a user requests a push, this method will be run twice. Firstly,
    user should authenticate himself. Then, in the second run, after
    receiving the token from ORCID, the push is done.
    '''
    webapi.session_bareinit(req)
    session = get_session(req)

    if 'orcid_pid' not in session:
        # I can't assume that pid will be available in session
        identifier = self._get_identifier_from_path(req.referer)
        try:
            # Resolve a canonical name to its author id.
            session['orcid_pid'] = get_author_by_canonical_name(identifier)[0][0]
        # NOTE(review): bare except — also swallows KeyboardInterrupt;
        # an IndexError (unknown name) is the expected failure here.
        except:
            session['orcid_pid'] = identifier
        session.dirty = True

    if 'oauth2_access_token' not in session:
        session['oauth2_access_token'] = ''
    if session['oauth2_access_token'] == '':
        # Authenticate: first run of the two-pass flow; the user returns
        # here after the OAuth2 round trip.
        session['pushorcid'] = True
        session.dirty = True
        redirect_to_url(req, "%s/youraccount/oauth2?provider=%s&scope=/orcid-works/update+/orcid-works/create" % (CFG_SITE_SECURE_URL, 'orcid'))

    # We expect user to have only one ORCID
    # NOTE(review): assert is stripped under ``python -O``; an explicit
    # check would be more robust for validation.
    assert(len(webapi.get_orcids_by_pid(session['orcid_pid'])) == 1)
    if session['oauth2_orcid'] != webapi.get_orcids_by_pid(session['orcid_pid'])[0]:
        # User has authenticated, but he is using different account
        session['oauth2_access_token'] = ''
        session['orcid_info'] = {'status': 'wrong_account'}
        person_id = session.pop('orcid_pid')
        session.dirty = True
        redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, urllib.quote(str(person_id))))

    set_token(session['orcid_pid'], session['oauth2_access_token'])
    session['orcid_info'] = {'status': 'finished'}
    # Token may expire. It is better to get rid of it.
    session['oauth2_access_token'] = ''
    person_id = session.pop('orcid_pid')
    session.dirty = True
    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, urllib.quote(str(person_id))))
def connect_author_with_hepname(self, req, form):
    '''
    Associate an author (canonical name) with a HepNames record, then
    send the user back to the page he came from.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'cname': (str, None),
                               'hepname': (str, None),
                               'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    # Guard clauses: both identifiers are mandatory.
    cname = argd['cname']
    if cname is None:
        return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
    hepname = argd['hepname']
    if hepname is None:
        return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")

    webapi.session_bareinit(req)
    session = get_session(req)
    webapi.connect_author_with_hepname(cname, hepname, session['uid'])

    pinfo = session['personinfo']
    last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
    redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, urllib.quote(cname)))
def connect_author_with_hepname_ajax(self, req, form):
    '''
    Function used for handling Ajax requests.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        cname = json_data['cname']
        hepname = json_data['hepname']
    except (KeyError, TypeError):
        # Was a bare ``except:``; a missing key or a non-dict payload are
        # the failures expected here.
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    # Regular users go through the ticketing/email flow; admins attach
    # the canonical name to the HepNames record directly.
    if not self._is_admin(pinfo):
        if 'email' in json_data:
            pinfo['form_email'] = json_data['email']
            webapi.connect_author_with_hepname(cname, hepname,
                                               session['uid'],
                                               email=json_data['email'])
        else:
            webapi.connect_author_with_hepname(cname, hepname,
                                               session['uid'])
    else:
        uid = getUid(req)
        add_cname_to_hepname_record({cname: hepname}, uid)
def suggest_orcid(self, req, form):
    '''
    Associate a validated ORCID with an author profile and redirect to
    the profile management page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'orcid': (str, None),
                               'pid': (int, -1),
                               'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    # Guard clauses: a valid pid and a well-formed ORCID are mandatory.
    pid = argd['pid']
    if pid <= -1:
        return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
    orcid = argd['orcid']
    if orcid is None or not is_valid_orcid(orcid):
        return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCID.")

    session = get_session(req)
    webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid, session['uid'])

    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, urllib.quote(str(pid))))
def suggest_orcid_ajax(self, req, form):
    '''
    Function used for handling Ajax requests.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        orcid = json_data['orcid']
        pid = json_data['pid']
    except (KeyError, TypeError):
        # Was a bare ``except:``; a missing key or a non-dict payload are
        # the failures expected here.
        return self._fail(req, apache.HTTP_NOT_FOUND)

    if not is_valid_orcid(orcid):
        return self._fail(req, apache.HTTP_NOT_FOUND)

    session = get_session(req)
    webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid, session['uid'])
def _fail(self, req, code):
    # Set the HTTP status on the response and end the handler with no body.
    req.status = code
    return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
    '''
    Create a page that contains a message explaining the error.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param ln: language
    @type ln: string
    @param message: message to be displayed
    @type message: string
    '''
    _ = gettext_set_language(ln)
    if not message:
        message = "No further explanation available. Sorry."

    parts = []
    if intro:
        parts.append(_("<p>We're sorry. An error occurred while "
                       "handling your request. Please find more information "
                       "below:</p>"))
    parts.append("<p><strong>%s</strong></p>" % message)

    return page(title=_("Notice"),
                body="\n".join(parts),
                description="%s - Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
                keywords="%s, Internal Error" % BIBAUTHORID_CFG_SITE_NAME,
                language=ln,
                req=req)
index = __call__  # expose __call__ as the directory's default ("index") handler
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
    '''
    Ajax endpoints manipulating a claim ticket: the list of pending
    assign/reject operations on author-paper associations.
    '''
    _exports = ['get_status',
                'update_status',
                'add_operation',
                'modify_operation',
                'remove_operation',
                'commit',
                'abort',
                'review_autoclaim'
                ]
@staticmethod
def bootstrap_status(pinfo, on_ticket):
    '''
    Function used for generating get_status json bootstrapping.

    @param pinfo: person_info
    @type pinfo: dict
    @param on_ticket: ticket target
    @type on_ticket: str

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"

    handler = WebInterfaceAuthorTicketHandling()
    ticket = handler._get_according_ticket(on_ticket, pinfo)
    # No matching ticket: bootstrap with an empty JSON object.
    if ticket is None:
        return "{}"

    return json.dumps(webapi.get_ticket_status(ticket))
def get_status(self, req, form):
    '''
    Function used for handling Ajax requests.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        on_ticket = json_data['on']
    except (KeyError, TypeError):
        # Was a bare ``except:``; a missing 'on' key or a non-dict payload
        # are the failures expected here.
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    ticket_status = webapi.get_ticket_status(ticket)

    session.dirty = True

    req.content_type = 'application/json'
    req.write(json.dumps(ticket_status))
def update_status(self, req, form):
    '''
    Function used for handling Ajax requests.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        on_ticket = json_data['on']
    except (KeyError, TypeError):
        # Was a bare ``except:``; a missing 'on' key or a non-dict payload
        # are the failures expected here.
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.update_ticket_status(ticket)

    session.dirty = True
def add_operation(self, req, form):
    '''
    Function used for handling Ajax requests.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict

    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        operation_parts = {'pid': int(json_data['pid']),
                           'action': json_data['action'],
                           'bibrefrec': json_data['bibrefrec']}
        on_ticket = json_data['on']
    except (KeyError, TypeError, ValueError):
        # Was a bare ``except:``; missing keys, a non-dict payload or a
        # non-numeric pid are the failures expected here.
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    uid = getUid(req)

    operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
    if operation_to_be_added is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.add_operation_to_ticket(operation_to_be_added, ticket)

    session.dirty = True
def review_autoclaim(self, req, form):
    '''
    Move every pending autoclaim operation onto the user's own ticket and
    redirect to the profile-management page so they can be reviewed.
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    uid = getUid(req)  # NOTE(review): uid is currently unused here

    try:
        autoclaim = pinfo['autoclaim']['ticket']
    except KeyError:
        # no autoclaim ticket yet: nothing to transfer
        autoclaim = list()

    ticket = self._get_according_ticket('user', pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    for item in autoclaim:
        webapi.add_operation_to_ticket(item, ticket)

    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_BASE_URL, urllib.quote(str(pinfo['pid']))))
def modify_operation(self, req, form):
    '''
    Ajax handler: rebuild an operation from the request parameters and use
    it to overwrite the matching operation already stored in a ticket.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request; 'jsondata' must carry
        'pid', 'action', 'bibrefrec' and 'on' (ticket selector)
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)

    try:
        operation_parts = {'pid': int(json_data['pid']),
                           'action': json_data['action'],
                           'bibrefrec': json_data['bibrefrec']}
        on_ticket = json_data['on']
    except:
        # any missing/malformed key is treated as a client error
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    uid = getUid(req)

    # the operation may legitimately lack a bibref at this stage,
    # hence the relaxed should_have_bibref=False check
    operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
    if operation_to_be_modified is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
    if not operation_is_modified:
        # Operation couldn't be modified because it doesn't exist in the
        # ticket. Wrong parameters were given hence we should fail!
        return self._fail(req, apache.HTTP_NOT_FOUND)

    session.dirty = True
def remove_operation(self, req, form):
    '''
    Ajax handler: rebuild an operation from the request parameters and
    remove the matching operation from a ticket.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request; 'jsondata' must carry
        'pid', 'action', 'bibrefrec' and 'on' (ticket selector)
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)

    try:
        operation_parts = {'pid': int(json_data['pid']),
                           'action': json_data['action'],
                           'bibrefrec': json_data['bibrefrec']}
        on_ticket = json_data['on']
    except:
        # any missing/malformed key is treated as a client error
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    uid = getUid(req)

    operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
    if operation_to_be_removed is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
    if not operation_is_removed:
        # Operation couldn't be removed because it doesn't exist in the
        # ticket. Wrong parameters were given hence we should fail!
        return self._fail(req, apache.HTTP_NOT_FOUND)

    session.dirty = True
def commit(self, req, form):
    '''
    Ajax handler: finalize a ticket by attaching the committer's contact
    information and executing all of its operations.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request; 'jsondata' may carry
        'first_name', 'last_name' and 'email', and must carry 'comments'
        and 'on' (ticket selector)
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)

    try:
        additional_info = {'first_name': json_data.get('first_name', "Default"),
                           'last_name': json_data.get('last_name', "Default"),
                           'email': json_data.get('email', "Default"),
                           'comments': json_data['comments']}
        on_ticket = json_data['on']
    except:
        # missing 'comments' or 'on' is treated as a client error
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']
    uid = getUid(req)

    user_is_guest = isGuestUser(uid)
    # Logged-in users: prefer the identity recorded in the session over
    # whatever the Ajax payload claims.
    if not user_is_guest:
        try:
            additional_info['first_name'] = session['user_info']['external_firstname']
            additional_info['last_name'] = session['user_info']['external_familyname']
            additional_info['email'] = session['user_info']['email']
        except KeyError:
            # no external identity available: fall back to the uid
            additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    # When a guest is claiming we should not commit if he
    # doesn't provide us his full personal information
    strict_check = user_is_guest
    userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
    if userinfo is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    # Syncing is done here. Entries that have been handled are removed from
    # unsuccessful_tickets so that they do not reappear in the next reload.
    if pinfo['autoclaim']['res']:
        if 'unsuccessful_recids' in pinfo['autoclaim']['res']:
            unsuccessful_recids = pinfo['autoclaim']['res']['unsuccessful_recids']
        else:
            unsuccessful_recids = []
        for entry in ticket:
            recid = entry['rec']
            # drop every unsuccessful entry this ticket now handles;
            # presumably rec[2] is the recid field of the tuple — TODO confirm
            unsuccessful_recids = [rec for rec in unsuccessful_recids if rec[2] != recid]
        pinfo['autoclaim']['res']['unsuccessful_recids'] = unsuccessful_recids

    webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
    session.dirty = True
def abort(self, req, form):
    '''
    Ajax handler: abort the selected ticket; user tickets are deleted
    outright, autoclaim tickets are only aborted.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request; 'jsondata' must carry
        'on' (ticket selector)
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if 'jsondata' not in form:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)

    try:
        on_ticket = json_data['on']
    except:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']

    ticket = self._get_according_ticket(on_ticket, pinfo)
    if ticket is None:
        return self._fail(req, apache.HTTP_NOT_FOUND)

    # When a user is claiming we should completely delete his ticket if he
    # aborts the claiming procedure
    delete_ticket = (on_ticket == 'user')
    webapi.abort_ticket(ticket, delete_ticket=delete_ticket)

    session.dirty = True
def _get_according_ticket(self, on_ticket, pinfo):
    """Resolve the ticket selector to the ticket stored in pinfo.

    'user' maps to pinfo['ticket'], 'autoclaim' to
    pinfo['autoclaim']['ticket']; any other selector yields None.
    """
    if on_ticket == 'user':
        return pinfo['ticket']
    if on_ticket == 'autoclaim':
        return pinfo['autoclaim']['ticket']
    return None
def _fail(self, req, code):
    """Mark the request with the given HTTP status code and return None
    (shared Ajax error path)."""
    req.status = code
    return None
class WebAuthorSearch(WebInterfaceDirectory):
    """
    Provides an interface to profile search using AJAX queries.
    """
    _exports = ['list',
                'details']

    # This class requires JSON libraries
    assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."

    class QueryPerson(WebInterfaceDirectory):
        # Handles /list/<query>: free-text person search returning pids.
        _exports = ['']

        # minimum number of characters before a query is accepted
        MIN_QUERY_LENGTH = 2
        # whitelist of characters allowed in a query string
        QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)

        def __init__(self, query=None):
            self.query = query

        def _lookup(self, component, path):
            # Each additional URL component becomes the query string of a
            # fresh QueryPerson instance.
            if component not in self._exports:
                return WebAuthorSearch.QueryPerson(component), path

        def __call__(self, req, form):
            # Validate the query, then return matching person ids as JSON.
            if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
                req.status = apache.HTTP_BAD_REQUEST
                return "Query too short"
            if not self.QUERY_REGEX.match(self.query):
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."

            pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
            req.content_type = 'application/json'
            return json.dumps(pid_results)

        # Request for index handled by __call__
        index = __call__

    def _JSON_received(self, form):
        # True when the Ajax request carries a 'jsondata' payload.
        try:
            return "jsondata" in form
        except TypeError:
            return False

    def _extract_JSON(self, form):
        # Decode and UTF8-normalize the 'jsondata' payload; None when the
        # payload is not valid JSON.
        try:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            return json_data
        except ValueError:
            return None

    def _get_pid_details(self, pid):
        # Aggregate profile info, name variants and external ids for a pid.
        details = webapi.get_person_info_by_pid(pid)
        details.update({
            "names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
            "externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
        })
        # expose the canonical name under the shorter 'cname' key
        details['cname'] = details.pop("canonical_name", None)
        return details

    def details(self, req, form):
        """
        Ajax endpoint: given a payload {'pids': [...]} return a JSON list
        with the details of every requested person id; HTTP 400 on any
        malformed request.
        """
        if self._JSON_received(form):
            try:
                json_data = self._extract_JSON(form)
                pids = json_data['pids']
                req.content_type = 'application/json'
                details = [self._get_pid_details(pid) for pid in pids]
                return json.dumps(details)
            except (TypeError, KeyError):
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."
        else:
            req.status = apache.HTTP_BAD_REQUEST
            return "Incorrect query format."

    # /list endpoint: an empty QueryPerson whose _lookup captures the query
    list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):
    '''
    Handles /author/* pages.

    Supplies the methods:
        /author/choose_profile
        /author/claim/
        /author/help
        /author/manage_profile
        /author/merge_profiles
        /author/profile/
        /author/search
        /author/ticket/
    '''
    _exports = ['',
                'choose_profile',
                'claim',
                'help',
                'manage_profile',
                'merge_profiles',
                'profile',
                'search',
                'search_ajax',
                'ticket']

    # imported inside the class body rather than at module level —
    # presumably to avoid a circular import; TODO confirm
    from invenio.webauthorprofile_webinterface import WebAuthorPages

    claim = WebInterfaceBibAuthorIDClaimPages()
    profile = WebAuthorPages()
    choose_profile = claim.choose_profile
    help = claim.help
    manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
    merge_profiles = claim.merge_profiles
    search = claim.search
    search_ajax = WebAuthorSearch()
    ticket = WebInterfaceAuthorTicketHandling()

    def _lookup(self, component, path):
        # Unknown components are treated as author identifiers and are
        # dispatched to __call__ via a fresh instance.
        if component not in self._exports:
            return WebInterfaceAuthor(component), path

    def __init__(self, component=None):
        # component: canonical id, numeric pid or search string from the URL
        self.path = component

    def __call__(self, req, form):
        """
        Redirect /author/<id> to the canonical profile page, falling back
        to the author search when the id cannot be resolved.
        """
        if self.path is None or len(self.path) < 1:
            redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)

        if CFG_BIBAUTHORID_ENABLED:
            # Check if canonical id: e.g. "J.R.Ellis.1"
            pid = get_person_id_from_canonical_id(self.path)
            if pid >= 0:
                url = "%s/author/profile/%s" % (CFG_BASE_URL, urllib.quote(get_person_redirect_link(pid)))
                redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
                return
            else:
                try:
                    pid = int(self.path)
                except ValueError:
                    # not numeric either: treat the path as a search query
                    redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, urllib.quote(self.path)))
                    return
                else:
                    if author_has_papers(pid):
                        # prefer the canonical id in the redirect when valid
                        cid = get_person_redirect_link(pid)
                        if is_valid_canonical_id(cid):
                            redirect_id = cid
                        else:
                            redirect_id = pid
                        url = "%s/author/profile/%s" % (CFG_BASE_URL, urllib.quote(str(redirect_id)))
                        redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
                        return

            # nothing matched: fall back to the search page
            redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
            return
        else:
            # author id module disabled: pass the raw path through
            url = "%s/author/profile/%s" % (CFG_BASE_URL, urllib.quote(self.path))
            redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
            return

    index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
    '''
    Handles /person/* pages.

    Supplies the methods:
        /person/welcome
        /person/update
        /person/you
    '''
    _exports = ['welcome', 'update', 'you']

    def welcome(self, req, form):
        """Legacy entry point: redirect to the author profile chooser."""
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def you(self, req, form):
        """Alias of welcome(): redirect to the author profile chooser."""
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def update(self, req, form):
        """
        Redirect to the HepNames update form for the record identified by
        the 'email' or 'IRN' query argument; fall back to the HepNames
        collection page when no record can be determined.
        """
        argd = wash_urlargd(form,
                            {'ln': (str, CFG_SITE_LANG),
                             'email': (str, ''),
                             'IRN': (str, ''),
                             })
        # Retrieve info for HEP name based on email or IRN
        recids = []
        if argd['email']:
            recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
        elif argd['IRN']:
            recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
        else:
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        if not recids:
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        else:
            # BUGFIX: the redirect must carry the numeric record id; the
            # previous code interpolated the parsed record structure
            # returned by get_bibrecord(), producing a garbage URL.
            redirect_to_url(req,
                            "https://labs.inspirehep.net/author/update?recid=%s" %
                            recids[0],
                            redirection_type=apache.HTTP_MOVED_PERMANENTLY)
# pylint: enable=C0301
# pylint: enable=W0613
|
Panos512/invenio
|
modules/bibauthorid/lib/bibauthorid_webinterface.py
|
Python
|
gpl-2.0
| 147,640
|
[
"VisIt"
] |
ce377e10344d0009493513fe551b478b376168e9634041b98148bbafae3527d4
|
#!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
from multiprocessing import Process, cpu_count, Pool, Queue
from multiprocessing.sharedctypes import Value, Array
from os import makedirs, sys, remove, rename
from sys import path
import re, math, traceback
from copy import copy
from optparse import OptionParser, OptionGroup
from libs.python_modules.utils.metapathways_utils import parse_command_line_parameters, fprintf, printf, eprintf, exit_process, ShortenORFId
from libs.python_modules.utils.sysutil import getstatusoutput
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
sys.exit(3)
# Command-line usage string shown when argument validation fails.
usage= sys.argv[0] +" -d dbname1 -b blastout_for_database1 -m map_for_database1 [-d dbname2 -b blastout_for_database2 -m map_for_database2 ] """
# Module-level OptionParser; built lazily by createParser().
parser = None
def createParser():
    """Build the module-level OptionParser with input/output, cutoff and
    bit-score option groups for the B/LAST parsing step."""
    global parser
    epilog = """This script parses BLAST/LAST search results of the amino acid sequences against the reference protein databases, in a tabular format. In the context of MetaPathways these files are available in the in the folder blast_results. The tabular results are put in individual files, one for each of the databases and algorithms combinations. This script parses these results and uses the hits based on the specified cutoffs for the evalue, bit score ratio, etc the parsed results are put in file named according to the format
<samplename><dbname><algorithm>out.parsed.txt. These parsed files are in a tabular format and each row contains information about the hits in terms of start, end, query name, match name, bit score ratio, etc."""

    parser = OptionParser(usage, epilog= epilog)
    # one -b/-d/-m triple is expected per searched database
    parser.add_option("-b", "--blastoutput", dest="input_blastout", action='append', default=[],
                      help='the input blastout files [at least 1 REQUIRED]')
    parser.add_option("-d", "--dbasename", dest="database_name", action='append', default=[],
                      help='the database names [at least 1 REQUIRED]')
    parser.add_option("-o", "--parsedoutput", dest="parsed_output", default=None,
                      help='the parsed output file [OPTIONAL]')
    parser.add_option("-r", "--ref_score", dest="refscore_file",
                      help='the refscore table [REQUIRED]')
    parser.add_option("-m", "--map_for_database", dest="database_map", action='append', default=[],
                      help='the map file for the database [at least 1 REQUIRED]')
    parser.add_option("-a", "--algorithm", dest="algorithm", choices = ['BLAST', 'LAST'], default = "BLAST",
                      help='the algorithm used for computing homology [DEFAULT: BLAST]')

    # thresholds applied to each hit before it is written out
    cutoffs_group = OptionGroup(parser, 'Cuttoff Related Options')
    cutoffs_group.add_option("--min_score", dest="min_score", type='float', default=20,
                             help='the minimum bit score cutoff [default = 20 ] ')
    cutoffs_group.add_option("--min_query_coverage", dest="min_query_coverage", type='float', default=0,
                             help='the minimum bit query_coverage cutoff [default = 0 ] ')
    cutoffs_group.add_option("--max_evalue", dest="max_evalue", type='float', default=1e-6,
                             help='the maximum E-value cutoff [ default = 1e-6 ] ')
    cutoffs_group.add_option("--min_length", dest="min_length", type='float', default=30,
                             help='the minimum length of query cutoff [default = 30 ] ')
    cutoffs_group.add_option("--max_length", dest="max_length", type='float', default=10000,
                             help='the maximum length of query cutoff [default = 10000 ] ')
    cutoffs_group.add_option("--min_identity", dest="min_identity", type='float', default=20,
                             help='the minimum identity of query cutoff [default 30 ] ')
    cutoffs_group.add_option("--max_identity", dest="max_identity", type='float', default=100,
                             help='the maximum identity of query cutoff [default = 100 ] ')
    cutoffs_group.add_option("--max_gaps", dest="max_gaps", type='float', default=1000,
                             help='the maximum gaps of query cutoff [default = 1000] ')
    cutoffs_group.add_option("--limit", dest="limit", type='float', default=5,
                             help='max number of hits per query cutoff [default = 5 ] ')
    cutoffs_group.add_option("--min_bsr", dest="min_bsr", type='float', default=0.30,
                             help='minimum BIT SCORE RATIO [default = 0.30 ] ')
    parser.add_option_group(cutoffs_group)

    # switches controlling which columns appear in the parsed output
    output_options_group = OptionGroup(parser, 'Output Options')
    output_options_group.add_option("--tax", dest="taxonomy", action='store_true', default=False,
                                    help='add the taxonomy info [useful for refseq] ')
    output_options_group.add_option("--remove_tax", dest="remove_taxonomy", action='store_true', default=False,
                                    help='removes the taxonomy from product [useful for refseq] ')
    output_options_group.add_option("--remove_ec", dest="remove_ec", action='store_true', default=False,
                                    help='removes the EC number from product [useful for kegg/metacyc] ')
    output_options_group.add_option( "--compact_output", dest="compact_output", action='store_true', default=False,
                                     help='compact output [OPTIONAL]')
    parser.add_option_group(output_options_group)

    # Karlin-Altschul parameters used to convert raw scores to bit scores
    bitscore_params = OptionGroup(parser, 'Bit Score Parameters')
    bitscore_params.add_option("--lambda", dest="Lambda", default=None, type='float',
                               help='lambda parameter to compute bit score [useful for BSR] ')
    bitscore_params.add_option("--k", dest="k", default=None, type='float',
                               help='k parameter to compute bit score [useful for BSR] ')
    parser.add_option_group(bitscore_params)
def check_arguments(opts, args):
    """Validate the parsed command-line options.

    Requires at least one (blastoutput, database name, database map)
    triple, equal counts of the three lists, and a refscore file.

    @param opts: parsed options from OptionParser
    @param args: positional arguments (unused)
    @return: True when the options are usable, False otherwise
    """
    # parenthesized prints are valid in both Python 2 and 3
    if len(opts.input_blastout) == 0:
        print("There should be at least one blastoutput file")
        return False

    if len(opts.database_name) == 0:
        print("There should be at least one database name")
        return False

    if len(opts.database_map) == 0:
        print("There should be at least one database map file name")
        return False

    # the three lists must line up one-to-one
    if len(opts.input_blastout) != len(opts.database_name) or len(opts.input_blastout) != len(opts.database_map):
        print("The number of database names, blastoutputs and database map file should be equal")
        return False

    if opts.refscore_file is None:
        print("Must specify the refscore")
        return False

    return True
# shared result queue for worker processes (passed to work() but unused there)
output = Queue()

def work(i, lines, output):
    """Worker: extract column 1 (the match/target name) from each
    12-column B/LAST tabular line and append it to this worker's
    bucket in the module-global outputBuf."""
    global outputBuf
    print 'input size', len(lines)
    for line in lines:
        # skip comment markers; NOTE(review): this only matches a line that
        # is exactly "#", not lines *starting* with '#'
        if line=="#":
            continue
        words = line.rstrip().split('\t')
        if len(words) != 12:
            continue
        outputBuf[i].append(words[1])
    print 'output size', len(outputBuf[i])
    # if algorithm =='LAST':
    #     if not words[1] in outputBuf:
    # #    #        return  words[1]
    # #
    # #    return 'X'

# per-process result buckets, (re)initialized in create_query_dictionary()
outputBuf = []
def create_query_dictionary(blastoutputfile, query_dictionary, algorithm, errorlogger= None ):
    """Scan a B/LAST tabular output file and collect the target names that
    appear in it, batching the work across one process per CPU.

    NOTE(review): this experimental threaded version accumulates names
    into the module-global outputBuf via worker processes and does NOT
    fill query_dictionary directly (the sequential logic is commented out
    below) — confirm callers still expect query_dictionary to be populated.
    """
    seq_beg_pattern = re.compile("^#")
    global outputBuf

    try:
        blastoutfh = open( blastoutputfile,'r')
    except:
        print "ERROR : cannot open B/LAST output file " + blastoutputfile + " to parse "
        return

    # one input/output bucket per CPU; lines are dealt round-robin
    num_procs = cpu_count()
    inputBuf = [ [] for i in range(num_procs)]
    outputBuf = [ [] for i in range(num_procs)]
    MAX = 400000                 # lines per bucket before a batch is flushed
    TOTMAX = num_procs*MAX -1    # total buffered lines that trigger a batch

    try:
        print 'creating dict'
        count = 0
        i = 0
        for line in blastoutfh:
            if count%100000==0:
                print i
            bucket = i%num_procs
            inputBuf[bucket].append(line)
            if count==TOTMAX:
                # buffers full: fan the batch out to worker processes
                print 'submitting'
                processes = [Process(target = work, args=(i, inputBuf[i], output) ) for i in range(num_procs) ]
                for p in processes:
                    p.start()
                print 'join'
                for p in processes:
                    p.join()
                print 'joined'
                # NOTE(review): worker processes mutate their *own copy* of
                # outputBuf, so these buckets are likely empty in the
                # parent — verify on a real run
                for i in range(num_procs):
                    print i, outputBuf[i]
                #print results
                inputBuf = [ [] for i in range(num_procs)]
                outputBuf = [ [] for i in range(num_procs)]
                count = 0
            count += 1
            i += 1
            #if not seq_beg_pattern.search(line):
            #continue
            # words = line.rstrip().split('\t')
            # continue
            # if len(words) != 12:
            #     continue
            # if algorithm =='BLAST':
            #     if not words[1] in query_dictionary:
            #         query_dictionary[words[1]] = True
            # if algorithm =='LAST':
            #     if not words[1] in query_dictionary:
            #         query_dictionary[words[1]] = True
        # mergeOutputs(num_procs, outputBufs)
        print 'done creating dict'
        blastoutfh.close()
    except:
        # NOTE(review): 'bucket' is unbound here if the failure happened
        # before the first loop iteration
        print 'index', bucket
        print traceback.print_exc(10)
        eprintf("\nERROR : while reading B/LAST output file " + blastoutputfile + " to parse " +\
                " : make sure B/LAST ing was done for the particular database")
        if errorlogger:
            errorlogger.write("\nERROR : while reading B/LAST output file %s to parse\n" %(blastoutputfile))
            errorlogger.write(" : make sure B/LAST ing was done for the particular database\n")
        pass
def create_dictionary(databasemapfile, annot_map, query_dictionary, errorlogger=None):
    """Fill annot_map with {sequence name: annotation} pairs read from a
    FASTA-style database map file, keeping only names that appear in
    query_dictionary.

    @param databasemapfile: path to the '>name annotation...' map file
    @param annot_map: dict mutated in place with the selected annotations
    @param query_dictionary: names of interest (anything else is skipped)
    @param errorlogger: optional logger for error/warning lines
    """
    if not query_dictionary:
        print("WARNING : empty query dictionary in parse B/LAST")
        if errorlogger:
            # BUGFIX: was 'errologger', a NameError on this path
            errorlogger.write("WARNING : empty query dictionary in parse B/LAST\n")
        return

    seq_beg_pattern = re.compile(">")

    try:
        dbmapfile = open(databasemapfile, 'r')
    except:
        if errorlogger:
            # BUGFIX: was 'errologger', a NameError on this path
            errorlogger.write("PARSE_BLAST\tERROR\tCannot open database map file %s\t Please check the file manuallyT\n" %(databasemapfile) )
        exit_process("ERROR: Cannot open database map file %s\n" %(databasemapfile))

    for line in dbmapfile:
        # header lines look like '>name annotation words...'
        if seq_beg_pattern.search(line):
            words = line.rstrip().split()
            name = words[0].replace('>', '', 1)
            if not name in query_dictionary:
                continue
            words.pop(0)
            if len(words) == 0:
                annotation = 'hypothetical protein'
            else:
                annotation = ' '.join(words)
            annot_map[name] = annotation
    dbmapfile.close()

    if len(annot_map) == 0:
        if errorlogger:
            errorlogger.write( "PARSE_BLAST\tERROR\tFile "+databasemapfile+ " seems to be empty!\tCreate datbasemap file\n")
            errorlogger.write( "Try re-running after deleting file : %s\n" %(databasemapfile))
        exit_process( "no anntations in file :" + databasemapfile)
class BlastOutputParser(object):
    """Buffered iterator over a B/LAST tabular output file.

    Each iteration yields either a dict describing one hit that passed all
    cutoffs (query, target, bitscore, bsr, expect, aln_length, identity,
    ec, product, ...) or None for a rejected/malformed row; StopIteration
    is raised at end of file.

    LAST tabular rows use a different column order than BLAST; when a
    '# LAST version N' header comment is seen the rows are permuted into
    BLAST order first (see permuteForLAST).
    """
    commentPATTERN = re.compile(r'^#')
    commentLAST_VERSION_PATTERN = re.compile(r'^#.*LAST[\s]+version[\s]+\d+')

    def create_refBitScores(self):
        """Read the refscore file and convert each raw score into a bit
        score using the Karlin-Altschul parameters (lambda, k)."""
        refscorefile = open(self.refscore_file, 'r')
        print('refscoreing')
        for line in refscorefile:
            words = [x.strip() for x in line.split('\t')]
            if len(words) == 2:
                orfid = ShortenORFId(words[0])
                try:
                    self.refBitScores[orfid] = int((self.Lambda * float(words[1]) - self.lnk) / self.ln2)
                except:
                    # unparsable score: fall back to a minimal bit score
                    self.refBitScores[orfid] = int(1)
        print('done')
        refscorefile.close()

    def __init__(self, dbname, blastoutput, database_mapfile, refscore_file, opts, errorlogger=None):
        """Open the B/LAST output, load reference bit scores and the
        annotation map for the given database.

        @param dbname: reference database name (for error messages)
        @param blastoutput: path to the tabular B/LAST output file
        @param database_mapfile: FASTA-style map of name -> annotation
        @param refscore_file: table of reference scores per ORF
        @param opts: parsed command-line options (cutoffs, algorithm, ...)
        @param errorlogger: optional logger for error/warning lines
        """
        self.Size = 10000  # number of data lines kept in the read buffer
        self.dbname = dbname
        self.ln2 = 0.69314718055994530941
        self.lnk = math.log(opts.k)
        self.Lambda = opts.Lambda
        self.blastoutput = blastoutput
        self.database_mapfile = database_mapfile
        self.refscore_file = refscore_file
        self.annot_map = {}
        self.i = 0
        self.opts = opts
        self.hits_counts = {}       # per-query number of accepted hits
        self.data = {}              # reused dict returned by next()
        self.refscores = {}
        self.refBitScores = {}
        self.needToPermute = False  # True once a LAST header is detected
        self.MAX_READ_ERRORS_ALLOWED = 10
        self.ERROR_COUNT = 0
        self.STEP_NAME = 'PARSE_BLAST'
        self.error_and_warning_logger = errorlogger
        #print "trying to open blastoutput file " + blastoutput

        query_dictionary = {}
        create_query_dictionary(self.blastoutput, query_dictionary, self.opts.algorithm, errorlogger=errorlogger)
        try:
            self.blastoutputfile = open(self.blastoutput, 'r')
        except:
            eprintf("\nERROR : cannot open B/LAST output file " + blastoutput + " to parse " +
                    " : make sure \"B/LAST\"ing was done for the particular database")
            if self.error_and_warning_logger:
                # BUGFIX: the '%' previously bound only to the last string
                # fragment, raising TypeError on this error path
                self.error_and_warning_logger.write(("ERROR : cannot open B/LAST output file %s to parse \n"
                                                     " : make sure \"B/LAST\"ing was done for "
                                                     "the particular database") % (blastoutput))
            exit_process("Cannot open B/LAST output file " + blastoutput)

        try:
            self.create_refBitScores()
        except:
            print(traceback.print_exc(10))
            exit_process("Error while reading from B/LAST refscore file " + self.refscore_file)
        try:
            create_dictionary(database_mapfile, self.annot_map, query_dictionary)
            query_dictionary = {}
        except AttributeError:
            eprintf("Cannot read the map file for database : %s\n" % (dbname))
            if errorlogger != None:
                errorlogger.write("PARSE_BLAST\tERROR\tCannot read the map file %s for database : %s\tDelete the formatted files for the database in the \"formatted\" folder\n" %(database_mapfile, dbname))
            exit_process("Cannot read the map file for database " + dbname)

    def setMaxErrorsLimit(self, max):
        # maximum number of map-file misses tolerated before aborting
        self.MAX_READ_ERRORS_ALLOWED = max

    def setErrorAndWarningLogger(self, logger):
        self.error_and_warning_logger = logger

    def setSTEP_NAME(self, step_name):
        self.STEP_NAME = step_name

    def incErrorCount(self):
        self.ERROR_COUNT += 1

    def maxErrorsReached(self):
        return (self.ERROR_COUNT > self.MAX_READ_ERRORS_ALLOWED)

    def __iter__(self):
        return self

    def permuteForLAST(self, words):
        """Rearrange a LAST tabular row in place into BLAST column order
        (query, target, identity, aln length, ..., evalue, bitscore)."""
        try:
            temp = copy(words)
            words[0] = temp[6]  # query
            words[1] = temp[1]  # target
            words[2] = 100.0    # percent id
            words[3] = temp[3]  # aln length
            words[6] = temp[2]
            words[7] = int(temp[2]) + int(temp[3]) - 1
            words[10] = 0.0     # evalue
            words[11] = temp[0]
        except:
            eprintf("ERROR : Invalid B/LAST output file %s \n" % (self.blastoutput))
            if self.error_and_warning_logger:
                # BUGFIX: message had no placeholder for the '%' argument
                self.error_and_warning_logger.write("ERROR : Invalid B/LAST output file %s" % (self.blastoutput))
            exit_process("ERROR : Invalid B/LAST output file %s " % (self.blastoutput))

    def refillBuffer(self):
        """Read up to self.Size non-comment lines into self.lines.

        A '# LAST version N' header marks the file as LAST output and
        switches column permutation on for all subsequent rows.
        """
        i = 0
        self.lines = []
        line = True  # primed so the loop reads at least once
        while line and i < self.Size:
            line = self.blastoutputfile.readline()
            if self.commentPATTERN.match(line):
                # BUGFIX: the original compared the match object against
                # ``== False`` which is never true, so LAST output was
                # never detected and never permuted
                if self.commentLAST_VERSION_PATTERN.match(line):
                    self.needToPermute = True
                continue
            self.lines.append(line)
            if not line:
                break
            i += 1
        self.size = len(self.lines)

    def next(self):
        """Return the parsed dict for the next buffered row, None when the
        row is rejected by the cutoffs, or raise StopIteration at EOF."""
        if self.i % self.Size == 0:
            self.refillBuffer()

        if self.i % self.Size < self.size:
            words = [x.strip() for x in self.lines[self.i % self.Size].rstrip().split('\t')]
            if len(words) != 12:
                self.i = self.i + 1
                return None

            '''shorten the ORF id'''
            words[0] = ShortenORFId(words[0])
            #if self.opts.algorithm =='LAST':
            if self.needToPermute:
                self.permuteForLAST(words)

            if not words[0] in self.hits_counts:
                self.hits_counts[words[0]] = 0

            # enforce the per-query hit limit
            if self.hits_counts[words[0]] >= self.opts.limit:
                self.i = self.i + 1
                return None

            if len(words) != 12 or not self.isWithinCutoffs(words, self.data, self.opts, self.annot_map, self.refBitScores):
                self.i = self.i + 1
                return None

            self.hits_counts[words[0]] += 1
            self.i = self.i + 1
            try:
                return self.data
            except:
                return None
        else:
            self.blastoutputfile.close()
            raise StopIteration()

    # Python 3 iterator protocol alias
    __next__ = next

    def isWithinCutoffs(self, words, data, cutoffs, annot_map, refbitscores):
        """Parse one 12-column row into ``data`` and check it against the
        cutoffs; returns True when the hit should be kept."""
        try:
            # BUGFIX: was 'ShortORFId' (undefined), so the intended
            # shortening silently never ran and the raw id was used
            orfid = ShortenORFId(words[0])
        except:
            orfid = words[0]

        data['query'] = orfid

        try:
            data['target'] = words[1]
        except:
            data['target'] = 0

        try:
            data['q_length'] = int(words[7]) - int(words[6]) + 1
        except:
            data['q_length'] = 0

        try:
            data['bitscore'] = float(words[11])
        except:
            data['bitscore'] = 0

        try:
            # bit score ratio against the reference score for this ORF
            data['bsr'] = float(words[11]) / refbitscores[orfid]
        except:
            #print "words 0 " + str(refscores[words[0]])
            #print "words 11 " + str( words[11])
            data['bsr'] = 0

        try:
            data['expect'] = float(words[10])
        except:
            data['expect'] = 0

        try:
            data['aln_length'] = float(words[3])
        except:
            data['aln_length'] = 0

        try:
            data['identity'] = float(words[2])
        except:
            data['identity'] = 0

        try:
            data['product'] = annot_map[words[1]]
        except:
            eprintf("Sequence with name \"" + words[1] + "\" is not present in map file\n")
            if self.error_and_warning_logger:
                self.error_and_warning_logger.write("Sequence with name %s is not present in map file " %(words[1] ))
            self.incErrorCount()
            if self.maxErrorsReached():
                if self.error_and_warning_logger:
                    self.error_and_warning_logger.write("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
                exit_process("Number of sequence absent in map file %s exceeds %d" %(self.blastoutput, self.ERROR_COUNT ))
            data['product'] = 'hypothetical protein'

        try:
            # EC number embedded in the product description, if any
            m = re.search(r'(\d+[.]\d+[.]\d+[.]\d+)', data['product'])
            if m != None:
                data['ec'] = m.group(0)
            else:
                data['ec'] = ''
        except:
            data['ec'] = ''

        if cutoffs.taxonomy:
            try:
                # taxonomy is conventionally the bracketed suffix of product
                m = re.search(r'\[([^\[]+)\]', data['product'])
                if m != None:
                    data['taxonomy'] = m.group(1)
                else:
                    data['taxonomy'] = ''
            except:
                data['taxonomy'] = ''

        if cutoffs.remove_taxonomy:
            try:
                data['product'] = re.sub(r'\[([^\[]+)\]', '', data['product'])
            except:
                data['product'] = ''

        if cutoffs.remove_ec:
            try:
                data['product'] = re.sub(r'\([Ee][Ce][:]\d+[.]\d+[.]\d+[.]\d+\)', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.]\d+[.]\d+\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.]\d+[.-]\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.]\d+[.-.-]\]', '', data['product'])
                data['product'] = re.sub(r'\[[Ee][Ce][:]\d+[.-.-.-]\]', '', data['product'])
            except:
                data['product'] = ''

        # reject anything outside the configured cutoffs
        if data['q_length'] < cutoffs.min_length:
            return False
        if data['bitscore'] < cutoffs.min_score:
            return False
        if data['expect'] > cutoffs.max_evalue:
            return False
        if data['identity'] < cutoffs.min_identity:
            return False
        if data['bsr'] < cutoffs.min_bsr:
            return False

        #min_length'
        #'min_score'
        #'max_evalue'
        # 'min_identity'
        #'limit'
        #'max_length'
        #'min_query_coverage'
        #'max_gaps'
        #min_bsr'
        return True
# compute the refscores
def process_blastoutput(dbname, blastoutput, mapfile, refscore_file, opts, errorlogger = None):
blastparser = BlastOutputParser(dbname, blastoutput, mapfile, refscore_file, opts, errorlogger = errorlogger)
blastparser.setMaxErrorsLimit(100)
blastparser.setErrorAndWarningLogger(errorlogger)
blastparser.setSTEP_NAME('PARSE BLAST')
fields = ['target','q_length', 'bitscore', 'bsr', 'expect', 'aln_length', 'identity', 'ec' ]
if opts.taxonomy:
fields.append('taxonomy')
fields.append('product')
output_blastoutput_parsed = opts.parsed_output
# temporary file is used to deal with incomplete processing of the file
output_blastoutput_parsed_tmp = output_blastoutput_parsed + ".tmp"
try:
outputfile = open(output_blastoutput_parsed_tmp, 'w')
except:
if errorlogger:
errorlogger.write("PARSE_BLAST\tERROR\tCannot open temp file %s to sort\tfor reference db\n" %(soutput_blastoutput_parsed_tmp, dbname))
exit_process("PARSE_BLAST\tERROR\tCannot open temp file %s to sort\tfor reference db\n" %(soutput_blastoutput_parsed_tmp, dbname))
# write the headers out
fprintf(outputfile, "#%s",'query')
for field in fields:
fprintf(outputfile,"\t%s",field)
fprintf(outputfile, "\n")
pattern = re.compile(r'' + "(\d+_\d+)$")
count = 0;
uniques = {}
for data in blastparser:
if not data:
continue
try:
fprintf(outputfile, "%s",data['query'])
result = pattern.search(data['query'])
if result:
name = result.group(1)
uniques[name] =True
except:
print 'data is : ', data, '\n'
return count, len(uniques)
for field in fields:
fprintf(outputfile, "\t%s",data[field])
fprintf(outputfile, "\n")
count += 1
outputfile.close()
rename(output_blastoutput_parsed_tmp, output_blastoutput_parsed)
return count, len(uniques)
# the main function
def main(argv, errorlogger=None, runstatslogger=None):
    """Parse command-line arguments and run process_blastoutput() once per
    (database, blastoutput, mapfile) triple.

    BUGFIX: opts.parsed_output used to be filled in from the FIRST
    blastoutput and then reused unchanged, so with several databases every
    later run overwrote the first parsed file; the default output name is
    now recomputed per blastoutput (an explicit -o is still honored).
    """
    global parser
    (opts, args) = parser.parse_args(argv)
    if not check_arguments(opts, args):
        print(usage)
        sys.exit(0)

    if errorlogger:
        errorlogger.write("#STEP\tPARSE_BLAST\n")

    # default Karlin-Altschul parameters per algorithm
    if opts.Lambda is None or opts.k is None:
        if opts.algorithm == 'LAST':
            opts.Lambda = 0.300471
            opts.k = 0.103946
        if opts.algorithm == 'BLAST':
            opts.Lambda = 0.267
            opts.k = 0.0410

    priority = 5000
    priority1 = 5500
    user_parsed_output = opts.parsed_output  # remember the user's choice
    for dbname, blastoutput, mapfile in zip(opts.database_name, opts.input_blastout, opts.database_map):
        temp_refscore = opts.refscore_file
        if user_parsed_output is None:
            opts.parsed_output = blastoutput + ".parsed.txt"
        count, unique_count = process_blastoutput(dbname, blastoutput, mapfile, temp_refscore, opts, errorlogger=errorlogger)
        if runstatslogger:
            runstatslogger.write("%s\tTotal Protein Annotations %s (%s)\t%s\n" %( str(priority), dbname, opts.algorithm, str(count)))
            runstatslogger.write("%s\tNumber of ORFs with hits in %s (%s)\t%s\n" %( str(priority1), dbname, opts.algorithm, str(unique_count)))
def MetaPathways_parse_blast(argv, errorlogger = None, runstatslogger = None):
    """Pipeline entry point: build the option parser, then run the parser step.

    Returns a (status, message) pair; (0, '') signals success to the caller.
    """
    createParser()
    main(argv, errorlogger=errorlogger, runstatslogger=runstatslogger)
    return (0, '')
# the main function of metapaths
if __name__ == "__main__":
    # Standalone CLI entry: build the option parser, then run on sys.argv.
    createParser()
    main(sys.argv[1:])
|
wholebiome/MetaPathways_Python_Koonkie.3.0
|
libs/python_scripts/MetaPathways_parse_blast_threaded.py
|
Python
|
mit
| 25,140
|
[
"BLAST"
] |
b0649ac3c16640d643cc166f8d601e2c1d0c524af2f11b8698488b47a72c0918
|
"""Initializer of parameters."""
import numpy as np
class Initializer(object):
    """Base class of a parameter initializer.

    Subclasses implement ``_init_weight``; every other parameter kind
    (bias / gamma / beta / mean / var) receives a fixed constant default.
    """

    def __init__(self, **kwargs):
        # Stash the raw configuration so subclasses can inspect it later.
        self._kwargs = kwargs

    def __call__(self, desc, arr):
        """Initialize ``arr`` in place according to the suffix of ``desc``.

        Parameters
        ----------
        desc : str
            Initialization pattern descriptor (e.g. ``'fc1_weight'``).
        arr : NDArray
            The array to be initialized.
        """
        # Dispatch on the descriptor suffix; anything unrecognized falls
        # through to _init_default, which raises.
        dispatch = (
            ('weight', self._init_weight),
            ('bias', self._init_bias),
            ('gamma', self._init_gamma),
            ('beta', self._init_beta),
            ('mean', self._init_mean),
            ('var', self._init_var),
        )
        for suffix, handler in dispatch:
            if desc.endswith(suffix):
                handler(desc, arr)
                return
        self._init_default(desc, arr)

    def _init_bias(self, _, arr):
        arr[:] = 0.0

    def _init_gamma(self, _, arr):
        arr[:] = 1.0

    def _init_beta(self, _, arr):
        arr[:] = 0.0

    def _init_mean(self, _, arr):
        arr[:] = 0.0

    def _init_var(self, _, arr):
        arr[:] = 1.0

    def _init_weight(self, name, arr):
        """Abstract method to Initialize weight."""
        raise NotImplementedError("Must override it")

    def _init_default(self, name, _):
        raise ValueError(
            'Unknown initialization pattern for %s. ' \
            'Default initialization is now limited to '\
            '"weight", "bias", "gamma" (1.0), and "beta" (0.0).' \
            'Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern' % name)
class Xavier(Initializer):
    """ "Xavier" initialization for weights

    Parameters
    ----------
    rnd_type: str, optional
        Random generator type, can be ``'gaussian'`` or ``'uniform'``.
    factor_type: str, optional
        Can be ``'avg'``, ``'in'``, or ``'out'``.
    magnitude: float, optional
        Scale of random number.
    """

    def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
        super(Xavier, self).__init__(rnd_type=rnd_type,
                                     factor_type=factor_type,
                                     magnitude=magnitude)
        self.rnd_type = rnd_type
        self.factor_type = factor_type
        self.magnitude = float(magnitude)

    def _init_weight(self, name, arr):
        """Fill ``arr`` in place with Xavier-scaled random values.

        Raises ValueError for arrays with fewer than 2 dimensions, for an
        unknown ``factor_type``, or for an unknown ``rnd_type``.
        """
        shape = arr.shape
        hw_scale = 1.
        if len(shape) < 2:
            raise ValueError('Xavier initializer cannot be applied to vector {0}. It requires at'
                             ' least 2D.'.format(name))
        if len(shape) > 2:
            # Trailing (spatial) dims multiply into the fan counts.
            hw_scale = np.prod(shape[2:])
        fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
        factor = 1.
        if self.factor_type == "avg":
            factor = (fan_in + fan_out) / 2.0
        elif self.factor_type == "in":
            factor = fan_in
        elif self.factor_type == "out":
            factor = fan_out
        else:
            raise ValueError("Incorrect factor type")
        # Hack for mobilenet, because there is less connectivity
        if "depthwise" in name:
            factor = 3 * 3
        scale = np.sqrt(self.magnitude / factor)
        if self.rnd_type == "uniform":
            arr[:] = np.random.uniform(-scale, scale, size=arr.shape)
        elif self.rnd_type == "gaussian":
            # FIX: the class docstring advertises 'gaussian', but it previously
            # fell through to the ValueError below.  Match MXNet's Xavier:
            # zero-mean normal with standard deviation = scale.
            arr[:] = np.random.normal(0, scale, size=arr.shape)
        else:
            raise ValueError("Unknown random type")
|
ZihengJiang/nnvm
|
python/nnvm/testing/init.py
|
Python
|
apache-2.0
| 3,494
|
[
"Gaussian"
] |
8f1ba1e3dc78ad7bc8512f6264003a8d9f88b74446af5506fa59aa1e7a353877
|
import os
import sys
import logging
import datetime
import zlib
import base64
import copy
import socket
import struct
import random
from functools import partial
from time import time
import ujson as json
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from queryparser import Parser
def merge(src, dst):
    """Recursively merge ``src`` into ``dst`` and return the result.

    Rules: dicts are merged key-by-key; numeric leaves (int/float) present in
    both are summed; lists are merged element-wise and must have equal length
    (otherwise Exception is raised); anything else in ``src`` overwrites.
    A ``dst`` of None is replaced wholesale by ``src``.
    """
    if dst == None:
        return src
    if type(src) == dict and type(dst) == dict:
        # .items() / `in` instead of Py2-only .iteritems() / .has_key()
        # so the function runs under both Python 2 and 3.
        for k, v in src.items():
            if type(v) is dict and k in dst:
                dst[k] = merge(v, dst[k])
            elif type(v) is list and k in dst:
                if len(v) == len(dst[k]):
                    for i, item in enumerate(v):
                        dst[k][i] = merge(item, dst[k][i])
                else:
                    raise Exception("Cannot merge arrays of different length")
            # BUG FIX: was `type(v) is int or type(v) is float and k in dst`;
            # `and` binds tighter than `or`, so an int value whose key was
            # missing from dst raised KeyError on the += below.  Parenthesize
            # so missing keys fall through to plain assignment.
            elif (type(v) is int or type(v) is float) and k in dst:
                dst[k] += v
            else:
                dst[k] = v
    elif type(src) == int or type(src) == float:
        dst += src
    else:
        dst = src
    return dst
# Tornado route pattern that captures the whole request path.
TORNADO_ROUTE = "(.+)"
# Fallback user name; there is no real authentication in these handlers.
DEFAULT_USER = "default"
# Alert defaults: scheduling interval (seconds) and throttle (0 = none).
DEFAULT_ALERT_INTERVAL = 60
DEFAULT_ALERT_THROTTLE = 0
class BaseHandler(tornado.web.RequestHandler):
    # Generic pass-through handler: requests not intercepted by a more
    # specific handler are proxied to the federation node from the config.

    def initialize(self, conf,
                   loop=tornado.ioloop.IOLoop.current()):
        # NOTE(review): the default `loop` is evaluated once at import time,
        # not per request, so all handlers share that loop -- confirm intended.
        self.io_loop = loop
        self.client = AsyncHTTPClient(self.io_loop)
        # host:port of the federation node proxied requests are sent to.
        self.passthrough_node = "%s:%d" % (conf["fed"]["host"], conf["fed"]["port"])

    def __init__(self, application, request, **kwargs):
        super(BaseHandler, self).__init__(application, request, **kwargs)

    def _bad_request(self, error):
        # Respond 400 with a JSON error payload and finish the request.
        self.set_status(400)
        self.write(json.dumps({"error": error}))
        self.finish()

    def passthrough(self, **kwargs):
        # Rewrite the incoming request to target the federation node (dropping
        # the first path component) and re-issue it asynchronously.
        # NOTE(review): self.log is only defined by subclasses -- confirm this
        # method is never reached on a bare BaseHandler.
        self.request.host = self.passthrough_node
        self.request.uri = "/" + "/".join(self.request.uri.split("/")[2:])
        uri = self.request.full_url()
        req = HTTPRequest(uri,
                          method=self.request.method,
                          body=self.request.body,
                          headers=self.request.headers,
                          follow_redirects=False,
                          allow_nonstandard_methods=True
                          )
        self.log.debug("Passing req through %r" % req.url)
        self.client.fetch(req, self.passthrough_callback, raise_error=False)

    def passthrough_callback(self, response):
        # Copy the proxied response (status, headers, body) back to the client.
        if (response.error and not
                isinstance(response.error, tornado.httpclient.HTTPError)):
            self.set_status(500)
            self.write('Internal server error:\n' + str(response.error))
        else:
            self.set_status(response.code, response.reason)
            self._headers = tornado.httputil.HTTPHeaders()  # clear tornado default header
            for header, v in response.headers.get_all():
                if header not in ('Content-Length', 'Transfer-Encoding', 'Content-Encoding', 'Connection'):
                    self.add_header(header, v)  # some header appear multiple times, eg 'Set-Cookie'
            if response.body:
                self.set_header('Content-Length', len(response.body))
                self.write(response.body)
        self.finish()

    # All HTTP verbs funnel through post(), which performs the pass-through.
    @tornado.web.asynchronous
    def put(self, uri):
        self.post(uri)

    @tornado.web.asynchronous
    def head(self, uri):
        self.post(uri)

    @tornado.web.asynchronous
    def post(self, uri):
        # Unless we explicitly want to intercept and federate, pass the req through
        # to the first node listed in local_nodes conf
        self.passthrough()

    @tornado.web.asynchronous
    def get(self, uri):
        self.post(uri)

    def _finish(self):
        # Serialize accumulated self.results as the JSON response body.
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(self.results))
        self.finish()
class SearchHandler(BaseHandler):
    """Intercepts /search requests: translates the q= expression into an
    Elasticsearch query, proxies it to the federation node, post-processes
    the response, and records the search in the transcript database."""

    def __init__(self, application, request, **kwargs):
        # The sqlite handle is injected via route kwargs; strip it before
        # handing the rest to RequestHandler.__init__, which rejects extras.
        self.db = kwargs["db"]
        del kwargs["db"]
        super(SearchHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.search_handler")
        self.parser = Parser()
        # Fields whose aggregation keys are integer-packed IPv4 addresses.
        self.ip_fields = frozenset(["srcip", "dstip", "ip"])

    def initialize(self, *args, **kwargs):
        super(SearchHandler, self).initialize(*args, **kwargs)
        self.user = DEFAULT_USER

    # Using the post() coroutine
    def get(self, uri):
        """Parse q=/start=/end= into an ES query and delegate to post()."""
        query_string = self.get_argument("q")
        params = dict(
            start=self.get_argument("start", None),
            end=self.get_argument("end", None)
        )
        es_query, parsed = self.parser.parse(query_string, params)
        self.log.debug("es_query: %r" % es_query)
        # Stash the derived queries on the request object so post()/fixup()/
        # record() can reach them later.
        self.request.parsed = parsed
        self.request.es_query = es_query
        self.request.raw_query = query_string
        self.request.body = json.dumps(es_query)
        return self.post(uri)

    def compare_searches(self, curr):
        """Return the previous search's transcript id when it is similar
        enough to ``curr`` to be considered related, else None."""
        # See if the current search is similar enough to the previous to call them related
        prev = self.db.execute("SELECT * FROM transcript " +\
            "WHERE action='SEARCH' ORDER BY id DESC LIMIT 1").fetchone()
        if not prev:
            return None
        # Split the raw_query into whitespaced tokens and compare
        prev["data"] = json.loads(prev["data"])
        prev_tokens = set(prev["data"]["raw_query"].split())
        curr_tokens = set(curr["raw_query"].split())
        self.log.debug("prev: %r, curr: %r" % (prev_tokens, curr_tokens))
        # "Related" means at most two tokens that the previous search lacked.
        if len(curr_tokens - prev_tokens) <= 2:
            return prev["id"]
        return None

    def fixup(self, body):
        """Post-process the raw ES response: humanize timestamps, unpack
        integer IPv4 aggregation keys, and wrap everything in the envelope
        the UI expects."""
        body = json.loads(body)
        self.log.debug("body: %r" % body)
        self.log.debug("parsed: %r" % self.request.parsed)
        if body.has_key("hits"):
            for hit in body["hits"]["hits"]:
                # Keep the raw epoch-millisecond value, expose ISO-8601.
                hit["_source"]["orig_@timestamp"] = hit["_source"]["@timestamp"]
                hit["_source"]["@timestamp"] = datetime.datetime.fromtimestamp(int(hit["_source"]["@timestamp"])/1000).isoformat()
        if body.has_key("aggregations"):
            for rawfield, buckethash in body["aggregations"].iteritems():
                fields = rawfield.split(",")
                # Positions within the composite key that hold packed IPs.
                ipfields = []
                for i, field in enumerate(fields):
                    if field in self.ip_fields:
                        ipfields.append(i)
                self.log.debug("rawfield: %s, ipfields: %r" % (rawfield, ipfields))
                for bucket in buckethash["buckets"]:
                    if bucket.has_key("key_as_string"):
                        values = [ bucket["key_as_string"] ]
                    else:
                        values = str(bucket["key"]).split("\t")
                    newvalues = []
                    for i, value in enumerate(values):
                        # Convert packed-integer IPs to dotted-quad form.
                        if i in ipfields and "." not in value:
                            newvalues.append(socket.inet_ntoa(struct.pack("!I", int(value))))
                        else:
                            newvalues.append(value)
                    bucket["keys"] = newvalues
                    bucket["key"] = "-".join(newvalues)
        # Build desc
        desc = self.request.es_query["query"]["bool"]["must"][0]["query"]["query_string"]["query"]
        if self.request.parsed.has_key("groupby"):
            desc += " (" + ",".join(self.request.parsed["groupby"][1:]) + ")"
        desc = "[%d] " % body.get("hits", {}).get("total", 0) + desc
        body = {
            "results": body,
            "query": self.request.parsed,
            "raw_query": self.request.raw_query,
            "es_query": self.request.es_query,
            "description": desc
        }
        return body

    def record(self, body):
        """Persist the search and its results; annotate ``body`` with the
        results/transcript row ids."""
        data = {
            "raw_query": self.request.raw_query,
            "query": self.request.parsed,
            "es_query": self.request.es_query
        }
        scope_id = self.get_argument("scope_id", None)
        if scope_id:
            scope_id = int(scope_id)
            body["scope_id"] = scope_id
        # Prefer an explicit ref_id, then the scope, then similarity with the
        # previous search.
        ref_id = self.get_argument("ref_id", None)
        if ref_id:
            body["ref_id"] = data["ref_id"] = int(ref_id)
        elif scope_id:
            body["ref_id"] = data["ref_id"] = int(scope_id)
        else:
            body["ref_id"] = data["ref_id"] = self.compare_searches(data)
        self.log.debug("ref_id: %r" % body["ref_id"])
        if not scope_id:
            scope_id = self.db.execute("SELECT id FROM scopes WHERE scope=?",
                ("default",)).fetchone()["id"]
        # Log to results (stored zlib-compressed + base64-encoded).
        body["results_id"] = self.db.execute("INSERT INTO results (user_id, results, timestamp) " +\
            "VALUES ((SELECT id FROM users WHERE user=?),?,?)",
            (DEFAULT_USER, base64.encodestring(zlib.compress(json.dumps(body))), time())).lastrowid
        # BUG FIX: this INSERT previously passed id["id"], but the lookup that
        # defined `id` was commented out, leaving the builtin id() -- a
        # guaranteed TypeError at runtime.  Use the results row id created above.
        body["transcript_id"] = self.db.execute("INSERT INTO transcript (user_id, action, data, description, " + \
            "ref_id, scope_id, results_id, timestamp) " +\
            "VALUES ((SELECT id FROM users WHERE user=?),?,?,?,?,?,?,?)",
            (self.user, "SEARCH", json.dumps(data), body["description"],
            body["ref_id"], scope_id, body["results_id"], time())).lastrowid
        return body

    @tornado.web.gen.coroutine
    def post(self, uri):
        """Proxy the ES query to the federation node, then fix up and record
        the response before returning it to the client."""
        self.request.host = self.passthrough_node
        self.request.uri = "/es/_search"
        uri = self.request.full_url()
        req = HTTPRequest(uri,
                          method=self.request.method,
                          body=self.request.body,
                          headers=self.request.headers,
                          follow_redirects=False,
                          allow_nonstandard_methods=True
                          )
        self.log.debug("Passing req through %r" % req.url)
        response = yield self.client.fetch(req, raise_error=False)
        self.log.debug("got response: %r" % response)
        if (response.error and not
                isinstance(response.error, tornado.httpclient.HTTPError)):
            self.set_status(500)
            self.write('Internal server error:\n' + str(response.error))
        else:
            self.set_status(response.code, response.reason)
            self._headers = tornado.httputil.HTTPHeaders()  # clear tornado default header
            for header, v in response.headers.get_all():
                if header not in ('Content-Length', 'Transfer-Encoding', 'Content-Encoding', 'Connection'):
                    self.add_header(header, v)  # some header appear multiple times, eg 'Set-Cookie'
            if response.body:
                # Apply any last minute field translations
                fixedup_body = self.fixup(response.body)
                fixedup_body = self.record(fixedup_body)
                fixedup_body = json.dumps(fixedup_body)
                self.set_header('Content-Length', len(fixedup_body))
                self.write(fixedup_body)
        self.finish()
class BaseWebHandler(tornado.web.RequestHandler):
    # Shared base for the JSON web-UI handlers: sets up the logger, the
    # default user, and JSON-flavored response defaults.

    def __init__(self, *args, **kwargs):
        super(BaseWebHandler, self).__init__(*args, **kwargs)

    def initialize(self, *args, **kwargs):
        # NOTE(review): extra args/kwargs are accepted but not forwarded to
        # the parent initialize -- subclasses pass db handles this way.
        super(BaseWebHandler, self).initialize()
        self.log = logging.getLogger("galaxy.web.handler")
        self.user = DEFAULT_USER
        self.set_status(200)
        self.set_header("Content-Type", "application/javascript")
class IndexHandler(BaseWebHandler):
    """Serves one fixed file (the index page) on every GET."""

    def initialize(self, filename, mimetype="text/html"):
        super(IndexHandler, self).initialize()
        self.filename = filename   # path of the file served on every GET
        self.mimetype = mimetype   # Content-Type reported to the client

    def get(self):
        self.set_header("Content-Type", self.mimetype)
        # FIX: close the file handle deterministically instead of leaking it
        # until garbage collection.
        with open(self.filename) as f:
            self.write(f.read())
class StaticHandler(BaseWebHandler):
    """Serves files from a content directory, picking Content-Type by file
    extension."""

    def __init__(self, *args, **kwargs):
        super(StaticHandler, self).__init__(*args, **kwargs)
        # Extension -> Content-Type table for the assets we expect to serve.
        self.mimemap = {
            "css": "text/css",
            "html": "text/html",
            "js": "application/javascript",
            "map": "application/javascript",
            "png": "image/png",
            "woff": "application/octet-stream",
            "woff2": "application/octet-stream",
            "jpg": "image/jpeg"
        }

    def initialize(self, path, mimetype="application/javascript"):
        super(StaticHandler, self).initialize()
        self.content_dir = path    # root directory files are read from
        self.mimetype = mimetype   # fallback Content-Type

    def get(self, path):
        extension = path.split(".")[-1]
        # FIX: an unknown extension previously raised an uncaught KeyError
        # (HTTP 500); fall back to the configured default Content-Type.
        self.mimetype = self.mimemap.get(extension, self.mimetype)
        self.set_header("Content-Type", self.mimetype)
        try:
            # FIX: close the handle deterministically rather than leaking it.
            with open(self.content_dir + "/" + path) as f:
                self.write(f.read())
        except IOError:
            self.set_status(404)
            self.set_header("Content-Type", "text/plain")
            self.write("Not found")
class BackgroundHandler(StaticHandler):
    """Serves a rotating background image selected by the ?t= counter."""

    def __init__(self, *args, **kwargs):
        super(BackgroundHandler, self).__init__(*args, **kwargs)

    def initialize(self, *args, **kwargs):
        # NOTE(review): the StaticHandler.initialize chain is deliberately
        # skipped (see the commented-out call in history); only the background
        # list is configured here -- confirm nothing relies on content_dir.
        self.backgrounds = kwargs["backgrounds"]

    def get(self):
        # Rotate deterministically through the configured backgrounds.
        idx = int(self.get_argument("t", 0))
        background = self.backgrounds[idx % len(self.backgrounds)]
        print(background)  # parenthesized form behaves identically on Py2/Py3
        extension = background.split(".")[-1]
        self.mimetype = self.mimemap[extension]
        self.set_header("Content-Type", self.mimetype)
        self.set_header("Cache-Control", "no-cache")
        # FIX: close the file handle deterministically instead of leaking it.
        with open(background) as f:
            self.write(f.read())
class TranscriptHandler(BaseWebHandler):
    # Read/append the per-user activity transcript (searches, tags, notes,
    # favorites, scopes), backed by the sqlite `transcript` table.

    def __init__(self, application, request, **kwargs):
        super(TranscriptHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.transcript_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(TranscriptHandler, self).initialize(*args, **kwargs)

    def get(self):
        # Return one entry when ?id= is given, else the latest visible
        # entries (?limit=, default 50), each joined with the referenced
        # search's description and its scope metadata.
        user = DEFAULT_USER
        req_id = self.get_argument("id", None)
        if req_id:
            needed_row = self.db.execute(
                "SELECT a.*, b.description AS referenced_search_description, " +\
                "c.scope, c.category, c.search AS scope_search " +\
                "FROM transcript AS a " +\
                "LEFT JOIN transcript AS b ON a.ref_id=b.id " +\
                "LEFT JOIN scopes AS c ON a.scope_id=c.id " +\
                "WHERE a.user_id=(SELECT id FROM users WHERE user=?) AND a.id=?",
                (user, req_id)).fetchone()
            self.write(json.dumps(needed_row))
            return
        limit = self.get_argument("limit", 50)
        self.set_status(200)
        self.set_header("Content-Type", "application/javascript")
        rows = self.db.execute(
            "SELECT a.*, b.description AS referenced_search_description, " +\
            "c.scope, c.category, c.search AS scope_search " +\
            "FROM transcript AS a " +\
            "LEFT JOIN transcript AS b ON a.ref_id=b.id " +\
            "LEFT JOIN scopes AS c ON a.scope_id=c.id " +\
            "WHERE a.user_id=(SELECT id FROM users WHERE user=?) AND a.visible=1 " +\
            "ORDER BY a.id DESC LIMIT ?", (user, limit)).fetchall()
        self.write(json.dumps(rows))

    def put(self):
        # Append a transcript entry.  TAG/FAVORITE/NOTE/SCOPE actions also
        # insert into their dedicated tables first.
        user = DEFAULT_USER
        action = self.get_argument("action")
        rawdata = self.get_argument("data", None)
        if rawdata:
            try:
                data = json.loads(rawdata)
            except Exception as e:
                # NOTE(review): this log line references `data` before it is
                # ever assigned (json.loads failed) -- NameError on this path.
                self.log.exception("Error parsing JSON from %s" % data, exc_info=e)
                self.set_status(400)
                self.write("data param must be in JSON format")
                return
        # NOTE(review): when no "data" argument is supplied, `data` is never
        # bound and the action branches below raise NameError -- confirm
        # callers always send data for these actions.
        description = self.get_argument("description", None)
        results_id = self.get_argument("results_id", None)
        ref_id = self.get_argument("ref_id", None)
        scope_id = self.get_argument("scope_id", None)
        self.log.debug("user: %s, action: %s, data: %s, description: %s, results_id: %s" %\
            (user, action, rawdata, description, results_id))
        if not scope_id and action != "SCOPE":
            # Get default scope ID
            scope_id = self.db.execute("SELECT id FROM scopes " +\
                "WHERE user_id=(SELECT id FROM users where user=?) AND scope=?",
                (user, "default")).fetchone()["id"]
        user_id = self.db.execute("SELECT id FROM users WHERE user=?", (user,)).fetchone()["id"]
        if action == "TAG":
            tag = data["tag"]
            value = data["value"]
            if not self.db.execute("INSERT INTO tags (user_id, tag, value, timestamp) " +\
                "VALUES (?,?,?,?)",
                (user_id, tag, value, time())).rowcount:
                self.set_status(400)
                self.write("Error tagging value")
                return
            self.log.debug("New tag %d %s=%s" % (user_id, tag, value))
        elif action == "FAVORITE":
            value = data["value"]
            if not self.db.execute("INSERT INTO favorites (user_id, value, timestamp) " +\
                "VALUES (?,?,?)",
                (user_id, value, time())).rowcount:
                self.set_status(400)
                self.write("Error setting favorite value")
                return
            self.log.debug("New favorite %d %s" % (user_id, value))
        elif action == "NOTE":
            note = data["note"]
            value = data["value"]
            if not self.db.execute("INSERT INTO notes (user_id, note, value, timestamp) " +\
                "VALUES (?,?,?,?)",
                (user_id, note, value, time())).rowcount:
                self.set_status(400)
                # NOTE(review): message and log line below say "favorite" --
                # looks copy-pasted from the FAVORITE branch.
                self.write("Error setting favorite value")
                return
            self.log.debug("New favorite %d %s" % (user_id, value))
        elif action == "SCOPE" and not scope_id:
            # Reuse an existing scope row when one matches, else create it.
            value = data["value"]
            description = data.get("description", None)
            search = data.get("search", None)
            category = data.get("category", None)
            scope_id = self.db.execute("SELECT * FROM scopes WHERE user_id=? AND scope=?",
                (user_id, value)).fetchone()
            if scope_id:
                scope_id = scope_id.get("id")
                self.log.debug("found scope_id: %s" % scope_id)
            if not scope_id:
                scope_id = self.db.execute("INSERT INTO scopes (user_id, scope, " +\
                    "category, search, created) " +\
                    "VALUES (?,?,?,?,?)",
                    (user_id, value, category, search, time())).lastrowid
                self.log.debug("New scope %d %d %s" % (scope_id, user_id, value))
        # Finally, append the transcript row itself (with or without a
        # results_id column depending on what the caller supplied).
        if results_id:
            self.db.execute("INSERT INTO transcript (user_id, action, data, " +\
                "description, ref_id, scope_id, results_id, timestamp) " +\
                "VALUES ((SELECT id FROM users WHERE user=?),?,?,?,?,?,?,?)",
                (user, action, rawdata, description, ref_id, scope_id, results_id, time()))
        else:
            self.db.execute("INSERT INTO transcript (user_id, action, data, " +\
                "description, ref_id, scope_id, timestamp) VALUES " + \
                "((SELECT id FROM users WHERE user=?),?,?,?,?,?,?)",
                (user, action, rawdata, description, ref_id, scope_id, time()))
        transcript_row = self.db.execute("SELECT * FROM transcript " +\
            "ORDER BY id DESC LIMIT 1").fetchone()
        self.set_status(200)
        self.set_header("Content-Type", "application/javascript")
        # NOTE(review): tornado's write() accepts str/bytes/dict; confirm the
        # db layer returns plain dicts rather than sqlite Row objects here.
        self.write(transcript_row)

    def post(self):
        # Currently only supports HIDE: mark a transcript row invisible.
        user = DEFAULT_USER
        action = self.get_argument("action")
        id = self.get_argument("id")
        self.log.debug("user: %s, action: %s, id: %s" % (user, action, id))
        if action == "HIDE":
            changed = self.db.execute("UPDATE transcript SET visible=0 " +\
                "WHERE user_id=(SELECT id FROM users WHERE user=?) " +\
                "AND id=?", (user, id)).rowcount
            if not changed:
                self.set_status(400)
                self.write("Bad request, unknown user or id")
                return
        else:
            self.set_status(400)
            self.write("Bad request, unknown action")
            return
        self.set_status(200)
        self.set_header("Content-Type", "application/javascript")
        self.write({"action": action, "id": id, "status": "ok"})
class SearchResultsHandler(BaseWebHandler):
    # Fetch one stored result set by id and return the decompressed JSON blob.

    def __init__(self, application, request, **kwargs):
        super(SearchResultsHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.search_result_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(SearchResultsHandler, self).initialize(*args, **kwargs)

    def get(self, id):
        # Validate the id, look up the row for the (fixed) default user, and
        # stream the stored results payload back to the client.
        user = DEFAULT_USER
        try:
            id = int(id)
        except Exception as e:
            self.log.exception("Failed to parse id", exc_info=e)
            self.set_status(400)
            self.write("Invalid id")
            self.finish()
            return
        result = self.db.execute("SELECT * FROM results " +\
            "WHERE user_id=(SELECT id FROM users WHERE user=?) AND id=?",
            (user, id)).fetchone()
        if not result:
            self.set_status(404)
            self.finish()
            return
        self.set_status(200)
        self.set_header("Content-Type", "application/javascript")
        # Results are stored zlib-compressed and base64-encoded (see
        # SearchHandler.record); reverse both transformations here.
        # NOTE(review): base64.decodestring is deprecated (removed in
        # Python 3.9); decodebytes is the long-term spelling.
        self.write(zlib.decompress(base64.decodestring(result["results"])))
class TagsHandler(BaseWebHandler):
    """List and delete per-user tags."""

    def __init__(self, application, request, **kwargs):
        super(TagsHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.tags_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(TagsHandler, self).initialize(*args, **kwargs)

    def get(self):
        """Return this user's most recent tags as JSON (?limit=, default 50)."""
        limit = self.get_argument("limit", 50)
        rows = self.db.execute(
            "SELECT * FROM tags "
            "WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "ORDER BY timestamp DESC LIMIT ?", (self.user, limit)).fetchall()
        self.write(json.dumps(rows))

    def delete(self):
        """Delete one (tag, value) pair; respond with the affected row count."""
        tag = self.get_argument("tag")
        value = self.get_argument("value")
        removed = self.db.execute(
            "DELETE FROM tags "
            "WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "AND tag=? AND value=?", (self.user, tag, value)).rowcount
        self.write(json.dumps({"ok": removed}))
class FavoritesHandler(BaseWebHandler):
    """List and delete per-user favorite values."""

    def __init__(self, application, request, **kwargs):
        super(FavoritesHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.favorites_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(FavoritesHandler, self).initialize(*args, **kwargs)

    def get(self):
        """Return this user's favorites, newest first, as JSON."""
        limit = self.get_argument("limit", 50)
        rows = self.db.execute(
            "SELECT * FROM favorites "
            "WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "ORDER BY timestamp DESC LIMIT ?", (self.user, limit)).fetchall()
        self.write(json.dumps(rows))

    def delete(self):
        """Remove one favorite value; respond with the affected row count."""
        value = self.get_argument("value")
        removed = self.db.execute(
            "DELETE FROM favorites "
            "WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "AND value=?", (self.user, value)).rowcount
        self.write(json.dumps({"ok": removed}))
class ScopesHandler(BaseWebHandler):
    """Read and delete per-user search scopes, grouped by category."""

    def __init__(self, application, request, **kwargs):
        super(ScopesHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.scopes_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(ScopesHandler, self).initialize(*args, **kwargs)

    def get(self):
        """Return scopes as {category: {scope: search}} JSON."""
        limit = self.get_argument("limit", 50)
        rows = self.db.execute("SELECT * FROM scopes " +\
            "WHERE user_id=(SELECT id FROM users WHERE user=?) " +\
            "ORDER BY created DESC LIMIT ?", (self.user, limit)).fetchall()
        ret = {}
        for row in rows:
            if not ret.has_key(row["category"]):
                ret[ row["category"] ] = {}
            ret[ row["category"] ][ row["scope"] ] = row["search"]
        self.log.debug('ret: %r' % ret)
        self.write(json.dumps(ret))

    def delete(self):
        """Delete one scope by name; respond with the affected row count.

        BUG FIX: this previously ran ``DELETE FROM favorites ... AND value=?``
        -- a copy-paste from FavoritesHandler -- so it deleted favorites (and
        could never delete a scope).  It now targets the ``scopes`` table,
        matching on the ``scope`` column this class reads in get().
        """
        value = self.get_argument("value")
        self.write(json.dumps({"ok": self.db.execute("DELETE FROM scopes " +\
            "WHERE user_id=(SELECT id FROM users WHERE user=?) " +\
            "AND scope=?", (self.user, value)).rowcount}))
class AlertGetterHandler(BaseWebHandler):
    """List stored alerts and create new ones for the current user."""

    def __init__(self, application, request, **kwargs):
        super(AlertGetterHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.alert_getter_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(AlertGetterHandler, self).initialize(*args, **kwargs)

    def get(self):
        """Page through this user's alerts, newest first (?limit=, ?offset=)."""
        limit = self.get_argument("limit", 50)
        offset = self.get_argument("offset", 0)
        rows = self.db.execute(
            "SELECT * FROM alerts "
            "WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "ORDER BY created DESC LIMIT ?,?", (self.user, offset, limit)).fetchall()
        self.write(json.dumps(rows))

    def put(self):
        """Create an alert and echo the stored row back as JSON."""
        query = self.get_argument("query")
        title = self.get_argument("title")
        interval = self.get_argument("interval", DEFAULT_ALERT_INTERVAL)
        throttle = self.get_argument("throttle", DEFAULT_ALERT_THROTTLE)
        new_id = self.db.execute(
            "INSERT INTO alerts (user_id, title, query, created, interval, throttle) "
            "VALUES((SELECT id FROM users WHERE user=?),?,?,?,?,?)",
            (self.user, title, query, time(), interval, throttle)).lastrowid
        created_row = self.db.execute("SELECT * FROM alerts WHERE id=?", (new_id,)).fetchone()
        self.write(json.dumps(created_row))
class AlertManagementHandler(BaseWebHandler):
    """Fetch, delete, or update a single alert (/alerts/<id>[/<field>])."""

    def __init__(self, application, request, **kwargs):
        super(AlertManagementHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.alert_management_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(AlertManagementHandler, self).initialize(*args, **kwargs)

    def _bad_request(self, error):
        # FIX: BaseWebHandler does not inherit BaseHandler._bad_request, so
        # the error paths below previously raised AttributeError.  Mirror
        # BaseHandler's implementation here.
        self.set_status(400)
        self.write(json.dumps({"error": error}))
        self.finish()

    def _prepare(self, args):
        # Parse positional URL captures: args[0] is the alert id, optional
        # args[1] is the field being updated.
        self.log.debug("type args: %s" % type(args))
        self.log.debug("args: %r" % args)
        self.id = int(args[0])
        # FIX: self.field was only bound when a field capture was present, so
        # post() on /alerts/<id> raised AttributeError instead of a clean 400.
        self.field = args[1] if len(args) > 1 else None

    def get(self, *args):
        """Return one alert owned by the current user as JSON."""
        self._prepare(list(args))
        self.write(json.dumps(
            self.db.execute("SELECT * FROM alerts WHERE user_id=" +\
                "(SELECT id FROM users WHERE user=?) and id=?",
                (self.user, self.id)).fetchone()
        ))

    def delete(self, *args):
        """Delete one alert; respond with the affected row count."""
        self._prepare(list(args))
        self.write(json.dumps(
            {
                "ok": self.db.execute("DELETE FROM alerts " +\
                    "WHERE user_id=(SELECT id FROM users WHERE user=?) " +\
                    "AND id=?", (self.user, self.id)).rowcount
            }
        ))

    def post(self, *args):
        """Update one whitelisted field of an alert and return the new row."""
        self._prepare(list(args))
        if not self.field or \
                self.field not in ["throttle", "active", "title", "query", "interval"]:
            self._bad_request("Invalid field")
            return
        value = self.get_argument("value")
        if not value:
            self._bad_request("No value.")
            return
        if self.field in ["throttle", "active", "interval"]:
            try:
                value = int(value)
            except ValueError:  # FIX: was a bare except
                self._bad_request("Invalid value, must be numeric.")
                return
        # self.field is interpolated into the SQL, but only after passing the
        # whitelist check above, so this is not an injection vector.
        self.db.execute(("UPDATE alerts SET %s=?, updated=? WHERE id=? AND user_id=" +\
            "(SELECT id FROM users WHERE user=?)") % self.field, (value, time(), self.id, self.user))
        self.write(json.dumps(
            self.db.execute("SELECT * FROM alerts WHERE id=? AND user_id=" +\
                "(SELECT id FROM users WHERE user=?)", (self.id, self.user)).fetchone()
        ))
class NotificationsHandler(BaseWebHandler):
    """List and dismiss alert notifications for the current user."""

    def __init__(self, application, request, **kwargs):
        super(NotificationsHandler, self).__init__(application, request, **kwargs)
        self.log = logging.getLogger("galaxy.notifications_handler")
        self.db = kwargs["db"]

    def initialize(self, *args, **kwargs):
        super(NotificationsHandler, self).initialize(*args, **kwargs)

    def get(self):
        """Return notifications joined with their alert metadata as JSON.

        Only active notifications are returned unless ?all= is supplied.
        """
        limit = self.get_argument("limit", 50)
        inactive = self.get_argument("all", None)
        clause = "1=1" if inactive else "t1.active=1"
        query = ("SELECT t1.id AS id, t1.type, t1.message, t1.timestamp AS timestamp, " +\
            "t2.results_id, t3.title, t3.query FROM notifications t1 " +\
            "JOIN alert_results t2 ON t1.alert_results_id=t2.id " +\
            "JOIN alerts t3 ON t2.alert_id=t3.id " +\
            "WHERE %s AND t1.user_id=(SELECT id FROM users WHERE user=?) " +\
            "ORDER BY timestamp DESC LIMIT ?") % clause
        self.write(json.dumps(self.db.execute(query, (self.user, limit)).fetchall()))

    def delete(self):
        """Mark one notification inactive; respond with the affected count."""
        note_id = int(self.get_argument("id"))
        dismissed = self.db.execute(
            "UPDATE notifications "
            "SET active=0 WHERE user_id=(SELECT id FROM users WHERE user=?) "
            "AND id=?", (self.user, note_id)).rowcount
        self.write(json.dumps({"ok": dismissed}))
|
mcholste/galaxy
|
lib/handlers.py
|
Python
|
mit
| 26,452
|
[
"Galaxy"
] |
cf8a198a3ef7fb8f561781a79c3f328e2fedeae390fa16b45a969c31c2aabf2e
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Small test configuration, verifying that training loop runs without errors."""
import ml_collections
def config_dict(**kwargs):
    """Build an ml_collections.ConfigDict from keyword arguments."""
    return ml_collections.ConfigDict(initial_dictionary=kwargs)
def get_config():
    """Return a deliberately tiny D3PM config for smoke-testing training.

    Model width, timesteps, batch size, and step counts are all minimal so
    the full training loop finishes quickly without real data.
    """
    return config_dict(
        seed=0,
        dataset=config_dict(
            # Mock dataset: exercises the CIFAR-10 pipeline without downloads.
            name='MockCIFAR10',
            args=config_dict(
                class_conditional=False,
                randflip=True,
            ),
        ),
        model=config_dict(
            # architecture, see main.py and model.py
            name='unet0',
            args=config_dict(
                ch=4,
                out_ch=3,
                ch_mult=[1, 2],
                num_res_blocks=1,
                attn_resolutions=[4],
                num_heads=1,
                dropout=0.1,
                model_output='logistic_pars',  # logits or logistic_pars
            ),
            # diffusion betas, see diffusion_categorical.get_diffusion_betas
            diffusion_betas=config_dict(
                type='linear',
                # start, stop only relevant for linear, power, jsdtrunc schedules.
                start=1.e-4,  # 1e-4 gauss, 0.02 uniform
                stop=0.02,  # 0.02, gauss, 1. uniform
                num_timesteps=10,
            ),
            # Settings used in diffusion_categorical.py
            model_prediction='x_start',  # 'x_start','xprev'
            # 'gaussian','uniform','absorbing'
            transition_mat_type='gaussian',
            transition_bands=None,
            loss_type='hybrid',  # kl,cross_entropy_x_start, hybrid
            hybrid_coeff=0.001,  # only used for hybrid loss type.
        ),
        train=config_dict(
            # optimizer
            batch_size=2,
            optimizer='adam',
            learning_rate=2e-4,
            learning_rate_warmup_steps=0,
            weight_decay=0.0,
            ema_decay=0.9999,
            grad_clip=1.0,
            substeps=2,
            num_train_steps=20,  # multiple of substeps
            # logging
            log_loss_every_steps=10,
            checkpoint_every_secs=1,
            retain_checkpoint_every_steps=10,
            eval_every_steps=10,
        ))
|
google-research/google-research
|
d3pm/images/main_test_config.py
|
Python
|
apache-2.0
| 2,693
|
[
"Gaussian"
] |
9e83548be97a0c04cc4627463b14274b502ba5da6728bd230172e85ff4cbc906
|
# This file creates 1D visualizations for our model
# Can create channel or neuron visualizwtions
import numpy as np
import lucid.optvis.objectives as objectives
import lucid.optvis.render as render
import dla_lucid
from dla_lucid import DLA
def vis_channel(model, layer, channel_n):
    """Render a feature visualization for one channel of a layer.

    :param model: model we are visualizing (lucid.modelzoo)
    :param layer: name of the layer, looked up in dla_lucid.LAYERS
    :param channel_n: index of the channel to optimize for
    :return: flat numpy array of 400 pixel values
    """
    print('Getting vis for ' + layer + ', channel ' + str(channel_n))
    tensor_name = dla_lucid.LAYERS[layer][0]
    objective = objectives.channel(tensor_name, channel_n)
    rendered = render.render_vis(model, objective, dla_lucid.PARAM_1D,
                                 thresholds=dla_lucid.THRESH_1D,
                                 transforms=dla_lucid.TFORMS_1D, verbose=False)
    return np.array(rendered).reshape(400)
def vis_neuron(model, layer, channel_n):
    """Render a feature visualization for a single neuron in a layer.

    The neuron objective defaults to the center neuron of the channel.

    :param model: model we are visualizing (lucid.modelzoo)
    :param layer: name of the layer, looked up in dla_lucid.LAYERS
    :param channel_n: index of the channel to optimize for
    :return: flat numpy array of 400 pixel values
    """
    print('getting vis for ' + layer + ', channel ' + str(channel_n))
    tensor_name = dla_lucid.LAYERS[layer][0]
    objective = objectives.neuron(tensor_name, channel_n)
    rendered = render.render_vis(model, objective, dla_lucid.PARAM_1D,
                                 thresholds=dla_lucid.THRESH_1D,
                                 transforms=dla_lucid.TFORMS_1D, verbose=False)
    return np.array(rendered).reshape(400)
def vis_layer(model, layer, channel):
    """
    Create visualizations for every channel of a layer.

    :param model: model we are visualizing
    :type model: lucid.modelzoo
    :param layer: name of the layer we are optimizing for
    :type layer: string
    :param channel: True for channel visualizations, False for neuron visualizations
    :type channel: boolean
    :return: array of all pixel values of the layer's visualizations
    """
    # Pick the per-channel renderer once; the flag never changes mid-layer.
    render_one = vis_channel if channel is True else vis_neuron
    channel_count = dla_lucid.LAYERS[layer][1]
    return np.array([render_one(model, layer, idx) for idx in range(channel_count)])
def save_layer(model, layer, path, channel):
    """
    Render all visualizations for a layer via vis_layer() and save them
    as a single .npy file named after the layer.

    :param model: model we are optimizing for
    :type model: lucid.modelzoo
    :param layer: name of the layer being visualized
    :type layer: string
    :param path: directory to save visualizations to, must already exist
    :type path: string
    :param channel: True for channel vis, False for neuron vis
    :type channel: boolean
    :return: nothing
    """
    layer_visualizations = vis_layer(model, layer, channel)
    np.save(path + layer, layer_visualizations)
def main():
    """Render and save neuron and channel visualizations for every layer of the DLA model."""
    model = DLA()
    layer_names = ['conv1', 'conv1_relu', 'pool1',
                   'conv2', 'conv2_relu', 'pool2',
                   'conv3', 'conv3_relu', 'pool3']
    # First pass saves neuron visualizations, second pass channel visualizations
    # (same behavior as calling save_layer once per layer per mode).
    for use_channel in (False, True):
        for name in layer_names:
            save_layer(model, name, 'data/', use_channel)
if __name__ == "__main__":
    main()
|
davidparks21/qso_lya_detection_pipeline
|
lucid_work/vis_1d.py
|
Python
|
mit
| 4,602
|
[
"NEURON"
] |
b9af82313338f551766bb63892ee60c50444363a99c6098689120d60e98a054c
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
import logging
logger = logging.getLogger('remindor_qt')
import gettext
from gettext import gettext as _
gettext.textdomain('remindor-common')
from remindor_qt import helpers
from remindor_qt.CommandDialog import CommandDialog
from remindor_qt.DateDialog import DateDialog
from remindor_qt.TimeDialog import TimeDialog
from remindor_qt.remindor_qtconfig import get_data_file
from remindor_common.helpers import ReminderDialogInfo, insert_values, valid_date, valid_time
from remindor_common import datetimeutil, database as db
class ReminderDialog(QDialog):
    """Qt dialog for adding a new reminder or editing an existing one.

    Emits ``added(int)`` with the database id of the reminder after a
    successful add/save.  ``delete_id`` is the id of the reminder being
    edited (-1 when adding); it is forwarded to the backend so the old
    database row can be replaced.
    """
    added = Signal(int)
    delete_id = -1
    def __init__(self, parent = None):
        """Load the .ui layout, look up all widgets by name, and fill in defaults."""
        super(ReminderDialog, self).__init__(parent)
        helpers.setup_ui(self, "ReminderDialog.ui")
        # Dialog-level buttons.
        self.help_button = self.findChild(QPushButton, "help_button")
        self.cancel_button = self.findChild(QPushButton, "cancel_button")
        self.add_button = self.findChild(QPushButton, "add_button")
        self.save_button = self.findChild(QPushButton, "save_button")
        self.save_button.hide()
        self.tabs = self.findChild(QTabWidget, "tabs")
        # "Reminder" tab widgets.
        self.label_label = self.findChild(QLabel, "label_label")
        self.time_label = self.findChild(QLabel, "time_label")
        self.date_label = self.findChild(QLabel, "date_label")
        self.command_label = self.findChild(QLabel, "command_label")
        self.notes_label = self.findChild(QLabel, "notes_label")
        self.label_edit = self.findChild(QLineEdit, "label_edit")
        self.time_edit = self.findChild(QLineEdit, "time_edit")
        self.date_edit = self.findChild(QLineEdit, "date_edit")
        self.command_edit = self.findChild(QLineEdit, "command_edit")
        self.notes_edit = self.findChild(QPlainTextEdit, "notes_edit")
        self.time_button = self.findChild(QPushButton, "time_button")
        self.date_button = self.findChild(QPushButton, "date_button")
        self.command_button = self.findChild(QPushButton, "command_button")
        self.insert_button = self.findChild(QPushButton, "notes_button")
        # Inline validation indicators; hidden until a bad value is entered.
        self.time_error = self.findChild(QToolButton, "time_error")
        self.time_error.hide()
        self.date_error = self.findChild(QToolButton, "date_error")
        self.date_error.hide()
        # "Notification" tab widgets.
        self.popup_check = self.findChild(QCheckBox, "popup_check")
        self.dialog_check = self.findChild(QCheckBox, "dialog_check")
        self.boxcar_check = self.findChild(QCheckBox, "boxcar_check")
        self.boxcar_label = self.findChild(QLabel, "boxcar_label")
        self.boxcar_label.hide()
        self.pushbullet_device_label = self.findChild(QLabel, 'pushbullet_device_label')
        self.pushbullet_device_edit = self.findChild(QComboBox, 'pushbullet_device_edit')
        self.pushbullet_info_label = self.findChild(QLabel, 'pushbullet_info_label')
        self.pushbullet_refresh = self.findChild(QPushButton, 'pushbullet_refresh')
        # "Sound" tab widgets.
        self.sound_label = self.findChild(QLabel, "sound_label")
        self.file_label = self.findChild(QLabel, "file_label")
        self.length_label = self.findChild(QLabel, "length_label")
        self.loop_label = self.findChild(QLabel, "loop_label")
        self.length_label2 = self.findChild(QLabel, "length_label2")
        self.sound_check = self.findChild(QCheckBox, "sound_check")
        self.file_edit = self.findChild(QLineEdit, "file_edit")
        self.length_spin = self.findChild(QSpinBox, "length_spin")
        self.loop_check = self.findChild(QCheckBox, "loop_check")
        self.insert_combo = self.findChild(QComboBox, "insert_combo");
        # Backend helper providing defaults, validation and persistence.
        self.info = ReminderDialogInfo(helpers.database_file())
        self.set_data(self.info.label, self.info.time, self.info.date, self.info.command,
            self.info.notes, self.info.popup, self.info.dialog, self.info.boxcar,
            self.info.pushbullet_device, self.info.sound_file,
            self.info.sound_length, self.info.sound_loop)
        self.translate()
    def translate(self):
        """Apply translated strings to every user-visible widget."""
        self.setWindowTitle(_("Add Reminder"))
        self.help_button.setText(_("Help"))
        self.cancel_button.setText(_("Cancel"))
        self.add_button.setText(_("Add"))
        self.save_button.setText(_("Save"))
        # Order must match remindor_common.helpers.insert_values,
        # which on_notes_button_pressed indexes by combo position.
        inserts = [
            _("Date"),
            _("Month"),
            _("Month Name"),
            _("Day"),
            _("Day Name"),
            _("Day of Year"),
            _("Year"),
            _("Time"),
            _("Hour (24)"),
            _("Hour (12)"),
            _("Minutes"),
            _("Seconds"),
            _("Microseconds"),
            _("Sound File/Path"),
            _("Sound File"),
            _("Command")
        ]
        self.insert_combo.clear()
        self.insert_combo.addItems(inserts)
        self.tabs.setTabText(0, _("Reminder"))
        self.label_label.setText(_("Label"))
        self.time_label.setText(_("Time"))
        self.date_label.setText(_("Date"))
        self.command_label.setText(_("Command"))
        self.notes_label.setText(_("Notes:"))
        self.time_button.setText(_("Edit"))
        self.date_button.setText(_("Edit"))
        self.command_button.setText(_("Edit"))
        self.insert_button.setText(_("Insert"))
        self.tabs.setTabText(1, _("Notification"))
        self.popup_check.setText(_("Popup"))
        self.dialog_check.setText(_("Dialog Box"))
        #self.boxcar_check #doesn't need translated
        self.boxcar_label.setText(_("Boxcar has not been\nsetup in Preferences"))
        self.pushbullet_device_label.setText(_('Pushbullet Device'))
        self.pushbullet_refresh.setText(_('Refresh'))
        self.pushbullet_info_label.setText(_('Pushbullet has not been\nsetup in Preferences'))
        self.tabs.setTabText(3, _("Sound"))
        self.sound_label.setText(_("Play Sound"))
        self.file_label.setText(_("Sound File"))
        self.length_label.setText(_("Play Length"))
        self.loop_label.setText(_("Loop"))
        self.length_label2.setText(_("s (0 for end)"))
    @Slot()
    def on_add_button_pressed(self):
        """Validate the form and persist the reminder; emit added(id) on success."""
        label = self.label_edit.text()
        time = self.time_edit.text()
        date = self.date_edit.text()
        command = self.command_edit.text()
        notes = self.notes_edit.document().toPlainText()
        popup = self.popup_check.isChecked()
        dialog = self.dialog_check.isChecked()
        boxcar = self.boxcar_check.isChecked()
        pushbullet_device = self.info.get_pushbullet_id(self.pushbullet_device_edit.currentIndex(), self.info.pushbullet_devices)
        play = self.sound_check.isChecked()
        sound_file = self.file_edit.text()
        sound_length = self.length_spin.value()
        sound_loop = self.loop_check.isChecked()
        # NOTE(review): the trailing True presumably enables backend validation
        # (the retry below omits it) -- confirm against ReminderDialogInfo.reminder.
        (status, id) = self.info.reminder(label, time, date, command, notes, popup, dialog,
            boxcar, play, sound_file, sound_length, sound_loop,
            pushbullet_device, self.delete_id, True)
        if status == self.info.ok:
            self.added.emit(id)
            self.accept()
        else:
            # Map each failure status to the matching inline error / message box.
            if status == self.info.file_error:
                title = _("File does not exist")
                message = ""
                if sound_file != "":
                    message = "%s\n\n%s" % (_("The following file does not exist.\nPlease choose another sound file."), sound_file)
                else:
                    message = _("Please choose a sound file.")
                QMessageBox.warning(self, title, message)
            elif status == self.info.time_error:
                self.time_error.show()
                self.time_edit.setFocus()
            elif status == self.info.date_error:
                self.date_error.show()
                self.date_edit.setFocus()
            elif status == self.info.notify_warn:
                title = _("Empty Notifications")
                message = _("The label and notes for this reminder are empty,\nwould you still like to use a notification?")
                ans = QMessageBox.question(self, title, message, QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
                if ans == QMessageBox.Yes:
                    (status, id) = self.info.reminder(label, time, date, command, notes,
                        popup, dialog, boxcar, play,
                        sound_file, sound_length, sound_loop,
                        pushbullet_device, self.delete_id)
                    #already checked the status (boxcar is the last check)
                    self.added.emit(id)
                    self.accept()
    @Slot()
    def on_cancel_button_pressed(self):
        """Close the dialog without saving."""
        self.reject()
    @Slot()
    def on_help_button_pressed(self):
        """Open the HTML help page for adding reminders."""
        helpers.show_html_help("add")
    @Slot()
    def on_time_button_pressed(self):
        """Open the time-picker dialog, pre-filled with the normalized current value."""
        simple_time = datetimeutil.str_time_simplify(self.time_edit.text())
        fixed_time = datetimeutil.fix_time_format(simple_time, self.info.time_format)
        dialog = TimeDialog(fixed_time, self)
        dialog.update.connect(self.time_updated)
        dialog.exec_()
    @Slot()
    def time_updated(self, time_s):
        """Receive the time chosen in TimeDialog."""
        self.time_edit.setText(time_s)
    @Slot()
    def on_time_edit_textEdited(self):
        """Live-validate the time field, toggling the inline error indicator."""
        if valid_time(self.time_edit.text()):
            self.time_error.hide()
        else:
            self.time_error.show()
    @Slot()
    def on_date_button_pressed(self):
        """Open the date-picker dialog, pre-filled with the normalized current value."""
        simple_date = datetimeutil.str_date_simplify(self.date_edit.text(), self.info.date_format)
        fixed_date = datetimeutil.fix_date_format(simple_date, self.info.date_format)
        dialog = DateDialog(fixed_date, self)
        dialog.update.connect(self.date_updated)
        dialog.exec_()
    @Slot()
    def date_updated(self, date_s):
        """Receive the date chosen in DateDialog."""
        self.date_edit.setText(date_s)
    @Slot()
    def on_date_edit_textEdited(self):
        """Live-validate the date field, toggling the inline error indicator."""
        if valid_date(self.date_edit.text(), self.info.date_format):
            self.date_error.hide()
        else:
            self.date_error.show()
    @Slot()
    def on_command_button_pressed(self):
        """Open the command editor dialog."""
        dialog = CommandDialog(self.command_edit.text(), self)
        dialog.update.connect(self.command_updated)
        dialog.exec_()
    @Slot()
    def command_updated(self, command):
        """Receive the command chosen in CommandDialog."""
        self.command_edit.setText(command)
    @Slot()
    def on_notes_button_pressed(self):
        """Insert the selected placeholder token at the cursor in the notes field."""
        index = self.insert_combo.currentIndex()
        self.notes_edit.insertPlainText(insert_values[index])
    @Slot()
    def on_file_button_pressed(self):
        """Let the user pick a sound file, starting in the bundled sounds directory."""
        caption = _("Choose Sound")
        sound_dir = get_data_file('media', 'sounds')
        file_filter = _("Sounds (*.mp3 *.ogg *.wav);;MP3 (*.mp3);;Ogg (*.ogg);;WAVE (*.wav)")
        (filename, selected_filter) = QFileDialog.getOpenFileName(self, caption, sound_dir, file_filter)
        self.file_edit.setText(filename)
    @Slot()
    def on_sound_check_toggled(self):
        """Enable the play-length spinner only when sound is on and not looping."""
        if not self.sound_check.isChecked():
            self.length_spin.setEnabled(False)
        else:
            if self.loop_check.isChecked():
                self.length_spin.setEnabled(False)
            else:
                self.length_spin.setEnabled(True)
    @Slot()
    def on_pushbullet_refresh_clicked(self):
        """Re-query the Pushbullet API for devices and rebuild the combo box."""
        self.info.refresh_pushbullet_devices(self.info.pushbullet_api_key)
        self.refresh_pushbullet_combobox()
    def refresh_pushbullet_combobox(self):
        """Repopulate the device combo box, with a 'None' entry at index 0."""
        devices = list(self.info.pushbullet_devices)
        devices.insert(0, {'id': -1, 'name': _('None')})
        self.pushbullet_device_edit.clear()
        for device in devices:
            self.pushbullet_device_edit.addItem(device['name'])
        self.pushbullet_device_edit.setCurrentIndex(self.info.pushbullet_device_index)
    def edit(self, reminder):
        """Switch to edit mode and load the reminder with database id <reminder>."""
        self.save_button.show()
        self.add_button.hide()
        self.setWindowTitle(_("Edit Reminder"))
        self.database = db.Database(helpers.database_file())
        r = self.database.alarm(reminder)
        self.database.close()
        self.set_data(r.label, datetimeutil.fix_time_format(r.time, self.info.time_format),
            datetimeutil.fix_date_format(r.date, self.info.date_format), r.command, r.notes,
            r.notification, r.dialog, r.boxcar, r.pushbullet_device, r.sound_file, r.sound_length, r.sound_loop)
        # Remember which row to replace on save.
        self.delete_id = reminder
    def set_data(self, label, time, date, command, notes, popup,
            dialog, boxcar, pushbullet_device, sound_file, length, loop):
        """Populate every widget from the given reminder values, disabling
        Boxcar/Pushbullet controls when those services are not configured."""
        self.label_edit.setText(label)
        self.time_edit.setText(time)
        self.date_edit.setText(date)
        self.command_edit.setText(command)
        self.notes_edit.setPlainText(notes)
        self.popup_check.setChecked(popup)
        self.dialog_check.setChecked(dialog)
        self.boxcar_check.setChecked(boxcar)
        if not self.info.boxcar_ok:
            self.boxcar_check.setChecked(False)
            self.boxcar_check.setDisabled(True)
            self.boxcar_label.show()
        if self.info.pushbullet_ok:
            self.pushbullet_info_label.hide()
            self.pushbullet_device_edit.setEnabled(True)
            self.pushbullet_refresh.setEnabled(True)
            self.refresh_pushbullet_combobox()
            self.pushbullet_device_edit.setCurrentIndex(self.info.get_pushbullet_index(pushbullet_device))
        else:
            self.pushbullet_info_label.show()
            self.pushbullet_device_edit.setEnabled(False)
            self.pushbullet_refresh.setEnabled(False)
        if sound_file is not None and not sound_file == "":
            self.sound_check.setChecked(True)
        else:
            self.sound_check.setChecked(True) #to trigger disabling of elements
            self.sound_check.setChecked(False)
        self.file_edit.setText(sound_file)
        self.length_spin.setValue(length)
        self.loop_check.setChecked(loop)
        self.loop_check.setText(_("(will loop %s times)") % self.info.sound_loop_times)
|
bhdouglass/remindor-qt
|
remindor_qt/ReminderDialog.py
|
Python
|
gpl-3.0
| 14,927
|
[
"Brian"
] |
98c12b28354998da5a90c992bbd7b7ef99ea6e5bdd81d8f6759fb1f17dd26bcb
|
import sys
import numpy as np
from ase.units import Bohr, Hartree
from ase.parallel import paropen
from ase.data import vdw_radii
import _gpaw
from gpaw.io.fmf import FMF
class ExteriorElectronDensity:
    """Exterior electron density to describe MIES spectra.
    Simple approach to describe MIES spectra after
    Y. Harada et al., Chem. Rev. 97 (1997) 1897

    NOTE: this module uses Python 2 print-to-file syntax (``print >> out``).
    """
    def __init__(self, gd, atoms):
        """Find the grid points outside of the van der Waals radii
        of the atoms"""
        assert gd.orthogonal
        self.gd = gd
        n = len(atoms)
        # Atomic positions converted to Bohr (atomic units).
        atom_c = atoms.positions / Bohr
        vdWradius = np.empty((n))
        for a, atom in enumerate(atoms):
            vdWradius[a] = self.get_vdWradius(atom.number)
        # define the exterior region mask (1 = outside all vdW spheres)
        mask = gd.empty(dtype=int)
        _gpaw.eed_region(mask, atom_c, gd.beg_c, gd.end_c,
                         gd.h_cv.diagonal().copy(), vdWradius)
        self.mask = mask
    def get_weight(self, psit_G):
        """Get the weight of a wave function in the exterior region
        (outside of the van der Waals radius). The augmentation sphere
        is assumed to be smaller than the van der Waals radius and hence
        does not contribute."""
        # smooth part: integrate |psi|^2 only where the exterior mask is set
        weigth = self.gd.integrate(np.where(self.mask == 1,
                                            (psit_G * psit_G.conj()).real,
                                            0.0))
        return weigth
    def get_vdWradius(self, Z):
        """Return van der Waals radius in Bohr"""
        r = vdw_radii[Z] / Bohr
        if np.isnan(r):
            msg = 'van der Waals radius for Z=' + str(Z) + ' not known!'
            raise RuntimeError(msg)
        else:
            return r
    def write_mies_weights(self, wfs, file=None):
        # Write one FMF-formatted table row per band and k-point;
        # <file> may be a filename or an open file-like object.
        if file is None:
            file = 'eed_mies.dat'
        if isinstance(file, str):
            # NOTE(review): mode 'aw' is unusual -- presumably append ('a')
            # was intended; confirm ase.parallel.paropen semantics.
            out = paropen(file, 'aw')
        else:
            out = file
        fmf = FMF(['exterior electron density weights after',
                   'Y. Harada et al., Chem. Rev. 97 (1997) 1897'])
        print >> out, fmf.header(),
        print >> out, fmf.data(['band index: n',
                                'k-point index: k',
                                'spin index: s',
                                'k-point weight: weight',
                                'energy: energy [eV]',
                                'occupation number: occ',
                                'relative EED weight: eed_weight']),
        print >> out, '#; n k s weight energy occ eed_weight'
        for kpt in wfs.kpt_u:
            for n in range(wfs.bd.nbands):
                print >> out, '%4d %3d %1d %8.5f %10.5f %10.5f %10.5f' % \
                    (n, kpt.k, kpt.s, kpt.weight,
                     kpt.eps_n[n] * Hartree,
                     kpt.f_n[n],
                     self.get_weight(kpt.psit_nG[n])
                     )
        if hasattr(out, 'flush'):
            out.flush()
|
robwarm/gpaw-symm
|
gpaw/analyse/eed.py
|
Python
|
gpl-3.0
| 3,089
|
[
"ASE",
"GPAW"
] |
4ce7fde8a4cec3638a1adce8da26848eb3e887d6cdc6ec08e80a619ca6924655
|
"""
This software is an implementation of
Deep MRI brain extraction: A 3D convolutional neural network for skull stripping
You can download the paper at http://dx.doi.org/10.1016/j.neuroimage.2016.01.024
If you use this software for your projects please cite:
Kleesiek and Urban et al, Deep MRI brain extraction: A 3D convolutional neural network for skull stripping,
NeuroImage, Volume 129, April 2016, Pages 460-469.
The MIT License (MIT)
Copyright (c) 2016 Gregor Urban, Jens Kleesiek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'NNet_Core'))
import numpy as np
import random
import itertools as it
import file_reading
def outputsize_after_convpool(img, filt, pool):
    """Edge length of the output after valid convolution with filter size
    filt[0] followed by pooling by pool[0], recursing through the remaining
    layers (filt/pool are lists ordered first layer to last)."""
    reduced = int(1.0 / pool[0] * (img - filt[0] + 1))
    if len(filt) == 1:
        return reduced
    return outputsize_after_convpool(reduced, filt[1:], pool[1:])
def recField(filt, pool, img=1):
    """Receptive field of the last neuron when applying, per layer, a filter
    of size filt[i] followed by max-pooling by pool[i].

    Works back from the output (field = <img> pixels relative to itself),
    expanding through the layers from last to first."""
    field = img
    for f, p in zip(reversed(filt), reversed(pool)):
        field = p * field + f - 1
    return field
def PredictionsOffset(filter_size, pooling_factor):
    """Offset from the left/top of the image to the first predicted label,
    i.e. the center of the receptive field of that prediction."""
    field = recField(filter_size, pooling_factor)
    return int((field - 1.) / 2.0)
def PredictionStride(pooling_factor):
    """
    Distance between adjacent labels that are predicted in one pass of the network.

    The network thus needs PredictionStride()**2 passes to classify one complete
    2D-image (except for the borders if you don't mirror them).
    """
    # np.product is a deprecated alias that was removed in NumPy 2.0;
    # np.prod is the supported equivalent.
    return np.prod(pooling_factor)
def PredictMaximumInputSize(INPUT_img_size, filter_sizes, pooling_factors):
    """
    Largest usable input size not exceeding <INPUT_img_size>.

    e.g. input size is 512 but labels will be predicted only on a 510 image!
    Image size can only shrink in steps of PredictionStride(pooling_factors)
    (== np.prod(pooling_factors)), so trim the remainder.
    """
    stride = PredictionStride(pooling_factors)
    usable = INPUT_img_size - recField(filter_sizes, pooling_factors)
    return int(INPUT_img_size - (usable % stride))
def make_channel_axis_last_axis(DATA):
    """Move the channel axis (assumed to be the smallest axis) of a 4-d
    array to the last position."""
    assert DATA.ndim == 4
    channel_axis = np.argmin(DATA.shape)
    order = [axis for axis in range(4) if axis != channel_axis]
    order.append(channel_axis)
    return np.transpose(DATA, tuple(order))
def make_channel_axis_second_axis(DATA):
    """Move the channel axis (assumed to be the smallest axis) of a 4-d
    array to position 1, keeping the other axes in their original order."""
    assert DATA.ndim == 4
    channel_axis = np.argmin(DATA.shape)
    remaining = [axis for axis in range(4) if axis != channel_axis]
    order = remaining[:1] + [channel_axis] + remaining[1:]
    return np.transpose(DATA, tuple(order))
def greyvalue_data_padding(DATA, offset_l, offset_r):
    """Pad the three spatial axes of a 4-d volume with <offset_l> voxels in
    front and <offset_r> voxels behind, filled with the average grey value
    of the six boundary faces.  Accepts channel-second input and restores
    that layout on return."""
    assert DATA.ndim == 4
    channels_were_second = np.argmin(DATA.shape) != 3
    if channels_were_second:
        # work internally with the channel axis last
        DATA = make_channel_axis_last_axis(DATA)
    # average grey value over the six boundary faces (same summation order
    # as the original to stay bit-identical in floating point)
    avg_value = 1./6.*(np.mean(DATA[0])+np.mean(DATA[:,0])+np.mean(DATA[:,:,0])+np.mean(DATA[-1])+np.mean(DATA[:,-1])+np.mean(DATA[:,:,-1]))
    sp = DATA.shape
    grow = offset_l + offset_r
    padded = avg_value * np.ones((sp[0] + grow, sp[1] + grow, sp[2] + grow) + tuple(sp[3:]),
                                 dtype="float32")
    padded[offset_l:offset_l + sp[0],
           offset_l:offset_l + sp[1],
           offset_l:offset_l + sp[2]] = DATA.copy()
    if channels_were_second:
        padded = make_channel_axis_second_axis(padded)
    return padded
def pad_data(x, n_padding, mode):
    '''Pad the last axis of <x> by <n_padding> entries at both the front and
    the end (its size grows by 2 * n_padding).

    mode:
        "constant" or "mean" (forwarded to np.pad).'''
    widths = [(0, 0)] * (x.ndim - 1) + [(n_padding, n_padding)]
    return np.pad(x, widths, mode=mode)
class PatchCreator():
"""
<INPUT_img_size> must be the output of PredictMaximumInputSize() !
use <training_image_reduction_factor> to reduce the size of training images (= mini-batches)
The last <number_of_images_test_set> images are test data
"""
def __init__(self, filter_size, pooling_factor,
n_labels_per_batch=10,
override_data_set_filenames=None,
data_init_preserve_channel_scaling=0,
data_clip_range = None,
use_max_fragment_pooling = False,
auto_threshold_labels = False,
pad_last_dimension = False,
padding_margin = 10):
""" filter_size and pooling_factor are lists (if multilayer)
pad_last_dimension:
True/False; necessary when training data's last channel is smaller than the CNN input window. Will add <padding_margin> more pixels in total than the required minimum.
"""
self.ndim =3
b_shuffle_data = True
self.training_set_size = None
assert not (type(override_data_set_filenames)!=type([]) and type(override_data_set_filenames)!=type({1:0}))
self.CNET_real_imagesize = 256 # only valid for this set np.shape(self.data)[1]
best = 1
#find best matching input size (such that n_labels_per_batch is reached)
for i in range(200):
input_size = PredictMaximumInputSize(self.CNET_real_imagesize * 0.005*i, filter_size, pooling_factor)
n_lab_p_dim = outputsize_after_convpool(input_size, filter_size[:-1],pooling_factor[:-1])
if n_labels_per_batch <= n_lab_p_dim**self.ndim:
best = i * 0.005
break
self.CNET_Input_Size = PredictMaximumInputSize(self.CNET_real_imagesize * best, filter_size, pooling_factor)
offs = PredictionsOffset(filter_size,pooling_factor)
self.CNET_labels_offset = np.asarray((offs,)*self.ndim)
self.CNET_stride = PredictionStride(pooling_factor)
self.number_of_labeled_points_per_dim = outputsize_after_convpool(self.CNET_Input_Size, filter_size[:-1],pooling_factor[:-1])
#need additional margin in order to make predicitons for the whole image (i.e. need (<self.CNET_stride>-1) many 1-pixel displacements)
if self.CNET_real_imagesize - self.CNET_Input_Size < self.CNET_stride-1:
self.CNET_Input_Size -= self.CNET_stride
if use_max_fragment_pooling:
# due to implementation details: increase input size if pooling is used!
# the following is the same as (stride>=2 + 2*(stride>=4) + 4*(stride>=8) + 8*(stride>=16) +...)
self.CNET_Input_Size = self.CNET_Input_Size + PredictionStride(pooling_factor)-1
self.padded_once=False
self.use_max_fragment_pooling = use_max_fragment_pooling
if type(override_data_set_filenames) is dict:
if "data" in override_data_set_filenames.keys():
nfiles = zip(override_data_set_filenames["data"],override_data_set_filenames["labels"])
assert len(override_data_set_filenames["data"]) == len(override_data_set_filenames["labels"]),"seems broken! Fix the dict contents."
if b_shuffle_data:
random.seed(46473)#fixed seed: otherwise saves are INVALID/FRAUD (->const test set)
random.shuffle(nfiles)
random.seed()
else:
assert len(override_data_set_filenames["train_data"]) == len(override_data_set_filenames["train_labels"]),"seems broken! Fix the dict contents."
nfiles = zip(override_data_set_filenames["train_data"],override_data_set_filenames["train_labels"])
self.training_set_size = len(nfiles)
tmp = override_data_set_filenames["test_data"]
nfiles += zip(tmp,[None]*len(tmp))
self.data = []
self.labels = []
self.mask = []
if type(nfiles[0])==type(""):
self.file_names = nfiles
else:
self.file_names = [x[0] for x in nfiles]
print("loading...")
n = len(nfiles)
self.num_channels = None
self.num_classes = 6 #[0,1,2,3,4,5]
for i,f in zip(range(len(nfiles)),nfiles):
addtnl_info_str=''
if type(f) is str:
d = file_reading.load_file(f)
d = d[0,...]
l = None
else:
assert type(f[0]) is str
d = file_reading.load_file(f[0])
d = np.squeeze(d)
if d.ndim==3:
d=d.reshape(d.shape+(1,))# add single channel dimension
if data_clip_range is None:
if data_init_preserve_channel_scaling:
d = (d-0.5)/3.5
else:
d2 = np.transpose(d,axes=[3,0,1,2])
d2 = np.reshape(d2,(d2.shape[0],-1))
std_ = np.std(d2,axis=1)
mean_ = np.mean(d2,axis=1)
d = (d-mean_)/(4.*std_)
else:
assert len(data_clip_range)==2
#warp large values to min
d = np.where(d > data_clip_range[1] + abs(data_clip_range[1]-data_clip_range[0])*0.1, data_clip_range[0], d)
#clip to range
d = np.clip(d, data_clip_range[0], data_clip_range[1])
addtnl_info_str+='clip({},{})'.format(data_clip_range[0], data_clip_range[1])
if 0:
overflow = np.where(d==data_clip_range[1], 1, 0)
d = np.where(d==data_clip_range[1], data_clip_range[0], d)
d -= d.min()
d /= d.max()
d = np.concatenate([d, overflow], axis=-1)
else:
d -= d.min()
d /= d.max()
d *= 0.1
if f[1] is not None:
l = file_reading.load_file(f[1])
l=np.squeeze(l)
uniq = np.unique(l)
else:
l = np.zeros((1,1,1),"uint16")
uniq = [0,1] #small hack...
if len(uniq)==2 and uniq[1]!=1:
l[l==uniq[1]]=1
l[l==uniq[0]]=0
uniq=[0,1]
if len(uniq) !=2:
if auto_threshold_labels:
assert uniq[0]==0
l = (l>0).astype('int16')
else:
assert len(uniq)==2, 'Labels must be binary, but found '+str(len(uniq))+' unique values in the labels!'
if d.shape[:3]!=l.shape[:3] and l.shape[:3]!=(1,1,1):
print("DATA SHAPE MISMATCH! transposing labels...")
l=np.transpose(l,axes=[0,2,1])
assert d.shape[:3]==l.shape[:3] or l.shape[:3]==(1,1,1)
if self.num_channels is None:
self.num_channels = d.shape[3]
assert d.shape[3]==self.num_channels
if self.num_channels==5:
print("warning: removing channel 2 (starting at 0)")
d = np.concatenate( (d[...,:2],d[...,3:]),axis=3) #x,y,z,channels
d = np.transpose(d,(0,3,1,2))
if l is not None:
if l.dtype in [np.dtype('int'),np.dtype('int32'),np.dtype('int16'),np.dtype('uint32'),np.dtype('uint16')]:
l[l==5]=0
l = l.astype("int16")
print('Loaded...',100.*(i+1)/n,"%",d.shape,addtnl_info_str, f)
if pad_last_dimension and (d.shape[-1] < self.CNET_Input_Size + padding_margin):
add_this = int((padding_margin + self.CNET_Input_Size - d.shape[-1])/2.)
d = pad_data(d, add_this, mode='constant')
l = pad_data(l, add_this, mode='constant')
print('>> padded to:', d.shape)
self.data.append(d)# format: (x,channels,y,z)
self.labels.append(l)
self.CNET_data_NumImagesInData = len(self.data)#number of different images
self.number_of_images_test_set = int(self.CNET_data_NumImagesInData - self.training_set_size)
print("Total n. of examples:",self.CNET_data_NumImagesInData,"images/volumes")
print('Training on',self.training_set_size,'images/volumes')
print('Testing on ',self.number_of_images_test_set,'images/volumes')
self._getTestImage_current_file=self.training_set_size # <self.training_set_size> is the first non-training file
def greyvalue_pad_data(self, cnn):
print(self, ':: greyvalue_pad_data()')
assert self.padded_once==False
self.padded_once=True
CNET_stride = self.CNET_stride if self.use_max_fragment_pooling==0 else 1
input_s = cnn.input_shape[-1] + CNET_stride - 1
input_s = cnn.input_shape[-1] + CNET_stride - 1 # input size for runNetOnSlice()
offset_l = self.CNET_labels_offset[0]
offset_r = offset_l + input_s
print('\nold shapes',np.unique([d.shape for d in self.data]))
self.data = [greyvalue_data_padding(dat, offset_l, offset_r) for dat in self.data]
self.labels = [np.asarray(np.pad(lab, pad_width=[(offset_l,offset_r),(offset_l,offset_r),(offset_l,offset_r)],mode='constant'),dtype='int16') for lab in self.labels if lab.shape[0] != 1] + [lab for lab in self.labels if lab.shape[0] == 1]
print('\nnew shapes',np.unique([d.shape for d in self.data]))
for d,l in zip(self.data,self.labels):
if l.shape[0] != 1:
assert d.shape[0]==l.shape[0]
assert d.shape[2:]==l.shape[1:]
def __get_cubes(self, i_min, i_max, num):
""" picks <num> many cubes from [i_min,i_max) (max is excluded) <num> many pictures."""
i_ = np.random.randint(i_min, i_max, size=num)# 0, self.training_set_size,size=num)
dat = np.zeros( (num, self.CNET_Input_Size, self.num_channels, self.CNET_Input_Size, self.CNET_Input_Size), dtype="float32")
labshape = (num,)+(self.number_of_labeled_points_per_dim,)*self.ndim
if self.labels[0].ndim==4:
labshape += (self.labels[0].shape[3],)
lab = np.zeros( labshape, dtype="int16")
for n,i in zip(range(num),i_):
sp = self.data[i].shape
sp = [ sp[x + (1 if x>0 else 0)] for x in range(3)] #ignore channel axis
off = [np.random.randint(0,sp[x]-self.CNET_Input_Size) for x in range(3)]
dat[n,...] = self.data[i][off[0]:off[0]+self.CNET_Input_Size, :, off[1]:off[1]+self.CNET_Input_Size, off[2]:off[2]+self.CNET_Input_Size]
loff = tuple(off) + self.CNET_labels_offset
lab[n,...] = self.labels[i][loff[0]:loff[0]+self.number_of_labeled_points_per_dim*self.CNET_stride:self.CNET_stride, loff[1]:loff[1]+self.number_of_labeled_points_per_dim*self.CNET_stride:self.CNET_stride, loff[2]:loff[2]+self.number_of_labeled_points_per_dim*self.CNET_stride:self.CNET_stride]
return dat, lab
def __get_random_string(self):
r1 = np.random.randint(0,5)
r2 = np.random.randint(0,5)
r3 = np.random.randint(0,5)
r4 = random.choice(list(it.permutations(range(3),3))) # r4==(0,1,2) is IDENTITY
return (r1,r2,r3,r4)
def __transform_data(self, dat, transform, transformable=[1,3,4]):
""" function is the inverse of itself!
values in <transformable> are only considered if working in 5dim (currently)"""
assert dat.ndim in [4,5],"__transform_data::TODO"
ret = dat.copy()
if dat.ndim==4: #__get_cubes:: (1, 10, 10, 10)
(r1,r2,r3,r4)=transform
if r1==1:
ret = ret[:,::-1,:,:]
if r2==1:
ret = ret[:,:,::-1,:]
if r3==1:
ret = ret[:,:,:,::-1]
ret = np.transpose(ret,(0,)+tuple(np.asarray(r4)+1) )
elif dat.ndim==5: #__get_cubes:: (1, 28, 5, 28, 28)
(r1,r2,r3,r4)=transform
if r1==1:
idx = [slice(None)] * (transformable[0]) + [slice(None,None,-1)] + [Ellipsis]
ret = ret[idx]
if r2==1:
idx = [slice(None)] * (transformable[1]) + [slice(None,None,-1)] + [Ellipsis]
ret = ret[idx]
if r3==1:
idx = [slice(None)] * (transformable[2]) + [slice(None,None,-1)] + [Ellipsis]
ret = ret[idx]
transp = range(dat.ndim)
pick_count=0
for i in range(dat.ndim):
if i in transformable:
transp[i] = transformable[r4[pick_count]]
pick_count+=1
ret = np.transpose(ret, transp)
return ret
def makeTrainingPatch(self, batchsize):
""" """
da,la = self.__get_cubes(i_min=0,i_max=self.training_set_size, num=batchsize)
tr = self.__get_random_string()
da = self.__transform_data(da,tr,transformable=[1,3,4])
la = self.__transform_data(la,tr,transformable=[1,2,3])
return da,la
# This module is a library; running it directly only points the user at the
# actual training entry point.
if __name__ == '__main__':
    print("please execute main_train.py instead!")
|
GUR9000/Deep_MRI_brain_extraction
|
utils/helper_seg.py
|
Python
|
mit
| 18,821
|
[
"NEURON"
] |
3773a15a162b24957840cf3104c5ac64ab17417faee524f090f2ef189a1f27ea
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010-2011 The Orca Team
# Copyright 2012 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""The main module for the Orca screen reader."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2011 The Orca Team" \
"Copyright (c) 2012 Igalia, S.L."
__license__ = "LGPL"
import gi
import importlib
import os
import pyatspi
import re
import signal
import subprocess
import sys
try:
from gi.repository.Gio import Settings
a11yAppSettings = Settings(schema_id='org.gnome.desktop.a11y.applications')
except:
a11yAppSettings = None
try:
# This can fail due to gtk not being available. We want to
# be able to recover from that if possible. The main driver
# for this is to allow "orca --text-setup" to work even if
# the desktop is not running.
#
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk
# Note: This last import is here due to bgo #673396.
# See bgo#673397 for the rest of the story.
gi.require_version("GdkX11", "3.0")
from gi.repository.GdkX11 import X11Screen
except:
pass
from . import braille
from . import debug
from . import event_manager
from . import keybindings
from . import logger
from . import messages
from . import notification_messages
from . import orca_state
from . import orca_platform
from . import script_manager
from . import settings
from . import settings_manager
from . import speech
from .input_event import BrailleEvent
from .input_event import KeyboardEvent
_eventManager = event_manager.getManager()
_scriptManager = script_manager.getManager()
_settingsManager = settings_manager.getManager()
_logger = logger.getLogger()
try:
# If we don't have an active desktop, we will get a RuntimeError.
from . import mouse_review
except RuntimeError:
pass
def onEnabledChanged(gsetting, key):
    """GSettings 'changed' callback: shut Orca down when the user turns
    the screen reader off (e.g. via gnome-control-center).

    Arguments:
    - gsetting: the Gio.Settings object that emitted the signal
    - key: the name of the key that changed
    """
    try:
        enabled = gsetting.get_boolean(key)
    except Exception:
        # Narrowed from a bare except: ignore lookup failures only, do
        # not swallow KeyboardInterrupt/SystemExit.
        return
    if key == 'screen-reader-enabled' and not enabled:
        shutdown()
def getSettingsManager():
    """Return the module-wide settings_manager singleton."""
    return _settingsManager
def getLogger():
    """Return the module-wide logger singleton."""
    return _logger
# Exit code used when Orca aborts because something hung (see timeout()).
EXIT_CODE_HANG = 50
# The user-settings module (see loadUserSettings).
#
_userSettings = None
# A subset of the original Xmodmap info prior to our stomping on it.
# Right now, this is just for the user's chosen Orca modifier(s).
# NOTE: _storeXmodmap() replaces this with the bytes output of xkbcomp;
# the initial value is an empty (falsy) str placeholder.
#
_originalXmodmap = ""
# Every key name that may act as the Orca modifier (desktop + laptop).
_orcaModifiers = settings.DESKTOP_MODIFIER_KEYS + settings.LAPTOP_MODIFIER_KEYS
# True while CapsLock has been remapped to behave as the Orca modifier.
_capsLockCleared = False
# True when the Orca key mappings must be re-created once the current
# "bypass next command" sequence finishes (see _processKeyboardEvent).
_restoreOrcaKeys = False
########################################################################
# #
# METHODS TO HANDLE APPLICATION LIST AND FOCUSED OBJECTS #
# #
########################################################################
def setLocusOfFocus(event, obj, notifyScript=True, force=False):
    """Sets the locus of focus (i.e., the object with visual focus) and
    notifies the script of the change should the script wish to present
    the change to the user.
    Arguments:
    - event: if not None, the Event that caused this to happen
    - obj: the Accessible with the new locus of focus.
    - notifyScript: if True, propagate this event
    - force: if True, don't worry if this is the same object as the
      current locusOfFocus
    """
    if not force and obj == orca_state.locusOfFocus:
        return
    # If this event is not for the currently active script, then just return.
    #
    if event and event.source and \
       event.host_application and orca_state.activeScript:
        currentApp = orca_state.activeScript.app
        try:
            appList = [event.host_application, event.source.getApplication()]
        except (LookupError, RuntimeError):
            # Remote app may have died; treat as "not ours" below.
            appList = []
            debug.println(debug.LEVEL_SEVERE,
                          "orca.setLocusOfFocus() application Error")
        if not currentApp in appList:
            return
    oldLocusOfFocus = orca_state.locusOfFocus
    try:
        # Just to see if we have a valid object.
        oldLocusOfFocus.getRole()
    except:
        # Either it's None or it's an invalid remote object.
        oldLocusOfFocus = None
    orca_state.locusOfFocus = obj
    try:
        app = orca_state.locusOfFocus.getApplication()
    except:
        # New focus object is dead/invalid: clear the focus and log.
        orca_state.locusOfFocus = None
        if event:
            debug.println(debug.LEVEL_FINE,
                          "LOCUS OF FOCUS: None event='%s'" % event.type)
        else:
            debug.println(debug.LEVEL_FINE,
                          "LOCUS OF FOCUS: None event=None")
    else:
        # Focus is valid: log app name, accessible name and role.
        try:
            appname = "'" + app.name + "'"
        except:
            appname = "None"
        try:
            name = orca_state.locusOfFocus.name
            rolename = orca_state.locusOfFocus.getRoleName()
        except:
            name = "Error"
            rolename = "Error"
        debug.println(debug.LEVEL_FINE,
                      "LOCUS OF FOCUS: app=%s name='%s' role='%s'" \
                      % (appname, name, rolename))
        if event:
            debug.println(debug.LEVEL_FINE,
                          " event='%s'" % event.type)
        else:
            debug.println(debug.LEVEL_FINE,
                          " event=None")
    # Let the active script present the change (possibly with focus now
    # cleared to None above).
    if notifyScript and orca_state.activeScript:
        orca_state.activeScript.locusOfFocusChanged(
            event, oldLocusOfFocus, orca_state.locusOfFocus)
########################################################################
# #
# METHODS FOR PRE-PROCESSING AND MASSAGING KEYBOARD EVENTS. #
# #
########################################################################
# True while (one of) the Orca modifier key(s) is held down.
_orcaModifierPressed = False
def _processKeyboardEvent(event):
    """The primary key event handler for Orca. Keeps track of various
    attributes, such as the lastInputEvent. Also does key echo as well
    as any local keybindings before passing the event on to the active
    script. This method is called synchronously from the AT-SPI registry
    and should be performant. In addition, it must return True if it has
    consumed the event (and False if not).
    Arguments:
    - event: an AT-SPI DeviceEvent
    Returns True if the event should be consumed.
    """
    global _orcaModifierPressed
    keyboardEvent = KeyboardEvent(event)
    debug.println(debug.LEVEL_FINE, keyboardEvent.toString())
    # Weed out duplicate and otherwise bogus events.
    # TODO - JD: Be sure these are the right values to return
    if not keyboardEvent.timestamp:
        debug.println(debug.LEVEL_FINE, "IGNORING EVENT: NO TIMESTAMP")
        return False
    if keyboardEvent == orca_state.lastInputEvent:
        debug.println(debug.LEVEL_FINE, "IGNORING EVENT: DUPLICATE")
        return False
    # Figure out what we've got.
    isOrcaModifier = keyboardEvent.isOrcaModifier()
    isPressedEvent = keyboardEvent.isPressedKey()
    if isOrcaModifier:
        _orcaModifierPressed = isPressedEvent
    # While the Orca modifier is held, stamp the mask onto every event.
    if _orcaModifierPressed:
        keyboardEvent.modifiers |= keybindings.ORCA_MODIFIER_MASK
    # Update our state.
    orca_state.lastInputEvent = keyboardEvent
    if not keyboardEvent.isModifierKey():
        keyboardEvent.setClickCount()
        orca_state.lastNonModifierKeyEvent = keyboardEvent
    # Echo it based on what it is and the user's settings.
    script = orca_state.activeScript
    if not script:
        debug.println(debug.LEVEL_FINE, "IGNORING EVENT DUE TO NO SCRIPT")
        return False
    if isPressedEvent:
        script.presentationInterrupt()
    script.presentKeyboardEvent(keyboardEvent)
    # Plain modifier keys (other than the Orca modifier) are never consumed.
    if keyboardEvent.isModifierKey() and not isOrcaModifier:
        return False
    # Special modes.
    if not isPressedEvent and keyboardEvent.event_string == "Escape":
        script.exitLearnMode(keyboardEvent)
    if orca_state.learnModeEnabled and not keyboardEvent.modifiers:
        if keyboardEvent.event_string == "F1":
            orca_state.learnModeEnabled = False
            return helpForOrca()
        if isPressedEvent and keyboardEvent.event_string in ["F2", "F3"]:
            return script.listOrcaShortcuts(keyboardEvent)
    if orca_state.capturingKeys:
        return False
    if notification_messages.listNotificationMessagesModeEnabled:
        return notification_messages.listNotificationMessages(script, keyboardEvent)
    # See if the event manager wants it (i.e. it is bound to a command.
    if _eventManager.processKeyboardEvent(keyboardEvent):
        return True
    # Do any needed xmodmap crap.
    # On key release: restore/recreate the Orca key map when the user is
    # bypassing the next command, and reset the bypass state otherwise.
    global _restoreOrcaKeys
    if not isPressedEvent:
        if keyboardEvent.event_string in settings.orcaModifierKeys \
           and orca_state.bypassNextCommand:
            _restoreXmodmap()
            _restoreOrcaKeys = True
        elif _restoreOrcaKeys and not orca_state.bypassNextCommand:
            _createOrcaXmodmap()
            _restoreOrcaKeys = False
        elif not keyboardEvent.isModifierKey():
            _orcaModifierPressed = False
            orca_state.bypassNextCommand = False
    # Orca-modifier presses and anything in learn mode are consumed.
    return isOrcaModifier or orca_state.learnModeEnabled
########################################################################
# #
# METHODS FOR PRE-PROCESSING AND MASSAGING BRAILLE EVENTS. #
# #
########################################################################
def _processBrailleEvent(event):
    """Called whenever a key is pressed on the Braille display.
    Arguments:
    - command: the BrlAPI event for the key that was pressed.
    Returns True if the event was consumed; otherwise False
    """
    consumed = False
    # Braille key presses always interrupt speech.
    #
    event = BrailleEvent(event)
    if event.event['command'] not in braille.dontInteruptSpeechKeys:
        speech.stop()
    orca_state.lastInputEvent = event
    try:
        consumed = _eventManager.processBrailleEvent(event)
    except:
        debug.printException(debug.LEVEL_SEVERE)
    # In learn mode every braille key is consumed so it only gets described.
    if (not consumed) and orca_state.learnModeEnabled:
        consumed = True
    return consumed
########################################################################
# #
# METHODS FOR HANDLING INITIALIZATION, SHUTDOWN, AND USE. #
# #
########################################################################
def _setXmodmap(xkbmap):
    """Install the given xkb keyboard map (bytes) on the current DISPLAY
    by piping it to xkbcomp."""
    compiler = subprocess.Popen(
        ['xkbcomp', '-w0', '-', os.environ['DISPLAY']],
        stdin=subprocess.PIPE, stdout=None, stderr=None)
    # communicate() feeds the map on stdin and waits for xkbcomp to exit.
    compiler.communicate(xkbmap)
def _setCapsLockAsOrcaModifier(enable):
    """Enable or disable use of the caps lock key as an Orca modifier key.

    Rewrites the Caps_Lock interpret section of the saved keyboard map
    (_originalXmodmap, bytes from xkbcomp) so CapsLock either acts as a
    momentary modifier (enable=True) or locks normally (enable=False),
    then installs the edited map via _setXmodmap().
    """
    interpretCapsLineProg = re.compile(
        r'^\s*interpret\s+Caps[_+]Lock[_+]AnyOfOrNone\s*\(all\)\s*{\s*$', re.I)
    capsModLineProg = re.compile(
        r'^\s*action\s*=\s*SetMods\s*\(\s*modifiers\s*=\s*Lock\s*,\s*clearLocks\s*\)\s*;\s*$', re.I)
    normalCapsLineProg = re.compile(
        r'^\s*action\s*=\s*LockMods\s*\(\s*modifiers\s*=\s*Lock\s*\)\s*;\s*$', re.I)
    normalCapsLine = ' action= LockMods(modifiers=Lock);'
    capsModLine = ' action= SetMods(modifiers=Lock,clearLocks);'
    lines = _originalXmodmap.decode('UTF-8').split('\n')
    foundCapsInterpretSection = False
    for i, line in enumerate(lines):
        if not foundCapsInterpretSection:
            if interpretCapsLineProg.match(line):
                foundCapsInterpretSection = True
        else:
            if enable:
                if normalCapsLineProg.match(line):
                    lines[i] = capsModLine
                    _setXmodmap(bytes('\n'.join(lines), 'UTF-8'))
                    return
            else:
                if capsModLineProg.match(line):
                    lines[i] = normalCapsLine
                    _setXmodmap(bytes('\n'.join(lines), 'UTF-8'))
                    return
            # BUGFIX: the original used "if line.find('}'):", which is
            # truthy for -1 (not found) and for any match position > 0,
            # so it bailed out on almost every line of the section.  The
            # intent is to stop only at the section's closing brace.
            if '}' in line:
                # Failed to find the line we need to change
                return
def _createOrcaXmodmap():
    """Makes an Orca-specific Xmodmap so that the keys behave as we
    need them to do. This is especially the case for the Orca modifier.

    Currently this only toggles the CapsLock remapping on or off,
    tracking the state in the module-level _capsLockCleared flag.
    """
    global _capsLockCleared
    # CLEANUP: removed the unused local "cmd = []" present in the
    # original implementation.
    if "Caps_Lock" in settings.orcaModifierKeys:
        _setCapsLockAsOrcaModifier(True)
        _capsLockCleared = True
    elif _capsLockCleared:
        _setCapsLockAsOrcaModifier(False)
        _capsLockCleared = False
def _storeXmodmap(keyList):
    """Save the original xmodmap for the keys in keyList before we alter it.
    Arguments:
    - keyList: A list of named keys to look for.
    NOTE(review): keyList is currently unused -- the entire keyboard map
    is captured via xkbcomp and stored (as bytes) in _originalXmodmap.
    """
    global _originalXmodmap
    _originalXmodmap = subprocess.check_output(['xkbcomp', os.environ['DISPLAY'], '-'])
def _restoreXmodmap(keyList=None):
    """Restore the original xmodmap values for the keys in keyList.
    Arguments:
    - keyList: A list of named keys to look for. An empty/None value
      means to restore the entire saved xmodmap.
      NOTE(review): the argument is currently unused -- the whole saved
      map is always re-installed.
    """
    # FIX(style): default was a mutable list ([]); use None to avoid the
    # shared-mutable-default pitfall.  Behavior is unchanged because the
    # parameter is never read.
    global _capsLockCleared
    _capsLockCleared = False
    p = subprocess.Popen(['xkbcomp', '-w0', '-', os.environ['DISPLAY']],
                         stdin=subprocess.PIPE, stdout=None, stderr=None)
    p.communicate(_originalXmodmap)
def loadUserSettings(script=None, inputEvent=None, skipReloadMessage=False):
    """Loads (and reloads) the user settings module, reinitializing
    things such as speech if necessary.
    Returns True to indicate the input event has been consumed.
    """
    debug.println(debug.LEVEL_FINEST, 'INFO: Loading User Settings')
    global _userSettings
    # Shutdown the output drivers and give them a chance to die.
    speech.shutdown()
    braille.shutdown()
    _scriptManager.deactivate()
    reloaded = False
    if _userSettings:
        # Reload: re-read the active profile's settings.
        _profile = _settingsManager.getSetting('activeProfile')[1]
        try:
            _userSettings = _settingsManager.getGeneralSettings(_profile)
            _settingsManager.setProfile(_profile)
            reloaded = True
        except ImportError:
            debug.printException(debug.LEVEL_FINEST)
        except:
            debug.printException(debug.LEVEL_SEVERE)
    else:
        # First load: use the settings manager's current profile.
        _profile = _settingsManager.profile
        try:
            _userSettings = _settingsManager.getGeneralSettings(_profile)
        except ImportError:
            debug.printException(debug.LEVEL_FINEST)
        except:
            debug.printException(debug.LEVEL_SEVERE)
    _settingsManager.loadAppSettings(script)
    if _settingsManager.getSetting('enableSpeech'):
        try:
            speech.init()
            if reloaded and not skipReloadMessage:
                speech.speak(messages.SETTINGS_RELOADED,
                             settings.voices.get(settings.SYSTEM_VOICE))
            debug.println(debug.LEVEL_CONFIGURATION,
                          "Speech module has been initialized.")
        except:
            debug.printException(debug.LEVEL_SEVERE)
            debug.println(debug.LEVEL_SEVERE,
                          "Could not initialize connection to speech.")
    else:
        debug.println(debug.LEVEL_CONFIGURATION,
                      "Speech module has NOT been initialized.")
    if _settingsManager.getSetting('enableBraille'):
        try:
            braille.init(_processBrailleEvent, settings.tty)
        except:
            debug.printException(debug.LEVEL_WARNING)
            debug.println(debug.LEVEL_WARNING,
                          "Could not initialize connection to braille.")
    # I'm not sure where else this should go. But it doesn't really look
    # right here.
    # NameError is raised when the optional mouse_review import at module
    # level failed (no active desktop).
    try:
        mouse_review.mouse_reviewer.toggle(on=settings.enableMouseReview)
    except NameError:
        pass
    # Merge any user-chosen modifier keys into the known modifier list.
    global _orcaModifiers
    custom = [k for k in settings.orcaModifierKeys if k not in _orcaModifiers]
    _orcaModifiers += custom
    # Handle the case where a change was made in the Orca Preferences dialog.
    #
    if _originalXmodmap:
        _restoreXmodmap(_orcaModifiers)
    _storeXmodmap(_orcaModifiers)
    _createOrcaXmodmap()
    _scriptManager.activate()
    _eventManager.activate()
    debug.println(debug.LEVEL_FINEST, 'INFO: User Settings Loaded')
    return True
def _showPreferencesUI(script, prefs):
    """Create (or re-show) the Orca preferences dialog for <prefs>.

    Arguments:
    - script: the script whose settings are being configured
    - prefs: a dictionary of setting name -> value to edit
    """
    # Re-use an already-open dialog rather than creating a second one.
    if orca_state.orcaOS:
        orca_state.orcaOS.showGUI()
        return
    try:
        module = importlib.import_module('.orca_gui_prefs', 'orca')
    except:
        debug.printException(debug.LEVEL_SEVERE)
        return
    uiFile = os.path.join(orca_platform.datadir,
                          orca_platform.package,
                          "ui",
                          "orca-setup.ui")
    orca_state.orcaOS = module.OrcaSetupGUI(uiFile, "orcaSetupWindow", prefs)
    orca_state.orcaOS.init(script)
    orca_state.orcaOS.showGUI()
def showAppPreferencesGUI(script=None, inputEvent=None):
    """Display the GUI to configure app-specific Orca preferences for the
    given (or currently active) script's application.
    Returns True to indicate the input event has been consumed.
    """
    prefs = {key: _settingsManager.getSetting(key)
             for key in settings.userCustomizableSettings}
    _showPreferencesUI(script or orca_state.activeScript, prefs)
    return True
def showPreferencesGUI(script=None, inputEvent=None):
    """Display the GUI to configure Orca's general user preferences.
    Returns True to indicate the input event has been consumed.
    """
    generalPrefs = _settingsManager.getGeneralSettings(_settingsManager.profile)
    defaultScript = _scriptManager.getDefaultScript()
    _showPreferencesUI(defaultScript, generalPrefs)
    return True
def helpForOrca(script=None, inputEvent=None, page=""):
    """Show Orca Help window (part of the GNOME Access Guide),
    optionally jumping to a specific help page.
    Returns True to indicate the input event has been consumed.
    """
    target = "help:orca"
    if page:
        target = "%s?%s" % (target, page)
    Gtk.show_uri(Gdk.Screen.get_default(),
                 target,
                 Gtk.get_current_event_time())
    return True
def quitOrca(script=None, inputEvent=None):
    """Quit Orca. Check if the user wants to confirm this action.
    If so, show the confirmation GUI otherwise just shutdown.
    Returns True to indicate the input event has been consumed.
    NOTE(review): no confirmation dialog is actually shown here --
    shutdown() is called unconditionally.
    """
    shutdown()
    return True
def showFindGUI(script=None, inputEvent=None):
    """Displays the user interface to perform an Orca Find.

    Arguments:
    - script, inputEvent: unused; present so this can serve as a
      keybinding handler.
    NOTE(review): unlike its siblings this handler returns None, not
    True; kept as-is to avoid changing event-consumption behavior.
    """
    try:
        module = importlib.import_module('.orca_gui_find', 'orca')
        module.showFindUI()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate; import or GUI failures are merely logged.
        debug.printException(debug.LEVEL_SEVERE)
# If True, this module has been initialized.
#
_initialized = False
def init(registry):
    """Initialize the orca module, which initializes the speech and braille
    modules. Also builds up the application list, registers for AT-SPI events,
    and creates scripts for all known applications.
    Returns True if the initialization procedure has run, or False if this
    module has already been initialized.
    """
    debug.println(debug.LEVEL_FINEST, 'INFO: Initializing Orca module')
    global _initialized
    if _initialized and _settingsManager.isScreenReaderServiceEnabled():
        return False
    # Do not hang on initialization if we can help it.
    #
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.signal(signal.SIGALRM, settings.timeoutCallback)
        signal.alarm(settings.timeoutTime)
    loadUserSettings()
    _eventManager.registerKeystrokeListener(_processKeyboardEvent)
    # Initialization finished in time: cancel the watchdog alarm.
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.alarm(0)
    _initialized = True
    # In theory, we can do this through dbus. In practice, it fails to
    # work sometimes. Until we know why, we need to leave this as-is
    # so that we respond when gnome-control-center is used to stop Orca.
    if a11yAppSettings:
        a11yAppSettings.connect('changed', onEnabledChanged)
    debug.println(debug.LEVEL_FINEST, 'INFO: Orca module initialized')
    return True
def start(registry, cacheValues):
    """Starts Orca.
    Arguments:
    - registry: the pyatspi Registry; registry.start() blocks until the
      registry is stopped (see shutdown()).
    - cacheValues: if True, limit the AT-SPI cache to properties.
    """
    debug.println(debug.LEVEL_FINEST, 'INFO: Starting Orca')
    if not _initialized:
        init(registry)
    # Do not hang on startup if we can help it.
    #
    # NOTE(review): the alarm is armed and immediately cancelled with
    # nothing in between -- this guard looks vestigial; confirm against
    # upstream history before removing.
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.signal(signal.SIGALRM, settings.timeoutCallback)
        signal.alarm(settings.timeoutTime)
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.alarm(0)
    if cacheValues:
        pyatspi.setCacheLevel(pyatspi.CACHE_PROPERTIES)
    debug.println(debug.LEVEL_FINEST, 'INFO: Orca starting registry')
    registry.start(gil=False)
def die(exitCode=1):
    """Terminate Orca with the given exit code.

    EXIT_CODE_HANG means something is hung: the process is killed hard
    with SIGKILL.  Any other code attempts a graceful shutdown() first
    and then exits via sys.exit().
    """
    pid = os.getpid()
    if exitCode == EXIT_CODE_HANG:
        # Something is hung and we wish to abort.
        os.kill(pid, signal.SIGKILL)
        return
    shutdown()
    # BUGFIX: removed unreachable dead code.  sys.exit() raises
    # SystemExit, so the old trailing "if exitCode > 1:
    # os.kill(pid, signal.SIGTERM)" could never execute.
    sys.exit(exitCode)
def timeout(signum=None, frame=None):
    """SIGALRM handler installed as settings.timeoutCallback: something
    took longer than settings.timeoutTime.  Dump diagnostics and abort
    with EXIT_CODE_HANG (SIGKILL via die())."""
    debug.println(debug.LEVEL_SEVERE,
                  "TIMEOUT: something has hung. Aborting.")
    debug.printStack(debug.LEVEL_ALL)
    debug.examineProcesses()
    die(EXIT_CODE_HANG)
def shutdown(script=None, inputEvent=None):
    """Exits Orca. Unregisters any event listeners and cleans up.
    Returns True if the shutdown procedure ran or False if this module
    was never initialized.
    """
    debug.println(debug.LEVEL_FINEST, 'INFO: Shutting down Orca')
    global _initialized
    if not _initialized:
        return False
    # Try to say goodbye, but be defensive if something has hung.
    #
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.signal(signal.SIGALRM, settings.timeoutCallback)
        signal.alarm(settings.timeoutTime)
    orca_state.activeScript.presentMessage(messages.STOP_ORCA)
    _scriptManager.deactivate()
    _eventManager.deactivate()
    # Shutdown all the other support.
    #
    if settings.enableSpeech:
        speech.shutdown()
    if settings.enableBraille:
        braille.shutdown()
    # Goodbye was presented in time: cancel the watchdog alarm.
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.alarm(0)
    _initialized = False
    # Give the user back their original keyboard map.
    _restoreXmodmap(_orcaModifiers)
    debug.println(debug.LEVEL_FINEST, 'INFO: Orca stopping registry')
    pyatspi.Registry.stop()
    debug.println(debug.LEVEL_FINEST, 'INFO: Orca shutdown complete')
    return True
# Number of times shutdownOnSignal has already been entered; a repeat
# signal means graceful shutdown is failing, so we hard-exit instead.
exitCount = 0
def shutdownOnSignal(signum, frame):
    """Signal handler (SIGHUP/SIGINT/SIGTERM/SIGQUIT): attempt a graceful
    shutdown, falling back to die() if it hangs or repeats."""
    global exitCount
    debug.println(debug.LEVEL_ALL,
                  "Shutting down and exiting due to signal = %d" \
                  % signum)
    debug.println(debug.LEVEL_ALL, "Current stack is:")
    debug.printStack(debug.LEVEL_ALL)
    # Well...we'll try to exit nicely, but if we keep getting called,
    # something bad is happening, so just quit.
    #
    if exitCount:
        die(signum)
    else:
        exitCount += 1
    # Try to do a graceful shutdown if we can.
    #
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.signal(signal.SIGALRM, settings.timeoutCallback)
        signal.alarm(settings.timeoutTime)
    try:
        if _initialized:
            shutdown()
        else:
            # We always want to try to shutdown speech since the
            # speech servers are very persistent about living.
            #
            speech.shutdown()
            shutdown()
        cleanExit = True
    except:
        cleanExit = False
    if settings.timeoutCallback and (settings.timeoutTime > 0):
        signal.alarm(0)
    if not cleanExit:
        die(EXIT_CODE_HANG)
def abortOnSignal(signum, frame):
    """Signal handler for fatal signals (e.g. SIGSEGV): log and exit with
    the signal number as Orca's exit code."""
    debug.println(debug.LEVEL_ALL,
                  "Aborting due to signal = %d" \
                  % signum)
    die(signum)
def main(cacheValues=True):
    """The main entry point for Orca. The exit codes for Orca will
    loosely be based on signals, where the exit code will be the
    signal used to terminate Orca (if a signal was used). Otherwise,
    an exit code of 0 means normal completion and an exit code of 50
    means Orca exited because of a hang."""
    # Method to call when we think something might be hung.
    #
    settings.timeoutCallback = timeout
    # Various signal handlers we want to listen for.
    #
    signal.signal(signal.SIGHUP, shutdownOnSignal)
    signal.signal(signal.SIGINT, shutdownOnSignal)
    signal.signal(signal.SIGTERM, shutdownOnSignal)
    signal.signal(signal.SIGQUIT, shutdownOnSignal)
    signal.signal(signal.SIGSEGV, abortOnSignal)
    # Make sure desktop accessibility support is switched on before we
    # try to talk to AT-SPI.
    if not _settingsManager.isAccessibilityEnabled():
        _settingsManager.setAccessibility(True)
    init(pyatspi.Registry)
    # Announce startup in speech and/or braille, best-effort.
    try:
        message = messages.START_ORCA
        if not _settingsManager.getSetting('onlySpeakDisplayedText'):
            speech.speak(message, settings.voices.get(settings.SYSTEM_VOICE))
        if _settingsManager.getSetting('enableBraille') \
           or _settingsManager.getSetting('enableBrailleMonitor'):
            braille.displayMessage(message)
    except:
        debug.printException(debug.LEVEL_SEVERE)
    # Seed the locus of focus with the currently active window, if any.
    script = orca_state.activeScript
    if script:
        window = script.utilities.activeWindow()
        if window and not orca_state.locusOfFocus:
            setLocusOfFocus(None, window)
    try:
        start(pyatspi.Registry, cacheValues) # waits until we stop the registry
    except:
        die(EXIT_CODE_HANG)
    return 0
# Allow running this module directly; the exit code follows main()'s contract.
if __name__ == "__main__":
    sys.exit(main())
|
pvagner/orca
|
src/orca/orca.py
|
Python
|
lgpl-2.1
| 27,027
|
[
"ORCA"
] |
32a17f5e93f755e4522b640ff28c199a54fc90bf2da66a3adddda1d347e9fea9
|
"""
picasso/imageprocess
~~~~~~~~~~~~~~~~~~~~
Image processing functions
:author: Joerg Schnitzbauer, 2016
:copyright: Copyright (c) 2016 Jungmann Lab, MPI of Biochemistry
"""
import matplotlib.pyplot as _plt
import numpy as _np
from numpy import fft as _fft
import lmfit as _lmfit
from tqdm import tqdm as _tqdm
from . import lib as _lib
_plt.style.use("ggplot")
def xcorr(imageA, imageB):
    """Circular cross-correlation of two images computed via FFT.

    Returns a real array of imageA's shape with the zero-shift term
    moved to the center (fftshift), scaled by 1/sqrt(imageA.size).
    """
    spectrumA = _fft.fft2(imageA)
    spectrumB_conj = _np.conj(_fft.fft2(imageB))
    correlation = _np.real(_fft.ifft2(spectrumA * spectrumB_conj))
    return _fft.fftshift(correlation) / _np.sqrt(imageA.size)
def get_image_shift(imageA, imageB, box, roi=None, display=False):
    """ Computes the shift from imageA to imageB

    Locates the cross-correlation peak and refines it by fitting a 2D
    Gaussian on a (box x box)-ish window around the brightest pixel.
    Returns (-yc, -xc): the shift in pixels (y, x), or (0, 0) when either
    image is all zeros.
    """
    if (_np.sum(imageA) == 0) or (_np.sum(imageB) == 0):
        return 0, 0
    # Compute image correlation
    XCorr = xcorr(imageA, imageB)
    # Cut out center roi
    Y, X = imageA.shape
    if roi is not None:
        Y_ = int((Y - roi) / 2)
        X_ = int((X - roi) / 2)
        if Y_ > 0:
            XCorr = XCorr[Y_:-Y_, :]
        else:
            Y_ = 0
        if X_ > 0:
            XCorr = XCorr[:, X_:-X_]
        else:
            X_ = 0
    else:
        Y_ = X_ = 0
    # A quarter of the fit ROI
    fit_X = int(box / 2)
    # A coordinate grid for the fitting ROI
    y, x = _np.mgrid[-fit_X: fit_X + 1, -fit_X: fit_X + 1]
    # Find the brightest pixel and cut out the fit ROI
    y_max_, x_max_ = _np.unravel_index(XCorr.argmax(), XCorr.shape)
    FitROI = XCorr[
        y_max_ - fit_X: y_max_ + fit_X + 1,
        x_max_ - fit_X: x_max_ + fit_X + 1,
    ]
    dimensions = FitROI.shape
    # Peak too close to the border to cut a full square window: fall back
    # to a zero sub-pixel offset.
    if 0 in dimensions or dimensions[0] != dimensions[1]:
        xc, yc = 0, 0
    else:
        # The fit model: isotropic Gaussian with amplitude a, center
        # (xc, yc), width s and constant background b, flattened so that
        # lmfit can compare it against FitROI.flatten().
        def flat_2d_gaussian(a, xc, yc, s, b):
            A = a * _np.exp(-0.5 * ((x - xc) ** 2 + (y - yc) ** 2) / s ** 2) + b
            return A.flatten()
        gaussian2d = _lmfit.Model(
            flat_2d_gaussian, name="2D Gaussian", independent_vars=[]
        )
        # Set up initial parameters and fit
        params = _lmfit.Parameters()
        params.add("a", value=FitROI.max(), vary=True, min=0)
        params.add("xc", value=0, vary=True)
        params.add("yc", value=0, vary=True)
        params.add("s", value=1, vary=True, min=0)
        params.add("b", value=FitROI.min(), vary=True, min=0)
        results = gaussian2d.fit(FitROI.flatten(), params)
        # Get maximum coordinates and add offsets
        xc = results.best_values["xc"]
        yc = results.best_values["yc"]
        xc += X_ + x_max_
        yc += Y_ + y_max_
    if display:
        _plt.figure(figsize=(17, 10))
        _plt.subplot(1, 3, 1)
        _plt.imshow(imageA, interpolation="none")
        _plt.subplot(1, 3, 2)
        _plt.imshow(imageB, interpolation="none")
        _plt.subplot(1, 3, 3)
        _plt.imshow(XCorr, interpolation="none")
        _plt.plot(xc, yc, "x")
        _plt.show()
    # Convert from fftshift-ed correlation coordinates to a signed shift
    # relative to the image center.
    xc -= _np.floor(X / 2)
    yc -= _np.floor(Y / 2)
    return -yc, -xc
def rcc(segments, max_shift=None, callback=None):
    """Redundant cross-correlation drift estimation over all segment pairs.

    Arguments:
    - segments: sequence of images to correlate pairwise
    - max_shift: passed to get_image_shift() as the center-roi size
    - callback: optional progress callable, invoked with the number of
      pairs processed so far (0 .. n_pairs)
    Returns the result of _lib.minimize_shifts on the pairwise shift
    matrices.
    """
    n_segments = len(segments)
    # Upper-triangular matrices of the pairwise x/y shifts (i < j).
    shifts_x = _np.zeros((n_segments, n_segments))
    shifts_y = _np.zeros((n_segments, n_segments))
    n_pairs = int(n_segments * (n_segments - 1) / 2)
    flag = 0
    with _tqdm(
        total=n_pairs, desc="Correlating image pairs", unit="pairs"
    ) as progress_bar:
        if callback is not None:
            callback(0)
        for i in range(n_segments - 1):
            for j in range(i + 1, n_segments):
                progress_bar.update()
                # Fit box of 5 px; max_shift limits the correlation roi.
                shifts_y[i, j], shifts_x[i, j] = get_image_shift(
                    segments[i], segments[j], 5, max_shift
                )
                flag += 1
                if callback is not None:
                    callback(flag)
    return _lib.minimize_shifts(shifts_x, shifts_y)
|
jungmannlab/picasso
|
picasso/imageprocess.py
|
Python
|
mit
| 3,919
|
[
"Gaussian"
] |
09a4eb1c1490a750b47f9a5161b90f34c668534fe56cf6998838bf1022bcdbc0
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Code to test the mdconvert script. These tests take about two minutes to run.
This checks all pairs for formats, converting from format x -> format y. it
also trys using striding to subsample the trajectory and atom_indices, so it
does significant integration testing of the XXXTrajectoryFile modules as well.
"""
##############################################################################
# imports
##############################################################################
import os
import sys
import tempfile
import shutil
import numpy as np
import mdtraj as md
from mdtraj import element
from mdtraj.utils import import_
from mdtraj.testing import skipif, get_fn, eq, slow
on_win = (sys.platform == 'win32')
on_py3 = (sys.version_info >= (3, 0))
try:
scripttest = import_('scripttest')
HAVE_SCRIPTTEST = True
except SystemExit:
HAVE_SCRIPTTEST = False
##############################################################################
# globals
##############################################################################
# if you switch DEBUG_MODE to True, none of the files will deleted
# at the end of the execution of this suite, so that you can debug the
# problem by running mdconvert manually.
DEBUG_MODE = False
# Scratch directory for input files; converted output goes to a
# subdirectory so scripttest can manage it.
staging_dir = tempfile.mkdtemp()
output_dir = os.path.join(staging_dir, 'output')
def teardown_module(module):
    """Remove the scratch directory, unless DEBUG_MODE asked to keep it."""
    if not DEBUG_MODE:
        shutil.rmtree(staging_dir)
def setup_module():
    """Build the module-level reference trajectory TRAJ: 10 frames of a
    5-atom, single-residue topology with random coordinates, quadratic
    frame times and a fixed triclinic unit cell."""
    global TRAJ
    xyz = np.around(np.random.randn(10, 5, 3).astype(np.float32), 2)
    topology = md.Topology()
    chain = topology.add_chain()
    residue = topology.add_residue('ALA', chain)
    atom_spec = [('CA', element.carbon),
                 ('HG1', element.hydrogen),
                 ('SG', element.sulfur),
                 ('OD1', element.oxygen),
                 ('NE', element.nitrogen)]
    for atom_name, atom_element in atom_spec:
        topology.add_atom(atom_name, atom_element, residue)
    time = np.arange(10)**2
    unitcell_lengths = np.array([[1.1, 1.2, 1.3]] * 10)
    unitcell_angles = np.array([[90, 90, 95]] * 10)
    TRAJ = md.Trajectory(xyz, topology=topology, time=time,
                         unitcell_lengths=unitcell_lengths,
                         unitcell_angles=unitcell_angles)
##############################################################################
# test
##############################################################################
@skipif(not HAVE_SCRIPTTEST)
def test_mdconvert_index():
    "Check that extracting a specific index works"
    env = scripttest.TestFileEnvironment(output_dir)
    path = os.path.join(staging_dir, 'traj.h5')
    TRAJ.save(path)
    # '-i 4' asks mdconvert for frame 4 only.
    command = ['mdconvert', path, '-i 4', '-o', 'frame4.pdb']
    env.run(*command, expect_stderr=True)
    frame4 = md.load(os.path.join(output_dir, 'frame4.pdb'))
    eq(frame4.xyz, TRAJ[4].xyz)
    os.unlink(path)
@skipif(not HAVE_SCRIPTTEST)
def test_mdconvert_slice():
    "Check that extracting a specific slice works"
    env = scripttest.TestFileEnvironment(output_dir)
    path = os.path.join(staging_dir, 'traj.h5')
    TRAJ.save(path)
    # '-i 1:5:2' should select the same frames as Python slicing [1:5:2].
    command = ['mdconvert', path, '-i 1:5:2', '-o', 'frame13.pdb']
    env.run(*command, expect_stderr=True)
    frame13 = md.load(os.path.join(output_dir, 'frame13.pdb'))
    eq(frame13.xyz, TRAJ[1:5:2].xyz)
    os.unlink(path)
@slow
@skipif(not HAVE_SCRIPTTEST)
def test_mdconvert_0():
    """ensure that the xyz coordinates are preserved by a trip
    from python -> save in format X -> mdconvert to format Y -> python

    This is a nose-style test generator: each yielded callable is one
    test case.  NOTE(review): the yielded lambdas and check() close over
    loop variables (command1..3, fn2, ext1/ext2, ...) -- this relies on
    the runner executing each yielded test before the generator advances
    (nose's behavior); with eager collection the late-binding closures
    would all see the final iteration's values.
    """
    env = scripttest.TestFileEnvironment(output_dir)
    # save one copy of traj for use as a topology file
    topology_fn = os.path.join(staging_dir, 'topology.pdb')
    TRAJ[0].save(topology_fn)
    # save a .dat file for the atom_indices so that we can test
    # mdconvert's atom_indices flag
    atom_indices = np.array([0, 3])
    atom_indices_fn = os.path.join(staging_dir, 'atom_indices.dat')
    np.savetxt(atom_indices_fn, atom_indices, fmt='%d')
    fns = ['traj.xtc', 'traj.dcd', 'traj.binpos', 'traj.trr', 'traj.nc',
           'traj.pdb', 'traj.h5', 'traj.lh5', 'traj.netcdf']
    if on_win and on_py3:
        fns.remove('traj.lh5')
        fns.remove('traj.h5')
    for fn in fns:
        path = os.path.join(staging_dir, fn)
        TRAJ.save(path)
        for fn2 in filter(lambda e: e != fn, fns):
            ext1, ext2 = [os.path.splitext(f)[1] for f in [fn, fn2]]
            command1 = ['mdconvert', path, '-o', fn2, '-c 6']
            if ext2 in ['.pdb', '.h5', '.lh5']:
                # if we're saving a pdb or h5, we need to give it a topology too
                command1 += ['-t', topology_fn]
            # one set of tests, with no extra flags to mdconvert
            execution1 = lambda : env.run(*command1, expect_stderr=True)
            execution1.description = 'mdconvert: converting %s -> %s' % (fn, fn2)
            # lets try using the --atom_indices flag to mdconvert
            command2 = command1 + ['-a', atom_indices_fn]
            command2[3] = 'subset.' + fn2 # make sure the output goes to a different file
            execution2 = lambda : env.run(*command2, expect_stderr=True)
            execution2.description = 'mdconvert: converting %s -> %s (atom_indices)' % (fn, 'subset.' + fn2)
            # lets try one using the --stride 3 flag
            command3 = command1 + ['-s 3']
            command3[3] = 'stride.' + fn2 # change the out filename, so they don't clobbed
            execution3 = lambda : env.run(*command3, expect_stderr=True)
            execution3.description = 'mdconvert: converting %s -> %s (stride)' % (fn, 'stride.' + fn2)
            yield execution1
            yield execution2
            yield execution3
            # ensure that the xyz coordinates are preserved by a trip
            # from python -> save in format X -> mdconvert to format Y -> python
            load_kwargs_check1, load_kwargs_check2 = {}, {}
            if ext2 not in ['.pdb', '.h5', '.lh5']:
                # formats without an embedded topology need one supplied
                load_kwargs_check1['top'] = TRAJ.topology
                load_kwargs_check2['top'] = TRAJ.topology.subset(atom_indices)
            def check():
                out1 = md.load(os.path.join(output_dir, fn2), **load_kwargs_check1)
                out2 = md.load(os.path.join(output_dir, 'subset.' + fn2), **load_kwargs_check2)
                out3 = md.load(os.path.join(output_dir, 'stride.' + fn2), **load_kwargs_check1)
                # lh5 stores reduced precision, so compare more loosely
                if ext1 in ['.lh5'] or ext2 in ['.lh5']:
                    decimal = 3
                else:
                    decimal = 6
                eq(out1.xyz, TRAJ.xyz, decimal=decimal)
                eq(out2.xyz, TRAJ.xyz[:, atom_indices], decimal=decimal)
                eq(out3.xyz, TRAJ.xyz[::3], decimal=decimal)
                if ext1 not in ['.binpos', '.lh5'] and ext2 not in ['.binpos', '.lh5']:
                    # binpos doesn't save unitcell information
                    eq(out1.unitcell_vectors, TRAJ.unitcell_vectors, decimal=2)
                    eq(out2.unitcell_vectors, TRAJ.unitcell_vectors, decimal=2)
                    eq(out3.unitcell_vectors, TRAJ.unitcell_vectors[::3], decimal=2)
                if all(e in ['.xtc', '.trr', '.nc', '.h5'] for e in [ext1, ext2]):
                    # these formats contain time information
                    eq(out1.time, TRAJ.time)
                    eq(out2.time, TRAJ.time)
                    eq(out3.time, TRAJ.time[::3])
                if ext2 in ['.pdb', '.h5', '.lh5']:
                    # these formats contain a topology in the file that was
                    # read from disk
                    eq(out1.topology, TRAJ.topology)
                    eq(out2.topology, TRAJ.topology.subset(atom_indices))
                    eq(out3.topology, TRAJ.topology)
            check.description = 'mdconvert: checking %s -> %s' % (fn, fn2)
            yield check
            if not DEBUG_MODE:
                os.unlink(os.path.join(output_dir, fn2))
                os.unlink(os.path.join(output_dir, 'subset.' + fn2))
                os.unlink(os.path.join(output_dir, 'stride.' + fn2))
        if not DEBUG_MODE:
            os.unlink(path)
@slow
@skipif(not HAVE_SCRIPTTEST)
def test_mdconvert_1():
    """Round-trip a binpos trajectory through mdconvert to DCD and check
    that both the coordinates and the topology survive unchanged."""
    environment = scripttest.TestFileEnvironment(output_dir)
    binpos = get_fn('alanine-dipeptide-explicit.binpos')
    prmtop = get_fn('alanine-dipeptide-explicit.prmtop')
    # Run the command-line converter: binpos -> dcd.
    environment.run('mdconvert', binpos, '--top', prmtop, '-o', 'out.dcd',
                    expect_stderr=True)
    # Load both the converted file and the original and compare.
    converted = md.load(os.path.join(output_dir, 'out.dcd'), top=prmtop)
    reference = md.load(binpos, top=prmtop)
    eq(converted.xyz, reference.xyz)
    eq(converted.topology, reference.topology)
|
ctk3b/mdtraj
|
mdtraj/tests/test_mdconvert.py
|
Python
|
lgpl-2.1
| 9,785
|
[
"MDTraj",
"NetCDF"
] |
283a8a9661ce59621406ee50454eaeef4133dd3b31f50fff1e8b35053ff37874
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import mock
import logging
from moosesqa import SQAReport, SilentRecordHandler, LogHelper
class TestSQAReport(unittest.TestCase):
    """Tests for moosesqa.SQAReport status values and report rendering."""

    def testStatus(self):
        """The Status enumeration values are fixed integers."""
        self.assertEqual(SQAReport.Status.PASS, 0)
        self.assertEqual(SQAReport.Status.WARNING, 1)
        self.assertEqual(SQAReport.Status.ERROR, 2)

    def testReport(self):
        """getReport() reports per-category counts and optionally hides messages."""
        # The base class is abstract: execute() must raise.
        with self.assertRaises(NotImplementedError):
            r = SQAReport()
            r.execute()

        logger = logging.getLogger('moosesqa')

        class TestReport(SQAReport):
            def execute(self):
                logger = LogHelper('moosesqa', 'log_error', log_critical=logging.CRITICAL,
                                   log_warning=logging.WARNING)
                logger.log('log_warning', 'warning message')
                logger.log('log_error', 'error message')
                logger.log('log_critical', 'critical message')
                return logger

        # Each case: (constructor kwargs, the one message text expected hidden).
        cases = [(dict(), None),
                 (dict(show_warning=False), 'warning message'),
                 (dict(show_error=False), 'error message'),
                 (dict(show_critical=False), 'critical message')]
        for kwargs, hidden in cases:
            report = TestReport(**kwargs)
            text = str(report.getReport())
            self.assertEqual(report.status, SQAReport.Status.ERROR)
            # The per-category counts are always present.
            self.assertIn('log_warning: 1', text)
            self.assertIn('log_error: 1', text)
            self.assertIn('log_critical: 1', text)
            # Message bodies appear unless explicitly suppressed.
            for message in ('warning message', 'error message', 'critical message'):
                if message == hidden:
                    self.assertNotIn(message, text)
                else:
                    self.assertIn(message, text)

    @mock.patch('mooseutils.colorText', side_effect=lambda t, c, **kwargs: '{}::{}'.format(c, t))
    def testColorText(self, color_text):
        """_colorTextByStatus/_colorTextByMode select the expected colors."""
        r = SQAReport()
        self.assertEqual(r._colorTextByStatus(1, SQAReport.Status.PASS), 'LIGHT_GREEN::1')
        self.assertEqual(r._colorTextByStatus(1, SQAReport.Status.ERROR), 'LIGHT_RED::1')
        self.assertEqual(r._colorTextByStatus(1, SQAReport.Status.WARNING), 'LIGHT_YELLOW::1')
        self.assertEqual(r._colorTextByMode(1, logging.ERROR), 'LIGHT_RED::1')
        self.assertEqual(r._colorTextByMode(1, logging.WARNING), 'LIGHT_YELLOW::1')

    @mock.patch('mooseutils.colorText', side_effect=lambda t, c, **kwargs: '{}::{}'.format(c, t))
    def testGetStatusText(self, color_text):
        """_getStatusText maps each status to a colored label."""
        r = SQAReport()
        self.assertEqual(r._getStatusText(SQAReport.Status.PASS), 'LIGHT_GREEN::OK')
        self.assertEqual(r._getStatusText(SQAReport.Status.WARNING), 'LIGHT_YELLOW::WARNING')
        self.assertEqual(r._getStatusText(SQAReport.Status.ERROR), 'LIGHT_RED::FAIL')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
harterj/moose
|
python/moosesqa/test/test_SQAReport.py
|
Python
|
lgpl-2.1
| 4,138
|
[
"MOOSE"
] |
00113b7dd02139ac9dec3776f26c359aecae168c292bc55f9f620c23cd7d4df8
|
"""
This script compares Amber energies from GMIN binding and two different ways via OpenMM.
GMIN Input files are coords.inpcrd, coords.prmtop and min.in. From Fortran code the energy is -21.7345926639 kcal/mol
One of the OpenMM calculation uses coords.inpcrd for coordinates and coords.prmtop for ff params.
The other OpenMM calc uses coords.pdb for coordinates and picks Amber ff params from OpenMM's own implementation.
Strangely the second calculation is in better agreement with GMIN energy!
Amber system class in not used here. So this script would be a good starting point to understand how OpenMM and GMIN function calls work.
"""
import ambgmin_ as GMIN
import pygmin.potentials.gminpotential as gminpot
# OpenMM
from simtk.openmm.app import AmberPrmtopFile, AmberInpcrdFile, Simulation
from simtk.openmm.app import pdbfile as openmmpdb
from simtk.openmm import *
from simtk.unit import picosecond
import simtk.openmm.app.forcefield as openmmff
#from sys import stdout
# energy from GMIN
GMIN.initialize() # reads coords.inpcrd and coords.prmtop
pot = gminpot.GMINPotential(GMIN)
coords = pot.getCoords()
enerGmin = pot.getEnergy(coords)*4.184
# ----- OpenMM
# setup using inpcrd and prmtop
prmtop = AmberPrmtopFile('coords.prmtop')
inpcrd = AmberInpcrdFile('coords.inpcrd')
system1 = prmtop.createSystem(nonbondedMethod=openmmff.NoCutoff )
integrator1 = VerletIntegrator(0.001*picosecond)
simulation1 = Simulation(prmtop.topology, system1, integrator1)
simulation1.context.setPositions(inpcrd.positions)
# get energy
ener1 = simulation1.context.getState(getEnergy=True).getPotentialEnergy()
# setup using pdb and built-in amber ff
pdb = openmmpdb.PDBFile('coords.pdb')
forcefield = openmmff.ForceField('amber99sb.xml', 'tip3p.xml')
system2 = forcefield.createSystem(pdb.topology, nonbondedMethod=openmmff.NoCutoff)
integrator2 = VerletIntegrator(0.001*picosecond)
simulation2 = Simulation(pdb.topology, system2, integrator2)
simulation2.context.setPositions(pdb.positions)
# get energy
ener2 = simulation2.context.getState(getEnergy=True).getPotentialEnergy()
# print all energies
print "Energies (kJ/mol)"
print "AMBGMIN OpenMM inpcrd/prmtop OpenMM pdb/amb99sb "
print "-------------------------------------------------------- "
print enerGmin , ener1, ener2
|
js850/PyGMIN
|
examples/amber/gmin_vs_openmm.py
|
Python
|
gpl-3.0
| 2,319
|
[
"Amber",
"OpenMM"
] |
6b4518588797e6cc7014ab4ba53bfdd42d88ee88d987eaba6aaeba0daf92cdf8
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Example of connecting with exPASy and parsing SwissProt records."""
# biopython
from __future__ import print_function
from Bio import ExPASy, SwissProt

# Chalcone synthases from Orchid.
# NOTE: renamed the loop variable from 'id' to 'accession' — 'id' shadowed
# the Python builtin of the same name.
accessions = ['O23729', 'O23730', 'O23731']
for accession in accessions:
    # Fetch the raw SwissProt record from the ExPASy web service and parse it.
    handle = ExPASy.get_sprot_raw(accession)
    record = SwissProt.read(handle)
    print("description: %s" % record.description)
    for ref in record.references:
        print("authors: %s" % ref.authors)
        print("title: %s" % ref.title)
    print("classification: %s" % record.organism_classification)
    print("")
|
updownlife/multipleK
|
dependencies/biopython-1.65/Doc/examples/swissprot.py
|
Python
|
gpl-2.0
| 766
|
[
"Biopython"
] |
9b4c0b42c196e79467a7c11f368947c05559040ab25adbc86d89dffff747362f
|
"""
This module adapted ANUGA
https://anuga.anu.edu.au/
"""
#FIXME: Ensure that all attributes of a georef are treated everywhere
#and unit test
import types, sys
import copy
import numpy as num
# Module-wide defaults for a georeference.
DEFAULT_ZONE = -1  # sentinel meaning "UTM zone unknown/unspecified"
TITLE = '#geo reference' + "\n" # this title is referred to in the test format
DEFAULT_PROJECTION = 'UTM'
DEFAULT_DATUM = 'wgs84'
DEFAULT_UNITS = 'm'
DEFAULT_FALSE_EASTING = 500000  # standard UTM false easting, metres
DEFAULT_FALSE_NORTHING = 10000000 # Default for southern hemisphere
##
# @brief A class for ...
class Geo_reference:
"""
Attributes of the Geo_reference class:
.zone The UTM zone (default is -1)
.false_easting ??
.false_northing ??
.datum The Datum used (default is wgs84)
.projection The projection used (default is 'UTM')
.units The units of measure used (default metres)
.xllcorner The X coord of origin (default is 0.0 wrt UTM grid)
.yllcorner The y coord of origin (default is 0.0 wrt UTM grid)
.is_absolute ??
"""
##
# @brief Instantiate an instance of class Geo_reference.
# @param zone The UTM zone.
# @param xllcorner X coord of origin of georef.
# @param yllcorner Y coord of origin of georef.
# @param datum ??
# @param projection The projection used (default UTM).
# @param units Units used in measuring distance (default m).
# @param false_easting ??
# @param false_northing ??
# @param NetCDFObject NetCDF file *handle* to write to.
# @param ASCIIFile ASCII text file *handle* to write to.
# @param read_title Title of the georeference text.
def __init__(self,
zone=DEFAULT_ZONE,
xllcorner=0.0,
yllcorner=0.0,
datum=DEFAULT_DATUM,
projection=DEFAULT_PROJECTION,
units=DEFAULT_UNITS,
false_easting=DEFAULT_FALSE_EASTING,
false_northing=DEFAULT_FALSE_NORTHING,
NetCDFObject=None,
ASCIIFile=None,
read_title=None):
"""
input:
NetCDFObject - a handle to the netCDF file to be written to
ASCIIFile - a handle to the text file
read_title - the title of the georeference text, if it was read in.
If the function that calls this has already read the title line,
it can't unread it, so this info has to be passed.
If you know of a way to unread this info, then tell us.
Note, the text file only saves a sub set of the info the
points file does. Currently the info not written in text
must be the default info, since ANUGA assumes it isn't
changing.
"""
if zone is None:
zone = DEFAULT_ZONE
self.false_easting = int(false_easting)
self.false_northing = int(false_northing)
self.datum = datum
self.projection = projection
self.zone = int(zone)
self.units = units
self.xllcorner = float(xllcorner)
self.yllcorner = float(yllcorner)
if NetCDFObject is not None:
self.read_NetCDF(NetCDFObject)
if ASCIIFile is not None:
self.read_ASCII(ASCIIFile, read_title=read_title)
# Set flag for absolute points (used by get_absolute)
self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
def get_xllcorner(self):
return self.xllcorner
##
# @brief Get the Y coordinate of the origin of this georef.
def get_yllcorner(self):
return self.yllcorner
##
# @brief Get the zone of this georef.
def get_zone(self):
return self.zone
##
# @brief Write <something> to an open NetCDF file.
# @param outfile Handle to open NetCDF file.
def write_NetCDF(self, outfile):
outfile.xllcorner = self.xllcorner
outfile.yllcorner = self.yllcorner
outfile.zone = self.zone
outfile.false_easting = self.false_easting
outfile.false_northing = self.false_northing
outfile.datum = self.datum
outfile.projection = self.projection
outfile.units = self.units
##
# @brief Read data from an open NetCDF file.
# @param infile Handle to open NetCDF file.
def read_NetCDF(self, infile):
self.xllcorner = float(infile.xllcorner[0])
self.yllcorner = float(infile.yllcorner[0])
self.zone = int(infile.zone[0])
try:
self.false_easting = int(infile.false_easting[0])
self.false_northing = int(infile.false_northing[0])
self.datum = infile.datum
self.projection = infile.projection
self.units = infile.units
except:
pass
if self.false_easting != DEFAULT_FALSE_EASTING:
print "WARNING: False easting of %f specified." % self.false_easting
print "Default false easting is %f." % DEFAULT_FALSE_EASTING
print "ANUGA does not correct for differences in False Eastings."
if self.false_northing != DEFAULT_FALSE_NORTHING:
print ("WARNING: False northing of %f specified."
% self.false_northing)
print "Default false northing is %f." % DEFAULT_FALSE_NORTHING
print "ANUGA does not correct for differences in False Northings."
if self.datum.upper() != DEFAULT_DATUM.upper():
print "WARNING: Datum of %s specified." % self.datum
print "Default Datum is %s." % DEFAULT_DATUM
print "ANUGA does not correct for differences in datums."
if self.projection.upper() != DEFAULT_PROJECTION.upper():
print "WARNING: Projection of %s specified." % self.projection
print "Default Projection is %s." % DEFAULT_PROJECTION
print "ANUGA does not correct for differences in Projection."
if self.units.upper() != DEFAULT_UNITS.upper():
print "WARNING: Units of %s specified." % self.units
print "Default units is %s." % DEFAULT_UNITS
print "ANUGA does not correct for differences in units."
################################################################################
# ASCII files with geo-refs are currently not used
################################################################################
##
# @brief Write georef data to an open text file.
# @param fd Handle to open text file.
def write_ASCII(self, fd):
fd.write(TITLE)
fd.write(str(self.zone) + "\n")
fd.write(str(self.xllcorner) + "\n")
fd.write(str(self.yllcorner) + "\n")
##
# @brief Read georef data from an open text file.
# @param fd Handle to open text file.
def read_ASCII(self, fd, read_title=None):
try:
if read_title == None:
read_title = fd.readline() # remove the title line
if read_title[0:2].upper() != TITLE[0:2].upper():
msg = ('File error. Expecting line: %s. Got this line: %s'
% (TITLE, read_title))
raise TitleError, msg
self.zone = int(fd.readline())
self.xllcorner = float(fd.readline())
self.yllcorner = float(fd.readline())
except SyntaxError:
msg = 'File error. Got syntax error while parsing geo reference'
raise ParsingError, msg
# Fix some assertion failures
if isinstance(self.zone, num.ndarray) and self.zone.shape == ():
self.zone = self.zone[0]
if (isinstance(self.xllcorner, num.ndarray) and
self.xllcorner.shape == ()):
self.xllcorner = self.xllcorner[0]
if (isinstance(self.yllcorner, num.ndarray) and
self.yllcorner.shape == ()):
self.yllcorner = self.yllcorner[0]
assert (type(self.xllcorner) == types.FloatType)
assert (type(self.yllcorner) == types.FloatType)
assert (type(self.zone) == types.IntType)
################################################################################
##
# @brief Change points to be absolute wrt new georef 'points_geo_ref'.
# @param points The points to change.
# @param points_geo_ref The new georef to make points absolute wrt.
# @return The changed points.
# @note If 'points' is a list then a changed list is returned.
def change_points_geo_ref(self, points, points_geo_ref=None):
"""Change the geo reference of a list or numeric array of points to
be this reference.(The reference used for this object)
If the points do not have a geo ref, assume 'absolute' values
"""
import copy
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
# sanity checks
if len(points.shape) == 1:
#One point has been passed
msg = 'Single point must have two elements'
assert len(points) == 2, msg
points = num.reshape(points, (1,2))
msg = 'Points array must be two dimensional.\n'
msg += 'I got %d dimensions' %len(points.shape)
assert len(points.shape) == 2, msg
msg = 'Input must be an N x 2 array or list of (x,y) values. '
msg += 'I got an %d x %d array' %points.shape
assert points.shape[1] == 2, msg
# FIXME (Ole): Could also check if zone, xllcorner, yllcorner
# are identical in the two geo refs.
if points_geo_ref is not self:
# If georeferences are different
points = copy.copy(points) # Don't destroy input
if not points_geo_ref is None:
# Convert points to absolute coordinates
points[:,0] += points_geo_ref.xllcorner
points[:,1] += points_geo_ref.yllcorner
# Make points relative to primary geo reference
points[:,0] -= self.xllcorner
points[:,1] -= self.yllcorner
if is_list:
points = points.tolist()
return points
def is_absolute(self):
"""Return True if xllcorner==yllcorner==0 indicating that points
in question are absolute.
"""
# FIXME(Ole): It is unfortunate that decision about whether points
# are absolute or not lies with the georeference object. Ross pointed this out.
# Moreover, this little function is responsible for a large fraction of the time
# using in data fitting (something in like 40 - 50%.
# This was due to the repeated calls to allclose.
# With the flag method fitting is much faster (18 Mar 2009).
# FIXME(Ole): HACK to be able to reuse data already cached (18 Mar 2009).
# Remove at some point
if not hasattr(self, 'absolute'):
self.absolute = num.allclose([self.xllcorner, self.yllcorner], 0)
# Return absolute flag
return self.absolute
def get_absolute(self, points):
"""Given a set of points geo referenced to this instance,
return the points as absolute values.
"""
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
if len(points.shape) == 1:
# One point has been passed
msg = 'Single point must have two elements'
if not len(points) == 2:
raise ShapeError, msg
msg = 'Input must be an N x 2 array or list of (x,y) values. '
msg += 'I got an %d x %d array' %points.shape
if not points.shape[1] == 2:
raise ShapeError, msg
# Add geo ref to points
if not self.is_absolute():
points = copy.copy(points) # Don't destroy input
points[:,0] += self.xllcorner
points[:,1] += self.yllcorner
if is_list:
points = points.tolist()
return points
##
# @brief Convert points to relative measurement.
# @param points Points to convert to relative measurements.
# @return A set of points relative to the geo_reference instance.
def get_relative(self, points):
"""Given a set of points in absolute UTM coordinates,
make them relative to this geo_reference instance,
return the points as relative values.
This is the inverse of get_absolute.
"""
# remember if we got a list
is_list = isinstance(points, list)
points = ensure_numeric(points, num.float)
if len(points.shape) == 1:
#One point has been passed
msg = 'Single point must have two elements'
if not len(points) == 2:
raise ShapeError, msg
if not points.shape[1] == 2:
msg = ('Input must be an N x 2 array or list of (x,y) values. '
'I got an %d x %d array' % points.shape)
raise ShapeError, msg
# Subtract geo ref from points
if not self.is_absolute():
points = copy.copy(points) # Don't destroy input
points[:,0] -= self.xllcorner
points[:,1] -= self.yllcorner
if is_list:
points = points.tolist()
return points
##
# @brief ??
# @param other ??
def reconcile_zones(self, other):
if other is None:
other = Geo_reference()
if (self.zone == other.zone or
self.zone == DEFAULT_ZONE and
other.zone == DEFAULT_ZONE):
pass
elif self.zone == DEFAULT_ZONE:
self.zone = other.zone
elif other.zone == DEFAULT_ZONE:
other.zone = self.zone
else:
msg = ('Geospatial data must be in the same '
'ZONE to allow reconciliation. I got zone %d and %d'
% (self.zone, other.zone))
raise ANUGAError, msg
#def easting_northing2geo_reffed_point(self, x, y):
# return [x-self.xllcorner, y - self.xllcorner]
#def easting_northing2geo_reffed_points(self, x, y):
# return [x-self.xllcorner, y - self.xllcorner]
##
# @brief Get origin of this geo_reference.
# @return (zone, xllcorner, yllcorner).
def get_origin(self):
return (self.zone, self.xllcorner, self.yllcorner)
##
# @brief Get a string representation of this geo_reference instance.
def __repr__(self):
return ('(zone=%i easting=%f, northing=%f)'
% (self.zone, self.xllcorner, self.yllcorner))
##
# @brief Compare two geo_reference instances.
# @param self This geo_reference instance.
# @param other Another geo_reference instance to compare against.
# @return 0 if instances have the same attributes, else 1.
# @note Attributes are: zone, xllcorner, yllcorner.
def __cmp__(self, other):
# FIXME (DSG) add a tolerence
if other is None:
return 1
cmp = 0
if not (self.xllcorner == self.xllcorner):
cmp = 1
if not (self.yllcorner == self.yllcorner):
cmp = 1
if not (self.zone == self.zone):
cmp = 1
return cmp
##
# @brief Write a geo_reference to a NetCDF file (usually SWW).
# @param origin A georef instance or parameters to create a georef instance.
# @param outfile Open NetCDF file handle to write to.
# @return The normalized geo_reference.
def write_NetCDF_georeference(origin, outfile):
    """Normalise *origin* to a Geo_reference and write it to *outfile*.

    origin may be an existing Geo_reference instance or the parameters
    needed to build one; outfile is an open NetCDF file handle, usually
    an sww file.  Returns the normalised Geo_reference instance.
    """
    reference = ensure_geo_reference(origin)
    reference.write_NetCDF(outfile)
    return reference
##
# @brief Convert an object to a georeference instance.
# @param origin A georef instance or (zone, xllcorner, yllcorner), or None.
# @return A georef object, or None if 'origin' was None.
def ensure_geo_reference(origin):
    """
    Given a list/tuple of zone, xllcorner and yllcorner of a geo-ref object,
    return a geo ref object.

    If the origin is None, return None, so calling this function doesn't
    effect code logic
    """
    if isinstance(origin, Geo_reference):
        return origin
    if origin is None:
        return None
    # 'apply(f, args)' is deprecated (and removed in Python 3); argument
    # unpacking is the equivalent, portable spelling.
    return Geo_reference(*origin)
#-----------------------------------------------------------------------
if __name__ == "__main__":
pass
|
bugobliterator/MAVProxy
|
MAVProxy/modules/lib/ANUGA/geo_reference.py
|
Python
|
gpl-3.0
| 16,841
|
[
"NetCDF"
] |
7496fc55ce60765e7fb3a4f90fed04147852772eaca4ad923a13fa7e30e264af
|
# Copyright 2017 Max Planck Society
# Distributed under the BSD-3 Software license,
# (See accompanying file ./LICENSE.txt or copy at
# https://opensource.org/licenses/BSD-3-Clause)
"""Training AdaGAN on various datasets.
Refer to the arXiv paper 'AdaGAN: Boosting Generative Models'
Coded by Ilya Tolstikhin, Carl-Johann Simon-Gabriel
"""
import os
import argparse
import logging
import tensorflow as tf
import numpy as np
from datahandler import DataHandler
from adagan import AdaGan
from metrics import Metrics
import utils
# Command-line flags.  The bracketed numbers in the help strings are
# historical defaults kept for reference; the actual defaults are the
# second argument of each DEFINE_* call.
flags = tf.app.flags
flags.DEFINE_float("g_learning_rate", 0.0002,
                   "Learning rate for Generator optimizers [16e-4]")
flags.DEFINE_float("d_learning_rate", 0.0001,
                   "Learning rate for Discriminator optimizers [4e-4]")
flags.DEFINE_float("learning_rate", 0.003,
                   "Learning rate for other optimizers [8e-4]")
flags.DEFINE_float("adam_beta1", 0.5, "Beta1 parameter for Adam optimizer [0.5]")
flags.DEFINE_integer("zdim", 50, "Dimensionality of the latent space [100]")
flags.DEFINE_float("init_std", 0.01, "Initial variance for weights [0.02]")
flags.DEFINE_string("workdir", 'results_cifar10_pot_conv', "Working directory ['results']")
flags.DEFINE_bool("unrolled", False, "Use unrolled GAN training [True]")
flags.DEFINE_bool("vae", False, "Use VAE instead of GAN")
flags.DEFINE_bool("pot", True, "Use POT instead of GAN")
flags.DEFINE_float("pot_lambda", 1., "POT regularization")
flags.DEFINE_bool("is_bagging", False, "Do we want to use bagging instead of adagan? [False]")
FLAGS = flags.FLAGS
def main():
    """Configure and run AdaGAN training (POT flavour) on CIFAR-10.

    All hyper-parameters are gathered in the ``opts`` dict, dumped to
    ``params.txt`` in the working directory, and handed to DataHandler /
    AdaGan.  A few settings are overridden by the command-line FLAGS.
    """
    opts = {}
    # Utility
    opts['random_seed'] = 66
    opts['dataset'] = 'cifar10' # gmm, circle_gmm, mnist, mnist3 ...
    opts['data_dir'] = 'cifar10'
    opts['trained_model_path'] = None #'models'
    opts['mnist_trained_model_file'] = None #'mnist_trainSteps_19999_yhat' # 'mnist_trainSteps_20000'
    opts['work_dir'] = FLAGS.workdir
    opts['ckpt_dir'] = 'checkpoints'
    opts["verbose"] = 1
    opts['tf_run_batch_size'] = 128
    opts["early_stop"] = -1 # set -1 to run normally
    opts["plot_every"] = 150
    opts["save_every_epoch"] = 10
    opts['gmm_max_val'] = 15.
    # Datasets
    opts['toy_dataset_size'] = 10000
    opts['toy_dataset_dim'] = 2
    opts['mnist3_dataset_size'] = 2 * 64 # 64 * 2500
    opts['mnist3_to_channels'] = False # Hide 3 digits of MNIST to channels
    opts['input_normalize_sym'] = False # Normalize data to [-1, 1]
    opts['gmm_modes_num'] = 5
    # AdaGAN parameters
    opts['adagan_steps_total'] = 1
    opts['samples_per_component'] = 1000
    opts['is_bagging'] = FLAGS.is_bagging
    opts['beta_heur'] = 'uniform' # uniform, constant
    opts['weights_heur'] = 'theory_star' # theory_star, theory_dagger, topk
    opts['beta_constant'] = 0.5
    opts['topk_constant'] = 0.5
    opts["mixture_c_epoch_num"] = 5
    opts["eval_points_num"] = 25600
    opts['digit_classification_threshold'] = 0.999
    opts['inverse_metric'] = False # Use metric from the Unrolled GAN paper?
    opts['inverse_num'] = 100 # Number of real points to inverse.
    opts['objective'] = None
    # Generative model parameters
    opts["init_std"] = FLAGS.init_std
    opts["init_bias"] = 0.0
    opts['latent_space_distr'] = 'normal' # uniform, normal
    opts['latent_space_dim'] = FLAGS.zdim
    opts["gan_epoch_num"] = 200
    opts['convolutions'] = True
    opts['d_num_filters'] = 512
    opts['d_num_layers'] = 4
    opts['g_num_filters'] = 1024
    opts['g_num_layers'] = 3
    opts['e_is_random'] = False
    opts['e_num_filters'] = 1024
    opts['e_num_layers'] = 3
    opts['g_arch'] = 'dcgan_mod'
    opts['g_stride1_deconv'] = False
    opts['g_3x3_conv'] = 0
    opts['e_arch'] = 'dcgan'
    opts['e_3x3_conv'] = 0
    opts['conv_filters_dim'] = 5
    # --GAN specific:
    opts['conditional'] = False
    opts['unrolled'] = FLAGS.unrolled # Use Unrolled GAN? (only for images)
    opts['unrolling_steps'] = 5 # Used only if unrolled = True
    # --VAE specific
    opts['vae'] = FLAGS.vae
    opts['vae_sigma'] = 0.01
    # --POT specific
    opts['pot'] = FLAGS.pot
    opts['pot_pz_std'] = 2.
    opts['pot_lambda'] = FLAGS.pot_lambda
    opts['adv_c_loss'] = 'none'
    opts['vgg_layer'] = 'pool2'
    opts['adv_c_patches_size'] = 5
    opts['adv_c_num_units'] = 32
    opts['adv_c_loss_w'] = 0.0
    opts['cross_p_w'] = 0.0
    opts['diag_p_w'] = 0.0
    opts['emb_c_loss_w'] = 0.0
    opts['reconstr_w'] = 1.0
    opts['z_test'] = 'gan'
    opts['z_test_corr_w'] = 0.1
    opts['z_test_proj_dim'] = 50
    # Optimizer parameters
    opts['optimizer'] = 'adam' # sgd, adam
    opts["batch_size"] = 100
    opts["d_steps"] = 1
    opts['d_new_minibatch'] = False
    opts["g_steps"] = 2
    opts['batch_norm'] = True
    opts['dropout'] = True
    opts['dropout_keep_prob'] = 0.5
    opts['recon_loss'] = 'l2'
    # "manual" or number (float or int) giving the number of epochs to divide
    # the learning rate by 10 (converted into an exp decay per epoch).
    opts['decay_schedule'] = 100
    opts['opt_learning_rate'] = FLAGS.learning_rate
    opts['opt_d_learning_rate'] = FLAGS.d_learning_rate
    opts['opt_g_learning_rate'] = FLAGS.g_learning_rate
    opts["opt_beta1"] = FLAGS.adam_beta1
    opts['batch_norm_eps'] = 1e-05
    opts['batch_norm_decay'] = 0.9
    if opts['e_is_random']:
        assert opts['latent_space_distr'] == 'normal',\
            'Random encoders currently work only with Gaussian Pz'
    # Data augmentation
    opts['data_augm'] = False
    # NOTE: logging.basicConfig is a no-op once the root logger is
    # configured, so the second call below only takes effect when
    # verbose is falsy — net effect: DEBUG if verbose else INFO.
    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s')
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
    # Persist the full configuration for reproducibility.
    utils.create_dir(opts['work_dir'])
    utils.create_dir(os.path.join(opts['work_dir'], opts['ckpt_dir']))
    with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
        text.write('Parameters:\n')
        for key in opts:
            text.write('%s : %s\n' % (key, opts[key]))
    data = DataHandler(opts)
    assert data.num_points >= opts['batch_size'], 'Training set too small'
    adagan = AdaGan(opts, data)
    metrics = Metrics()
    # Plot a random subset of the training data before training starts.
    train_size = data.num_points
    random_idx = np.random.choice(train_size, 4*320, replace=False)
    metrics.make_plots(opts, 0, data.data,
                       data.data[random_idx], adagan._data_weights, prefix='dataset_')
    # Boosting loop: each step trains one mixture component.
    for step in range(opts["adagan_steps_total"]):
        logging.info('Running step {} of AdaGAN'.format(step + 1))
        adagan.make_step(opts, data)
        num_fake = opts['eval_points_num']
        logging.debug('Sampling fake points')
        fake_points = adagan.sample_mixture(num_fake)
        logging.debug('Sampling more fake points')
        more_fake_points = adagan.sample_mixture(500)
        logging.debug('Plotting results')
        if opts['dataset'] == 'gmm':
            metrics.make_plots(opts, step, data.data[:500],
                               fake_points[0:100], adagan._data_weights[:500])
            logging.debug('Evaluating results')
            (likelihood, C) = metrics.evaluate(
                opts, step, data.data[:500],
                fake_points, more_fake_points, prefix='')
        else:
            metrics.make_plots(opts, step, data.data,
                               fake_points[:320], adagan._data_weights)
            if opts['inverse_metric']:
                logging.debug('Evaluating results')
                # Best (minimum) inversion loss seen over all steps so far.
                l2 = np.min(adagan._invert_losses[:step + 1], axis=0)
                logging.debug('MSE=%.5f, STD=%.5f' % (np.mean(l2), np.std(l2)))
            res = metrics.evaluate(
                opts, step, data.data[:500],
                fake_points, more_fake_points, prefix='')
    logging.debug("AdaGan finished working!")
if __name__ == '__main__':
main()
|
tolstikhin/adagan
|
adagan_cifar.py
|
Python
|
bsd-3-clause
| 7,802
|
[
"Gaussian"
] |
2b9d42dfad5041956a5803f0ade684e40a47103e21bb135d18666ae8dd35d504
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import os,unittest,numpy as np
from pyscf import gto, scf, tddft
from pyscf.data.nist import HARTREE2EV
from pyscf.nao import bse_iter
from pyscf.nao.m_polariz_inter_ave import polariz_freq_osc_strength
class KnowValues(unittest.TestCase):
    """Compare PySCF TDHF and nao BSE polarizabilities for an open-shell system."""

    def test_161_bse_h2b_spin1_uhf_cis(self):
        """CIS polarizability of the BH2 doublet: TDHF reference vs bse_iter."""
        # Open-shell (spin=1) BH2 molecule in a cc-pVDZ basis.
        mol = gto.M(verbose=1,atom='B 0 0 0; H 0 0.489 1.074; H 0 0.489 -1.074',basis='cc-pvdz',spin=1)
        mean_field = scf.UHF(mol)
        mean_field.kernel()
        # Reference: PySCF TDHF oscillator strengths -> averaged polarizability.
        td = tddft.TDHF(mean_field)
        td.nstates = 150
        td.kernel()
        omegas = np.arange(0.0, 2.0, 0.01) + 1j*0.03
        p_ave = -polariz_freq_osc_strength(td.e, td.oscillator_strength(), omegas).imag
        pyscf_data = np.array([omegas.real*HARTREE2EV, p_ave])
        np.savetxt('test_0161_bse_h2b_spin1_uhf_cis_pyscf.txt', pyscf_data.T, fmt=['%f','%f'])
        # Same spectrum from the iterative BSE solver on the same mean field.
        nao_td = bse_iter(mf=mean_field, gto=mol, verbosity=0, xc_code='CIS')
        polariz = -nao_td.comp_polariz_inter_ave(omegas).imag
        nao_data = np.array([omegas.real*HARTREE2EV, polariz])
        np.savetxt('test_0161_bse_h2b_spin1_uhf_cis_nao.txt', nao_data.T, fmt=['%f','%f'])
        # Comparisons against stored reference data are currently disabled
        # upstream; both curves are only written to disk.
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0161_bse_h2b_spin1_uhf_cis.py
|
Python
|
apache-2.0
| 2,081
|
[
"PySCF"
] |
79be5b7cf75f24c63c6bac8a6cb4d2916f536ece45a25fae77bfed5c02a5cc31
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
m = 2000
n = 1000
display = True
worldRank = El.mpi.WorldRank()
worldSize = El.mpi.WorldSize()
def Rectang(height,width):
    """Return a distributed matrix of the given shape with uniform random entries."""
    mat = El.DistMatrix()
    El.Uniform( mat, height, width )
    return mat
# NOTE(review): this script uses Python 2 print statements and raw_input;
# it will not run unmodified under Python 3.
# Build a random m x n system: uniform A, Gaussian right-hand side b.
A = Rectang(m,n)
b = El.DistMatrix()
El.Gaussian( b, m, 1 )
if display:
  El.Display( A, "A" )
  El.Display( b, "b" )
# Solve via El.CP with an affine LP solver; progress printing enabled.
# (CP here presumably means the Chebyshev-point / min inf-norm problem,
# matching the residual norms reported below -- confirm against El docs.)
ctrl = El.LPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
startCP = El.mpi.Time()
x = El.CP( A, b, ctrl )
endCP = El.mpi.Time()
if worldRank == 0:
  print "CP time:", endCP-startCP, "seconds"
if display:
  El.Display( x, "x" )
# Report residual r = b - A x in both 2- and infinity-norms.
bTwoNorm = El.Nrm2( b )
bInfNorm = El.MaxNorm( b )
r = El.DistMatrix()
El.Copy( b, r )
El.Gemv( El.NORMAL, -1., A, x, 1., r )
if display:
  El.Display( r, "r" )
rTwoNorm = El.Nrm2( r )
rInfNorm = El.MaxNorm( r )
if worldRank == 0:
  print "|| b ||_2 =", bTwoNorm
  print "|| b ||_oo =", bInfNorm
  print "|| A x - b ||_2 =", rTwoNorm
  print "|| A x - b ||_oo =", rInfNorm
# Solve the same system in the least-squares sense for comparison.
startLS = El.mpi.Time()
xLS = El.LeastSquares(A,b)
endLS = El.mpi.Time()
if worldRank == 0:
  print "LS time:", endLS-startLS, "seconds"
if display:
  El.Display( xLS, "x_{LS}" )
# Least-squares residual, same norms as above.
rLS = El.DistMatrix()
El.Copy( b, rLS )
El.Gemv( El.NORMAL, -1., A, xLS, 1., rLS )
if display:
  El.Display( rLS, "A x_{LS} - b" )
rLSTwoNorm = El.Nrm2(rLS)
rLSInfNorm = El.MaxNorm(rLS)
if worldRank == 0:
  print "|| A x_{LS} - b ||_2 =", rLSTwoNorm
  print "|| A x_{LS} - b ||_oo =", rLSInfNorm
# Require the user to press a button before the figures are closed
El.Finalize()
if worldSize == 1:
  raw_input('Press Enter to exit')
|
birm/Elemental
|
examples/interface/CPDense.py
|
Python
|
bsd-3-clause
| 1,815
|
[
"Gaussian"
] |
ea39d4f2be566c4d51d02b20d88f8026081f5f5f223fa59a1225b63096d7ab17
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pprint
import numpy as np
from ..util import distance_matrix, update_with_error, unnp
from ..exceptions import *
from ..physconst import psi_bohr2angstroms
from .chgmult import validate_and_fill_chgmult
from .nucleus import reconcile_nucleus
# Python 2/3 compatibility: on Python 3 the separate `long` type is gone,
# so alias it to `int` for the isinstance checks below.
try:
    long(1)
except NameError:
    long = int
def from_input_arrays(
        enable_qm=True,
        enable_efp=True,
        missing_enabled_return_qm='error',
        missing_enabled_return_efp='error',
        # qm
        geom=None,
        elea=None,
        elez=None,
        elem=None,
        mass=None,
        real=None,
        elbl=None,
        name=None,
        units='Angstrom',
        input_units_to_au=None,
        fix_com=None,
        fix_orientation=None,
        fix_symmetry=None,
        fragment_separators=None,
        fragment_charges=None,
        fragment_multiplicities=None,
        molecular_charge=None,
        molecular_multiplicity=None,
        # efp
        fragment_files=None,
        hint_types=None,
        geom_hints=None,
        # qm-vz
        geom_unsettled=None,
        variables=None,
        # processing details
        speclabel=True,
        tooclose=0.1,
        zero_ghost_fragments=False,
        nonphysical=False,
        mtol=1.e-3,
        verbose=1):
    """Compose domain-separated molrec dicts from unvalidated input arrays.

    Delegates to :py:func:`from_arrays` once per enabled domain and collects
    the results under keys ``'qm'`` and/or ``'efp'``.

    Returns
    -------
    dict
        Keys among {'qm', 'efp'}. A domain key is dropped when its processing
        returned an empty dict (enabled-but-missing with return mode 'none').
    """
    molinit = {}
    if enable_qm:
        molinit['qm'] = {}
    if enable_efp:
        molinit['efp'] = {}

    if enable_efp:
        processed = from_arrays(
            domain='efp',
            missing_enabled_return=missing_enabled_return_efp,
            units=units,
            input_units_to_au=input_units_to_au,
            fix_com=fix_com,
            fix_orientation=fix_orientation,
            fix_symmetry=fix_symmetry,
            fragment_files=fragment_files,
            hint_types=hint_types,
            geom_hints=geom_hints,
            # which other processing details needed?
            verbose=verbose)
        update_with_error(molinit, {'efp': processed})
        if molinit['efp'] == {}:
            del molinit['efp']

    efp_present = enable_efp and 'efp' in molinit and bool(len(molinit['efp']['geom_hints']))
    if efp_present:
        # An external (EFP) potential pins the QM frame: no translation,
        # no rotation, and only C1 symmetry.
        fix_com = True
        fix_orientation = True
        fix_symmetry = 'c1'

    if enable_qm:
        dm = 'qmvz' if geom_unsettled else 'qm'
        processed = from_arrays(
            domain=dm,
            missing_enabled_return=missing_enabled_return_qm,
            geom=geom,
            elea=elea,
            elez=elez,
            elem=elem,
            mass=mass,
            real=real,
            elbl=elbl,
            name=name,
            units=units,
            input_units_to_au=input_units_to_au,
            fix_com=fix_com,
            fix_orientation=fix_orientation,
            fix_symmetry=fix_symmetry,
            fragment_separators=fragment_separators,
            fragment_charges=fragment_charges,
            fragment_multiplicities=fragment_multiplicities,
            molecular_charge=molecular_charge,
            molecular_multiplicity=molecular_multiplicity,
            geom_unsettled=geom_unsettled,
            variables=variables,
            # processing details
            speclabel=speclabel,
            tooclose=tooclose,
            zero_ghost_fragments=zero_ghost_fragments,
            nonphysical=nonphysical,
            mtol=mtol,
            verbose=verbose)  # BUG FIX: was hard-coded verbose=1, silently ignoring the caller's verbosity
        update_with_error(molinit, {'qm': processed})
        if molinit['qm'] == {}:
            del molinit['qm']

    return molinit
def from_arrays(geom=None,
                elea=None,
                elez=None,
                elem=None,
                mass=None,
                real=None,
                elbl=None,
                name=None,
                units='Angstrom',
                input_units_to_au=None,
                fix_com=None,
                fix_orientation=None,
                fix_symmetry=None,
                fragment_separators=None,
                fragment_charges=None,
                fragment_multiplicities=None,
                molecular_charge=None,
                molecular_multiplicity=None,
                fragment_files=None,
                hint_types=None,
                geom_hints=None,
                geom_unsettled=None,
                variables=None,
                domain='qm',
                missing_enabled_return='error',
                np_out=True,
                speclabel=True,
                tooclose=0.1,
                zero_ghost_fragments=False,
                nonphysical=False,
                mtol=1.e-3,
                verbose=1):
    """Compose a Molecule dict from unvalidated arrays and variables, returning dict.

    minimum is geom and one of elem, elez, elbl

    Parameters
    ----------
    See fields of return molrec below. Required parameters are `geom` and one of `elem`, `elez`, `elbl` (`speclabel=True`)
    geom : array-like
        (nat, 3) or (3 * nat, ) ndarray or list o'lists of Cartesian coordinates.
    fragment_separators : array-like of int, optional
        (nfr - 1, ) list of atom indices at which to split `geom` into fragments.
    elbl : ndarray of str
        (nat, ) Label extending `elem` symbol, possibly conveying ghosting, isotope, mass, tagging information.
    tooclose : float, optional
        Interatom distance (native `geom` units) nearer than which atoms not allowed.
    nonphysical : bool, optional
    speclabel : bool, optional
        If `True`, interpret `elbl` as potentially full nucleus spec including
        ghosting, isotope, mass, tagging information, e.g., `@13C_mine` or
        `He4@4.01`. If `False`, interpret `elbl` as only the user/tagging
        extension to nucleus label, e.g. `_mine` or `4` in the previous examples.
    missing_enabled_return : {'minimal', 'none', 'error'}
        What to do when an enabled domain is of zero-length? Respectively, return
        a fully valid but empty molrec, return empty dictionary, or throw error.
    np_out : bool, optional
        When `True`, fields geom, elea, elez, elem, mass, real, elbl will be ndarray.
        Use `False` to get a json-able version.

    Returns
    -------
    molrec : dict
        Molecule dictionary spec follows. Its principles are (1)
        contents are fully validated and defaulted - no error checking
        necessary, (2) contents may be mildly redundant - atomic
        numbers and element symbols present, (3) big system,
        nat-length single-type arrays, not small system, nat-number
        heterogeneous objects, (4) some fields are optional (e.g.,
        symmetry) but largely self-describing so units or fix_com must
        be present.
        (5) apart from some mild optional fields, _all_ fields will
        be present (correlary of "fully validated and defaulted") - no
        need to check for every key. in some cases like efp, keys will
        appear in blocks, so pre-handshake there will be a few hint keys
        and post-handshake they will be joined by full qm-like molrec.
        (6) molrec should be idempotent through this function (equiv to
        schema validator) but are not idempostent throughout its life. if
        fields permit, frame may be changed. Future? if fields permit,
        mol may be symmetrized. Coordinates and angles may change units
        or range if program returns them in only one form.

    name : str, optional
        Label for molecule; should be valid Python identifier.
    units : {'Angstrom', 'Bohr'}
        Units for `geom`.
    input_units_to_au : float, optional
        If `units='Angstrom'`, overrides consumer's value for [A]-->[a0] conversion.
    fix_com : bool
        Whether translation of `geom` is allowed or disallowed.
    fix_orientation : bool
        Whether rotation of `geom` is allowed or disallowed.
    fix_symmetry : str, optional
        Maximal point group symmetry which `geom` should be treated. Lowercase.
    geom : ndarray of float
        (3 * nat, ) Cartesian coordinates in `units`.
    elea : ndarray of int
        (nat, ) Mass number for atoms, if known isotope, else -1.
    elez : ndarray of int
        (nat, ) Number of protons, nuclear charge for atoms.
    elem : ndarray of str
        (nat, ) Element symbol for atoms.
    mass : ndarray of float
        (nat, ) Atomic mass [u] for atoms.
    real : ndarray of bool
        (nat, ) Real/ghostedness for atoms.
    elbl : ndarray of str
        (nat, ) Label with any tagging information from element spec.
    fragment_separators : list of int
        (nfr - 1, ) list of atom indices at which to split `geom` into fragments.
    fragment_charges : list of float
        (nfr, ) list of charge allocated to each fragment.
    fragment_multiplicities : list of int
        (nfr, ) list of multiplicity allocated to each fragment.
    molecular_charge : float
        total charge on system.
    molecular_multiplicity : int
        total multiplicity on system.

    EFP extension (this + units is minimal)
    fragment_files : list of str
        (nfr, ) lowercased names of efp meat fragment files.
    hint_types : {'xyzabc', 'points'}
        (nfr, ) type of fragment orientation hint.
    geom_hints : list of lists of float
        (nfr, ) inner lists have length 6 (xyzabc; to orient the center) or
        9 (points; to orient the first three atoms) of the EFP fragment.

    QMVZ extension (geom_unsettled replaces geom)
    geom_unsettled : list of lists of str
        (nat, ) all-string Cartesian and/or zmat anchor and value contents
        mixing anchors, values, and variables.
    variables : list of pairs
        (nvar, 2) pairs of variables (str) and values (float). May be incomplete.

    """
    # << domain sorting >>
    available_domains = ['qm', 'efp', 'qmvz']
    if domain not in available_domains:
        raise ValidationError(
            'Topology domain {} not available for processing. Choose among {}'.format(domain, available_domains))

    # BUG FIX: the unparenthesized `A and B or C` bound as `(A and B) or C`,
    # so an empty geom / geom_hints list triggered the missing-domain branch
    # regardless of which domain was requested.
    if domain == 'qm' and (geom is None or geom == []):
        if missing_enabled_return == 'none':
            return {}
        elif missing_enabled_return == 'minimal':
            geom = []
        else:
            raise ValidationError("""For domain 'qm', `geom` must be provided.""")
    if domain == 'efp' and (geom_hints is None or geom_hints == []):
        if missing_enabled_return == 'none':
            return {}
        elif missing_enabled_return == 'minimal':
            geom_hints = []
            fragment_files = []
            hint_types = []
        else:
            raise ValidationError("""For domain 'efp', `geom_hints` must be provided.""")

    molinit = {}
    extern = False

    processed = validate_and_fill_units(
        name=name,
        units=units,
        input_units_to_au=input_units_to_au,
        always_return_iutau=False)
    update_with_error(molinit, processed)

    if domain == 'efp':
        processed = validate_and_fill_efp(
            fragment_files=fragment_files,
            hint_types=hint_types,
            geom_hints=geom_hints)
        update_with_error(molinit, processed)
        extern = bool(len(molinit['geom_hints']))

    if domain == 'qm' or (domain == 'efp' and geom is not None) or domain == 'qmvz':
        if domain == 'qmvz':
            processed = validate_and_fill_unsettled_geometry(
                geom_unsettled=geom_unsettled,
                variables=variables)
            update_with_error(molinit, processed)
            nat = len(molinit['geom_unsettled'])
        else:
            processed = validate_and_fill_geometry(
                geom=geom,
                tooclose=tooclose)
            update_with_error(molinit, processed)
            nat = molinit['geom'].shape[0] // 3

        processed = validate_and_fill_nuclei(
            nat,
            elea=elea,
            elez=elez,
            elem=elem,
            mass=mass,
            real=real,
            elbl=elbl,
            speclabel=speclabel,
            nonphysical=nonphysical,
            mtol=mtol,
            verbose=verbose)
        update_with_error(molinit, processed)

        processed = validate_and_fill_fragments(
            nat,
            fragment_separators=fragment_separators,
            fragment_charges=fragment_charges,
            fragment_multiplicities=fragment_multiplicities)
        update_with_error(molinit, processed)

        # Effective nuclear charges: ghost atoms contribute zero.
        Z_available = molinit['elez'] * molinit['real'] * 1.
        processed = validate_and_fill_chgmult(
            zeff=Z_available,
            fragment_separators=molinit['fragment_separators'],
            molecular_charge=molecular_charge,
            fragment_charges=molinit['fragment_charges'],
            molecular_multiplicity=molecular_multiplicity,
            fragment_multiplicities=molinit['fragment_multiplicities'],
            zero_ghost_fragments=zero_ghost_fragments,
            verbose=verbose)
        del molinit['fragment_charges']  # sometimes safe update is too picky about overwriting v_a_f_fragments values
        del molinit['fragment_multiplicities']
        update_with_error(molinit, processed)

        # NOTE(review): this overwrites the hint-based `extern` computed above
        # for domain='efp'; presumably intentional (any efp domain implies an
        # external potential) -- confirm before relying on the earlier value.
        extern = (domain == 'efp')

    processed = validate_and_fill_frame(
        extern=extern,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry)
    update_with_error(molinit, processed)

    if verbose >= 2:
        print('RETURN FROM qcdb.molparse.from_arrays(domain={})'.format(domain.upper()))
        pprint.pprint(molinit)

    if not np_out:
        molinit = unnp(molinit)
    return molinit
def validate_and_fill_units(name=None, units='Angstrom', input_units_to_au=None, always_return_iutau=False):
    """Validate `name`/`units` and resolve the input-to-Bohr conversion factor.

    Returns a dict carrying 'units' (and 'name' when given); the factor
    'input_units_to_au' is included only when explicitly supplied or when
    `always_return_iutau` is set.
    """
    processed = {}
    if name is not None:
        processed['name'] = name

    canonical = units.capitalize()
    if canonical not in ['Angstrom', 'Bohr']:
        raise ValidationError('Invalid molecule geometry units: {}'.format(units))
    processed['units'] = canonical

    # Default conversion factor for the recognized unit.
    iutau = 1. if canonical == 'Bohr' else 1. / psi_bohr2angstroms

    if input_units_to_au is not None:
        # Permit only small user perturbations of the physical constant.
        if abs(input_units_to_au - iutau) >= 0.05:
            raise ValidationError(
                """No big perturbations to physical constants! {} !~= {}""".format(iutau, input_units_to_au))
        iutau = input_units_to_au

    if always_return_iutau or input_units_to_au is not None:
        processed['input_units_to_au'] = iutau
    return processed
def validate_and_fill_frame(extern, fix_com=None, fix_orientation=None, fix_symmetry=None):
    """Resolve frame-fixing flags, defaulting unspecified ones to `extern`.

    With an external potential (`extern=True`) the frame must stay fixed, so
    explicit False values are rejected and only C1 symmetry is permitted.
    """
    # --- center of mass ---
    if fix_com is None:
        com = extern
    elif fix_com is True:
        com = True
    elif fix_com is False:
        if extern:
            raise ValidationError('Invalid fix_com ({}) with extern ({})'.format(fix_com, extern))
        com = False
    else:
        raise ValidationError('Invalid fix_com: {}'.format(fix_com))

    # --- orientation ---
    if fix_orientation is None:
        orient = extern
    elif fix_orientation is True:
        orient = True
    elif fix_orientation is False:
        if extern:
            raise ValidationError('Invalid fix_orientation ({}) with extern ({})'.format(fix_orientation, extern))
        orient = False
    else:
        raise ValidationError('Invalid fix_orientation: {}'.format(fix_orientation))

    # --- symmetry ---
    symm = None
    if extern:
        if fix_symmetry is None or fix_symmetry.lower() == 'c1':
            symm = 'c1'
        else:
            raise ValidationError('Invalid (non-C1) fix_symmetry ({}) with extern ({})'.format(fix_symmetry, extern))
    elif fix_symmetry is not None:
        symm = fix_symmetry.lower()

    processed = {'fix_com': com, 'fix_orientation': orient}
    if symm:
        processed['fix_symmetry'] = symm
    return processed
def validate_and_fill_efp(fragment_files=None, hint_types=None, geom_hints=None):
    """Validate EFP fragment specifications.

    Returns lowercased fragment file names, vetted hint types, and
    float-converted orientation hints of the length each hint type demands.
    """
    lengths_consistent = (fragment_files is not None and hint_types is not None and geom_hints is not None
                          and fragment_files != [None] and hint_types != [None] and geom_hints != [None]
                          and len(fragment_files) == len(hint_types) == len(geom_hints))
    if not lengths_consistent:
        raise ValidationError(
            """Missing or inconsistent length among efp quantities: fragment_files ({}), hint_types ({}), and geom_hints ({})""".
            format(fragment_files, hint_types, geom_hints))

    # NOTE: imposing case on file
    try:
        files = [f.lower() for f in fragment_files]
    except AttributeError:
        raise ValidationError("""fragment_files not strings: {}""".format(fragment_files))

    if not all(t in ['xyzabc', 'points', 'rotmat'] for t in hint_types):
        raise ValidationError("""hint_types not among 'xyzabc', 'points', 'rotmat': {}""".format(hint_types))
    types = hint_types

    # Required hint lengths: 6 orients the center, 9 the first three atoms,
    # 12 a full rotation matrix plus translation.
    expected_len = {'xyzabc': 6, 'points': 9, 'rotmat': 12}
    hints = []
    for ifr, fr in enumerate(geom_hints):
        try:
            hint = [float(f) for f in fr]
        except ValueError:
            raise ValidationError("""Un float-able elements in geom_hints[{}]: {}""".format(ifr, fr))
        htype = hint_types[ifr]
        if len(hint) != expected_len[htype]:
            raise ValidationError("""EFP hint type {} not {} elements: {}""".format(htype, expected_len[htype], hint))
        hints.append(hint)

    return {'fragment_files': files, 'hint_types': types, 'geom_hints': hints}
def validate_and_fill_geometry(geom=None, tooclose=0.1):
    """Check `geom` for overlapping atoms. Return flattened.

    Raises ValidationError when `geom` is missing or any interatomic
    distance falls below `tooclose` (native geometry units).
    """
    if geom is None:
        raise ValidationError("""Geometry must be provided.""")

    # FIX: `np.float` alias was removed in NumPy 1.20+; use the builtin.
    npgeom = np.array(geom, dtype=float).reshape((-1, 3))
    dm = distance_matrix(npgeom, npgeom)

    # Only the strict lower triangle is meaningful; blank the diagonal and
    # upper triangle with a sentinel larger than any sane `tooclose`.
    iu = np.triu_indices(dm.shape[0])
    dm[iu] = 10.
    tooclosem = np.where(dm < tooclose)
    if tooclosem[0].shape[0]:
        raise ValidationError(
            """Following atoms are too close: {}""".format([(i, j, dm[i, j]) for i, j in zip(*tooclosem)]))

    return {'geom': npgeom.reshape((-1))}
def validate_and_fill_nuclei(
        nat,
        elea=None,
        elez=None,
        elem=None,
        mass=None,
        real=None,
        elbl=None,
        # processing details
        speclabel=True,
        nonphysical=False,
        mtol=1.e-3,
        verbose=1):
    """Check the nuclear identity arrays for consistency and fill in knowable values.

    Each None input is expanded to a (nat,)-array of None so the per-atom
    reconciliation below sees a uniform shape. Returns arrays 'elea', 'elez',
    'elem', 'mass', 'real', 'elbl' of length `nat`.
    """
    if elea is None:
        elea = np.asarray([None] * nat)
    else:
        # -1 equivalent to None
        elea = np.array([(None if at == -1 else at) for at in elea])

    if elez is None:
        elez = np.asarray([None] * nat)
    else:
        elez = np.array(elez)

    if elem is None:
        elem = np.asarray([None] * nat)
    else:
        elem = np.array(elem)

    if mass is None:
        mass = np.asarray([None] * nat)
    else:
        mass = np.array(mass)

    if real is None:
        real = np.asarray([None] * nat)
    else:
        real = np.array(real)

    if elbl is None:
        elbl = np.asarray([None] * nat)
    else:
        elbl = np.array(elbl)

    if not ((nat, ) == elea.shape == elez.shape == elem.shape == mass.shape == real.shape == elbl.shape):
        raise ValidationError(
            """Dimension mismatch ({}) among A ({}), Z ({}), E ({}), mass ({}), real ({}), and elbl({})""".format((
                nat, ), elea.shape, elez.shape, elem.shape, mass.shape, real.shape, elbl.shape))

    if nat:
        # Reconcile every per-atom spec (isotope, charge, symbol, mass,
        # ghosting, label) into a single consistent description.
        A, Z, E, mass, real, label = zip(* [
            reconcile_nucleus(
                A=elea[at],
                Z=elez[at],
                E=elem[at],
                mass=mass[at],
                real=real[at],
                label=elbl[at],
                speclabel=speclabel,
                nonphysical=nonphysical,
                mtol=mtol,
                verbose=verbose) for at in range(nat)
        ])
    else:
        A = Z = E = mass = real = label = []

    # FIX: `np.int`, `np.float`, `np.bool` aliases were removed in
    # NumPy 1.20+; use the builtins, which numpy maps to its own dtypes.
    return {
        'elea': np.array(A, dtype=int),
        'elez': np.array(Z, dtype=int),
        'elem': np.array(E),
        'mass': np.array(mass, dtype=float),
        'real': np.array(real, dtype=bool),
        'elbl': np.array(label)
    }
def validate_and_fill_fragments(nat,
                                fragment_separators=None,
                                fragment_types=None,
                                fragment_charges=None,
                                fragment_multiplicities=None):
    """Check consistency of fragment specifiers wrt type and length. For
    charge & multiplicity, scientific defaults are not computed or applied;
    rather, missing slots are filled with `None` for later processing.
    """
    if fragment_separators is None:
        # Without separators no per-fragment arrays may be given; the
        # molecule is a single real fragment with unassigned charge/mult.
        if not (fragment_types is None and fragment_charges is None and fragment_multiplicities is None):
            raise ValidationError(
                """Fragment quantities given without separation info: sep ({}), types ({}), chg ({}), and mult ({})""".
                format(fragment_separators, fragment_types, fragment_charges, fragment_multiplicities))
        frs = []  #np.array([], dtype=np.int) # if empty, needs to be both ndarray and int
        frt = ['Real']
        frc = [None]
        frm = [None]
    else:
        # Trial-split a dummy geometry to vet the separator indices.
        trial_geom = np.zeros((nat, 3))
        try:
            split_geom = np.split(trial_geom, fragment_separators, axis=0)
        except TypeError:
            raise ValidationError("""fragment_separators ({}) unable to perform trial np.split on geometry.""".format(
                fragment_separators))
        if any(len(f) == 0 for f in split_geom):
            if nat != 0:
                raise ValidationError(
                    """fragment_separators ({}) yields zero-length fragment(s) after trial np.split on geometry.""".
                    format(split_geom))
        if sum(len(f) for f in split_geom) != nat:
            raise ValidationError(
                """fragment_separators ({}) yields overlapping fragment(s) after trial np.split on geometry, possibly unsorted.""".
                format(split_geom))
        frs = fragment_separators
        nfr = len(split_geom)

        frt = ['Real'] * nfr if fragment_types is None else fragment_types
        if not all(t in ['Real', 'Ghost', 'Absent'] for t in frt):
            raise ValidationError("""fragment_types not among 'Real', 'Ghost', 'Absent': {}""".format(fragment_types))

        if fragment_charges is None:
            frc = [None] * nfr
        else:
            try:
                frc = [(f if f is None else float(f)) for f in fragment_charges]
            except TypeError:
                raise ValidationError("""fragment_charges not among None or float: {}""".format(fragment_charges))

        if fragment_multiplicities is None:
            frm = [None] * nfr
        elif all(f is None or (isinstance(f, (int, np.int64, long)) and f >= 1) for f in fragment_multiplicities):
            frm = fragment_multiplicities
        else:
            raise ValidationError(
                """fragment_multiplicities not among None or positive integer: {}""".format(fragment_multiplicities))

    if not (len(frt) == len(frc) == len(frm) == len(frs) + 1):
        raise ValidationError(
            """Dimension mismatch among fragment quantities: sep + 1 ({}), types ({}), chg ({}), and mult({})""".
            format(len(frs) + 1, len(frt), len(frc), len(frm)))

    return {'fragment_separators': list(frs), 'fragment_charges': frc, 'fragment_multiplicities': frm}
def validate_and_fill_unsettled_geometry(geom_unsettled, variables):
    """Vet mixed Cartesian/Z-matrix line lengths and (name, value) variable pairs."""
    lgeom = [len(g) for g in geom_unsettled]

    # First atom must be a bare atom line (0 entries) or Cartesian (3 entries).
    if lgeom[0] not in [0, 3]:
        raise ValidationError("""First line must be Cartesian or single atom.""")

    # Once any Cartesian line appears, every line must be Cartesian (3) or full Zmat (6).
    if any(l == 3 for l in lgeom) and not all((l in [3, 6]) for l in lgeom):
        raise ValidationError(
            """Mixing Cartesian and Zmat formats must occur in just that order once absolute frame established.""")

    # Zmat entry counts may only grow (lower triangular), except a reset to Cartesian (3).
    for prev, cur in zip(lgeom, lgeom[1:]):
        if cur < prev and cur != 3:
            raise ValidationError("""This is not how a Zmat works - aim for lower triangular: {} < {}""".format(
                cur, prev))

    if not all(len(v) == 2 for v in variables):
        raise ValidationError("""Variables should come in pairs: {}""".format(variables))
    vvars = [[str(v[0]), float(v[1])] for v in variables]

    return {'geom_unsettled': geom_unsettled, 'variables': vvars}
|
amjames/psi4
|
psi4/driver/qcdb/molparse/from_arrays.py
|
Python
|
lgpl-3.0
| 25,534
|
[
"Psi4"
] |
d6c1feb1232b0c275d06502e7e8e3634a8623408caecb0c69280e33aed81c05f
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
import itertools
import math
import numpy as np
from monty.json import MSONable
from . import coord_cython as cuc
# array size threshold for looping instead of broadcasting
LOOP_THRESHOLD = 1e6
def find_in_coord_list(coord_list, coord, atol=1e-8):
    """
    Find the indices of matches of a particular coord in a coord_list.

    Args:
        coord_list: List of coords to test
        coord: Specific coordinates
        atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
            array.

    Returns:
        Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
    """
    if len(coord_list) == 0:
        return []
    deltas = np.abs(np.array(coord_list) - np.array(coord)[None, :])
    return np.where((deltas < atol).all(axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
    """
    Tests if a particular coord is within a coord_list.

    Args:
        coord_list: List of coords to test
        coord: Specific coordinates
        atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
            array.

    Returns:
        True if coord is in the coord list.
    """
    matches = find_in_coord_list(coord_list, coord, atol=atol)
    return len(matches) > 0
def is_coord_subset(subset, superset, atol=1e-8):
    """
    Tests if all coords in subset are contained in superset.
    Doesn't use periodic boundary conditions.

    Args:
        subset, superset: List of coords

    Returns:
        True if all of subset is in superset.
    """
    sub = np.array(subset)
    sup = np.array(superset)
    # (n_sub, n_sup) table: True where a subset row matches a superset row.
    pairwise_close = (np.abs(sub[:, None, :] - sup[None, :, :]) < atol).all(axis=-1)
    any_close = np.any(pairwise_close, axis=-1)
    return np.all(any_close)
def coord_list_mapping(subset, superset, atol=1e-8):
    """
    Gives the index mapping from a subset to a superset.
    Subset and superset cannot contain duplicate rows.

    Args:
        subset, superset: List of coords

    Returns:
        list of indices such that superset[indices] = subset
    """
    sub = np.array(subset)
    sup = np.array(superset)
    inds = np.where(np.all(np.isclose(sub[:, None, :], sup[None, :, :], atol=atol), axis=2))[1]
    matched = sup[inds]
    # Diagnose failures: either rows are genuinely missing from superset,
    # or duplicates produced too many/too few matches.
    if not np.allclose(sub, matched, atol=atol):
        if not is_coord_subset(subset, superset):
            raise ValueError("subset is not a subset of superset")
    if not matched.shape == sub.shape:
        raise ValueError("Something wrong with the inputs, likely duplicates in superset")
    return inds
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
    """
    Gives the index mapping from a subset to a superset, with periodic
    boundary conditions. Superset cannot contain duplicate matching rows.

    Args:
        subset, superset: List of frac_coords

    Returns:
        list of indices such that superset[indices] = subset
    """
    # pylint: disable=I1101
    # The cython kernel expects a per-axis tolerance vector.
    tol = atol * np.ones(3)
    return cuc.coord_list_mapping_pbc(subset, superset, tol)
def get_linear_interpolated_value(x_values, y_values, x):
    """
    Returns an interpolated value by linear interpolation between two values.
    This method is written to avoid dependency on scipy, which causes issues on
    threading servers.

    Args:
        x_values: Sequence of x values.
        y_values: Corresponding sequence of y values
        x: Get value at particular x

    Returns:
        Value at x.
    """
    pairs = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
    # First sorted x at or above the query point.
    above = np.where(pairs[:, 0] >= x)[0]
    if len(above) == 0 or above[0] == 0:
        raise ValueError("x is out of range of provided x_values")
    hi = above[0]
    x1, y1 = pairs[hi - 1]
    x2, y2 = pairs[hi]
    return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
def all_distances(coords1, coords2):
    """
    Returns the distances between two lists of coordinates.

    Args:
        coords1: First set of cartesian coordinates.
        coords2: Second set of cartesian coordinates.

    Returns:
        2d array of cartesian distances. E.g the distance between
        coords1[i] and coords2[j] is distances[i,j]
    """
    a = np.array(coords1)
    b = np.array(coords2)
    squared = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    return squared ** 0.5
def pbc_diff(fcoords1, fcoords2):
    """
    Returns the 'fractional distance' between two coordinates taking into
    account periodic boundary conditions.

    Args:
        fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
            0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
            coord or any array of coords.
        fcoords2: Second set of fractional coordinates.

    Returns:
        Fractional distance. Each coordinate must have the property that
        abs(a) <= 0.5. Examples:
        pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
        pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
    """
    raw = np.subtract(fcoords1, fcoords2)
    # Wrap each component to the nearest image, i.e. into [-0.5, 0.5].
    return raw - np.round(raw)
def pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask=None, return_d2=False):
    """
    Returns the shortest vectors between two lists of coordinates taking into
    account periodic boundary conditions and the lattice.

    Args:
        lattice: lattice to use
        fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
            or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
            coord or any array of coords.
        fcoords2: Second set of fractional coordinates.
        mask (boolean array): Mask of matches that are not allowed.
            i.e. if mask[1,2] == True, then subset[1] cannot be matched
            to superset[2]
        return_d2 (boolean): whether to also return the squared distances

    Returns:
        array of displacement vectors from fcoords1 to fcoords2
        first index is fcoords1 index, second is fcoords2 index
    """
    # pylint: disable=I1101
    # Thin wrapper: all work is delegated to the compiled cython kernel.
    return cuc.pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask, return_d2)
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
    """
    Get the indices of all points in a fractional coord list that are
    equal to a fractional coord (with a tolerance), taking into account
    periodic boundary conditions.

    Args:
        fcoord_list: List of fractional coords
        fcoord: A specific fractional coord to test.
        atol: Absolute tolerance. Defaults to 1e-8.

    Returns:
        Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
    """
    if len(fcoord_list) == 0:
        return []
    diffs = np.array(fcoord_list) - np.array(fcoord)[None, :]
    # Wrap differences to the nearest periodic image before comparing.
    diffs -= np.round(diffs)
    return np.where((np.abs(diffs) < atol).all(axis=1))[0]
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
    """
    Tests if a particular fractional coord is within a fractional coord_list.

    Args:
        fcoord_list: List of fractional coords to test
        fcoord: A specific fractional coord to test.
        atol: Absolute tolerance. Defaults to 1e-8.

    Returns:
        True if coord is in the coord list.
    """
    matches = find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)
    return len(matches) > 0
def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):
    """
    Tests if all fractional coords in subset are contained in superset,
    taking periodic boundary conditions into account.

    Args:
        subset, superset: List of fractional coords
        atol (float or size 3 array): Tolerance for matching
        mask (boolean array): Mask of matches that are not allowed.
            i.e. if mask[1,2] == True, then subset[1] cannot be matched
            to superset[2]

    Returns:
        True if all of subset is in superset.
    """
    # pylint: disable=I1101
    sub = np.array(subset, dtype=np.float64)
    sup = np.array(superset, dtype=np.float64)
    if mask is None:
        masked = np.zeros((len(subset), len(superset)), dtype=np.int_)
    else:
        masked = np.array(mask, dtype=np.int_)
    # Kernel expects a per-axis tolerance vector.
    tols = np.zeros(3, dtype=np.float64) + atol
    return cuc.is_coord_subset_pbc(sub, sup, tols, masked)
def lattice_points_in_supercell(supercell_matrix):
    """
    Returns the list of points on the original lattice contained in the
    supercell in fractional coordinates (with the supercell basis).
    e.g. [[2,0,0],[0,1,0],[0,0,1]] returns [[0,0,0],[0.5,0,0]]

    Args:
        supercell_matrix: 3x3 matrix describing the supercell

    Returns:
        numpy array of the fractional coordinates
    """
    # Corners of the unit cube (all 0/1 triples, lexicographic order) mapped
    # through the supercell matrix bound the region to scan.
    corners = np.array(list(itertools.product((0, 1), repeat=3)))
    images = np.dot(corners, supercell_matrix)
    lo = np.min(images, axis=0)
    hi = np.max(images, axis=0) + 1

    xs = np.arange(lo[0], hi[0])[:, None] * np.array([1, 0, 0])[None, :]
    ys = np.arange(lo[1], hi[1])[:, None] * np.array([0, 1, 0])[None, :]
    zs = np.arange(lo[2], hi[2])[:, None] * np.array([0, 0, 1])[None, :]
    candidates = (xs[:, None, None] + ys[None, :, None] + zs[None, None, :]).reshape((-1, 3))

    # Keep candidates landing inside the half-open unit cell of the supercell.
    frac_points = np.dot(candidates, np.linalg.inv(supercell_matrix))
    in_cell = np.all(frac_points < 1 - 1e-10, axis=1) & np.all(frac_points >= -1e-10, axis=1)
    tvects = frac_points[in_cell]
    assert len(tvects) == round(abs(np.linalg.det(supercell_matrix)))
    return tvects
def barycentric_coords(coords, simplex):
    """
    Converts a list of coordinates to barycentric coordinates, given a
    simplex with d+1 points. Only works for d >= 2.

    Args:
        coords: list of n coords to transform, shape should be (n,d)
        simplex: list of coordinates that form the simplex, shape should be
            (d+1, d)

    Returns:
        a LIST of barycentric coordinates (even if the original input was 1d)
    """
    coords = np.atleast_2d(coords)
    # Basis spanned by the first d vertices relative to the last one.
    basis = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]
    partial = np.transpose(np.linalg.solve(basis, np.transpose(coords - simplex[-1])))
    # Weights sum to 1; the last one is determined by the others.
    final = 1 - np.sum(partial, axis=-1)[:, None]
    return np.append(partial, final, axis=-1)
def get_angle(v1, v2, units="degrees"):
    """
    Calculates the angle between two vectors.

    Args:
        v1: Vector 1
        v2: Vector 2
        units: "degrees" or "radians". Defaults to "degrees".

    Returns:
        Angle between them in degrees.
    """
    cos_theta = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
    # Clamp against floating-point drift outside [-1, 1].
    cos_theta = max(-1, min(1, cos_theta))
    theta = math.acos(cos_theta)
    if units == "radians":
        return theta
    if units == "degrees":
        return math.degrees(theta)
    raise ValueError(f"Invalid units {units}")
class Simplex(MSONable):
    """
    A generalized simplex object. See http://en.wikipedia.org/wiki/Simplex.
    .. attribute: space_dim
        Dimension of the space. Usually, this is 1 more than the simplex_dim.
    .. attribute: simplex_dim
        Dimension of the simplex coordinate space.
    """
    def __init__(self, coords):
        """
        Initializes a Simplex from vertex coordinates.
        Args:
            coords ([[float]]): Coords of the vertices of the simplex. E.g.,
                [[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].
        """
        self._coords = np.array(coords)
        # Rows of _coords are vertices, columns are spatial components, so
        # space_dim here equals the vertex count and simplex_dim the
        # coordinate dimension.
        self.space_dim, self.simplex_dim = self._coords.shape
        self.origin = self._coords[-1]
        if self.space_dim == self.simplex_dim + 1:
            # Full-dimensional simplex only: cache the augmented matrix
            # [coords | 1] and its inverse for barycentric conversions.
            # precompute augmented matrix for calculating bary_coords
            self._aug = np.concatenate([coords, np.ones((self.space_dim, 1))], axis=-1)
            self._aug_inv = np.linalg.inv(self._aug)
    @property
    def volume(self):
        """
        Volume of the simplex.
        """
        # |det([coords | 1])| / d! -- requires the precomputed _aug, so this
        # raises AttributeError for a non-full-dimensional simplex.
        return abs(np.linalg.det(self._aug)) / math.factorial(self.simplex_dim)
    def bary_coords(self, point):
        """
        Args:
            point (): Point coordinates.
        Returns:
            Barycentric coordinations.
        """
        try:
            # Augment the point with a trailing 1 to match _aug's shape.
            return np.dot(np.concatenate([point, [1]]), self._aug_inv)
        except AttributeError:
            # _aug_inv only exists for full-dimensional simplices.
            raise ValueError("Simplex is not full-dimensional")
    def point_from_bary_coords(self, bary_coords):
        """
        Args:
            bary_coords (): Barycentric coordinates
        Returns:
            Point coordinates
        """
        try:
            # Weighted sum of vertices; drop the augmentation column of 1s.
            return np.dot(bary_coords, self._aug[:, :-1])
        except AttributeError:
            raise ValueError("Simplex is not full-dimensional")
    def in_simplex(self, point, tolerance=1e-8):
        """
        Checks if a point is in the simplex using the standard barycentric
        coordinate system algorithm.
        Taking an arbitrary vertex as an origin, we compute the basis for the
        simplex from this origin by subtracting all other vertices from the
        origin. We then project the point into this coordinate system and
        determine the linear decomposition coefficients in this coordinate
        system. If the coeffs satisfy that all coeffs >= 0, the composition
        is in the facet.
        Args:
            point ([float]): Point to test
            tolerance (float): Tolerance to test if point is in simplex.
        """
        return (self.bary_coords(point) >= -tolerance).all()
def line_intersection(self, point1, point2, tolerance=1e-8):
"""
Computes the intersection points of a line with a simplex
Args:
point1, point2 ([float]): Points that determine the line
Returns:
points where the line intersects the simplex (0, 1, or 2)
"""
b1 = self.bary_coords(point1)
b2 = self.bary_coords(point2)
l = b1 - b2
# don't use barycentric dimension where line is parallel to face
valid = np.abs(l) > 1e-10
# array of all the barycentric coordinates on the line where
# one of the values is 0
possible = b1 - (b1[valid] / l[valid])[:, None] * l
barys = []
for p in possible:
# it's only an intersection if its in the simplex
if (p >= -tolerance).all():
found = False
# don't return duplicate points
for b in barys:
if np.all(np.abs(b - p) < tolerance):
found = True
break
if not found:
barys.append(p)
assert len(barys) < 3
return [self.point_from_bary_coords(b) for b in barys]
def __eq__(self, other):
for p in itertools.permutations(self._coords):
if np.allclose(p, other.coords):
return True
return False
def __hash__(self):
return len(self._coords)
def __repr__(self):
output = [
f"{self.simplex_dim}-simplex in {self.space_dim}D space",
"Vertices:",
]
for coord in self._coords:
output.append("\t({})".format(", ".join(map(str, coord))))
return "\n".join(output)
def __str__(self):
return self.__repr__()
@property
def coords(self):
"""
Returns a copy of the vertex coordinates in the simplex.
"""
return self._coords.copy()
|
vorwerkc/pymatgen
|
pymatgen/util/coord.py
|
Python
|
mit
| 15,668
|
[
"pymatgen"
] |
a07eca6e9eb7fead688161827926e64de3d10f264906ecd4f500a50d2b13ee27
|
from math import sqrt, pi
import numpy as np
from gpaw.xc.gga import GGA
from gpaw.utilities.blas import axpy
from gpaw.fd_operators import Gradient
from gpaw.lfc import LFC
from gpaw.sphere.lebedev import weight_n
class MGGA(GGA):
    """Meta-GGA exchange-correlation functional.

    Extends GGA with a dependence on the kinetic-energy density tau,
    which is computed from the wave functions.
    """
    orbital_dependent = True
    def __init__(self, kernel, nn=1):
        """Meta GGA functional.
        nn: int
            Number of neighbor grid points to use for FD stencil for
            wave function gradient.
        """
        self.nn = nn
        GGA.__init__(self, kernel)
    def set_grid_descriptor(self, gd):
        # No MGGA-specific grid setup; defer entirely to the GGA base class.
        GGA.set_grid_descriptor(self, gd)
    def get_setup_name(self):
        """PAW setups are reused from PBE."""
        return 'PBE'
    def initialize(self, density, hamiltonian, wfs, occupations):
        """Cache the objects and operators needed to evaluate tau."""
        self.wfs = wfs
        # Pseudo core kinetic-energy density contributions, one per atom.
        self.tauct = LFC(wfs.gd,
                         [[setup.tauct] for setup in wfs.setups],
                         forces=True, cut=True)
        self.tauct_G = None
        self.dedtaut_sG = None
        self.restrict = hamiltonian.restrictor.apply
        self.interpolate = density.interpolator.apply
        # Finite-difference gradient operators for the three directions.
        self.taugrad_v = [Gradient(wfs.gd, v, n=self.nn, dtype=wfs.dtype).apply
                          for v in range(3)]
    def set_positions(self, spos_ac):
        """Update atomic positions and rebuild the core tau grid."""
        self.tauct.set_positions(spos_ac)
        if self.tauct_G is None:
            self.tauct_G = self.wfs.gd.empty()
        self.tauct_G[:] = 0.0
        self.tauct.add(self.tauct_G)
    def calculate_gga(self, e_g, nt_sg, v_sg, sigma_xg, dedsigma_xg):
        """Evaluate the MGGA kernel on the fine grid, including tau terms."""
        taut_sG = self.wfs.calculate_kinetic_energy_density(self.taugrad_v)
        taut_sg = np.empty_like(nt_sg)
        for taut_G, taut_g in zip(taut_sG, taut_sg):
            # Add the (spin-shared) core contribution before interpolating.
            taut_G += 1.0 / self.wfs.nspins * self.tauct_G
            self.interpolate(taut_G, taut_g)
        dedtaut_sg = np.empty_like(nt_sg)
        self.kernel.calculate(e_g, nt_sg, v_sg, sigma_xg, dedsigma_xg,
                              taut_sg, dedtaut_sg)
        self.dedtaut_sG = self.wfs.gd.empty(self.wfs.nspins)
        self.ekin = 0.0
        for s in range(self.wfs.nspins):
            self.restrict(dedtaut_sg[s], self.dedtaut_sG[s])
            # Kinetic-energy correction from the tau-dependence.
            self.ekin -= self.wfs.gd.integrate(
                self.dedtaut_sG[s] * (taut_sG[s] -
                                      self.tauct_G / self.wfs.nspins))
    def apply_orbital_dependent_hamiltonian(self, kpt, psit_xG,
                                            Htpsit_xG, dH_asp):
        """Apply -1/2 div(dE/dtau grad) to each orbital in place."""
        a_G = self.wfs.gd.empty(dtype=psit_xG.dtype)
        for psit_G, Htpsit_G in zip(psit_xG, Htpsit_xG):
            for v in range(3):
                self.taugrad_v[v](psit_G, a_G, kpt.phase_cd)
                self.taugrad_v[v](self.dedtaut_sG[kpt.s] * a_G, a_G,
                                  kpt.phase_cd)
                axpy(-0.5, a_G, Htpsit_G)
    def calculate_paw_correction(self, setup, D_sp, dEdD_sp=None,
                                 addcoredensity=True, a=None):
        """PAW correction; stashes state consumed by calculate_gga_radial."""
        assert not hasattr(self, 'D_sp')
        self.D_sp = D_sp
        self.n = 0
        self.ae = True
        self.c = setup.xc_correction
        self.dEdD_sp = dEdD_sp
        if self.c.tau_npg is None:
            self.c.tau_npg, self.c.taut_npg = self.initialize_kinetic(self.c)
            # Fixed: was a Python 2 print statement (syntax error on py3).
            print('TODO: tau_ypg is HUGE! There must be a better way.')
        E = GGA.calculate_paw_correction(self, setup, D_sp, dEdD_sp,
                                         addcoredensity, a)
        del self.D_sp, self.n, self.ae, self.c, self.dEdD_sp
        return E
    def calculate_gga_radial(self, e_g, n_sg, v_sg, sigma_xg, dedsigma_xg):
        """Radial (PAW sphere) kernel evaluation; alternates AE/pseudo passes."""
        nspins = len(n_sg)
        if self.ae:
            tau_pg = self.c.tau_npg[self.n]
            tauc_g = self.c.tauc_g / (sqrt(4 * pi) * nspins)
            sign = 1.0
        else:
            tau_pg = self.c.taut_npg[self.n]
            tauc_g = self.c.tauct_g / (sqrt(4 * pi) * nspins)
            sign = -1.0
        tau_sg = np.dot(self.D_sp, tau_pg) + tauc_g
        dedtau_sg = np.empty_like(tau_sg)
        self.kernel.calculate(e_g, n_sg, v_sg, sigma_xg, dedsigma_xg,
                              tau_sg, dedtau_sg)
        if self.dEdD_sp is not None:
            self.dEdD_sp += (sign * weight_n[self.n] *
                             np.inner(dedtau_sg * self.c.rgd.dv_g, tau_pg))
        self.n += 1
        # After the last angular point, switch from all-electron to pseudo.
        if self.n == len(weight_n):
            self.n = 0
            self.ae = False
    def calculate_spherical(self, rgd, n_sg, v_sg):
        raise NotImplementedError
    def add_forces(self, F_av):
        """Add the force contribution from the core tau localized functions."""
        dF_av = self.tauct.dict(derivative=True)
        self.tauct.derivative(self.dedtaut_sG.sum(0), dF_av)
        for a, dF_v in dF_av.items():
            F_av[a] += dF_v[0]
    def estimate_memory(self, mem):
        bytecount = self.wfs.gd.bytecount()
        mem.subnode('MGGA arrays', (1 + self.wfs.nspins) * bytecount)
    def initialize_kinetic(self, xccorr):
        """Precompute AE and pseudo kinetic-energy density matrices."""
        nii = xccorr.nii
        nn = len(xccorr.rnablaY_nLv)
        ng = len(xccorr.phi_jg[0])
        tau_npg = np.zeros((nn, nii, ng))
        taut_npg = np.zeros((nn, nii, ng))
        self.create_kinetic(xccorr, nn, xccorr.phi_jg, tau_npg)
        self.create_kinetic(xccorr, nn, xccorr.phit_jg, taut_npg)
        return tau_npg, taut_npg
    def create_kinetic(self, x, ny, phi_jg, tau_ypg):
        """Short title here.
        kinetic expression is::
                                 __         __
        tau_s = 1/2 Sum_{i1,i2} D(s,i1,i2) \/phi_i1 . \/phi_i2 +tauc_s
        here the orbital dependent part is calculated::
        __         __
        \/phi_i1 . \/phi_i2 =
                    __    __
                    \/YL1.\/YL2 phi_j1 phi_j2 +YL1 YL2 dphi_j1 dphi_j2
                                                       ------  ------
                                                         dr     dr
        __    __
        \/YL1.\/YL2 [y] = Sum_c A[L1,c,y] A[L2,c,y] / r**2
        """
        nj = len(phi_jg)
        ni = len(x.jlL)
        nii = ni * (ni + 1) // 2
        dphidr_jg = np.zeros(np.shape(phi_jg))
        for j in range(nj):
            phi_g = phi_jg[j]
            x.rgd.derivative(phi_g, dphidr_jg[j])
        # Second term:
        for y in range(ny):
            i1 = 0
            p = 0
            Y_L = x.Y_nL[y]
            for j1, l1, L1 in x.jlL:
                for j2, l2, L2 in x.jlL[i1:]:
                    c = Y_L[L1] * Y_L[L2]
                    temp = c * dphidr_jg[j1] * dphidr_jg[j2]
                    tau_ypg[y, p, :] += temp
                    p += 1
                i1 += 1
        # First term:
        for y in range(ny):
            i1 = 0
            p = 0
            rnablaY_Lv = x.rnablaY_nLv[y, :x.Lmax]
            Ax_L = rnablaY_Lv[:, 0]
            Ay_L = rnablaY_Lv[:, 1]
            Az_L = rnablaY_Lv[:, 2]
            for j1, l1, L1 in x.jlL:
                for j2, l2, L2 in x.jlL[i1:]:
                    temp = (Ax_L[L1] * Ax_L[L2] + Ay_L[L1] * Ay_L[L2]
                            + Az_L[L1] * Az_L[L2])
                    temp *= phi_jg[j1] * phi_jg[j2]
                    # Divide by r**2; copy the second point into r=0 to
                    # avoid the singularity at the origin.
                    temp[1:] /= x.rgd.r_g[1:]**2
                    temp[0] = temp[1]
                    tau_ypg[y, p, :] += temp
                    p += 1
                i1 += 1
        tau_ypg *= 0.5
        return
|
ajylee/gpaw-rtxs
|
gpaw/xc/mgga.py
|
Python
|
gpl-3.0
| 7,381
|
[
"GPAW"
] |
d0f7b99524cac289d1cd8451817afc27c515974f5d41d49cae3d4818e3b09025
|
import os
import sys
import subprocess
import shutil
import contextlib
import json
current_dir = os.path.dirname(__file__)
_base_dir = None
def which(cmd):
    """Locate *cmd* on PATH; falls back to distutils on old Pythons."""
    finder = getattr(shutil, "which", None)
    if finder is not None:
        return finder(cmd)
    import distutils.spawn
    return distutils.spawn.find_executable(cmd)
def base_dir():
    """Return (and cache) the parent directory of the active virtualenv."""
    global _base_dir
    if _base_dir is not None:
        return _base_dir
    try:
        venv = os.environ["VIRTUAL_ENV"]
    except KeyError:
        print("This command should only be run from inside a virtual environment")
        raise
    _base_dir = os.path.dirname(venv)
    return _base_dir
def script_path(*args):
    """Return a path inside the directory that contains this script."""
    return os.path.join(os.path.dirname(__file__), *args)
def make_path(*args):
    """Join *args* onto the project base directory (the virtualenv parent)."""
    root = base_dir()
    return os.path.join(root, *args)
def make_dir(*args):
    """Ensure the directory *args* (relative to the base dir) exists.

    Returns the path; creation errors (e.g. already exists) are ignored.
    """
    target = make_path(*args)
    try:
        os.makedirs(target)
    except OSError:
        pass  # best effort: typically the directory already exists
    return target
def make_dirs(*args):
    """Create each named directory under the project base directory."""
    for dirname in args:
        make_dir(dirname)
def make_file(path, content=None):
    """Create (or truncate) *path*, writing *content* when it is non-empty."""
    with open(path, "w") as out:
        if content:
            out.write(content)
def delete_files(*files):
    """Remove each given file or directory tree under the base directory."""
    for name in files:
        target = make_path(name)
        if os.path.isfile(target):
            os.remove(target)
        elif os.path.isdir(target):
            shutil.rmtree(target, ignore_errors=True)
@contextlib.contextmanager
def cd(*args):
    """Context manager: temporarily chdir into the project path *args*.

    Bug fix: the original skipped the chdir back when the with-body
    raised; the restore now runs in a ``finally`` block.
    """
    path = make_path(*args)
    orig = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(orig)
def can_run(cmd):
    """Return True when *cmd* executes successfully, False otherwise."""
    try:
        run(cmd)
    except subprocess.CalledProcessError:
        return False
    return True
def run(cmd, **kwargs):
    """Run *cmd* through the shell and return its stdout as text.

    On failure, prints the captured output and the exit status, then
    re-raises subprocess.CalledProcessError.
    """
    try:
        return subprocess.check_output(cmd, universal_newlines=True, shell=True, **kwargs)
    except subprocess.CalledProcessError as ex:
        print(ex.output)
        # Bug fix: the original printed the command under a "Return code"
        # label; report command and return code unambiguously.
        print("Command %r failed with return code %s" % (ex.cmd, ex.returncode))
        raise
class Project(object):
    """Scaffolds and manages a Django + npm/bower project directory."""
    def __init__(self, path, name):
        """
        Args:
            path: base directory that contains the project tree.
            name: project name, also used in npm/bower metadata.
        """
        self.base_dir = path
        self.name = name
        # Put project-local node binaries first on PATH for subcommands.
        env = os.environ.copy()
        parts = env.get("PATH", "").split(os.pathsep)
        parts.insert(0, self.path("node_modules/.bin/"))
        env["PATH"] = os.pathsep.join(parts)
        self.env = env
    def echo(self, template, *args, **kwargs):
        """Print *template* formatted with the given arguments."""
        stmt = template.format(*args, **kwargs)
        print(stmt)
    def check_installed(self, cmd, error=None, action=None, test=None):
        """Ensure *cmd* is available, installing it via *action* if missing.

        When *test* is given, a successful run of *test* counts as
        "installed" instead of a PATH lookup. Raises when the tool is
        missing and no install action is provided.

        Bug fix: the original condition was inverted -- it raised when
        the tool WAS present and treated a successful *test* run as
        "missing".
        """
        missing = (not which(cmd)) if not test else (not can_run(test))
        if missing:
            self.echo("{0} is not found.", cmd)
            if action:
                if callable(action):
                    action()
                else:
                    self.run(action)
                self.echo("Successfully installed {0}.", cmd)
            else:
                raise Exception(error or "Failed to run: %r" % cmd)
    def npm_install(self, packages, flags="--save-dev"):
        """Install one or more npm packages into the project."""
        if not isinstance(packages, (list, tuple)):
            packages = [packages]
        for pkg in packages:
            self.echo("Installing %s" % pkg)
            self.run("npm install %s %s" % (pkg, flags))
    def prepare(self):
        """Verify the node toolchain and set up npm/bower prerequisites."""
        self.check_installed("node")
        self.check_installed("npm")
        self.prepare_npm()
        self.check_installed("bower", action=lambda: self.npm_install("bower", "--save"))
        # self.check_installed("gulp", action=lambda: self.npm_install("gulp"))
        # packages = "gulp-ruby-sass,gulp-autoprefixer,gulp-minify-css,gulp-rename".split(",")
        # for pkg in packages:
        #     self.npm_install(pkg)
    def run(self, cmd):
        """Run *cmd* inside the project dir with the augmented PATH."""
        run(cmd, cwd=self.path(), env=self.env)
    def create(self):
        """Generate the Django project skeleton from the bundled template."""
        run("django-admin.py startproject %s src --template %s" % (
            self.name, script_path("templates", "project_templates"))
        )
    def create_app(self, name):
        """Generate a Django app *name* from the bundled app template."""
        with cd(self.base_dir):
            run("python manage.py startapp %s %s --template %s -e=bowerrc,py" % (
                name,
                self.path(self.name),
                script_path("templates", "app_templates")
            )
            )
    def path(self, *args):
        """Join *args* onto this project's base directory."""
        return os.path.join(self.base_dir, *args)
    def write_file(self, name, content, kind=None):
        """Write *content* to *name*; JSON-encodes when kind == "json"."""
        with open(name, "w") as fh:
            if kind == "json":
                content = json.dumps(content, indent=4)
            fh.write(content)
    def prepare_npm(self):
        """Write a minimal package.json for the project."""
        values = {
            "name": self.name,
            "version": "1.0.0",
            "description": "",
            "main": "index.js",
            "dependencies": {},
            "devDependencies": {},
            "scripts": {
                "test": "echo \"Error: no test specified\" && exit 1"
            },
            "author": "",
            "license": "ISC"
        }
        self.write_file(self.path("package.json"), values, "json")
    def prepare_bower(self):
        """Write requirejs config, .bowerrc and bower.json, then install."""
        assets = self.path(self.name, "assets")
        config_path = self.path(self.name, "assets", "js/config.js")
        self.write_file(config_path, """
        requirejs.config({
            map: {
                '*': {
                    'underscore': 'lodash'
                }
            },
            path:{
                "jquery": "../bower_components/jquery/dist/jquery",
                "backbone": "../bower_components/backbone/backbone",
                "lodash": "../bower_components/lodash/lodash",
                "requireLib": "../bower_components/requirejs/require",
            }
        })
        """)
        values = {
            "cwd": assets,
        }
        # NOTE(review): this writes ".bowerrc" relative to the current
        # working directory, then writes the project-local one below --
        # looks redundant; confirm which copy is actually consumed.
        self.write_file(".bowerrc", values, "json")
        self.write_file(self.path(assets, "bower.json"), {
            "name": self.name,
            "version": '0.0.0',
            "moduleType": [
                'amd'
            ],
            "private": True,
            "ignore": [
                '**/.*',
                'node_modules',
                'bower_components',
                'test',
                'tests'
            ]
        }, "json")
        with open(self.path(".bowerrc"), "w") as fh:
            fh.write(json.dumps(values))
        self.run("bower install jquery backbone font-awesome lodash requirejs --save")
    def manage(self, cmd):
        """Run a Django management command inside the project directory."""
        with cd(self.path()):
            run("python manage.py %s" % cmd)
def main():
    """Bootstrap a new Django project skeleton inside the virtualenv base dir."""
    # delete_files("src", "static", "media", "var", "log", ".env")
    make_dirs("src", "static", "media", "var", "log")
    # NOTE(review): make_file takes the path as-is, so ".env" is created in
    # the current working directory, not under base_dir -- confirm intended.
    make_file(".env")
    # Project name comes from the first CLI argument.
    name = sys.argv[1]
    prj = Project(make_path("src"), name)
    prj.prepare()
    prj.create()
    prj.prepare_bower()
    prj.manage("makemigrations auth")
    prj.manage("migrate")
    prj.run("git init .")
    prj.run("npm install gulp gulp-sass")
|
vivsh/django-ginger
|
ginger/scripts/bootstrap.py
|
Python
|
mit
| 6,698
|
[
"GULP"
] |
79ea0f5b20ff96edc5db179d41916d787ab34d10a243415fccce57177b9fb7eb
|
import os
import shutil
import argparse
import subprocess
def pg_ctl(database_path, database_version, mod='start'):
    """
    Start/Stop PostgreSQL with variable data_directory.
    mod = [start, end, restart, reload]
    """
    pg_conf = '/etc/postgresql/%s/main/postgresql.conf' % database_version
    # Quote the path as required by the postgresql.conf syntax.
    new_data_directory = "'%s'" % database_path
    # Rewrite the data_directory line in place, then apply it by invoking
    # the requested service action.
    cmd = 'sed -i "s|data_directory = .*|data_directory = %s|g" %s' % (new_data_directory, pg_conf)
    subprocess.call(cmd, shell=True)
    subprocess.call('service postgresql %s' % mod, shell=True)
def set_pg_permission(database_path):
    """
    Set the correct permissions for a newly created PostgreSQL data_directory.
    """
    # postgres must own the data directory and it must be private (0700).
    for template in ('chown -R postgres:postgres %s', 'chmod -R 0700 %s'):
        subprocess.call(template % database_path, shell=True)
def create_pg_db(user, password, database, database_path, database_version):
    """
    Initialize PostgreSQL Database, add database user and create the Galaxy Database.
    """
    pg_bin = "/usr/lib/postgresql/%s/bin/" % database_version
    os.makedirs(database_path)
    set_pg_permission(database_path)
    # initialize a new postgres database
    subprocess.call("su - postgres -c '%s --auth=trust --encoding UTF8 --pgdata=%s'" % (os.path.join(pg_bin, 'initdb'),
                                                                                       database_path), shell=True)
    # Reuse the distribution's snakeoil SSL certificate for the server.
    shutil.copy('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(database_path, 'server.crt'))
    shutil.copy('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(database_path, 'server.key'))
    set_pg_permission(os.path.join(database_path, 'server.crt'))
    set_pg_permission(os.path.join(database_path, 'server.key'))
    # change data_directory in postgresql.conf and start the service with the new location
    pg_ctl(database_path, database_version, 'start')
    # Create the admin role and the database, then stop the server again.
    subprocess.call("""su - postgres -c "psql --command \\"CREATE USER %s WITH SUPERUSER PASSWORD '%s'\\";"
                    """ % (user, password), shell=True)
    subprocess.call("su - postgres -c 'createdb -O %s %s'" % (user, database), shell=True)
    subprocess.call('service postgresql stop', shell=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Initializing a complete Galaxy Database with Tool Shed Tools.')
parser.add_argument("--dbuser", required=True,
help="Username of the Galaxy Database Administrator. That name will be specified in the "
"universe_wsgi.xml file.")
parser.add_argument("--dbpassword", required=True,
help="Password of the Galaxy Database Administrator. That name will be specified in the "
"universe_wsgi.xml file.")
parser.add_argument("--db-name", dest='db_name', required=True,
help="Galaxy Database name. That name will be specified in the universe_wsgi.xml file.")
parser.add_argument("--dbpath",
help="Galaxy Database path.")
parser.add_argument("--dbversion", default='11',
help="Postgresql server major version.")
options = parser.parse_args()
"""
Initialize the Galaxy Database + adding an Admin user.
This database is the default one, created by the Dockerfile.
The user can set a volume (-v /path/:/export/) to get a persistent database.
"""
create_pg_db(options.dbuser, options.dbpassword, options.db_name, options.dbpath, options.dbversion)
|
chambm/docker-galaxy-stable
|
galaxy/setup_postgresql.py
|
Python
|
mit
| 3,592
|
[
"Galaxy"
] |
250a538391f388d2bbb9f50f65ba9ff2fc54b8bc15b5e64ccacc599a38475fb6
|
# Copyright 2003-2009 by Bartek Wilczynski. All rights reserved.
# Copyright 2012-2013 by Michiel JL de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tools for sequence motif analysis.
Bio.motifs contains the core Motif class containing various I/O methods
as well as methods for motif comparisons and motif searching in sequences.
It also includes functionality for parsing output from the AlignACE, MEME,
and MAST programs, as well as files in the TRANSFAC format.
Bio.motifs is replacing the older and now obsolete Bio.Motif module.
"""
from __future__ import print_function
from Bio._py3k import range
import math
def create(instances, alphabet=None):
    """Build a Motif object from a collection of instance sequences."""
    wrapped = Instances(instances, alphabet)
    return Motif(instances=wrapped, alphabet=alphabet)
def parse(handle, format):
    """Parses an output file of motif finding programs.
    Currently supported formats (case is ignored):
     - AlignAce:      AlignAce output file format
     - MEME:          MEME output file motif
     - MAST:          MAST output file motif
     - TRANSFAC:      TRANSFAC database file format
     - pfm:           JASPAR-style position-frequency matrix
     - jaspar:        JASPAR-style multiple PFM format
     - sites:         JASPAR-style sites file
    As files in the pfm and sites formats contain only a single motif,
    it is easier to use Bio.motifs.read() instead of Bio.motifs.parse()
    for those.
    For example:
    >>> from Bio import motifs
    >>> with open("Motif/alignace.out") as handle:
    ...     for m in motifs.parse(handle, "AlignAce"):
    ...         print(m.consensus)
    ...
    TCTACGATTGAG
    CTGCAGCTAGCTACGAGTGAG
    GTGCTCTAAGCATAGTAGGCG
    GCCACTAGCAGAGCAGGGGGC
    CGACTCAGAGGTT
    CCACGCTAAGAGAGGTGCCGGAG
    GCGCGTCGCTGAGCA
    GTCCATCGCAAAGCGTGGGGC
    GGGATCAGAGGGCCG
    TGGAGGCGGGG
    GACCAGAGCTTCGCATGGGGG
    GGCGTGCGTG
    GCTGGTTGCTGTTCATTAGG
    GCCGGCGGCAGCTAAAAGGG
    GAGGCCGGGGAT
    CGACTCGTGCTTAGAAGG
    """
    format = format.lower()
    # Single-motif-per-record parsers share the module.read(handle) shape;
    # bind the right parser module, then dispatch once below.
    if format == "alignace":
        from Bio.motifs import alignace as parser_module
    elif format == "meme":
        from Bio.motifs import meme as parser_module
    elif format == "mast":
        from Bio.motifs import mast as parser_module
    elif format == "transfac":
        from Bio.motifs import transfac as parser_module
    elif format in ('pfm', 'sites', 'jaspar'):
        # JASPAR-family formats need the format name passed through.
        from Bio.motifs import jaspar
        return jaspar.read(handle, format)
    else:
        raise ValueError("Unknown format %s" % format)
    return parser_module.read(handle)
def read(handle, format):
    """Reads a motif from a handle using a specified file-format.
    This supports the same formats as Bio.motifs.parse(), but
    only for files containing exactly one motif.  For example,
    reading a JASPAR-style pfm file:
    >>> from Bio import motifs
    >>> with open("motifs/SRF.pfm") as handle:
    ...     m = motifs.read(handle, "pfm")
    >>> m.consensus
    Seq('GCCCATATATGG', IUPACUnambiguousDNA())
    Or a single-motif MEME file,
    >>> from Bio import motifs
    >>> with open("motifs/meme.out") as handle:
    ...     m = motifs.read(handle, "meme")
    >>> m.consensus
    Seq('CTCAATCGTA', IUPACUnambiguousDNA())
    If the handle contains no records, or more than one record,
    an exception is raised:
    >>> from Bio import motifs
    >>> with open("motifs/alignace.out") as handle:
    ...     motif = motifs.read(handle, "AlignAce")
    Traceback (most recent call last):
        ...
    ValueError: More than one motif found in handle
    If however you want the first motif from a file containing
    multiple motifs this function would raise an exception (as
    shown in the example above). Instead use:
    >>> from Bio import motifs
    >>> with open("motifs/alignace.out") as handle:
    ...     record = motifs.parse(handle, "alignace")
    >>> motif = record[0]
    >>> motif.consensus
    Seq('TCTACGATTGAG', IUPACUnambiguousDNA())
    Use the Bio.motifs.parse(handle, format) function if you want
    to read multiple records from the handle.
    """
    format = format.lower()
    records = parse(handle, format)
    count = len(records)
    if count == 0:
        raise ValueError("No motifs found in handle")
    if count > 1:
        raise ValueError("More than one motif found in handle")
    return records[0]
class Instances(list):
    """
    A class representing instances of sequence motifs.
    """
    def __init__(self, instances=None, alphabet=None):
        """Build the list of instances, validating lengths and alphabets.

        All instances must share one length; the alphabet is taken from
        the instances when not given, defaulting to unambiguous DNA.
        """
        from Bio.Alphabet import IUPAC
        from Bio.Seq import Seq
        if instances is None:
            instances = []
        self.length = None
        # First pass: check length consistency and infer the alphabet.
        for instance in instances:
            if self.length is None:
                self.length = len(instance)
            elif self.length != len(instance):
                message = "All instances should have the same length (%d found, %d expected)" % (len(instance), self.length)
                raise ValueError(message)
            try:
                a = instance.alphabet
            except AttributeError:
                # The instance is a plain string
                continue
            if alphabet is None:
                alphabet = a
            elif alphabet != a:
                raise ValueError("Alphabets are inconsistent")
        if alphabet is None or alphabet.letters is None:
            # If we didn't get a meaningful alphabet from the instances,
            # assume it is DNA.
            alphabet = IUPAC.unambiguous_dna
        # Second pass: coerce plain strings to Seq objects and store them.
        for instance in instances:
            if not isinstance(instance, Seq):
                sequence = str(instance)
                instance = Seq(sequence, alphabet=alphabet)
            self.append(instance)
        self.alphabet = alphabet
    def __str__(self):
        text = ""
        for instance in self:
            text += str(instance) + "\n"
        return text
    def count(self):
        """Return per-position letter counts as {letter: [count-per-column]}."""
        counts = {}
        for letter in self.alphabet.letters:
            counts[letter] = [0] * self.length
        for instance in self:
            for position, letter in enumerate(instance):
                counts[letter][position] += 1
        return counts
    def search(self, sequence):
        """
        a generator function, returning found positions of motif instances in a given sequence
        """
        for pos in range(0, len(sequence) - self.length + 1):
            for instance in self:
                if str(instance) == str(sequence[pos:pos + self.length]):
                    yield (pos, instance)
                    break  # no other instance will fit (we don't want to return multiple hits)
    def reverse_complement(self):
        """Return a new Instances with every instance reverse-complemented."""
        instances = Instances(alphabet=self.alphabet)
        instances.length = self.length
        for instance in self:
            instance = instance.reverse_complement()
            instances.append(instance)
        return instances
class Motif(object):
    """
    A class representing sequence motifs.
    """
    def __init__(self, alphabet=None, instances=None, counts=None):
        """Initialize from either instances or a counts matrix (not both)."""
        from . import matrix
        from Bio.Alphabet import IUPAC
        self.name = ""
        if counts is not None and instances is not None:
            # Bug fix: was `raise Exception(ValueError, "...")`, which raised
            # a generic Exception carrying the ValueError class as an arg.
            raise ValueError("Specify either instances or counts, don't specify both")
        elif counts is not None:
            if alphabet is None:
                alphabet = IUPAC.unambiguous_dna
            self.instances = None
            self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)
            self.length = self.counts.length
        elif instances is not None:
            self.instances = instances
            alphabet = self.instances.alphabet
            counts = self.instances.count()
            self.counts = matrix.FrequencyPositionMatrix(alphabet, counts)
            self.length = self.counts.length
        else:
            self.counts = None
            self.instances = None
            self.length = None
            if alphabet is None:
                alphabet = IUPAC.unambiguous_dna
        self.alphabet = alphabet
        self.pseudocounts = None
        self.background = None
        self.mask = None
    def __get_mask(self):
        return self.__mask
    def __set_mask(self, mask):
        if self.length is None:
            self.__mask = ()
        elif mask is None:
            self.__mask = (1,) * self.length
        elif len(mask) != self.length:
            # Bug fix: the message arguments were passed as a second
            # positional argument instead of %-formatted into the string.
            raise ValueError("The length (%d) of the mask is inconsistent with the length (%d) of the motif" % (len(mask), self.length))
        elif isinstance(mask, str):
            self.__mask = []
            for char in mask:
                if char == "*":
                    self.__mask.append(1)
                elif char == " ":
                    self.__mask.append(0)
                else:
                    raise ValueError("Mask should contain only '*' or ' ' and not a '%s'" % char)
            self.__mask = tuple(self.__mask)
        else:
            self.__mask = tuple(int(bool(c)) for c in mask)
    mask = property(__get_mask, __set_mask)
    del __get_mask
    del __set_mask
    def __get_pseudocounts(self):
        return self._pseudocounts
    def __set_pseudocounts(self, value):
        # Accept a per-letter dict or a single value applied to all letters.
        self._pseudocounts = {}
        if isinstance(value, dict):
            self._pseudocounts = dict((letter, value[letter]) for letter in self.alphabet.letters)
        else:
            if value is None:
                value = 0.0
            self._pseudocounts = dict.fromkeys(self.alphabet.letters, value)
    pseudocounts = property(__get_pseudocounts, __set_pseudocounts)
    del __get_pseudocounts
    del __set_pseudocounts
    def __get_background(self):
        return self._background
    def __set_background(self, value):
        if isinstance(value, dict):
            self._background = dict((letter, value[letter]) for letter in self.alphabet.letters)
        elif value is None:
            self._background = dict.fromkeys(self.alphabet.letters, 1.0)
        else:
            # A single number is interpreted as the GC content (DNA only).
            if sorted(self.alphabet.letters) != ["A", "C", "G", "T"]:
                # TODO - Should this be a ValueError?
                raise Exception("Setting the background to a single value only "
                                "works for DNA motifs (in which case the value "
                                "is interpreted as the GC content")
            self._background['A'] = (1.0 - value) / 2.0
            self._background['C'] = value / 2.0
            self._background['G'] = value / 2.0
            self._background['T'] = (1.0 - value) / 2.0
        # Normalize so the background frequencies sum to 1.
        total = sum(self._background.values())
        for letter in self.alphabet.letters:
            self._background[letter] /= total
    background = property(__get_background, __set_background)
    del __get_background
    del __set_background
    @property
    def pwm(self):
        """Position-weight matrix (normalized counts with pseudocounts)."""
        return self.counts.normalize(self._pseudocounts)
    @property
    def pssm(self):
        """Position-specific scoring matrix (log-odds vs the background)."""
        return self.pwm.log_odds(self._background)
    def __str__(self, masked=False):
        """ string representation of a motif.
        """
        text = ""
        if self.instances is not None:
            text += str(self.instances)
        if masked:
            for i in range(self.length):
                if self.__mask[i]:
                    text += "*"
                else:
                    text += " "
            text += "\n"
        return text
    def __len__(self):
        """return the length of a motif
        Please use this method (i.e. invoke len(m)) instead of referring to m.length directly.
        """
        if self.length is None:
            return 0
        else:
            return self.length
    def reverse_complement(self):
        """Gives the reverse complement of the motif."""
        alphabet = self.alphabet
        if self.instances is not None:
            instances = self.instances.reverse_complement()
            res = Motif(instances=instances, alphabet=alphabet)
        else:  # has counts
            res = Motif(alphabet)
            res.counts = {}
            res.counts["A"] = self.counts["T"][::-1]
            res.counts["T"] = self.counts["A"][::-1]
            res.counts["G"] = self.counts["C"][::-1]
            res.counts["C"] = self.counts["G"][::-1]
            res.length = self.length
        res.__mask = self.__mask[::-1]
        return res
    @property
    def consensus(self):
        """Returns the consensus sequence."""
        return self.counts.consensus
    @property
    def anticonsensus(self):
        """Returns the least probable pattern to be generated from this motif."""
        return self.counts.anticonsensus
    @property
    def degenerate_consensus(self):
        """Generate degenerate consensus sequence.
        Following the rules adapted from
        D. R. Cavener: "Comparison of the consensus sequence flanking
        translational start sites in Drosophila and vertebrates."
        Nucleic Acids Research 15(4): 1353-1361. (1987).
        The same rules are used by TRANSFAC.
        """
        return self.counts.degenerate_consensus
    def weblogo(self, fname, format="PNG", version="2.8.2", **kwds):
        """Uses the Berkeley weblogo service to download and save a weblogo of itself.
        Requires an internet connection.
        The parameters from ``**kwds`` are passed directly to the weblogo server.
        Currently, this method uses WebLogo version 3.3.
        These are the arguments and their default values passed to
        WebLogo 3.3; see their website at http://weblogo.threeplusone.com
        for more information::
            'stack_width' : 'medium',
            'stack_per_line' : '40',
            'alphabet' : 'alphabet_dna',
            'ignore_lower_case' : True,
            'unit_name' : "bits",
            'first_index' : '1',
            'logo_start' : '1',
            'logo_end': str(self.length),
            'composition' : "comp_auto",
            'percentCG' : '',
            'scale_width' : True,
            'show_errorbars' : True,
            'logo_title' : '',
            'logo_label' : '',
            'show_xaxis': True,
            'xaxis_label': '',
            'show_yaxis': True,
            'yaxis_label': '',
            'yaxis_scale': 'auto',
            'yaxis_tic_interval' : '1.0',
            'show_ends' : True,
            'show_fineprint' : True,
            'color_scheme': 'color_auto',
            'symbols0': '',
            'symbols1': '',
            'symbols2': '',
            'symbols3': '',
            'symbols4': '',
            'color0': '',
            'color1': '',
            'color2': '',
            'color3': '',
            'color4': '',
        """
        from Bio._py3k import urlopen, urlencode, Request
        from Bio import Alphabet
        # Pick the server-side alphabet from the motif's Biopython alphabet.
        if isinstance(self.alphabet, Alphabet.ProteinAlphabet):
            alpha = "alphabet_protein"
        elif isinstance(self.alphabet, Alphabet.RNAAlphabet):
            alpha = "alphabet_rna"
        elif isinstance(self.alphabet, Alphabet.DNAAlphabet):
            alpha = "alphabet_dna"
        else:
            alpha = "auto"
        frequencies = self.format('transfac')
        url = 'http://weblogo.threeplusone.com/create.cgi'
        values = {'sequences': frequencies,
                  'format': format.lower(),
                  'stack_width': 'medium',
                  'stack_per_line': '40',
                  'alphabet': alpha,
                  'ignore_lower_case': True,
                  'unit_name': "bits",
                  'first_index': '1',
                  'logo_start': '1',
                  'logo_end': str(self.length),
                  'composition': "comp_auto",
                  'percentCG': '',
                  'scale_width': True,
                  'show_errorbars': True,
                  'logo_title': '',
                  'logo_label': '',
                  'show_xaxis': True,
                  'xaxis_label': '',
                  'show_yaxis': True,
                  'yaxis_label': '',
                  'yaxis_scale': 'auto',
                  'yaxis_tic_interval': '1.0',
                  'show_ends': True,
                  'show_fineprint': True,
                  'color_scheme': 'color_auto',
                  'symbols0': '',
                  'symbols1': '',
                  'symbols2': '',
                  'symbols3': '',
                  'symbols4': '',
                  'color0': '',
                  'color1': '',
                  'color2': '',
                  'color3': '',
                  'color4': '',
                  }
        # User overrides win; booleans False become empty strings, as the
        # web form expects.
        values.update(
            dict((k, "" if v is False else str(v)) for k, v in kwds.items()))
        data = urlencode(values).encode("utf-8")
        req = Request(url, data)
        response = urlopen(req)
        with open(fname, "wb") as f:
            im = response.read()
            f.write(im)
    def format(self, format):
        """Returns a string representation of the Motif in a given format
        Currently supported formats:
         - pfm : JASPAR single Position Frequency Matrix
         - jaspar : JASPAR multiple Position Frequency Matrix
         - transfac : TRANSFAC like files
        """
        if format in ('pfm', 'jaspar'):
            from Bio.motifs import jaspar
            motifs = [self]
            return jaspar.write(motifs, format)
        elif format == "transfac":
            from Bio.motifs import transfac
            motifs = [self]
            return transfac.write(motifs)
        else:
            raise ValueError("Unknown format type %s" % format)
def write(motifs, format):
    """Returns a string representation of motifs in a given format
    Currently supported formats (case is ignored):
     - pfm : JASPAR simple single Position Frequency Matrix
     - jaspar : JASPAR multiple PFM format
     - transfac : TRANSFAC like files
    """
    format = format.lower()
    if format in ("pfm", "jaspar"):
        from Bio.motifs import jaspar
        return jaspar.write(motifs, format)
    if format == "transfac":
        from Bio.motifs import transfac
        return transfac.write(motifs)
    raise ValueError("Unknown format type %s" % format)
if __name__ == "__main__":
    # When executed directly, run this module's doctests quietly.
    from Bio._utils import run_doctest
    run_doctest(verbose=0)
|
zjuchenyuan/BioWeb
|
Lib/Bio/motifs/__init__.py
|
Python
|
mit
| 18,757
|
[
"Biopython"
] |
720df290cd36eabd90ac97ab99481506812c27bbbeaeffd4644a46b7e4f55e67
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that trains a small multi-layer perceptron with fully connected layers
on MNIST.
This example has some command line arguments that enable different neon features.
Examples:
python mnist_mlp.py -b gpu -e 10
Run the example for 10 epochs of mnist data using the nervana gpu
backend
python mnist_mlp.py --validation_freq 1
After each training epoch the validation/test data set will be
processed through the model and the cost will be displayed.
python mnist_mlp.py --serialize 1 -s checkpoint.pkl
After every iteration of training the model will be dumped to a pickle
file names "checkpoint.pkl". Increase the serialize parameter to
change the frequency at which the model is saved.
python mnist_mlp.py --model_file checkpioint.pkl
Before starting to train the model, the model state is set to the
values stored in the checkpoint file named checkpioint.pkl.
"""
import logging
import os
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, BatchNorm
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, CrossEntropyBinary, Misclassification
from neon.util.argparser import NeonArgparser
logger = logging.getLogger()
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--serialize', nargs='?', type=int,
default=0, const=1, metavar='N',
help='serialize model every N epochs')
parser.add_argument('--model_file', help='load model from pkl file')
args = parser.parse_args()
# hyperparameters
batch_size = 128
num_epochs = args.epochs
# setup backend
be = gen_backend(backend=args.backend,
batch_size=batch_size,
rng_seed=args.rng_seed,
device_id=args.device_id,
default_dtype=args.datatype,
stochastic_round=False)
# load up the mnist data set
# split into train and tests sets
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# setup a training set iterator
train_set = DataIterator(X_train, y_train, nclass=nclass)
# setup a validation data set iterator
valid_set = DataIterator(X_test, y_test, nclass=nclass)
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
# setiup model layers
layers = []
layers.append(Affine(nout=100, init=init_norm, activation=Rectlin()))
layers.append(Affine(nout=10, init=init_norm, activation=Logistic(shortcut=True)))
# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())
# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
mlp = Model(layers=layers)
if args.model_file:
assert os.path.exists(args.model_file), '%s not found' % args.model_file
logger.info('loading initial model state from %s' % args.model_file)
mlp.load_weights(args.model_file)
# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, output_file=args.output_file,
progress_bar=args.progress_bar)
# add a callback ot calculate
if args.validation_freq:
# setup validation trial callbacks
callbacks.add_validation_callback(valid_set, args.validation_freq)
if args.serialize > 0:
# add callback for saving checkpoint file
# every args.serialize epchs
checkpoint_schedule = args.serialize
checkpoint_model_path = args.save_path
callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)
# run fit
mlp.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification())*100))
|
chetan51/neon
|
examples/mnist_mlp.py
|
Python
|
apache-2.0
| 4,752
|
[
"Gaussian"
] |
1cb77972b1dfc9ae54376d4be6469300c5660a2c4e5fd14f8f938759800eebb2
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************************
**espressopp.interaction.HarmonicUnique**
***************************************************
.. math::
U = K (d - d_{cur})^2;
.. function:: espressopp.interaction.HarmonicUnique(K)
:param K: (default: 1.0)
:type K: real
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique(system, fpl, potential)
:param system:
:param fpl:
:param potential:
:type system:
:type fpl:
:type potential:
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairDistListHarmonicUnique.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.PotentialUniqueDist import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_HarmonicUnique, \
interaction_FixedPairDistListHarmonicUnique
class HarmonicUniqueLocal(PotentialUniqueDistLocal, interaction_HarmonicUnique):
    """Worker-local wrapper for the C++ HarmonicUnique potential."""

    def __init__(self, K=1.0):
        # Only construct the underlying C++ object on ranks that belong to
        # the active PMI CPU group, or everywhere when no PMI communicator
        # is active (serial / non-PMI runs).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_HarmonicUnique, K)
class FixedPairDistListHarmonicUniqueLocal(InteractionLocal, interaction_FixedPairDistListHarmonicUnique):
    """Worker-local interaction applying HarmonicUnique to a fixed pair list.

    Every method guards on the PMI CPU group so that C++ calls happen only
    on participating ranks.
    """

    def __init__(self, system, fpl, potential):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairDistListHarmonicUnique, system, fpl, potential)

    def setPotential(self, potential):
        # Replace the pair potential used by this interaction.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def setFixedPairList(self, fixedpairlist):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        # Returns a Python list of lists; implicitly returns None on ranks
        # outside the CPU group.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedPairList(self)
if pmi.isController:
    # Controller-side PMI proxy classes; calls are forwarded to the
    # *Local classes on the workers.
    class HarmonicUnique(PotentialUniqueDist):
        'The HarmonicUnique potential.'
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.HarmonicUniqueLocal',
            pmiproperty = ['K']
        )
    class FixedPairDistListHarmonicUnique(Interaction):
        # NOTE(review): metaclass set explicitly here but not on
        # HarmonicUnique above — presumably supplied by its base; verify.
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairDistListHarmonicUniqueLocal',
            pmicall = ['setPotential','setFixedPairList','getFixedPairList']
        )
|
capoe/espressopp.soap
|
src/interaction/HarmonicUnique.py
|
Python
|
gpl-3.0
| 3,896
|
[
"ESPResSo"
] |
f4258b413d94ff2ee692ea9566fcee7142bd65df2a3fc6fb29af89ea397743ea
|
#!/usr/bin/env python
from traits.api import HasTraits, Instance, Array, Bool, Dict, \
on_trait_change, Delegate, List, Color, Any, Instance, Int, File, \
Button, Enum, Str, DelegatesTo, Property, CFloat,Range
from traitsui.api import View, Item, HGroup, VGroup, \
Group, Handler, HSplit, VSplit, RangeEditor, Include, Action, MenuBar, Menu, \
TableEditor, ObjectColumn, Separator
from traitsui.extras.checkbox_column import CheckboxColumn
from ..volumes.scalar_volume import ScalarVolumes
from tvtk.pyface.scene import Scene
from mayavi.core.ui.api import SceneEditor
from traitsui.color_column import ColorColumn
from mayavi.core.api import PipelineBase, Source
from mayavi import mlab
from traitsui.editors.tabular_editor import TabularEditor
from traitsui.tabular_adapter import TabularAdapter
from traitsui.file_dialog import save_file, open_file
from tvtk.pyface.scene import Scene
from tvtk.api import tvtk
from mayavi.core.ui.api import SceneEditor
from mayavi.tools.mlab_scene_model import MlabSceneModel
import os
import numpy as np
from dsi2.volumes.scalar_volume import ScalarVolume
from dsi2.streamlines.track_dataset import TrackDataset
import cPickle as pickle
# Table editor used by LTPAResults to list each LTPAResult with its
# visibility/opacity/color display options as editable columns.
ltpa_result_table = TableEditor(
    columns =
    [
        ObjectColumn(name="name"),
        CheckboxColumn(name="visible"),
        ObjectColumn(name="coord_opacity"),
        ObjectColumn(name="tracksA_opacity"),
        CheckboxColumn(name="tracksA_visible"),
        ObjectColumn(name="tracksB_opacity"),
        CheckboxColumn(name="tracksB_visible"),
        ColorColumn(name="colorA", width=5),
        ColorColumn(name="colorB", width=5),
        ObjectColumn(name="coord_shape"),
        ObjectColumn(name="coord_radius"),
    ],
    auto_size=False,
)
class CoordinatesGraphic(HasTraits):
    """Renders a set of coordinate indices in the mayavi scene as spheres,
    cubes, or a gaussian-splatter volume."""
    # Data
    scalars = Array
    indices = Array
    radius = CFloat(0.5)
    # Holds the mayavi objects
    source = Instance(Source,transient=True)
    glyph = Instance(PipelineBase, transient=True)
    glyph_drawn = Bool(False, transient=True)
    splatter = Instance(PipelineBase,transient=True)
    glyph_opacity = Range(high=1.0,low=0.0,value=0.3)
    # MayaVi data options
    color_map = Enum(
        [ "Blues", "Oranges", "pink", "Greens"] )
    render_type = Enum(["static_spheres","sized_cubes",
                        "static_cubes","splatter"])
    static_color = Color
    visible = Bool(True)

    def set_visibility(self, visibility):
        # Lazily render on first show; hiding before anything was drawn is
        # a no-op.
        if not visibility:
            if not self.glyph_drawn: return
        else:
            if not self.glyph_drawn:
                self.render()
        # Set visibility of all items
        for viz in [self.glyph, self.splatter]:
            if viz:
                viz.visible = visibility

    def render(self):
        """Create the mayavi pipeline objects for the selected render_type."""
        if not self.visible: return
        # Color extraction differs between Qt color APIs — presumably
        # toTuple() vs red()/green()/blue(); TODO confirm which toolkit.
        try:
            color = self.static_color.toTuple()
        except:
            color = (self.static_color.red(),self.static_color.green(),self.static_color.blue())
        # Normalize 0-255 channels to the 0-1 floats mlab expects.
        static_color = color[0]/255., color[1]/255., color[2]/255.
        if self.render_type == "sized_cubes":
            self.glyph = mlab.pipeline.glyph(
                self.source, colormap=self.color_map, mode="cube" )
        elif self.render_type == "splatter":
            self.splatter = mlab.pipeline.gaussian_splatter(self.source)
            self.glyph = mlab.pipeline.volume(
                self.splatter,
                color=static_color)
        elif self.render_type == "static_cubes":
            self.source = mlab.pipeline.scalar_scatter(
                self.indices[:,0],self.indices[:,1],self.indices[:,2])
            self.glyph = mlab.pipeline.glyph(
                self.source, color=static_color, mode="cube" )
        elif self.render_type == "static_spheres":
            self.source = mlab.pipeline.scalar_scatter(
                self.indices[:,0],self.indices[:,1],self.indices[:,2])
            self.glyph = mlab.pipeline.glyph(
                self.source, color=static_color,
                mode="sphere" )
            self.glyph.glyph.glyph_source.glyph_source.radius = self.radius
        self.glyph.actor.property.opacity = self.glyph_opacity
        self.glyph_drawn = True

    def _color_map_changed(self):
        # NOTE(review): `clear` is not defined in this class — presumably
        # inherited or monkey-patched elsewhere; verify before relying on
        # colormap switching at runtime.
        self.clear()
        self.render()

    # NOTE(review): "filepath" and "b_render" are not traits of this class;
    # this view looks stale — confirm against its caller.
    instance_view = View(
        Group(
            Item("filepath"),
            Group(Item("visible"),Item("glyph"),Item("splatter"),Item("source"),orientation="horizontal"),
            Item("static_color"),
            Item("b_render"),
            orientation="vertical")
    )
class LTPAResult(HasTraits):
    """A single LTPA analysis result: significant coordinates plus the two
    track datasets (A and B) they came from, with display options that the
    ltpa_result_table edits directly."""
    name=Str("LTPA Result")
    # 3d MayaVi scene that will display slices and streamlines
    scene3d = Instance(MlabSceneModel,transient=True)
    # Data objects
    result_coords = Array
    result_coord_scalars = Array
    coords_apply_to = Enum("A","B")
    tracksA = Instance(TrackDataset)
    tracksB = Instance(TrackDataset)
    #graphics options
    coord_shape = Enum("sphere", "cube")
    coord_radius = CFloat(1.0)
    colorA = Color("red")
    colorB = Color("blue")
    showA_as = Enum("splatter","tracks")
    showB_as = Enum("splatter","tracks")
    coord_group = Enum("A","B")
    # Opacity of the coordinate glyphs. NOTE(review): this trait was
    # previously declared twice with identical defaults; the redundant
    # second declaration has been removed (no behavior change).
    coord_opacity = Range(0.0,1.0,0.5)
    visible = Bool(False)
    tracksA_opacity = Range(0.0,1.0,0.5)
    tracksA_visible = Bool(True)
    tracksB_opacity = Range(0.0,1.0,0.5)
    tracksB_visible = Bool(True)
    # graphics objects
    coord_graphic = Instance(CoordinatesGraphic,transient=True)

    def __init__(self,**traits):
        super(LTPAResult,self).__init__(**traits)
        # prepare track datasets for plotting: render with a single static
        # color per group rather than per-cluster dynamic colors
        for tds in [self.tracksA, self.tracksB]:
            tds.render_tracks = True
            tds.tracks_drawn = False
            tds.dynamic_color_clusters = False
        self.tracksA.static_color = self.colorA
        self.tracksB.static_color = self.colorB

    def _coord_graphic_default(self):
        """Build the CoordinatesGraphic for this result, colored to match
        whichever track group (A or B) the coordinates apply to."""
        if self.coords_apply_to == "A":
            c = self.colorA
        else:
            c = self.colorB
        return CoordinatesGraphic(
            indices = self.result_coords,
            static_color=c,
            scalars = self.result_coord_scalars,
            radius=self.coord_radius
        )

    def _coord_opacity_changed(self):
        # Push the new opacity straight onto the rendered glyph actor.
        self.coord_graphic.glyph.actor.property.opacity = self.coord_opacity

    def _visible_changed(self):
        """Show or hide all graphics belonging to this result."""
        for tds in [self.tracksA, self.tracksB]:
            tds.set_track_visibility(self.visible)
        self._tracksA_opacity_changed()
        self._tracksB_opacity_changed()
        self.coord_graphic.set_visibility(self.visible)

    def _tracksA_opacity_changed(self):
        if self.tracksA.tracks_drawn:
            self.tracksA.src.actor.property.opacity = self.tracksA_opacity

    def _tracksA_visible_changed(self):
        if self.tracksA.tracks_drawn:
            self.tracksA.set_track_visibility(self.tracksA_visible)

    def _tracksB_opacity_changed(self):
        if self.tracksB.tracks_drawn:
            self.tracksB.src.actor.property.opacity = self.tracksB_opacity

    def _tracksB_visible_changed(self):
        if self.tracksB.tracks_drawn:
            self.tracksB.set_track_visibility(self.tracksB_visible)
class LTPAResults(HasTraits):
    """Container for a list of LTPAResult objects sharing one mayavi scene."""
    scene3d_inited = Bool(False)
    results = List(Instance(LTPAResult))
    scene3d = Instance(MlabSceneModel, (),transient=True)

    def __init__(self,**traits):
        super(LTPAResults,self).__init__(**traits)
        # All results render into this container's shared scene.
        for res in self.results:
            res.scene3d = self.scene3d

    traits_view = View(
        Group(
            Item("results", editor=ltpa_result_table),
            show_labels=False
        )
    )
    # Standalone view that embeds the 3D scene alongside the result table.
    test_view = View(
        Group(
            Item("scene3d",
                 editor=SceneEditor(scene_class=Scene),
                 height=500, width=500),
            Item("results", editor=ltpa_result_table),
            show_labels=False
        ),
        resizable=True
    )

    @on_trait_change('scene3d.activated')
    def display_scene3d(self):
        # NOTE(review): scene3d_inited is checked here but never set to
        # True, so this guard is ineffective — confirm intent.
        if self.scene3d_inited: return
        for res in self.results:
            res.visible = True
def load_ltpa_results(results_pth):
    """Load a pickled LTPAResults object from ``results_pth``.

    Raises ValueError when the file does not exist; returns an empty
    LTPAResults (best-effort) when unpickling fails.
    """
    if not os.path.exists(results_pth):
        raise ValueError("No such file " + results_pth)
    # ``with`` guarantees the handle is closed even when pickle.load
    # raises (the original leaked the file object on every path).
    try:
        with open(results_pth, "rb") as fop:
            res = pickle.load(fop)
    except Exception as e:
        print("Unable to load %s because of\n %s" % (results_pth, e))
        return LTPAResults()
    # When loading from a pickle, __init__ isn't re-run; the mayavi handles
    # are declared transient on the result classes, so they are rebuilt
    # lazily when the results are next rendered.
    return res
|
mattcieslak/DSI2
|
dsi2/ui/ltpa_result.py
|
Python
|
gpl-3.0
| 9,331
|
[
"Mayavi"
] |
bccae8742838fc56d2cd570ef65dca4048cee2f9a433304c4c1c966ed4f81f4f
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the configured Titanium SDK path with ~ and $VARS expanded."""
    raw_path = config['TITANIUM_SDK']
    expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(expanded)
def replace_vars(config,token):
    """Expand every $(KEY) occurrence in token using values from config.

    Expansion stops at the first unknown key or unterminated $( — the
    remaining text is returned unchanged.
    """
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')',idx+2)
        if idx2 == -1: break
        key = token[idx+2:idx2]
        # `key in config` replaces the Python-2-only dict.has_key()
        if key not in config: break
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig (next to this script) into a dict.

    Lines have the form KEY = VALUE; // comment lines are skipped and
    $(VAR) references are expanded against earlier keys via replace_vars().
    """
    contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2]=='//': continue
        idx = line.find('=')
        # idx > 0 also skips lines that *start* with '=' or lack one entirely
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx+1:].strip()
            config[key] = replace_vars(config,value)
    return config
def generate_doc(config):
    """Render every file in the documentation/ dir to HTML via markdown.

    Returns a list of {filename: html} dicts, or None when no
    documentation directory exists.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        # fall back to the parent directory layout
        docdir = os.path.join(cwd,'..','documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    # prefer markdown2 when installed, else plain markdown
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in os.listdir(docdir):
        # ignoreFiles is a module-level list defined later in this file
        # (bound by the time this function runs)
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's commonjs assets and splice them into the
    generated ObjC assets router; also writes metadata.json exports."""
    js_file = os.path.join(cwd,'assets','com.crissmoldovan.tisip.js')
    if not os.path.exists(js_file):
        js_file = os.path.join(cwd,'..','assets','com.crissmoldovan.tisip.js')
    # nothing to do when the module ships no JS entry point
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # ObjC snippets spliced into ComCrissmoldovanTisipModuleAssets.m below
    root_asset_content = """
%s
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
 NSNumber *index = [map objectForKey:path];
 if (index == nil) {
 return nil;
 }
 return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','ComCrissmoldovanTisipModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print msg and abort the build with exit status 1."""
    print(msg)
    sys.exit(1)

def warn(msg):
    """Print a non-fatal warning, tagged [WARN]."""
    print("[WARN] %s" % msg)

def error(msg):
    """Print an error message, tagged [ERROR] (does not exit)."""
    print("[ERROR] %s" % msg)
def validate_license():
    """Warn when LICENSE still contains the placeholder default text."""
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    # missing LICENSE is silently tolerated; only placeholder text warns
    if os.path.exists(license_file):
        c = open(license_file).read()
        if c.find(module_license_default)!=-1:
            warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Parse and validate the module `manifest` file.

    Returns (manifest_dict, path). Dies if the file is missing, a required
    key is absent or empty; warns when a key still holds its default value.
    """
    path = os.path.join(cwd,'manifest')
    # Check existence BEFORE opening: the original opened the file first,
    # so a missing manifest raised IOError instead of the intended die().
    if not os.path.exists(path): die("missing %s" % path)
    manifest = {}
    # `with` closes the handle (the original never closed it)
    with open(path) as f:
        for line in f.readlines():
            line = line.strip()
            if line[0:1]=='#': continue
            if line.find(':') < 0: continue
            key,value = line.split(':')
            manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        # `in` replaces the Python-2-only dict.has_key()
        if key not in manifest: die("missing required manifest key '%s'" % key)
        if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path
# Names excluded from packaging: build artifacts plus VCS/OS metadata.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=None,includeJSFiles=False):
    """Recursively add `dir` to zipfile `zf`, rewriting paths under `basepath`.

    Skips ignoreDirs/ignoreFiles, .pyc files, any extra names in `ignore`,
    and .js files unless includeJSFiles is True.
    """
    # None default instead of a mutable []; normalize to a set for lookup
    skip_names = set(ignore or [])
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name) # don't visit ignored directories
        for file in files:
            # The `ignore` parameter was previously accepted but never
            # consulted; honor it in addition to the global ignoreFiles.
            if file in ignoreFiles or file in skip_names: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] == '.pyc': continue
            if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def glob_libfiles():
    """Return the Release-configuration static libraries under build/."""
    return [lib for lib in glob.glob('build/**/*.a') if 'Release-' in lib]
def build_module(manifest,config):
    """Build device and simulator Release configurations with xcodebuild,
    then lipo the resulting .a files into build/lib<moduleid>.a."""
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    # NOTE(review): lipo's exit status is not checked, unlike xcodebuild's
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def verify_build_arch(manifest, config):
    """Compare the architectures baked into the built library against the
    manifest's `architectures` entry; die on mismatch, warn on missing arm64."""
    binaryname = 'lib%s.a' % manifest['moduleid']
    binarypath = os.path.join('build', binaryname)
    manifestarch = set(manifest['architectures'].split(' '))
    # `lipo -info` output ends with a space-separated arch list after ':'
    output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
    builtarch = set(output.split(':')[-1].strip().split(' '))
    print 'Check build architectures\n'
    if ('arm64' not in builtarch):
        warn('built module is missing 64-bit support.')
    if (manifestarch != builtarch):
        warn('architectures in manifest: %s' % ', '.join(manifestarch))
        warn('compiled binary architectures: %s' % ', '.join(builtarch))
        print '\nMODULE BUILD FAILED'
        error('there is discrepancy between the architectures specified in module manifest and compiled binary.')
        error('Please update manifest to match module binary architectures.')
        die('')
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    Packs the manifest, merged static library, rendered documentation,
    assets, example/platform dirs, LICENSE, module.xcconfig and
    metadata.json under modules/iphone/<moduleid>/<version>/.
    """
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # always rebuild the zip from scratch
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                # documentation was written in markdown; ship as .html
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
    for dn in ('example','platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p):
            p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            # include .js files for example/platform dirs
            zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file,'%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Full packaging pipeline: validate, configure, compile JS assets,
    # build and verify the native library, then zip the module.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # make the Titanium SDK's python tooling importable
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    verify_build_arch(manifest, config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
crissmoldovan/tisip
|
iphone/build.py
|
Python
|
mit
| 8,643
|
[
"VisIt"
] |
5cb5f150c9c6b45f36f2d8e9a53f5fffaed9205bee62384b8fb57b717ab30c54
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching) lists, each preserving input order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else non_matching).append(item)
    # Returns trueList, falseList
    return matching, non_matching
class bigquery_datatransferCallTransformer(cst.CSTTransformer):
    """libcst transformer that folds flattened positional/keyword arguments
    of known API methods into a single `request` dict argument, keeping the
    retry/timeout/metadata control parameters as real keywords."""
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Maps each client method name to its flattened parameter order.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'check_valid_creds': ('name', ),
        'create_transfer_config': ('parent', 'transfer_config', 'authorization_code', 'version_info', 'service_account_name', ),
        'delete_transfer_config': ('name', ),
        'delete_transfer_run': ('name', ),
        'enroll_data_sources': ('name', 'data_source_ids', ),
        'get_data_source': ('name', ),
        'get_transfer_config': ('name', ),
        'get_transfer_run': ('name', ),
        'list_data_sources': ('parent', 'page_token', 'page_size', ),
        'list_transfer_configs': ('parent', 'data_source_ids', 'page_token', 'page_size', ),
        'list_transfer_logs': ('parent', 'page_token', 'page_size', 'message_types', ),
        'list_transfer_runs': ('parent', 'states', 'page_token', 'page_size', 'run_attempt', ),
        'schedule_transfer_runs': ('parent', 'start_time', 'end_time', ),
        'start_manual_transfer_runs': ('parent', 'requested_time_range', 'requested_run_time', ),
        'update_transfer_config': ('transfer_config', 'update_mask', 'authorization_code', 'version_info', 'service_account_name', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a recognized method call; leave everything else untouched."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Positional args beyond the flattened parameter list are control
        # parameters passed positionally; convert them to keywords.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=bigquery_datatransferCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    # lazily walk for every .py file under in_dir
    pyfile_gen = (
        pathlib.Path(os.path.join(root, f))
        for root, _, files in os.walk(in_dir)
        for f in files if os.path.splitext(f)[1] == ".py"
    )
    for fpath in pyfile_gen:
        with open(fpath, 'r') as f:
            src = f.read()
        # Parse the code and insert method call fixes.
        tree = cst.parse_module(src)
        updated = tree.visit(transformer)
        # Create the path and directory structure for the new file.
        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
        updated_path.parent.mkdir(parents=True, exist_ok=True)
        # Generate the updated source file at the corresponding path.
        with open(updated_path, 'w') as f:
            f.write(updated.code)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the bigquery_datatransfer client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Validate preconditions documented on fix_files: input exists, output
    # exists and is empty.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
|
googleapis/python-bigquery-datatransfer
|
scripts/fixup_bigquery_datatransfer_v1_keywords.py
|
Python
|
apache-2.0
| 7,039
|
[
"VisIt"
] |
a37695b36847a28d959252322a12359b9d79620b7614b19fce5fad68e43bebca
|
#!/usr/bin/env python
import sys
# Keyword arguments that only apply when setuptools is available.
extras = {}
try:
    from setuptools import setup
    extras['zip_safe'] = False
    # Python < 2.6 needs the multiprocessing backport from PyPI.
    if sys.version_info < (2, 6):
        extras['install_requires'] = ['multiprocessing']
except ImportError:
    # Fall back to plain distutils; no extras supported there.
    from distutils.core import setup
setup(name='futures',
      version='2.1.2',
      description='Backport of the concurrent.futures package from Python 3.2',
      author='Brian Quinlan',
      author_email='brian@sweetapp.com',
      maintainer='Alex Gronholm',
      maintainer_email='alex.gronholm+pypi@nextday.fi',
      url='http://code.google.com/p/pythonfutures',
      download_url='http://pypi.python.org/pypi/futures/',
      packages=['futures', 'concurrent', 'concurrent.futures'],
      license='BSD',
      classifiers=['License :: OSI Approved :: BSD License',
                   'Development Status :: 5 - Production/Stable',
                   'Intended Audience :: Developers',
                   'Programming Language :: Python :: 2.5',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.1'],
      **extras
      )
|
santegoeds/pythonfutures
|
setup.py
|
Python
|
bsd-2-clause
| 1,249
|
[
"Brian"
] |
9dea5a6189ee511751c86af387d95cca7be69eb09dead531ed1cc79fe21e0596
|
"""Packaging configuration for the BGTools CDK application."""
import setuptools

# The project README doubles as the PyPI long description.
with open("README.md") as readme_file:
    readme_text = readme_file.read()

setuptools.setup(
    name="cdk",
    version="0.0.1",
    description="BGTools CDK App",
    long_description=readme_text,
    long_description_content_type="text/markdown",
    author="Peter Gorniak",
    package_dir={"": "cdk"},
    packages=setuptools.find_packages(where="cdk"),
    # aws-cdk v1-style per-service packages plus runtime helpers.
    install_requires=[
        "aws-cdk.core",
        "aws-cdk.aws_certificatemanager",
        "aws-cdk.aws_cloudfront",
        "aws-cdk.aws_cloudfront_origins",
        "aws-cdk.aws_lambda",
        "aws-cdk.aws_lambda_python",
        "aws-cdk.aws_apigateway",
        "aws-cdk.aws_s3",
        "aws-cdk.aws_s3_deployment",
        "requests",
        "jinja2",
    ],
    python_requires=">=3.6",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
|
sumpfork/bgtools_web
|
setup.py
|
Python
|
mit
| 1,141
|
[
"CDK"
] |
1da50106753ae8a7421e0e2f358362dbedafce55f75ee3f944814d02d7e329d5
|
from assembler import Assembler
from assembler import Form
from assembler import Kernel
from diagnostics import Verbose
from fem import QuadFE
from fem import DofHandler
from fem import Basis
from function import Constant
from function import Explicit
from function import Map
from function import Nodal
from gmrf import Covariance
from gmrf import GaussianField
from mesh import QuadMesh
from mesh import Mesh1D
from plot import Plot
from solver import LS
import numpy as np
import matplotlib.pyplot as plt
import gc
import scipy.sparse as sp
from tqdm import tqdm
from scipy.optimize import minimize
"""
System
-div(exp(K)*grad(y)) = b + Fu, x in D
y = g , x in D_Dir
exp(K)*grad(y)*n = 0 , x in D_Neu
Random field:
K ~ GaussianField
Cost Functional
J(u) = E(|y(u)-y_d|**2) + alpha/2*|u|**2-
"""
def cost_gradient(x,n,z_data,state,adjoint,A,M,gamma,dofs_inj,dofs_prod):
    """
    Return the cost functional value and its gradient with respect to the
    injection-well controls.

    Inputs:

        x: double, vector of current injection rates (one per entry of
            dofs_inj).

        n: int, total number of state degrees of freedom.

        z_data: double, (len(dofs_prod),1) target pressures at the
            production wells.

        state, adjoint: LS solvers for the state and adjoint systems
            (Dirichlet constraints assumed already attached).

        A: sparse stiffness matrix of the diffusion operator.

        M: sparse mass matrix (also maps the control into the right-hand
            side).

        gamma: double >=0, regularization parameter.

        dofs_inj, dofs_prod: dof indices of the injection and production
            wells.

    Outputs:

        f: double, J(u) = 0.5*|y-z|^2 + 0.5*gamma*u'Mu.

        g: double, gradient of f with respect to x (1d array).
    """
    # -------------------------------------------------------------------------
    # State equation: embed the controls into a full dof vector, solve A y = M u
    # -------------------------------------------------------------------------
    u_data = np.zeros((n,1))
    u_data[dofs_inj,:] = x[:,None]
    b = M.dot(u_data)
    state.set_matrix(sp.csr_matrix(A,copy=True))
    state.set_rhs(b)
    state.solve_system()
    y_data = state.get_solution(as_function=False)
    # -------------------------------------------------------------------------
    # Compute cost functional
    # -------------------------------------------------------------------------
    # BUG FIX: the residual was sized with the module-level global 'ny' instead
    # of the parameter 'n', which broke the function for any n != ny and made
    # it unusable outside this script.
    residual = np.zeros((n,1))
    y_data = y_data[dofs_prod,0][:,None]
    residual[dofs_prod,:] = y_data - z_data
    f = 0.5*residual.T.dot(residual) + \
        0.5*gamma*u_data.T.dot(M.dot(u_data))
    f = f[0,0]
    # -------------------------------------------------------------------------
    # Adjoint equation: A p = residual (A is symmetric here)
    # -------------------------------------------------------------------------
    adjoint.set_matrix(sp.csr_matrix(A, copy=True))
    adjoint.set_rhs(residual)
    # Solve adjoint equation
    adjoint.solve_system()
    p = adjoint.get_solution(as_function=False)
    # -------------------------------------------------------------------------
    # Gradient restricted to the injection dofs
    # -------------------------------------------------------------------------
    g = M.dot(p + gamma*u_data)[dofs_inj]
    print(np.linalg.norm(g))  # diagnostic: gradient norm per optimizer step
    return f,g.ravel()
# =============================================================================
# Mesh
# =============================================================================
# Computational domain [0, 2], uniform 1d mesh with 512 cells.
x_min = 0
x_max = 2
mesh = Mesh1D(box=[x_min, x_max], resolution=(512,))
# Mark Dirichlet Vertices at the two endpoints.
# NOTE(review): the 'right' marker hard-codes 2 instead of x_max — confirm
# they are meant to stay in sync.
mesh.mark_region('left', lambda x: np.abs(x)<1e-9)
mesh.mark_region('right', lambda x: np.abs(x-2)<1e-9)
#
# Finite element spaces (piecewise linear)
#
Q1 = QuadFE(mesh.dim(), 'Q1')
# Dofhandler for state
dh_y = DofHandler(mesh, Q1)
dh_y.distribute_dofs()
# ny = total number of state degrees of freedom; passed to cost_gradient as n.
ny = dh_y.n_dofs()
# Basis functions ('v' = values, 'vx' presumably the x-derivative — confirm
# against the fem module's convention).
phi = Basis(dh_y, 'v')
phi_x = Basis(dh_y, 'vx')
# -----------------------------------------------------------------------------
# Observations
# -----------------------------------------------------------------------------
# Determine vertices corresponding to production wells: n_prod equispaced
# interior points, each snapped to the nearest dof vertex.
n_prod = 4
h = (x_max-x_min)/(n_prod+2)
x_prod = np.array([(i+1)*h for i in range(n_prod)])
v = dh_y.get_dof_vertices()
dofs_prod = []
for x in x_prod:
    dofs_prod.append(np.argmin(abs(v-x)))
#
# Target pressure at production wells: concave parabola peaking at x = 1.
#
z_fn = Explicit(f=lambda x: 3-4*(x[:,0]-1)**2, dim=1, mesh=mesh)
z_data = z_fn.eval(v[dofs_prod])
# -----------------------------------------------------------------------------
# Control
# -----------------------------------------------------------------------------
# Determine the vertices corresponding to the injection wells (same snapping
# scheme as the production wells).
n_inj = 6
h = (x_max-x_min)/(n_inj+2)
x_inj = np.array([(i+1)*h for i in range(n_inj)])
dofs_inj = []
for x in x_inj:
    dofs_inj.append(np.argmin(abs(v-x)))
# Initial control: unit injection at every injection dof.
u_data = np.zeros((ny,1))
u_data[dofs_inj] = 1
u = Nodal(dofhandler=dh_y, data=u_data, dim=1)
#
# Regularization parameter
#
gamma = 0.00001
#gamma = 0.1
#
# Random diffusion coefficient: one sample of a Gaussian random field with
# correlation length 0.1.
#
cov = Covariance(dh_y, name='gaussian', parameters={'l':0.1})
k = GaussianField(ny, K=cov)
k.update_support()
kfn = Nodal(dofhandler=dh_y, data=k.sample(n_samples=1))
# =============================================================================
# Assembly
# =============================================================================
K = Kernel(kfn, F=lambda f:np.exp(f)) # diffusivity exp(K), keeps it positive
# Two bilinear forms: [0] stiffness with diffusivity K, [1] mass matrix.
problems = [[Form(K, test=phi_x, trial=phi_x)],
            [Form(test=phi, trial=phi)]]
assembler = Assembler(problems, mesh)
assembler.assemble()
# Stiffness matrix (state operator) and mass matrix (for control)
A = assembler.af[0]['bilinear'].get_matrix()
M = assembler.af[1]['bilinear'].get_matrix()
# =============================================================================
# Define State and Adjoint Systems
# =============================================================================
state = LS(phi) #, A=sp.csr_matrix(A, copy=True))
adjoint = LS(phi) #, A=sp.csr_matrix(A, copy=True))
# Apply Dirichlet Constraints (state): y = 1 on the left, y = 0 on the right.
state.add_dirichlet_constraint('left',1)
state.add_dirichlet_constraint('right',0)
state.set_constraint_relation()
# Apply Dirichlet Constraints (adjoint): homogeneous on both ends.
adjoint.add_dirichlet_constraint('left',0)
adjoint.add_dirichlet_constraint('right',0)
adjoint.set_constraint_relation()
# =============================================================================
# Optimization: gradient-based minimization of J over the injection rates;
# cost_gradient returns (f, grad), hence jac=True.
# =============================================================================
res = minimize(cost_gradient, u_data[dofs_inj],
               args=(ny,z_data,state, adjoint,A,M,gamma,dofs_inj,dofs_prod),
               jac=True)
print(res.x)
# =============================================================================
# Plot results: resolve the state at the optimal control and overlay targets.
# =============================================================================
u_data = np.zeros((ny,1))
u_data[dofs_inj,:] = res.x[:,None]
b = M.dot(u_data)
state.set_matrix(sp.csr_matrix(A,copy=True))
state.set_rhs(b)
state.solve_system()
y_data = state.get_solution(as_function=True)
fig, ax = plt.subplots(1,1)
plot = Plot(quickview=False)
# Red dots: target pressures at production wells; blue dots: injection sites.
ax.plot(v[dofs_prod],z_data,'ro')
ax = plot.line(y_data, axis=ax)
ax.plot(v[dofs_inj],np.zeros((len(dofs_inj),1)), 'C0o')
plt.show()
|
hvanwyk/quadmesh
|
experiments/optimal_control/ex03_pw_deterministic_1d.py
|
Python
|
mit
| 6,517
|
[
"Gaussian"
] |
146c25a37d378a15894b731140cb3f88e37a7f5d20f99ff34147ab3ab992e755
|
#!/usr/bin/env python3
""" Computer-based immigration office for Kanadia """
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2014 Susan Sim"
__license__ = "MIT License"
__status__ = "Prototype"
"""
The program is designed by Zhong Yan and Tao Ran,Liu. The basic function for this program is receiving the entry
record and outputs one of four Strings for each record. Appropriate docstring has been made above each part to prevent
confusion.
"""
# imports one per line
import re
import datetime
import json
# In the game, the country which travelers are attempting to enter is Kanadia, "KAN".
def decide(input_file, watchlist_file, countries_file):
    """
    Decides whether a traveller's entry into Kanadia should be accepted
    :param input_file: The name of a JSON formatted file that contains cases to decide
    :param watchlist_file: The name of a JSON formatted file that contains names and passport numbers on a watchlist
    :param countries_file: The name of a JSON formatted file that contains country data, such as whether
    an entry or transit visa is required, and whether there is currently a medical advisory
    :return: List of strings. Possible values of strings are: "Accept", "Reject", "Secondary", and "Quarantine"
    """
    # Load the three JSON inputs: entry records, watchlist, country table.
    with open(input_file, "r") as file_reader_input:
        json_contents_input_in_list = json.loads(file_reader_input.read())
    with open(watchlist_file, "r") as file_reader_watchlist:
        json_contents_watchlist_in_list = json.loads(file_reader_watchlist.read())
    with open(countries_file, "r") as file_reader_countries:
        json_contents_countries_in_dictionary = json.loads(file_reader_countries.read())

    string_result = []  # one decision string appended per entry record
    for entry_dictionary in json_contents_input_in_list:
        # A valid visa is one that is less than two years old.
        year = datetime.timedelta(days=365)
        two_years = 2 * year
        # Records missing any required field are rejected.
        # NOTE(review): preserved behavior — a single incomplete record makes
        # the whole function return ["Reject"] for the entire batch instead of
        # rejecting just that record; confirm against the assignment spec.
        if not set(["passport", "first_name", "last_name", "birth_date",
                    "home", "from", "entry_reason"]).issubset(entry_dictionary):
            return ["Reject"]
        # Lowercase home-location keys and values so later comparisons are
        # case-insensitive. BUG FIX: dict.iteritems() is Python 2 only and
        # raised AttributeError under this file's python3 shebang; items() is
        # the Python 3 equivalent.
        home_dictionary = dict((k.lower(), v.lower())
                               for k, v in entry_dictionary["home"].items())
        # Visitors from a country requiring a visitor visa must hold a visa
        # issued less than two years ago (e.g. visa "1999-05-19" is expired
        # on "2012-05-19").
        if json_contents_countries_in_dictionary[entry_dictionary["from"]["country"]]["visitor_visa_required"] == "1":
            if "visa" in entry_dictionary:
                if datetime.datetime.now() - datetime.datetime.strptime(entry_dictionary["visa"]["visa_date"], '%Y-%m-%d') >= two_years:
                    string_result.append("Reject")
                    continue
        # Same age check for countries requiring a transit visa.
        if json_contents_countries_in_dictionary[entry_dictionary["from"]["country"]]["transit_visa_required"] == "1":
            if "visa" in entry_dictionary:
                if datetime.datetime.now() - datetime.datetime.strptime(entry_dictionary["visa"]["visa_date"], '%Y-%m-%d') >= two_years:
                    string_result.append("Reject")
                    continue
        # NOTE(review): preserved behavior — a traveller from a visa-requiring
        # country with no "visa" key at all currently falls through without a
        # rejection, and entry_reason is never consulted in the visa checks;
        # both look like spec gaps to confirm.
        # Quarantine travellers coming from or via a country with a medical
        # advisory.
        for countries_dictionary in json_contents_countries_in_dictionary:
            key_code_country = entry_dictionary["from"]["country"]
            if json_contents_countries_in_dictionary[key_code_country]["medical_advisory"] != "":
                string_result.append("Quarantine")
                break
            if "via" in entry_dictionary:
                if json_contents_countries_in_dictionary[entry_dictionary["via"]["country"]]["medical_advisory"] != "":
                    string_result.append("Quarantine")
                    break
        # NOTE(review): preserved behavior — after a "Quarantine" the code
        # below can still append a second decision for the same entry.
        # Case-insensitive comparison against the watchlist.
        entry_dictionary["passport"] = entry_dictionary["passport"].lower()
        entry_dictionary["last_name"] = entry_dictionary["last_name"].lower()
        entry_dictionary["first_name"] = entry_dictionary["first_name"].lower()
        for watchlist_dictionary in json_contents_watchlist_in_list:
            # BUG FIX: iteritems() -> items(), as above.
            watchlist_dictionary = dict((k.lower(), v.lower())
                                        for k, v in watchlist_dictionary.items())
            if entry_dictionary["passport"] == watchlist_dictionary["passport"]:
                string_result.append("Secondary")
                continue
            if (entry_dictionary["last_name"] == watchlist_dictionary["last_name"] and
                    entry_dictionary["first_name"] == watchlist_dictionary["first_name"]):
                string_result.append("Secondary")
        # Returning residents of Kanadia are accepted; anyone else who got
        # this far is rejected.
        if (home_dictionary["country"] == "kan" and
                entry_dictionary["entry_reason"] == "returning"):
            string_result.append("Accept")
        else:
            string_result.append("Reject")
    return string_result
def valid_passport_format(passport_number):
    """
    Checks whether a passport number is five sets of five alpha-numeric
    characters separated by dashes
    :param passport_number: alpha-numeric string
    :return: Boolean; True if the format is valid, False otherwise
    """
    # BUG FIX: the previous pattern (^\w{5}-\w{5}$) only accepted TWO
    # dash-separated groups, contradicting the documented five-group format.
    # Raw string avoids the invalid-escape warning on '\w'.
    passport_format = re.compile(r'^\w{5}(-\w{5}){4}$')
    if passport_format.match(passport_number):
        return True
    else:
        return False
def valid_date_format(date_string):
    """
    Checks whether a date has the format YYYY-mm-dd in numbers
    :param date_string: date to be checked
    :return: Boolean True if the format is valid, False otherwise
    """
    # strptime both parses the format and validates the calendar date
    # (e.g. 2014-02-30 is rejected).
    try:
        datetime.datetime.strptime(date_string, '%Y-%m-%d')
    except ValueError:
        return False
    return True
|
hebe889900/info1340_assignment2
|
papers.py
|
Python
|
mit
| 7,875
|
[
"VisIt"
] |
2845a6aaff5591dc271874e9cf35a32a63da568687c9e628cbf7b174cf721849
|
#! /usr/bin/python
####################################################################
# MaterialsStudio Cell File To Abinit Config file Converter
# zhoubo
# 2006.9.26
####################################################################
import sys
import re
if __name__=="__main__":
    # Python 2 script: parses a MaterialsStudio .cell file given as the single
    # command-line argument and prints an Abinit structure block to stdout.
    # NOTE(review): useage() is not defined anywhere in this script, so the
    # too-many-arguments branch raises NameError instead of printing usage.
    # The check also accepts ZERO arguments; sys.argv[1] below then raises
    # IndexError, which the bare except masks as "Cannot open Cell file !".
    if len(sys.argv) >2:
        useage()
        sys.exit()
    try:
        fp=open(sys.argv[1])
    except:
        print "Cannot open Cell file !"
        sys.exit()
    data=fp.readlines()
    # lattice parameters (largest component of each primitive vector)
    a=b=c=0.0
    # primitive vectors, normalized by a/b/c below
    a_=b_=c_=[]
    # atoms: unique species names; atompos: fractional coordinates per atom
    atoms=[]
    atompos=[]
    ntypat=""
    natom=0
    typat=""
    n=0
    # Scan the .cell file line by line for the LATTICE_CART and
    # POSITIONS_FRAC blocks.
    while n<len(data):
        if data[n].find("%BLOCK LATTICE_CART")>-1:
            # The three lines after the block header hold the three
            # primitive vectors; each is scaled by its largest component.
            # NOTE(review): this assumes all vector components are
            # non-negative — a negative-only component would leave a=0 and
            # divide by zero; confirm input format guarantees this.
            t1=re.split("\s+",data[n+1].strip())
            a_=map(float,t1)
            for x in a_:
                if x > a: a=x
            for x in xrange(len(a_)):
                a_[x]=a_[x]/a
            t1=re.split("\s+",data[n+2].strip())
            b_=map(float,t1)
            for x in b_:
                if x > b: b=x
            for x in xrange(len(b_)):
                b_[x]=b_[x]/b
            t1=re.split("\s+",data[n+3].strip())
            c_=map(float,t1)
            for x in c_:
                if x > c: c=x
            for x in xrange(len(c_)):
                c_[x]=c_[x]/c
            n+=4
        if data[n].find("%ENDBLOCK LATTICE_CART") > -1:
            n+=1
            continue
        if data[n].find("%BLOCK POSITIONS_FRAC") > -1:
            n+=1
            # Each line: species name followed by fractional coordinates.
            # New species extend ntypat; typat records the species index
            # (1-based) of every atom in order.
            while data[n].find( "%ENDBLOCK POSITIONS_FRAC")==-1:
                t1=re.split("\s+",data[n].strip())
                if t1[0] not in atoms:
                    atoms.append(t1[0])
                    ntypat+=str(len(atoms))+" "
                    natom+=1
                    typat+=str(len(atoms))+" "
                    atompos.append(map(float,t1[1:]))
                else:
                    natom+=1
                    typat+=str(atoms.index(t1[0])+1)+" "
                    atompos.append(map(float,t1[1:]))
                n+=1
            if data[n].find("%ENDBLOCK POSITIONS_FRAC") > -1:
                break
        n+=1
    # Prepare the output: Abinit config fragment on stdout.
    print "####################################################"
    print "# Atoms Stucture created by CellToAbinit Converter !"
    print "####################################################"
    print ""
    print "# Definition of the unit cell"
    print "#The length of the primitive vectors "
    print "# 1 Bohr=0.5291772108 Angstroms "
    print "# acell %f %f %f" % (round(a,6),round(b,6),round(c,6)),"Angstroms"
    print "acell %f %f %f" % (round(a/0.5291772108,6),round(b/0.5291772108,6),round(c/0.5291772108,6))
    print "rprim "
    print "    %8f %8f %8f"%(a_[0],a_[1],a_[2])
    print "    %8f %8f %8f"%(b_[0],b_[1],b_[2])
    print "    %8f %8f %8f"%(c_[0],c_[1],c_[2])
    print ""
    print "#Definition of the atom types"
    # NOTE(review): natom counts all atoms, not the number of species —
    # "kind of atoms" probably meant len(atoms); confirm intended output.
    print "# %d kind of atoms" %( natom)
    print "# ",atoms
    print "ntypat "+ntypat
    # znucl (atomic numbers) must be filled in manually by the user.
    print "znucl "+"Needed"
    print ""
    print "#Definition of tha atoms"
    print "natom ",natom
    print "typat ",typat
    print "xred"
    for x in atompos:
        print "    %8s %8s %8s"%(round(x[0],6),round(x[1],6),round(x[2],6))
    print ""
    print "#####   End of CellToAbinit Conveter ! #####"
|
qsnake/abinit
|
util/users/Cell2Abinit.py
|
Python
|
gpl-3.0
| 3,431
|
[
"ABINIT"
] |
2fd7e3482f86c8ee587c365963d7b5e9cc9f720f60cb08f75d5be407d791d3a0
|
#! /usr/bin/env python
# coding:utf-8
# Author: bingwang
# Email: toaya.kase@gmail.com
# Copylight 2012-2012 Bing Wang
# LICENCES: GPL v3.0
__docformat__ = "epytext en"
"""
This program will take two fasta file as input,
use balst compare them and return
exact same pairs back as table.
>>> import compare2fsa
>>> compare2fsa.main(fsafile_1,fsa_file_2)
...
"""
from Bio import SeqIO
import os
# Working directory for all relative input/output paths used below.
# NOTE(review): hard-coded, user-specific path — breaks on any other machine.
os.chdir("/Users/bingwang/zen/yeast_anno_pipe/")
#TODO change dir need to be more elegant
def check_file(file_name):
    '''
    check if a file is fsa format
    >>> check_file(test_1.fsa)
    True
    >>> check_file(test_wrong.fsa)
    False
    '''
    # TODO: unimplemented stub — currently performs no validation and
    # returns None; main() calls it but ignores the (missing) result.
    pass
def db_construct(file_name):
    '''
    make a blast database from the given fasta file via makeblastdb
    (nucleotide database named <file_name>.db)
    >>> db_construct("/test/test_1.fsa")
    >>> os.path.isfile("/tests/test_1.fsa.db")
    True
    '''
    db_name = file_name + ".db"
    # BUG FIX: the command interpolated the undefined names ``fsa`` and
    # ``db`` (NameError at call time); use the actual local variables.
    os.system("makeblastdb -in %s -dbtype nucl -out %s"%(file_name,db_name))
def run_blast(file_name,db_name):
    '''
    run blast program
    >>> run_blast("/test/test_1.fsa","/test/test_2.fsa.db")
    >>> os.path.isfile("/test/test_1.fsa.out")
    True
    '''
    # Results go next to the query file, in tabular (-outfmt 6) form.
    blast_result_file = file_name + ".out"
    command = (
        "blastn -evalue 0.00001 -max_target_seqs 1 -strand plus "
        "-max_hsps_per_subject 1 "
        + "-db %s -query %s -out %s " % (db_name, file_name, blast_result_file)
        + "-outfmt \"6 qseqid sseqid pident length mismatch gapopen qstart "
        "qend sstart send \""
    )
    os.system(command)
def main(file_1, file_2, file_out):
    '''
    Run the full comparison pipeline: validate the two fasta inputs, build a
    BLAST database for each, blast them against one another, and write the
    exactly-matching pairs to *file_out*.

    BUG FIX: main() previously took no parameters although the module
    docstring and the __main__ block both call it with three arguments
    (TypeError); the parameters below match those call sites.

    NOTE(review): prototype code — blast(), rm_db(), read_blast(), pair()
    and write_pairs() are not defined anywhere in this module, so the body
    cannot run as-is (blast() is presumably meant to be run_blast()).
    '''
    check_file(file_1)
    db_construct(file_1)
    db_construct(file_2)
    blast(file_1,file_2)        # TODO: undefined — probably run_blast()
    rm_db(file_1,file_2)        # TODO: undefined helper
    blast_1 = read_blast(file_1)    # TODO: undefined helper
    blast_2 = read_blast(file_2)
    pairs = pair(blast_1,blast_2)   # TODO: undefined helper
    write_pairs(file_out)       # TODO: undefined helper; ignores `pairs`?
if __name__ == "__main__":
    # NOTE(review): multiple problems to confirm with the author —
    # the helper functions (db_construct, run_blast) expect file *names*
    # but open file handles are passed here; file_out is opened for
    # reading although it is the output target; and its path says
    # "test/..." while the inputs use the "tests/" directory.
    file_1 = open("tests/scer.devin.fsa")
    file_2 = open("tests/scer.ygap.fsa")
    file_out = open("test/scer/ygapVSdevin.tab")
    main(file_1,file_2,file_out)
|
BingW/yeast_anno_pipe
|
src/compare2fsa.py
|
Python
|
gpl-3.0
| 1,914
|
[
"BLAST"
] |
bcd6c014ee8cab4e209f732765fb2a84be4e94f90fff8d427b8c9fd3d535dd9f
|
"""
this file does variant calling for DNAseq
"""
#============= import required packages =================
import os
import sys,subprocess
# Python 2 pipeline script: reads a parameter file (sys.argv[1]), then runs
# BWA alignment -> Picard dedup -> GATK indel realignment -> base
# recalibration -> HaplotypeCaller -> joint genotyping -> hard filtering,
# emailing a status Message() and re-raising on every failure.
# Make the project-local Modules package importable and unbuffer stdout so
# progress lines appear immediately in batch logs (Python 2 idiom).
sys.path.append('/home/shangzhong/Codes/Projects')
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffer
from Modules.f00_Message import Message
from Modules.f01_list_trim_fq import list_files,Trimmomatic
from Modules.f02_aligner_command import bwa_vari,bwa_Db
from Modules.f03_samtools import sam2bam_sort
# NOTE(review): sortVCF and divide_scaffold_by_len are imported but never
# used below — confirm before removing.
from Modules.f07_picard import markduplicates,sortVCF
from Modules.f08_GATK import *
from Modules.p01_FileProcess import remove,get_parameters,rg_bams
from Modules.p02_ParseFasta import divide_scaffold_by_len
#============= define some parameters ===================
"""these parameters and read group names are different for
different samples, should only change this part for
running pipeline
"""
#parFile = '/data/shangzhong/DNArepair/GATK_parameters4DNAandRNA.txt'
parFile = sys.argv[1]
param = get_parameters(parFile)
thread = param['thread']
email = param['email']
startMessage = param['startMessage']
endMessage = param['endMessage']
ref_fa = param['refSequence']
file_path = param['filePath']
bwaIndex = param['alignerDb']
trim = param['trim']          # string flag, compared to 'True' below
phred = param['phred']
picard = param['picard']
trimmomatic = param['trimmomatic']
trimmoAdapter = param['trimmoAdapter']
gatk = param['gatk']
read_group = param['readGroup']
organism = param['organism']
##***************** Part 0. Build index file for bwa and GATK ******
##================= Part I. Preprocess ============================
#======== 1. map and dedupping =====================================
Message(startMessage,email)
#======== (0) enter the directory ========================
# Build the BWA index on first use (directory empty), then work in file_path.
bwa_path = bwaIndex[:bwaIndex.rfind('/')]
if not os.path.exists(bwa_path): os.mkdir(bwa_path)
if os.listdir(bwa_path) == []:
    bwa_Db(bwa_path,ref_fa)
os.chdir(file_path)
#======== (1) read files ================================
fastqFiles = list_files(file_path)
if trim == 'True':
    trim_fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter,batch=6)
    remove(fastqFiles)
else:
    trim_fastqFiles = fastqFiles
print 'list file succeed'
print 'fastqFiles is: ',trim_fastqFiles
#======== (2) define group ===============================
#defined above
#======== (3) align using bwa ============================
# NOTE(review): the bare except/print/Message/raise pattern below is the
# file's deliberate error-reporting idiom (email, then crash); it does
# swallow nothing since every handler re-raises.
try:
    map_sam = bwa_vari(read_group,trim_fastqFiles,bwaIndex,thread)
    print 'align succeed'
    print 'map_sam is: ',map_sam
except:
    print 'align failed'
    Message('align failed',email)
    raise
#======== (4) Convert sam to sorted bam ==================
try:
    sort_bams = sam2bam_sort(map_sam,thread)
    print 'sort bam files succeed'
    print 'sort_bams is: ',sort_bams
except:
    print 'sort bam files failed'
    Message('sort bam files failed',email)
    raise
#======== (5) Markduplicates using picard ================
try:
    dedup_files = markduplicates(picard,sort_bams)
    print 'mark duplicates succeed'
    print 'dedup_files is: ',dedup_files
    remove(sort_bams)
except:
    print 'mark duplicates failed'
    Message('mark duplicates failed',email)
    raise
#======== 2. Indel realignment ====================================
#======== (1) Create a target list of intervals===========
try:
    interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread)
    print 'RealignerTarget Creator succeed'
    print 'interval is: ',interval
except:
    print 'RealignerTarget Creator failed'
    Message('RealignerTarget Creator failed',email)
    raise
#======== (2) realignment of target intervals ============
try:
    realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval,9) # (gatk,dedupbams,reference,intervals,batch=1,*gold_indels)
    print 'IndexRealigner succeed'
    print 'realign_bams is: ',realign_bams
    remove(dedup_files)
except:
    print 'IndelRealigner failed'
    Message('IndelRealigner failed',email)
    raise
#======== 3. Base quality recalibration =================
# since we don't have dbsnp for CHO, we need to:
# 1. find snp without recalibration, got vcf file
# 2. extract the snps we think are real snps, into a real_vcf file.
# 3. use the file in 2 to do the recalibration.
##================= Part II. Variant Calling ======================
#======== 1. call raw variant using HaplotypeCaller =====
#======== (1) determine parameters ======================
#======== (2) call variant ==============================
roundNum = 1
try:
    raw_gvcf_files = HaplotypeCaller_DNA_gVCF(gatk,realign_bams,ref_fa,thread,batch=int(thread))
    print 'round 1 call succeed'
    print 'raw_gvcf_files is: ',raw_gvcf_files
except:
    print 'round 1 call failed'
    Message('round 1 call failed',email)
    raise
#======== (3) Joint Genotyping ===========================
try:
    joint_gvcf_file = JointGenotype(gatk,raw_gvcf_files,ref_fa,organism,thread)
    print 'round 1 join vcf succeed'
    print 'joint_gvcf_file is: ',joint_gvcf_file
    remove(raw_gvcf_files)
except:
    print 'round 1 join vcf failed'
    Message('round 1 join vcf failed',email)
    raise
#*********** since we don't have the dbsnp for CHO, we need to repeat
#*********** base reaclibration until it converge.
#======== (4) Variant hard filter =======================
try:
    gold_files = HardFilter(gatk,joint_gvcf_file,ref_fa,thread)
    print 'round 1 gold files succeed'
    print 'gold_files is: ',gold_files
    remove(joint_gvcf_file)
except:
    print 'round 1 gold files failed'
    Message('round 1 gold files failed',email)
    raise
#======== (5) Base Recalibration ========================
# gold_files[0]/[1] are the hard-filtered SNP/indel sets used as the
# known-sites surrogate for the missing CHO dbSNP.
try:
    recal_bam_files = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_files[0],
                                       gold_files[1],roundNum,thread,6)
    print 'round 1 recalibration succeed'
    print 'recal_bam_files is: ',recal_bam_files
    remove(realign_bams)
except:
    print 'round 1 recalibration failed'
    Message('round 1 recalibration failed',email)
    raise
# A second call/filter/recalibration round is kept here commented out —
# re-enable it if recalibration has not converged after round 1.
# #======== second round ====================================
# roundNum = 2
# try:
#     raw_gvcf_files = HaplotypeCaller_DNA_gVCF(gatk,recal_bam_files,ref_fa,thread)
#     print 'round 2 call succeed'
#     print 'raw_gvcf_files is:',raw_gvcf_files
# except:
#     print 'round 2 call failed'
#     Message('round 2 call failed',email)
#     raise
# #------- Joint Genotyping --------
# try:
#     joint_gvcf_file = JointGenotype(gatk,raw_gvcf_files,ref_fa,organism,thread)
#     print 'round 2 join vcf succeed'
#     print 'joint_gvcf_file is: ',joint_gvcf_file
#     remove(raw_gvcf_files)
# except:
#     print 'round 2 join vcf failed'
#     Message('round 2 join vcf failed',email)
#     raise
# #------- Hard filter -------------
# try:
#     gold_files = HardFilter(gatk,joint_gvcf_file,ref_fa,thread)
#     print 'round 2 gold files succeed'
#     print 'gold_files is: ',gold_files
#     remove(joint_gvcf_file)
# except:
#     print 'round 2 gold files failed'
#     Message('round 2 gold files failed',email)
#     raise
# #------- Recalibration -----------
# try:
#     recal_bam_files = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_files[0],
#                                        gold_files[1],roundNum,thread)
#     print 'round 2 recalibration succeed'
#     print 'recal_bam_files is: ',recal_bam_files
#     remove(realign_bams)
# except:
#     print 'round 2 recalibration failed'
#     Message('round 2 recalibration failed',email)
#     raise
#======== !!! merge lanes for the same sample ============
# Multiple recalibrated BAMs => merge lanes, re-dedup, re-realign, then
# re-call; a single BAM skips straight to calling.
if len(recal_bam_files) !=1:
    #========= merge samples =========================
    try:
        merged_bams = rg_bams(read_group,recal_bam_files)
        print 'merged succeed'
        print 'merged_bams is: ',merged_bams
        remove(recal_bam_files)
    except:
        print 'merged failed'
        Message('merged failed',email)
        raise
    #========= mark duplicates ========================
    try:
        dedup_files = markduplicates(picard,merged_bams)
        print 'dedup succeed'
        print 'merged dedup_files is: ',dedup_files
        remove(merged_bams)
    except:
        print 'merged dedup failed'
        Message('merged dedup failed',email)
        raise
    #========= Realignment ============================
    try:
        interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread)
        realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval)
        print 'merged indelrealigner succeed'
        print 'merged realign_bams is: ',realign_bams
        remove(dedup_files)
    except:
        print 'merged realign failed'
        Message('merged realign failed',email)
        raise
    #======== (6) call variant ==============================
    try:
        raw_gvcf_files = HaplotypeCaller_DNA_gVCF(gatk,realign_bams,ref_fa,thread,batch=int(thread))
#         raw_gvcf_files,par_L_files = par_HaplotypeCaller_DNA_gVCF(gatk,realign_bams,ref_fa,L_path)
#         remove(par_L_files)
        print 'merged final call succeed'
        print 'raw_gvcf_files is:',raw_gvcf_files
    except:
        print 'final call failed'
        Message('final call failed',email)
        raise
    #======== (7) Joint Genotyping ===========================
    try:
        joint_gvcf_file = JointGenotype(gatk,raw_gvcf_files,ref_fa,organism,thread)
        print 'final joint succeed'
        print 'joint_gvcf_file is: ',joint_gvcf_file
        remove(raw_gvcf_files)
    except:
        print 'final joint failed'
        Message('final joint failed',email)
        raise
else:
    # for only one file, just run calling with recalibration bam file
    try:
        joint_gvcf_file = HaplotypeCaller_DNA_VCF(gatk,recal_bam_files[0],ref_fa,thread)
        print 'final call succeed'
        print 'raw_gvcf_files is:',joint_gvcf_file
    except:
        print 'final call failed'
        Message('final call failed',email)
        raise
#======== (8) VQSR or Hard filter ======================================
# since for CHO samples we don't have enough samples and snp resources, the VQSR step cannot give a very good prediction.
# we choose to use hardFilter.
try:
    final_filtered_files = HardFilter(gatk,joint_gvcf_file,ref_fa,thread)
    print 'final filter succeed'
    print 'final_filtered_files is: ',final_filtered_files
except:
    print 'final filter failed'
    Message('final filter failed',email)
    raise
# try:
#     recal_variant = VQSR(gatk,joint_gvcf_file,gold_files[0],gold_files[1],ref_fa,thread)
#     print 'vcf recalibration succeed'
#     print 'recal_variant is: ',recal_variant
# except:
#     print 'final vcf recalibration failed'
#     Message('final vcf recalibration failed',email)
#     raise
#======== (9) combine snp and indel ======================================
try:
    combinedVcf = CombineSNPandINDEL(gatk,ref_fa,final_filtered_files,'--assumeIdenticalSamples --genotypemergeoption UNSORTED')
    print 'combine snp and indel succeed'
    print 'combineVcf file is: ',combinedVcf
    remove(final_filtered_files)
except:
    print 'combine snp and indel failed'
    raise
Message(endMessage,email)
##================= Part III. Analyze Variant =====================
|
shl198/Projects
|
VariantCall/01_GATK_DNA_vari_call.py
|
Python
|
mit
| 11,232
|
[
"BWA"
] |
abf14bad5be6bd7b8986d30d70a4e94407b6af721a6c008e44c23045dc4a9a27
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gromacs(CMakePackage):
    """Molecular-dynamics package for simulating proteins, lipids and
    nucleic acids.

    GROMACS (GROningen MAchine for Chemical Simulations) was originally
    developed in the Biophysical Chemistry department of the University of
    Groningen and is now maintained by contributors in universities and
    research centers across the world. It is one of the fastest and most
    popular packages available, runs on CPUs as well as GPUs, and is free,
    open-source software released under the GNU General Public License
    (LGPL starting from version 4.6).
    """

    homepage = 'http://www.gromacs.org'
    url = 'http://ftp.gromacs.org/gromacs/gromacs-5.1.2.tar.gz'

    # Known releases, newest first.
    version('2018', '6467ffb1575b8271548a13abfba6374c')
    version('2016.4', '19c8b5c85f3ec62df79d2249a3c272f8')
    version('2016.3', 'e9e3a41bd123b52fbcc6b32d09f8202b')
    version('5.1.4', 'ba2e34d59b3982603b4935d650c08040')
    version('5.1.2', '614d0be372f1a6f1f36382b7a6fcab98')
    version('develop', git='https://github.com/gromacs/gromacs', branch='master')

    variant('mpi', default=True, description='Activate MPI support')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant(
        'double', default=False,
        description='Produces a double precision version of the executables')
    variant('plumed', default=False, description='Enable PLUMED support')
    variant('cuda', default=False, description='Enable CUDA support')
    variant('build_type', default='RelWithDebInfo',
            description='The build type to build',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel',
                    'Reference', 'RelWithAssert', 'Profile'))

    depends_on('mpi', when='+mpi')
    depends_on('plumed+mpi', when='+plumed+mpi')
    depends_on('plumed~mpi', when='+plumed~mpi')
    depends_on('fftw')
    depends_on('cmake@2.8.8:', type='build')
    depends_on('cmake@3.4.3:', type='build', when='@2018:')
    depends_on('cuda', when='+cuda')

    def patch(self):
        """Let PLUMED patch the GROMACS sources when that variant is on."""
        if '+plumed' not in self.spec:
            return
        self.spec['plumed'].package.apply_patch(self)

    def cmake_args(self):
        """Translate the enabled variants into GROMACS CMake options."""
        spec = self.spec
        args = []
        if '+mpi' in spec:
            args.append('-DGMX_MPI:BOOL=ON')
        if '+double' in spec:
            args.append('-DGMX_DOUBLE:BOOL=ON')
        if '~shared' in spec:
            args.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
        if '+cuda' in spec:
            args.append('-DGMX_GPU:BOOL=ON')
            args.append('-DCUDA_TOOLKIT_ROOT_DIR:STRING=' +
                        spec['cuda'].prefix)
        return args
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/gromacs/package.py
|
Python
|
lgpl-2.1
| 4,055
|
[
"Gromacs"
] |
74b619122a3ae9fe6a328c64eecc7e5b7b90fbd83da2844a39e6c735197543c4
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Demonstrate extraction of polygonal cells with an implicit function.
# Two pipelines are shown side by side: the left result keeps the full
# input point set (PassPointsOn), the right keeps only the points used by
# the extracted cells (PassPointsOff).
# Source geometry: a coarse sphere.
sphereSource = vtk.vtkSphereSource()
sphereSource.SetThetaResolution(8)
sphereSource.SetPhiResolution(16)
sphereSource.SetRadius(1.5)
# Implicit cylinder, rotated so its axis is vertical, used as the cutter.
rotation = vtk.vtkTransform()
rotation.RotateX(90)
cylinder = vtk.vtkCylinder()
cylinder.SetRadius(0.5)
cylinder.SetTransform(rotation)
# Pipeline one: extract cells inside the cylinder, keep every input point.
extractKeepPoints = vtk.vtkExtractPolyDataGeometry()
extractKeepPoints.SetInputConnection(sphereSource.GetOutputPort())
extractKeepPoints.SetImplicitFunction(cylinder)
extractKeepPoints.ExtractBoundaryCellsOn()
extractKeepPoints.PassPointsOn()
mapperKeepPoints = vtk.vtkPolyDataMapper()
mapperKeepPoints.SetInputConnection(extractKeepPoints.GetOutputPort())
mapperKeepPoints.GlobalImmediateModeRenderingOn()
actorKeepPoints = vtk.vtkActor()
actorKeepPoints.SetMapper(mapperKeepPoints)
# Pipeline two: same extraction, but unused points are culled.
extractCullPoints = vtk.vtkExtractPolyDataGeometry()
extractCullPoints.SetInputConnection(sphereSource.GetOutputPort())
extractCullPoints.SetImplicitFunction(cylinder)
extractCullPoints.ExtractBoundaryCellsOn()
extractCullPoints.PassPointsOff()
mapperCullPoints = vtk.vtkPolyDataMapper()
mapperCullPoints.SetInputConnection(extractCullPoints.GetOutputPort())
actorCullPoints = vtk.vtkActor()
actorCullPoints.SetMapper(mapperCullPoints)
actorCullPoints.AddPosition(2.5, 0, 0)
# Mark the surviving points of each result with small sphere glyphs.
glyphSource = vtk.vtkSphereSource()
glyphSource.SetRadius(0.05)
glyphKeepPoints = vtk.vtkGlyph3D()
glyphKeepPoints.SetInputConnection(extractKeepPoints.GetOutputPort())
glyphKeepPoints.SetSourceConnection(glyphSource.GetOutputPort())
glyphKeepPoints.SetScaleModeToDataScalingOff()
glyphMapperKeep = vtk.vtkPolyDataMapper()
glyphMapperKeep.SetInputConnection(glyphKeepPoints.GetOutputPort())
glyphActorKeep = vtk.vtkActor()
glyphActorKeep.SetMapper(glyphMapperKeep)
glyphCullPoints = vtk.vtkGlyph3D()
glyphCullPoints.SetInputConnection(extractCullPoints.GetOutputPort())
glyphCullPoints.SetSourceConnection(glyphSource.GetOutputPort())
glyphCullPoints.SetScaleModeToDataScalingOff()
glyphMapperCull = vtk.vtkPolyDataMapper()
glyphMapperCull.SetInputConnection(glyphCullPoints.GetOutputPort())
glyphActorCull = vtk.vtkActor()
glyphActorCull.SetMapper(glyphMapperCull)
glyphActorCull.AddPosition(2.5, 0, 0)
# Rendering boilerplate: window, renderer, interactor.
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetWindowName("vtk - extractPolyData")
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderer.AddActor(actorKeepPoints)
renderer.AddActor(glyphActorKeep)
renderer.AddActor(actorCullPoints)
renderer.AddActor(glyphActorCull)
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(30)
renderer.SetBackground(0.1,0.2,0.4)
renderWindow.SetSize(300,300)
renderWindow.Render()
# render the image
interactor.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Extraction/Testing/Python/extractPolyData.py
|
Python
|
bsd-3-clause
| 2,894
|
[
"VTK"
] |
327b036540f2bf38000799f5b9276fd412b09774fe986180471d5fe49e3219d0
|
#!/usr/bin/python
# This example shows how to implement deep linking (https://core.telegram.org/bots#deep-linking)
# with the pyTelegramBotAPI.
# Note: This is not a working, production-ready sample.
#
# In this example we are connecting a user account on a website with a Telegram bot.
# Implementing this will enable you to push notifications (and other content) to your users' Telegram account.
# In this explanation the word 'database' can refer to any form of key-value storage.
# The deep linking explained:
#
# 1. Let the user log in on an actual website with actual username-password authentication.
#
# 2. Generate a unique hashcode (we will call it unique_code)
#
# 3. Save unique_code->username to the database.
#
# 4. Show the user the URL https://telegram.me/YOURBOTNAME?start=unique_code
#
# 5. Now as soon as the user opens this URL in Telegram and presses 'Start',
# your bot will receive a text message containing '/start unique_code',
# where unique_code is of course replaced by the actual hashcode.
#
# 6. Let the bot retrieve the username by querying the database for unique_code.
#
# 7. Save chat_id->username to the database.
#
# 8. Now when your bot receives another message, it can query message.chat.id in the database
# to check if the message is from this specific user. (And handle accordingly) or
# you can push messages to the user using his chat id.
#
# Steps 1 to 4 will have to be implemented in a web server, using a language such as PHP, Python, C# or Java. These
# steps are not shown here. Only steps 5 to 7 are illustrated, some in pseudo-code, with this example.
import telebot
# NOTE: replace 'TOKEN' with a real bot token obtained from @BotFather.
bot = telebot.TeleBot('TOKEN')
def extract_unique_code(text):
    """Return the deep-link payload of a '/start <code>' command, or None.

    The message text looks like '/start unique_code'; everything after the
    first whitespace-separated token is the payload (only the first payload
    token is returned, matching Telegram's deep-linking format).
    """
    # Split once instead of twice (the original called text.split() in both
    # the condition and the result expression).
    parts = text.split()
    return parts[1] if len(parts) > 1 else None
def in_storage(unique_code):
    # (pseudo-code) Should check if a unique code exists in storage.
    # Stub: always reports the code as known; replace with a real lookup.
    return True
def get_username_from_storage(unique_code):
    """Look up the website username linked to *unique_code*.

    Pseudo-code placeholder: a real implementation would query the
    database. Returns the username, or None for an unknown code.
    """
    if in_storage(unique_code):
        return "ABC"
    return None
def save_chat_id(chat_id, username):
    # (pseudo-code) Save the chat_id->username to storage.
    # Should be replaced by a real database query; intentionally a no-op here.
    pass
@bot.message_handler(commands=['start'])
def send_welcome(message):
    """Handle /start: link this Telegram chat to a website account.

    The /start command may carry a deep-link payload (the unique code the
    website showed the user); resolve it to a username and remember the
    chat id so the site can push messages later.
    """
    unique_code = extract_unique_code(message.text)
    if not unique_code:
        # Plain '/start' without a deep-link payload.
        reply = "Please visit me via a provided URL from the website."
        bot.reply_to(message, reply)
        return
    username = get_username_from_storage(unique_code)
    if not username:
        # The code is not in our database.
        reply = "I have no clue who you are..."
        bot.reply_to(message, reply)
        return
    save_chat_id(message.chat.id, username)
    reply = "Hello {0}, how are you?".format(username)
    bot.reply_to(message, reply)
bot.polling()
|
sgomez/pyTelegramBotAPI
|
examples/deep_linking.py
|
Python
|
gpl-2.0
| 2,971
|
[
"VisIt"
] |
41af199a480bfa3ced90c3f543cd873e8822bc1f114e3de09fe6c59f37d9cedc
|
#!/usr/bin/env python
###########################################################################
## ##
## Language Technologies Institute ##
## Carnegie Mellon University ##
## Copyright (c) 2012 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## Author: Aasish Pappu (aasish@cs.cmu.edu) ##
## Date : November 2012 ##
###########################################################################
## Description: Example python backend module for olympus applications ##
## ##
## ##
###########################################################################
import os, sys, string, math, random
import exceptions
from copy import copy, deepcopy
import re
from time import sleep
from random import randint
from threading import Thread, Timer
import logging
import os.path as path
import Control #@yipeiw
import Loader
import NLG
# Wire up the Olympus/Galaxy python bindings before importing them below.
os.environ['GC_HOME'] = os.path.join(os.environ['OLYMPUS_ROOT'], 'Libraries', 'Galaxy')
sys.path.append(os.path.join(os.environ['GC_HOME'], 'contrib', 'MITRE', 'templates'))
sys.path.append(os.path.join(os.environ['OLYMPUS_ROOT'], 'bin', 'x86-nt'))
import GC_py_init
import Galaxy, GalaxyIO
import time
import unicodedata
import random
# Module-level dialog-state bookkeeping shared by the dispatch handlers below.
galaxyServer = None
current_dialog_state = None
home_dialog_state = None
current_dialog_state_counter = 0
current_dialog_state_begin = None
global_dialog_state_counter = 0
from random import randrange
# Configured by InitLogging().
logger = None
def InitLogging():
    """Configure the module-wide 'BE' logger to append to BE.log.

    Creates a file handler with a timestamped format and stores the logger
    in the module-level ``logger`` global used by Log().
    """
    global logger
    logger = logging.getLogger('BE')
    file_handler = logging.FileHandler('BE.log')
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(file_handler)
    logger.setLevel(logging.WARNING)
def Log(input):
    """Echo *input* to stdout and record it at ERROR level in the BE logger.

    NOTE(review): the parameter shadows the 'input' builtin; everything is
    logged as an error regardless of severity -- kept as-is for compatibility.
    """
    global logger
    print input
    logger.error(input)
    # Flush so the console output interleaves correctly with the log file.
    sys.stdout.flush()
#@yipeiw
# QA data and language resources, populated by InitResource().
database = {}
resource = {}
# Input file listing the CNN QA data pairs.
listfile='cnn_qa.list'
# NOTE(review): 'rescource_root' is a typo for 'resource_root', but renaming
# would break the references below.
rescource_root = 'resource'
template_list=['template/template_new.txt', 'template/template_end.txt', 'template/template_open.txt', 'template/template_expand.txt']
template_list = [path.join(rescource_root, name) for name in template_list]
topicfile = path.join(rescource_root, 'topic.txt')
#currentime = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
#fileout = open(currentime, 'w')
def InitResource():
    """Load the QA database, language resources, templates and topics into module globals.

    Must run before get_response(): it creates TemplateLib, TopicLib,
    TreeState and Template for the first time.
    """
    global database, resource
    datalist=[line.strip() for line in open(listfile)]
    database = Loader.LoadDataPair(datalist)
    resource = Loader.LoadLanguageResource()
    global TemplateLib, TopicLib, TreeState, Template
    TemplateLib = Loader.LoadTemplate(template_list)
    TopicLib = Loader.LoadTopic(topicfile)
    TreeState, Template = Control.Init()
def Welcome(env, dict):
    """Galaxy dispatch handler for 'welcome'/'reinitialize': echo the request back.

    NOTE(review): 'dict' shadows the builtin; kept for interface compatibility
    with the dispatch registration in GalInterface().
    """
    Log(dict)
    user_id = dict[":user_id"]
    # Log(user_id)
    # if env and user_id not in provider_env:
    # provider_env[user_id] = env
    # Log('Stored the env for user_id %s' %(user_id))
    Log("Welcome to the new Backend Server")
    prog_name = "reinitialize"
    #print Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE,dict)
    print dict
    return Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE,dict)
def SetDialogState(env, dict):
    """Galaxy dispatch handler: track the DM dialog state across turns.

    Parses the ':dialog_state' text frame (lines of 'key = value'),
    updates the module-level state counters, maps the current state to a
    small integer and forwards it via onDialogState().
    NOTE(review): 'aware_state' and 'onDialogState' are not defined in this
    module -- confirm they are provided elsewhere, otherwise this handler
    raises NameError at runtime.
    """
    global current_dialog_state
    global home_dialog_state
    global current_dialog_state_counter
    global current_dialog_state_begin
    global global_dialog_state_counter
    inframe = dict[":dialog_state"]
    # extracting the dialog state and turn number
    # main logic of updating the dialog state, such as sleeping, awake, etc
    lines = inframe.split('\n')
    new_dialog_state = None
    turn_counter = 0
    for l in lines:
        components = l.split(' = ')
        if (len(components)!=2):
            continue
        prefix = components[0]
        suffix = components[1]
        if (prefix == "dialog_state"):
            new_dialog_state = suffix
            # The very first reported state is remembered as the 'home'
            # (idle/sleeping) state.
            if (global_dialog_state_counter == 0):
                home_dialog_state = new_dialog_state
            print "current_dialog_state", current_dialog_state
            print "new_dialog_state", new_dialog_state
            if (current_dialog_state == new_dialog_state):
                # Same state as before: counter = turns spent in it so far.
                current_dialog_state_counter = turn_counter - current_dialog_state_begin
                current_dialog_state = new_dialog_state
                print "cur == new, cur_counter =", current_dialog_state_counter
            else:
                # State changed: restart the per-state counter.
                current_dialog_state = new_dialog_state
                current_dialog_state_counter = 0
                current_dialog_state_begin = turn_counter
                print "cur != new, cur_begin =", current_dialog_state_begin
                print "cur_counter =", current_dialog_state_counter
        elif (prefix == "turn_number"):
            turn_counter = int(suffix)
            print "get turn counter", turn_counter
            if (global_dialog_state_counter == -1 or turn_counter == 0):
                global_dialog_state_counter = 0
                #print "set g_d_s_c to 0"
            else:
                global_dialog_state_counter = turn_counter
                #print "g_d_s_c =", turn_counter
            #print "end of turn counter"
    print "==============================="
    print "DIALOG STATE is", current_dialog_state
    print "CURRENT TURN NUMBER is", current_dialog_state_counter
    # Map the symbolic state to a small integer for the downstream consumer:
    # 4 = aware but cannot see, 1 = sleeping (home state), 2 = puzzled
    # (stuck in the same state), 3 = understanding.
    state_out = -1
    if (current_dialog_state.endswith(aware_state)):
        print "system is aware of the person but can't see"
        state_out = 4
    elif (current_dialog_state == home_dialog_state):
        print "system is sleeping now ... zzz"
        state_out = 1
    elif (current_dialog_state_counter >= 1):
        print "system is puzzled ... "
        state_out = 2
    else:
        print "system can understand you."
        state_out = 3
    count = 1
    onDialogState(state_out)
    print "==============================="
    # end of the main logic
    prog_name = "main"
    outframe = "got dialog state"
    f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":outframe": outframe})
    return f
def ReadRawInFrame(inframe_str):
    """Parse a raw Galaxy frame string into a flat dict.

    Lines of the form 'key value' become dict entries; a value like ':3'
    marks the start of an array block whose elements are collected until a
    closing '}' line. All text is lowercased.
    NOTE(review): "key, value = line.split(' ')" assumes exactly one space
    in the line -- a value containing spaces would raise ValueError here.
    """
    Log("In Read Raw InFrame")
    inframe_str = inframe_str.strip('\n').strip('}').strip('{')
    inframe_dict = {}
    inframe_lines = inframe_str.split('\n')
    list_holder = None
    current_list_key = None
    in_array = False
    Log(inframe_lines)
    Log("######")
    for line in inframe_lines:
        line = line.strip('\n').strip(' ').lower()
        if in_array is False:
            # very likely key value pairs
            if ' ' in line:
                if ':' in line:
                    #beginning of array?
                    Log(line)
                    key, value = line.split(' ')
                    # A value of the form ':<digits>' opens an array block.
                    if re.match('^:\d+$', value) is not None:
                        in_array = True
                        list_holder = []
                        current_list_key = key
                else:
                    Log(line)
                    key, value = line.split(' ')
                    inframe_dict[key] = value
        else:
            # Inside an array block: collect elements until the closing brace.
            if line != '{' and line !='}':
                if ' ' in line:
                    # Multi-word elements are joined with underscores.
                    line = line.replace(' ', '_')
                list_holder.append(line)
            elif line == '}':
                new_list = list_holder
                inframe_dict[current_list_key] = new_list
                list_holder = None
                in_array = False
    return inframe_dict
def SayThanks():
    # Push a fixed 'activity scheduled' confirmation to both DM providers
    # 'A' and 'B' via SendMessageToDM.
    msg = {'[schedule_final]':'Your activity has been scheduled'}
    SendMessageToDM('A', '[schedule]', msg)
    SendMessageToDM('B', '[schedule]', msg)
#@yipeiw
def get_response(user_input):
    """Generate TickTock's reply for *user_input* using the QA database and templates.

    Requires InitResource() to have populated the module globals first.
    Returns a plain (ascii) string.
    """
    global database, resource
    global TemplateLib, TopicLib, TreeState, Template
    relavance, answer = Control.FindCandidate(database, resource, user_input)
    state = Control.SelectState(relavance, TreeState)
    Log('DM STATE is [ %s ]' %(state))
    print 'state:', state['name']
    print "candidate answer ", relavance, answer
    output = NLG.FillTemplate(TemplateLib, TopicLib, Template[state['name']], answer)
    # Normalise possible unicode output down to ascii for the front-end.
    if isinstance(output, str):
        output2 = output
    else:
        output2 = unicodedata.normalize('NFKD',output).encode('ascii','ignore')
    Log('OUTPUT is [ %s ]' %(output2))
    #fileout = open('input_response_history.txt', 'a')
    #fileout.write(str(user_input) + '\n')
    #fileout.write(str(output) + '\n')
    #fileout.close()
    return output2
def LaunchQuery(env, dict):
    """Galaxy dispatch handler: turn a recognised user utterance into a reply frame.

    Reads ':inframe' from the request, extracts 'user_input', produces a
    response (canned greeting, repeat of the previous answer, loop-breaker,
    or a fresh reply from get_response) and returns it wrapped in a Galaxy
    result frame.
    """
    global requestCounter
    # NOTE(review): requestCounter is declared global but never defined or
    # used in this module.
    Log("Launching a query")
    Log(dict.keys())
    propertiesframe = env.GetSessionProperties(dict.keys())
    hub_opaque_data = propertiesframe[':hub_opaque_data']
    provider_id = hub_opaque_data[':provider_id'].strip('[').strip(']')
    try: prog_name = dict[":program"]
    except: prog_name = "main"
    inframe = dict[":inframe"]
    inframe = inframe.replace("\n{c inframe \n}", "")
    Log("Converting inframe to galaxy frame")
    #Log(inframe)
    raw_inframe_str = dict[":inframe"]
    inframe_raw_dict = ReadRawInFrame(raw_inframe_str)
    Log('RAW INFRAME is \n%s' %(str(inframe_raw_dict)))
    user_input = ''
    system_response = random.choice(['pardon me ?','can you say that again ?', 'excuse me?'])
    try:
        user_input = inframe_raw_dict['user_input'].strip('"')
        user_input = user_input.replace('_', ' ')
    except KeyError:
        # No 'user_input' key at all: greet instead of answering.
        system_response = 'I am TickTock, how are you doing'
        pass
    if user_input:
        #system_response = user_input
        #system_response = get_response(user_input)
        filehistory = open('input_response_history.txt', 'r')
        system_tail = tail(filehistory, 4)
        filehistory.close()
        Log('USER INPUT is [ %s ]' %(user_input))
        if user_input == '':
            system_response = 'pardon me'
        elif ((user_input.find('repeat')> 0) or (user_input.find('say that again')>0) or (user_input.find('excuse me')>0)):
            # User asked for a repeat: replay the last stored response.
            filein = open('history.txt', 'r')
            system_response = 'sure ... ' + filein.readline()
            filein.close()
        elif (system_tail[0] == system_tail[2]) and (system_tail[0] == user_input):
            # The conversation is looping: offer the user a way out.
            system_response = 'I am having a good time talking to you.{ {BREAK TIME="2s"/}} Do you want to keep going,' \
                ' if not, you can say goodbye'
        else:
            system_response = get_response(user_input)
        #Log(type(system_response))
        fileout = open('history.txt', 'w')
        fileout.write(str(system_response) + '\n')
        fileout.close()
        # NOTE(review): the loop below breaks on the first draw (cur_index is
        # still -1), so prefix[cur_index] always selects the LAST entry
        # 'oh ... '; prefix[random_index] was presumably intended.
        prefix = ['', 'well ... ', 'uh ... ', '', 'let me see ... ', 'oh ... ']
        cur_index = -1
        while True:
            random_index = randrange(0, len(prefix))
            if random_index != cur_index:
                break
            cur_index = random_index
        system_response = prefix[cur_index] + system_response
    #system_response_2 = unicodedata.normalize('NFKD',system_response).encode('ascii','ignore')
    resultsFrame = '{\n res %s \n}\n}' %(system_response)
    #Log("outframe")
    f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":outframe": resultsFrame})
    #Log(f)
    return f
def tail(f, n, offset=0):
"""Reads a n lines from f with an offset of offset lines."""
avg_line_length = 74
to_read = n + offset
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
# woops. apparently file is smaller than what we want
# to step back, go to the beginning instead
f.seek(0)
pos = f.tell()
lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0:
return lines[-to_read:offset and -offset or None]
avg_line_length *= 1.3
# oas in C is -increment i.
# Extra option-and-string entries consumed by BackEnd.CheckUsage below.
OAS = [("-increment i", "initial increment")]
# Write a wrapper for the usage check.
class BackEnd(GalaxyIO.Server):
    """Galaxy server subclass that consumes the extra '-increment' option."""
    def CheckUsage(self, oas_list, args):
        # Parse the standard options plus our OAS entries, then strip
        # '-increment' (stored in the InitialIncrement global) so the base
        # class never sees it.
        global InitialIncrement
        data, out_args = GalaxyIO.Server.CheckUsage(self, OAS + oas_list, args)
        if data.has_key("-increment"):
            InitialIncrement = data["-increment"][0]
            del data["-increment"]
        return data, out_args
def SendToHub(provider, frame):
    """Wrap *frame* in a 'main' clause and write it to *provider*'s hub environment.

    NOTE(review): provider_env is never defined in this module (the code
    that populated it in Welcome() is commented out) -- calling this would
    raise NameError; confirm where provider_env is meant to live.
    """
    prog_name = "main"
    global provider_env
    env = provider_env[provider]
    if env:
        f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, frame)
        try:
            env.WriteFrame(f)
        except GalaxyIO.DispatchError:
            Log('ERROR: cannot send frame')
def SendMessageToDM(provider, msgtype, msg):
    """Build a synthetic parse/utterance frame from *msg* and push it to the DM.

    *msg* maps slot names to contents; each pair becomes a slot net and a
    '( name ( contents ) )' parse fragment. The resulting frame mimics a
    recogniser output so the DM treats it like a normal user turn.
    """
    prog_name = "main"
    print 'lets say hello to DM async way'
    nets = []
    parse_str = []
    hyp_str = []
    for k, v in msg.iteritems():
        net = Galaxy.Frame("slot", Galaxy.GAL_CLAUSE, {':name':k, ':contents':v})
        nets.append(net)
        parse_str.append('( %s ( %s ) )' %(k, v))
        hyp_str.append(v)
    #Log('Test Printing the nets\n %s' %(Galaxy.OPr(nets)))
    #Log('----------THEEND OF NETS -------')
    gfSlot = {}
    gfParse = {}
    gfSlot[":nets"] = nets
    gfSlot[":numnets"] = len(nets)
    gfSlot[":name"] = msgtype
    gfSlot[":contents"] = ' '.join(hyp_str)
    gfSlot[":frame"] = "Fake Frame"
    gfSlotFrame = Galaxy.Frame("slot", Galaxy.GAL_CLAUSE, gfSlot)
    slots = [gfSlotFrame]
    #Log('Test Printing the slots\n %s' %(Galaxy.OPr(slots)))
    #Log('----------THEEND OF SLOTS-------')
    gfParse[":gal_slotsstring"] = Galaxy.OPr(slots)
    gfParse[":slots"] = slots
    gfParse[":numslots"] = 1
    gfParse[":uttid"] = "-1"
    gfParse[":hyp"] = ' '.join(hyp_str)
    gfParse[":hyp_index"] = 0
    gfParse[":hyp_num_parses"] = 1
    # Dummy recogniser score/confidence fields expected by the DM.
    gfParse[":decoder_score"] = 0.0
    gfParse[":am_score"] = 0.0
    gfParse[":lm_score"] = 0.0
    gfParse[":frame_num"] = 0
    gfParse[":acoustic_gap_norm"] = 0.0
    gfParse[":avg_wordconf"] = 0.0
    gfParse[":min_wordconf"] = 0.0
    gfParse[":max_wordconf"] = 0.0
    gfParse[":avg_validwordconf"] = 0.0
    gfParse[":min_validwordconf"] = 0.0
    gfParse[":max_validwordconf"] = 0.0
    gfParse[":parsestring"] = ' '.join(parse_str)
    Log('Test printing the parse frame')
    gfParseFrame = Galaxy.Frame("utterance", Galaxy.GAL_CLAUSE, gfParse)
    #gfParseFrame.Print()
    parses = [gfParseFrame]
    confhyps = [gfParseFrame]
    f = Galaxy.Frame(prog_name, Galaxy.GAL_CLAUSE, {":confhyps": confhyps,
                                                    ":parses": parses,
                                                    ':total_numparses': 1,
                                                    ':input_source': 'gal_be',
                                                    ':gated_input': 'gated_input'})
    Log("Sending the message to DM")
    #Log(f)
    SendToHub(provider, f)
    Log("Sent to DM")
def GalInterface():
    """Set up logging and resources, register dispatch functions and run the server.

    Blocks in RunServer() until the Galaxy server shuts down.
    """
    InitLogging()
    Log("Starting Galaxy Server")
    global galaxyServer
    #load database and other resources @yipeiw
    InitResource()
    galaxyServer = BackEnd(sys.argv, "gal_be",
                           default_port = 2900)
    # NOTE(review): the signature lists below are presumed to be
    # (in keys, in policy, reply policy, out keys, out policy) per the
    # GalaxyIO API -- confirm against the MITRE bindings.
    galaxyServer.AddDispatchFunction("set_dialog_state", SetDialogState,
                                     [[], Galaxy.GAL_OTHER_KEYS_NEVER,
                                      Galaxy.GAL_REPLY_NONE, [],
                                      Galaxy.GAL_OTHER_KEYS_NEVER])
    galaxyServer.AddDispatchFunction("launch_query", LaunchQuery,
                                     [[], Galaxy.GAL_OTHER_KEYS_NEVER,
                                      Galaxy.GAL_REPLY_NONE, [],
                                      Galaxy.GAL_OTHER_KEYS_NEVER])
    galaxyServer.AddDispatchFunction("reinitialize", Welcome,
                                     [[], Galaxy.GAL_OTHER_KEYS_NEVER,
                                      Galaxy.GAL_REPLY_NONE, [],
                                      Galaxy.GAL_OTHER_KEYS_NEVER])
    galaxyServer.AddDispatchFunction("welcome", Welcome,
                                     [[], Galaxy.GAL_OTHER_KEYS_NEVER,
                                      Galaxy.GAL_REPLY_NONE, [],
                                      Galaxy.GAL_OTHER_KEYS_NEVER])
    galaxyServer.RunServer()
def MonitorThread():
    # Stub: placeholder for a monitoring loop; currently does nothing.
    current_focus = {}
def LaunchQueryDebug(user_input):
    # this guy is only used in debugging
    # Mirrors the response-selection logic of LaunchQuery() but prints the
    # reply instead of wrapping it in a Galaxy frame.
    #system_response = user_input
    #system_response = get_response(user_input)
    filehistory = open('input_response_history.txt', 'r')
    system_tail = tail(filehistory, 4)
    filehistory.close()
    Log('USER INPUT is [ %s ]' %(user_input))
    if user_input == '':
        system_response = random.choice(['pardon me ?','can you say that again ?', 'excuse me?'])
    elif ((user_input.find('repeat')> 0) or (user_input.find('say that again')>0) or (user_input.find('excuse me')>0)):
        # Replay the last stored response.
        filein = open('history.txt', 'r')
        system_response = 'sure ... ' + filein.readline()
        filein.close()
    elif (system_tail[0] == system_tail[2]) and (system_tail[0] == user_input):
        system_response = 'I am having a good time, do you want to keep going,...' \
            ' if not, you can say goodbye'
    else:
        system_response = get_response(user_input)
    fileout = open('history.txt', 'w')
    fileout.write(str(system_response) + '\n')
    fileout.close()
    # NOTE(review): same as in LaunchQuery -- the loop breaks on the first
    # draw while cur_index is still -1, so prefix[cur_index] always picks
    # 'oh ... '; prefix[random_index] was presumably intended.
    prefix = ['', 'well ... ', 'uh ... ', '', 'let me see ... ', 'oh ... ']
    cur_index = -1
    while True:
        random_index = randrange(0, len(prefix))
        if random_index != cur_index:
            break
        cur_index = random_index
    system_response = prefix[cur_index] + system_response
    print(system_response)
if __name__ == "__main__":
    # Run the Galaxy interface on a worker thread and wait for it to finish.
    gt = Thread(target=GalInterface)
    gt.start()
    gt.join()
|
leahrnh/ticktock_text_api
|
galbackend_AHC.py
|
Python
|
gpl-2.0
| 20,555
|
[
"Galaxy"
] |
0be422fecdce3d6a0a4cc8fb517a406f3bea0572e3e371d91aad439b4387343d
|
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def replicate (bonds, angles, x, y, z, Lx, Ly, Lz, xdim=1, ydim=1, zdim=1):
    """
    Replicates configuration in each dimension.
    This may be used to increase the size of an equilibrated melt by a factor of 8 or more.
    Presently this routine works only for semiflexible polymers. A general
    class should be written to deal with files containing coordinates
    and topology data.
    xdim = ydim = zdim = 1 returns the original system not replicated.
    xdim = ydim = zdim = 2 returns the original system replicated to 8x.
    xdim = ydim = zdim = 3 returns the original system replicated to 27x.
    xdim = ydim = 1, zdim = 2 returns the original system replicated in the z-direction.
    """
    n_orig = len(x)
    # Start from copies of the originals; image (0, 0, 0) is the input itself.
    x_out = list(x)
    y_out = list(y)
    z_out = list(z)
    bonds_out = list(bonds)
    angles_out = list(angles)
    image = 0
    for i in range(xdim):
        for j in range(ydim):
            for k in range(zdim):
                if i == 0 and j == 0 and k == 0:
                    continue
                # Shift every particle into this periodic image ...
                for px, py, pz in zip(x, y, z):
                    x_out.append(px + i * Lx)
                    y_out.append(py + j * Ly)
                    z_out.append(pz + k * Lz)
                # ... and renumber the topology by a whole-system offset.
                image += 1
                shift = image * n_orig
                for p1, p2 in bonds:
                    bonds_out.append((p1 + shift, p2 + shift))
                for p1, p2, p3 in angles:
                    angles_out.append((p1 + shift,
                                       p2 + shift,
                                       p3 + shift))
    # The box grows by the replication factor in each dimension.
    return bonds_out, angles_out, x_out, y_out, z_out, xdim * Lx, ydim * Ly, zdim * Lz
|
espressopp/espressopp
|
src/tools/replicate.py
|
Python
|
gpl-3.0
| 3,037
|
[
"ESPResSo"
] |
f759b0f98c1098e4ae3247dbae180f7296c8bccc98f10e340bfb2032476f6827
|
"""@camvtk docstring
This module provides helper classes for testing and debugging OCL
This module is part of OpenCAMLib (ocl), a toolpath-generation library.
Copyright 2010-2011 Anders Wallin (anders.e.e.wallin "at" gmail.com)
Published under the GNU General Public License, see http://www.gnu.org/licenses/
"""
#import vtk
#import time
#import datetime
import ocl
import math
def CLPointGridZigZag(minx,dx,maxx,miny,dy,maxy,z):
    """ generate and return a zigzag grid of points

    The x-order is reversed after each row (boustrophedon path), so a
    cutter following the returned list travels back and forth instead of
    rewinding to the row start. Coordinates are rounded to 2 decimals.
    """
    plist = []
    xvalues = [round(minx+n*dx,2) for n in xrange(int(round((maxx-minx)/dx))+1) ]
    yvalues = [round(miny+n*dy,2) for n in xrange(int(round((maxy-miny)/dy))+1) ]
    #yrow = 0
    #x=minx
    #dir = 0
    # NOTE: xlist aliases xvalues, so the in-place reverse() below flips both.
    xlist = xvalues
    for y in yvalues:
        #xlist = xvalues
        #if dir == 1:
        #    xlist.reverse()
        #    dir = 0
        #else:
        #    dir = 1
        for x in xlist:
            plist.append( ocl.CLPoint(x,y,z) )
        # Reverse the traversal direction for the next row.
        xlist.reverse()
        #yrow=yrow+1
    return plist
def CLPointGrid(minx,dx,maxx,miny,dy,maxy,z):
    """ generate and return a rectangular grid of points

    Rows are generated in increasing y, each row in increasing x;
    coordinates are rounded to 2 decimals.
    """
    plist = []
    xvalues = [round(minx+n*dx,2) for n in xrange(int(round((maxx-minx)/dx))+1) ]
    yvalues = [round(miny+n*dy,2) for n in xrange(int(round((maxy-miny)/dy))+1) ]
    for y in yvalues:
        for x in xvalues:
            plist.append( ocl.CLPoint(x,y,z) )
    return plist
def octree2trilist(t):
    """ return a list of triangles correspoinding to the input octree

    Each octree node is turned into 12 triangles (two per box face) built
    from its 8 corner points. The sign comments below give the corner
    convention used by node.corner().
    """
    nodes = t.get_nodes()
    tlist = []
    for n in nodes:
        p1 = n.corner(0) # + + +
        p2 = n.corner(1) # - + +
        p3 = n.corner(2) # + - +
        p4 = n.corner(3) # + + -
        p5 = n.corner(4) # + - -
        p6 = n.corner(5) # - + -
        p7 = n.corner(6) # - - +
        p8 = n.corner(7) # - - -
        tlist.append(ocl.Triangle(p1,p2,p3)) #top
        tlist.append(ocl.Triangle(p2,p3,p7)) #top
        tlist.append(ocl.Triangle(p4,p5,p6)) # bot
        tlist.append(ocl.Triangle(p5,p6,p8)) # bot
        tlist.append(ocl.Triangle(p1,p3,p4)) # 1,3,4,5
        tlist.append(ocl.Triangle(p4,p5,p3))
        tlist.append(ocl.Triangle(p2,p6,p7)) # 2,6,7,8
        tlist.append(ocl.Triangle(p7,p8,p6))
        tlist.append(ocl.Triangle(p3,p5,p7)) # 3,5,7,8
        tlist.append(ocl.Triangle(p7,p8,p5))
        tlist.append(ocl.Triangle(p1,p2,p4)) # 1,2,4,6
        tlist.append(ocl.Triangle(p4,p6,p2))
    return tlist
|
play113/swer
|
opencamlib-read-only/lib/pyocl.py
|
Python
|
mit
| 2,483
|
[
"VTK"
] |
402f9621398d2999f87d49576f2c18a7af35eb7f8e35c47376e039d7c5876344
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import warnings
from .groups import (Atom, AtomGroup, Residue, ResidueGroup, Segment,
SegmentGroup)
from . import universe
def deprecate_class(class_new, message):
    """Return a subclass of *class_new* whose instantiation warns.

    The returned class behaves exactly like *class_new*, but constructing
    an instance additionally emits *message* as a DeprecationWarning.
    """
    class new_class(class_new):
        def __init__(self, *args, **kwargs):
            # Initialise exactly as the wrapped class would ...
            super(new_class, self).__init__(*args, **kwargs)
            # ... then flag the deprecated access path.
            warnings.warn(message, DeprecationWarning)
    return new_class
# Deprecated aliases: instantiating any of these emits a DeprecationWarning
# and otherwise behaves like the class at its new location. The assignments
# deliberately shadow the names imported at the top of this module.
Universe = deprecate_class(
    universe.Universe,
    "MDAnalysis.core.AtomGroup.Universe has been removed."
    "Please use MDAnalysis.Universe."
    "This stub will be removed in 1.0")
_group_message = ("MDAnalysis.core.AtomGroup.{0} has been removed."
                  "Please use MDAnalysis.groups.{0}"
                  "This stub will be removed in 1.0")
Atom = deprecate_class(Atom, message=_group_message.format('Atom'))
AtomGroup = deprecate_class(
    AtomGroup, message=_group_message.format('AtomGroup'))
Residue = deprecate_class(Residue, message=_group_message.format('Residue'))
ResidueGroup = deprecate_class(
    ResidueGroup, message=_group_message.format('ResidueGroup'))
Segment = deprecate_class(Segment, message=_group_message.format('Segment'))
SegmentGroup = deprecate_class(
    SegmentGroup, message=_group_message.format('SegmentGroup'))
# Public API of this compatibility stub module.
__all__ = [
    'Universe', 'Atom', 'AtomGroup', 'Residue', 'ResidueGroup', 'Segment',
    'SegmentGroup'
]
|
alejob/mdanalysis
|
package/MDAnalysis/core/AtomGroup.py
|
Python
|
gpl-2.0
| 2,443
|
[
"MDAnalysis"
] |
90600d2b17b3cdef8b3088ea2b6e2b5c9d8864a0a185b077a251fbe7d5978a43
|
"""
.. versionadded:: v6r20
FTS3Agent implementation.
It is in charge of submitting and monitoring all the transfers. It can be duplicated.
::
FTS3Agent
{
PollingTime = 120
MaxThreads = 10
# How many Operation we will treat in one loop
OperationBulkSize = 20
# How many Job we will monitor in one loop
JobBulkSize = 20
# Max number of files to go in a single job
MaxFilesPerJob = 100
# Max number of attempt per file
MaxAttemptsPerFile = 256
# days before removing jobs
DeleteGraceDays = 180
# Max number of deletes per cycle
DeleteLimitPerCycle = 100
# hours before kicking jobs with old assignment tag
KickAssignedHours = 1
# Max number of kicks per cycle
KickLimitPerCycle = 100
}
"""
__RCSID__ = "$Id$"
import time
# from threading import current_thread
from multiprocessing.pool import ThreadPool
# We use the dummy module because we use the ThreadPool
from multiprocessing.dummy import current_process
from socket import gethostname
from DIRAC import S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.DictCache import DictCache
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3ServerDict
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations as opHelper
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getDNForUsername
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.DataManagementSystem.private import FTS3Utilities
from DIRAC.DataManagementSystem.DB.FTS3DB import FTS3DB
from DIRAC.DataManagementSystem.Client.FTS3Job import FTS3Job
# pylint: disable=attribute-defined-outside-init
AGENT_NAME = "DataManagement/FTS3Agent"
class FTS3Agent(AgentModule):
    """
    This Agent is responsible of interacting with the FTS3 services.
    Several of them can run in parallel.
    It first treats the Operations, by creating new FTS jobs and performing
    callback.
    Then, it monitors the current jobs.

    CAUTION: This agent and the FTSAgent cannot run together.
    """

    def __readConf(self):
        """ read configurations

        Fetches the FTS3 server list and all agent options from the CS and
        stores them as attributes.

        :returns: S_OK() or the S_ERROR from getFTS3ServerDict
        """
        # Getting all the possible servers
        res = getFTS3ServerDict()
        if not res['OK']:
            gLogger.error(res['Message'])
            return res

        srvDict = res['Value']
        serverPolicyType = opHelper().getValue('DataManagement/FTSPlacement/FTS3/ServerPolicy', 'Random')
        # Policy object deciding which FTS3 server each new job goes to
        self._serverPolicy = FTS3Utilities.FTS3ServerPolicy(srvDict, serverPolicy=serverPolicyType)

        self.maxNumberOfThreads = self.am_getOption("MaxThreads", 10)
        # Number of Operation we treat in one loop
        self.operationBulkSize = self.am_getOption("OperationBulkSize", 20)
        # Number of Jobs we treat in one loop
        self.jobBulkSize = self.am_getOption("JobBulkSize", 20)
        self.maxFilesPerJob = self.am_getOption("MaxFilesPerJob", 100)
        self.maxAttemptsPerFile = self.am_getOption("MaxAttemptsPerFile", 256)
        # limits used by the kick/delete maintenance passes in execute()
        self.kickDelay = self.am_getOption("KickAssignedHours", 1)
        self.maxKick = self.am_getOption("KickLimitPerCycle", 100)
        self.deleteDelay = self.am_getOption("DeleteGraceDays", 180)
        self.maxDelete = self.am_getOption("DeleteLimitPerCycle", 100)

        return S_OK()

    def initialize(self):
        """ agent's initialization

        Creates the DB handle, the per-thread context cache and the two
        thread pools (job monitoring and operation treatment).
        """
        self.fts3db = FTS3DB()
        # {threadID: DictCache of fts3 contexts} -- see getFTS3Context
        self._globalContextCache = {}
        # name that will be used in DB for assignment tag
        self.assignmentTag = gethostname().split('.')[0]
        res = self.__readConf()
        self.jobsThreadPool = ThreadPool(self.maxNumberOfThreads)
        self.opsThreadPool = ThreadPool(self.maxNumberOfThreads)
        return res

    def beginExecution(self):
        """ reload configurations before start of a cycle """
        return self.__readConf()

    def getFTS3Context(self, username, group, ftsServer, threadID):
        """ Returns an fts3 context for a given user, group and fts server

        The context pool is per thread, and there is one context
        per tuple (user, group, server).
        We dump the proxy of a user to a file (shared by all the threads),
        and use it to make the context.
        The proxy needs a lifetime of at least 2h, is cached for 1.5h, and
        the lifetime of the context is 45mn

        :param username: name of the user
        :param group: group of the user
        :param ftsServer: address of the server
        :param threadID: key of the per-thread context cache

        :returns: S_OK with the context object
        """
        log = gLogger.getSubLogger("getFTS3Context", child=True)

        contextes = self._globalContextCache.setdefault(threadID, DictCache())

        idTuple = (username, group, ftsServer)
        log.debug("Getting context for %s" % (idTuple, ))

        # 2700s = 45mn: only reuse a cached context still within its lifetime
        if not contextes.exists(idTuple, 2700):
            res = getDNForUsername(username)
            if not res['OK']:
                return res
            # We take the first DN returned
            userDN = res['Value'][0]
            log.debug("UserDN %s" % userDN)

            # We dump the proxy to a file.
            # It has to have a lifetime of at least 2 hours
            # and we cache it for 1.5 hours
            res = gProxyManager.downloadVOMSProxyToFile(
                userDN, group, requiredTimeLeft=7200, cacheTime=5400)
            if not res['OK']:
                return res
            proxyFile = res['Value']
            log.debug("Proxy file %s" % proxyFile)

            # We generate the context
            res = FTS3Job.generateContext(ftsServer, proxyFile)
            if not res['OK']:
                return res
            context = res['Value']

            # we add it to the cache for this thread for 1h
            contextes.add(idTuple, 3600, context)

        return S_OK(contextes.get(idTuple))

    def _monitorJob(self, ftsJob):
        """ Monitor a single FTS job:

        * query the FTS servers
        * update the FTSFile status
        * update the FTSJob status

        :param ftsJob: the FTS3Job to monitor
        :returns: tuple (ftsJob, S_OK()/S_ERROR()) -- consumed by _monitorJobCallback
        """
        # General try catch to avoid that the tread dies
        try:
            threadID = current_process().name
            log = gLogger.getSubLogger("_monitorJob/%s" % ftsJob.jobID, child=True)

            res = self.getFTS3Context(
                ftsJob.username, ftsJob.userGroup, ftsJob.ftsServer, threadID=threadID)
            if not res['OK']:
                log.error("Error getting context", res)
                return ftsJob, res

            context = res['Value']

            res = ftsJob.monitor(context=context)
            if not res['OK']:
                log.error("Error monitoring job", res)
                return ftsJob, res

            # { fileID : { Status, Error } }
            filesStatus = res['Value']

            res = self.fts3db.updateFileStatus(filesStatus)
            if not res['OK']:
                log.error("Error updating file fts status", "%s, %s" % (ftsJob.ftsGUID, res))
                return ftsJob, res

            upDict = {
                ftsJob.jobID: {
                    'status': ftsJob.status,
                    'error': ftsJob.error,
                    'completeness': ftsJob.completeness,
                    'operationID': ftsJob.operationID,
                    'lastMonitor': True,
                }
            }
            res = self.fts3db.updateJobStatus(upDict)

            # Jobs that reached a final state are accounted for once here
            if ftsJob.status in ftsJob.FINAL_STATES:
                self.__sendAccounting(ftsJob)

            return ftsJob, res
        except Exception as e:
            return ftsJob, S_ERROR(0, "Exception %s" % repr(e))

    @staticmethod
    def _monitorJobCallback(returnedValue):
        """ Callback when a job has been monitored

        :param returnedValue: value returned by the _monitorJob method
                              (ftsJob, standard dirac return struct)
        """
        ftsJob, res = returnedValue
        log = gLogger.getSubLogger("_monitorJobCallback/%s" % ftsJob.jobID, child=True)
        if not res['OK']:
            log.error("Error updating job status", res)
        else:
            log.debug("Successfully updated job status")

    def monitorJobsLoop(self):
        """
        * fetch the active FTSJobs from the DB
        * spawn a thread to monitor each of them
        """
        log = gLogger.getSubLogger("monitorJobs", child=True)
        log.debug("Size of the context cache %s" % len(self._globalContextCache))

        log.debug("Getting active jobs")
        # get jobs from DB
        res = self.fts3db.getActiveJobs(limit=self.jobBulkSize, jobAssignmentTag=self.assignmentTag)
        if not res['OK']:
            log.error("Could not retrieve ftsJobs from the DB", res)
            return res

        activeJobs = res['Value']
        log.info("%s jobs to queue for monitoring" % len(activeJobs))

        # We store here the AsyncResult object on which we are going to wait
        applyAsyncResults = []

        # Starting the monitoring threads
        for ftsJob in activeJobs:
            log.debug("Queuing executing of ftsJob %s" % ftsJob.jobID)
            # queue the execution of self._monitorJob( ftsJob ) in the thread pool
            # The returned value is passed to _monitorJobCallback
            applyAsyncResults.append(self.jobsThreadPool.apply_async(
                self._monitorJob, (ftsJob, ), callback=self._monitorJobCallback))

        log.debug("All execution queued")

        # Waiting for all the monitoring to finish
        while not all([r.ready() for r in applyAsyncResults]):
            log.debug("Not all the tasks are finished")
            time.sleep(0.5)

        log.debug("All the tasks have completed")
        return S_OK()

    @staticmethod
    def _treatOperationCallback(returnedValue):
        """ Callback when an operation has been treated

        :param returnedValue: value returned by the _treatOperation method
                              (ftsOperation, standard dirac return struct)
        """
        operation, res = returnedValue
        log = gLogger.getSubLogger("_treatOperationCallback/%s" % operation.operationID, child=True)
        if not res['OK']:
            log.error("Error treating operation", res)
        else:
            log.debug("Successfully treated operation")

    def _treatOperation(self, operation):
        """ Treat one operation:

        * does the callback if the operation is finished
        * generate new jobs and submits them

        :param operation: the operation to treat
        :returns: tuple (operation, S_OK()/S_ERROR()) -- consumed by
                  _treatOperationCallback
        """
        try:
            threadID = current_process().name
            log = gLogger.getSubLogger("treatOperation/%s" % operation.operationID, child=True)

            # If the operation is totally processed
            # we perform the callback
            if operation.isTotallyProcessed():
                log.debug("FTS3Operation %s is totally processed" % operation.operationID)
                res = operation.callback()

                if not res['OK']:
                    log.error("Error performing the callback", res)
                    log.info("Putting back the operation")
                    dbRes = self.fts3db.persistOperation(operation)
                    if not dbRes['OK']:
                        log.error("Could not persist operation", dbRes)
                    return operation, res

            else:
                log.debug("FTS3Operation %s is not totally processed yet" % operation.operationID)

                res = operation.prepareNewJobs(
                    maxFilesPerJob=self.maxFilesPerJob, maxAttemptsPerFile=self.maxAttemptsPerFile)
                if not res['OK']:
                    log.error("Cannot prepare new Jobs", "FTS3Operation %s : %s" %
                              (operation.operationID, res))
                    return operation, res

                newJobs = res['Value']
                log.debug("FTS3Operation %s: %s new jobs to be submitted" %
                          (operation.operationID, len(newJobs)))

                for ftsJob in newJobs:
                    res = self._serverPolicy.chooseFTS3Server()
                    if not res['OK']:
                        log.error(res)
                        continue

                    ftsServer = res['Value']
                    log.debug("Use %s server" % ftsServer)
                    ftsJob.ftsServer = ftsServer

                    res = self.getFTS3Context(
                        ftsJob.username, ftsJob.userGroup, ftsServer, threadID=threadID)
                    if not res['OK']:
                        log.error("Could not get context", res)
                        continue

                    context = res['Value']
                    res = ftsJob.submit(context=context)
                    if not res['OK']:
                        log.error("Could not submit FTS3Job", "FTS3Operation %s : %s" %
                                  (operation.operationID, res))
                        continue

                    # Only successfully submitted jobs are attached to the operation
                    operation.ftsJobs.append(ftsJob)

                    submittedFileIds = res['Value']
                    log.info("FTS3Operation %s: Submitted job for %s transfers" %
                             (operation.operationID, len(submittedFileIds)))

            # new jobs are put in the DB at the same time
            res = self.fts3db.persistOperation(operation)
            if not res['OK']:
                log.error("Could not persist operation", res)

            return operation, res
        except Exception as e:
            log.exception('Exception in the thread', repr(e))
            return operation, S_ERROR("Exception %s" % repr(e))

    def treatOperationsLoop(self):
        """ * Fetch all the FTSOperations which are not finished
            * Spawn a thread to treat each operation
        """
        log = gLogger.getSubLogger("treatOperations", child=True)
        log.debug("Size of the context cache %s" % len(self._globalContextCache))

        log.info("Getting non finished operations")
        res = self.fts3db.getNonFinishedOperations(
            limit=self.operationBulkSize, operationAssignmentTag=self.assignmentTag)
        if not res['OK']:
            log.error("Could not get incomplete operations", res)
            return res

        incompleteOperations = res['Value']
        log.info("Treating %s incomplete operations" % len(incompleteOperations))

        applyAsyncResults = []

        for operation in incompleteOperations:
            log.debug("Queuing executing of operation %s" % operation.operationID)
            # queue the execution of self._treatOperation( operation ) in the thread pool
            # The returned value is passed to _treatOperationCallback
            applyAsyncResults.append(self.opsThreadPool.apply_async(
                self._treatOperation, (operation, ), callback=self._treatOperationCallback))

        log.debug("All execution queued")

        # Waiting for all the treatments to finish
        while not all([r.ready() for r in applyAsyncResults]):
            log.debug("Not all the tasks are finished")
            time.sleep(0.5)

        log.debug("All the tasks have completed")
        return S_OK()

    def kickOperations(self):
        """ kick stuck operations

        Removes the assignment tag from operations assigned for longer
        than self.kickDelay hours (at most self.maxKick per cycle).
        """
        log = gLogger.getSubLogger("kickOperations", child=True)

        res = self.fts3db.kickStuckOperations(limit=self.maxKick, kickDelay=self.kickDelay)
        if not res['OK']:
            return res

        kickedOperations = res['Value']
        log.info("Kicked %s stuck operations" % kickedOperations)

        return S_OK()

    def kickJobs(self):
        """ kick stuck jobs

        Same as kickOperations, but for jobs.
        """
        log = gLogger.getSubLogger("kickJobs", child=True)

        res = self.fts3db.kickStuckJobs(limit=self.maxKick, kickDelay=self.kickDelay)
        if not res['OK']:
            return res

        kickedJobs = res['Value']
        log.info("Kicked %s stuck jobs" % kickedJobs)

        return S_OK()

    def deleteOperations(self):
        """ delete final operations

        Removes operations in a final state older than self.deleteDelay
        days (at most self.maxDelete per cycle).
        """
        log = gLogger.getSubLogger("deleteOperations", child=True)

        res = self.fts3db.deleteFinalOperations(limit=self.maxDelete, deleteDelay=self.deleteDelay)
        if not res['OK']:
            return res

        deletedOperations = res['Value']
        log.info("Deleted %s final operations" % deletedOperations)

        return S_OK()

    def finalize(self):
        """ finalize processing """
        # Joining all the ThreadPools
        log = gLogger.getSubLogger("Finalize")

        log.debug("Closing jobsThreadPool")
        self.jobsThreadPool.close()
        self.jobsThreadPool.join()
        log.debug("jobsThreadPool joined")

        log.debug("Closing opsThreadPool")
        self.opsThreadPool.close()
        self.opsThreadPool.join()
        log.debug("opsThreadPool joined")

        return S_OK()

    def execute(self):
        """ one cycle execution

        Order matters: monitor existing jobs first, then treat
        operations (which may submit new jobs), then the maintenance
        passes (kick and delete).
        """
        log = gLogger.getSubLogger("execute", child=True)

        log.info("Monitoring job")
        res = self.monitorJobsLoop()
        if not res['OK']:
            log.error("Error monitoring jobs", res)
            return res

        log.info("Treating operations")
        res = self.treatOperationsLoop()
        if not res['OK']:
            log.error("Error treating operations", res)
            return res

        log.info("Kicking stuck jobs")
        res = self.kickJobs()
        if not res['OK']:
            log.error("Error kicking jobs", res)
            return res

        log.info("Kicking stuck operations")
        res = self.kickOperations()
        if not res['OK']:
            log.error("Error kicking operations", res)
            return res

        log.info("Deleting final operations")
        res = self.deleteOperations()
        if not res['OK']:
            log.error("Error deleting operations", res)
            return res

        return S_OK()

    @staticmethod
    def __sendAccounting(ftsJob):
        """ prepare and send DataOperation to AccountingDB

        :param ftsJob: the FTS3Job from which we send the accounting info
        """
        dataOp = DataOperation()
        dataOp.setStartTime(fromString(ftsJob.submitTime))
        dataOp.setEndTime(fromString(ftsJob.lastUpdate))
        dataOp.setValuesFromDict(ftsJob.accountingDict)
        # delayedCommit batches the accounting record instead of committing now
        dataOp.delayedCommit()
|
arrabito/DIRAC
|
DataManagementSystem/Agent/FTS3Agent.py
|
Python
|
gpl-3.0
| 16,959
|
[
"DIRAC"
] |
3cf9e856c1996b41b0a283a175b71c9f4ad716c5c0869b066e7be9aa6a925d99
|
import sqlite3
from owade.constants import *
class GetChromeHistory:
def getChromeHistoryData(self, myPath):
"""
From https://github.com/OsandaMalith/ChromeFreak/blob/master/ChromeFreak.py CC license
"""
historyValues = {}
try:
sqlitePath = myPath + "/chrome/" + chromeHistoryFile
connexion = sqlite3.connect(sqlitePath)
c = connexion.cursor()
c.execute("SELECT urls.url, urls.title, urls.visit_count,urls.typed_count, \
datetime((urls.last_visit_time/1000000)-11644473600,'unixepoch', 'localtime'),\
datetime((visits.visit_time/1000000)-11644473600,'unixepoch', 'localtime'), \
CASE (visits.transition & 255)\
WHEN 0 THEN 'User clicked a link'\
WHEN 1 THEN 'User typed the URL in the URL bar'\
WHEN 2 THEN 'Got through a suggestion in the UI'\
WHEN 3 THEN 'Content automatically loaded in a non-toplevel frame - user may not realize'\
WHEN 4 THEN 'Subframe explicitly requested by the user'\
WHEN 5 THEN 'User typed in the URL bar and selected an entry from the list - such as a search bar'\
WHEN 6 THEN 'The start page of the browser'\
WHEN 7 THEN 'A form the user has submitted values to'\
WHEN 8 THEN 'The user reloaded the page, eg by hitting the reload button or restored a session'\
WHEN 9 THEN 'URL what was generated from a replacable keyword other than the default search provider'\
WHEN 10 THEN 'Corresponds to a visit generated from a KEYWORD'\
END AS Description\
FROM urls, visits WHERE urls.id = visits.url")
for row in c:
try:
historyValues['URL %s' % row[0]] = {'title':row[1].encode("utf-8"), 'visitNumber':str(row[2]),
'lastVisit':str(row[4]), 'firstVisit':str(row[5])}
except Exception, e:
print e
continue
return historyValues
except sqlite3.OperationalError, e:
e = str(e)
if e == 'database is locked':
print '[!] Make sure Google Chrome is not running in the background'
elif e == 'no such table: downloads':
print '[!] Something wrong with the database name'
elif e == 'unable to open database file':
print '[!] Something wrong with the database path'
else:
print e
return None
def getChromeDowloadData(self, myPath):
"""
From https://github.com/OsandaMalith/ChromeFreak/blob/master/ChromeFreak.py CC license
"""
downloadValues = {}
try:
sqlitePath = myPath + "/chrome/" + chromeHistoryFile
connexion = sqlite3.connect(sqlitePath)
c = connexion.cursor()
c.execute("SELECT url, current_path, target_path,datetime((end_time/1000000)-11644473600,'unixepoch', 'localtime'),\
datetime((start_time/1000000)-11644473600,'unixepoch', 'localtime'),\
received_bytes, total_bytes FROM downloads,\
downloads_url_chains WHERE downloads.id = downloads_url_chains.id")
for row in c:
receivedBytes = ''
try:
#"%.2f" % receivedBytes
receivedBytes = "%.2f Bytes" % float(row[5])
#if receivedBytes < 1024:
#downloads += 'Received Bytes = %.2f Bytes\n' % (float(row[5]))
if float(row[5]) > 1024 and float(row[5]) < 1048576:
receivedBytes = "%.2f KB" % (float(row[5]) / 1024)
elif (float(row[5]) > 1048576 and float(row[5]) < 1073741824):
receivedBytes = "%.2f MB" % (float(row[5]) / 1048576)
else:
receivedBytes = "%.2f GB" % (float(row[5]) / 1073741824)
downloadValues['URL %s' % row[0]] = {'currentPath':str(row[1]), 'targetPath':str(row[2]),
'endTime':str(row[4]), 'startTime':str(row[5]), 'receivedBytes':str(receivedBytes)}
except UnicodeError:
continue
return downloadValues
except sqlite3.OperationalError, e:
e = str(e)
if e == 'database is locked':
print '[!] Make sure Google Chrome is not running in the background'
elif e == 'no such table: downloads':
print '[!] Something wrong with the database name'
elif e == 'unable to open database file':
print '[!] Something wrong with the database path'
else:
print e
return None
def main(self, myPath):
placesValues = self.getChromeHistoryData(myPath)
if placesValues == None:
return None
downloadValues = self.getChromeDowloadData(myPath)
return {self.__class__.__name__:{'history':placesValues, 'download':downloadValues}}
|
CarlosLannister/OwadeReborn
|
owade/fileAnalyze/historyChrome.py
|
Python
|
gpl-3.0
| 5,028
|
[
"VisIt"
] |
5ba708168184ed06abdfe9379bbf6128364ab6963cb2ebdbfc49f9363f85ef4e
|
from __future__ import print_function
from typing import cast, Any, Iterable, Mapping, Optional, Sequence, Tuple, Text
import mandrill
from confirmation.models import Confirmation
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils import timezone
from zerver.decorator import statsd_increment, uses_mandrill
from zerver.models import (
Recipient,
ScheduledJob,
UserMessage,
Stream,
get_display_recipient,
UserProfile,
get_user_profile_by_email,
get_user_profile_by_id,
receives_offline_notifications,
get_context_for_message,
Message,
Realm,
)
import datetime
import re
import subprocess
import ujson
from six.moves import urllib
from collections import defaultdict
def unsubscribe_token(user_profile):
    # type: (UserProfile) -> Text
    """Return the unique unsubscription token for *user_profile*.

    We leverage the Django confirmations framework to generate and track
    the tokens; the token is the last path component of the link it builds.
    """
    link = Confirmation.objects.get_link_for_object(user_profile)
    return link.split("/")[-1]
def one_click_unsubscribe_link(user_profile, endpoint):
    # type: (UserProfile, Text) -> Text
    """
    Generate a unique link that a logged-out user can visit to unsubscribe from
    Zulip e-mails without having to first log in.
    """
    base = user_profile.realm.uri.rstrip("/")
    resource_path = "accounts/unsubscribe/%s/%s" % (
        endpoint, unsubscribe_token(user_profile))
    return "%s/%s" % (base, resource_path)
def hashchange_encode(string):
    # type: (Text) -> Text
    """
    Encode a narrow-hash component exactly like the frontend's
    hashchange.encodeHashComponent, so generated links match.
    """
    # quote() leaves "." unescaped and has safe="/" by default; we want both
    # encoded, so pass safe=b"" and percent-encode "." by hand, then turn
    # every "%" marker into "." to match the JS scheme.
    quoted = urllib.parse.quote(string.encode("utf-8"), safe=b"")
    return quoted.replace(".", "%2E").replace("%", ".")
def pm_narrow_url(realm, participants):
    # type: (Realm, List[Text]) -> Text
    """Build the #narrow URL for a private-message thread.

    NOTE: sorts *participants* in place so the pm-with operand is canonical.
    """
    participants.sort()
    operand = hashchange_encode(",".join(participants))
    return u"%s/#narrow/pm-with/" % (realm.uri,) + operand
def stream_narrow_url(realm, stream):
    # type: (Realm, Text) -> Text
    """Build the #narrow URL for all messages in *stream*."""
    prefix = u"%s/#narrow/stream/" % (realm.uri,)
    return prefix + hashchange_encode(stream)
def topic_narrow_url(realm, stream, topic):
    # type: (Realm, Text, Text) -> Text
    """Build the #narrow URL for one topic within *stream*."""
    prefix = u"%s/#narrow/stream/" % (realm.uri,)
    return u"%s%s/topic/%s" % (prefix, hashchange_encode(stream),
                               hashchange_encode(topic))
def build_message_list(user_profile, messages):
    # type: (UserProfile, List[Message]) -> List[Dict[str, Any]]
    """
    Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface
    """
    messages_to_render = []  # type: List[Dict[str, Any]]

    def sender_string(message):
        # type: (Message) -> Text
        # Only stream/huddle blocks show a sender name; 1:1 PMs leave it blank.
        if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
            return message.sender.full_name
        else:
            return ''

    def relative_to_full_url(content):
        # type: (Text) -> Text
        # URLs for uploaded content are of the form
        # "/user_uploads/abc.png". Make them full paths.
        #
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage.
        content = re.sub(
            r"/user_uploads/(\S*)",
            user_profile.realm.uri + r"/user_uploads/\1", content)

        # Our proxying user-uploaded images seems to break inline images in HTML
        # emails, so drop the whole <img> tag (the full-URL link added above
        # still lets the reader click through).
        content = re.sub(
            r"<img src=(\S+)/user_uploads/(\S+)>", "", content)

        # URLs for emoji are of the form
        # "static/generated/emoji/images/emoji/snowflake.png".
        content = re.sub(
            r"/static/generated/emoji/images/emoji/",
            user_profile.realm.uri + r"/static/generated/emoji/images/emoji/",
            content)

        return content

    def fix_plaintext_image_urls(content):
        # type: (Text) -> Text
        # Replace image URLs in plaintext content of the form
        #     [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)

    def fix_emoji_sizes(html):
        # type: (Text) -> Text
        # Email clients don't load our stylesheet, so replace the CSS class
        # with an explicit height attribute.
        return html.replace(' class="emoji"', ' height="20px"')

    def build_message_payload(message):
        # type: (Message) -> Dict[str, Text]
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        plain = relative_to_full_url(plain)

        html = message.rendered_content
        html = relative_to_full_url(html)
        html = fix_emoji_sizes(html)

        return {'plain': plain, 'html': html}

    def build_sender_payload(message):
        # type: (Message) -> Dict[str, Any]
        sender = sender_string(message)
        return {'sender': sender,
                'content': [build_message_payload(message)]}

    def message_header(user_profile, message):
        # type: (UserProfile, Message) -> Dict[str, Any]
        # Build the per-recipient block header: plain text plus an HTML link
        # into the corresponding narrow.
        disp_recipient = get_display_recipient(message.recipient)
        if message.recipient.type == Recipient.PERSONAL:
            header = u"You and %s" % (message.sender.full_name,)
            html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        elif message.recipient.type == Recipient.HUDDLE:
            # For huddles, get_display_recipient returns a list of user dicts.
            assert not isinstance(disp_recipient, Text)
            other_recipients = [r['full_name'] for r in disp_recipient
                                if r['email'] != user_profile.email]
            header = u"You and %s" % (", ".join(other_recipients),)
            html_link = pm_narrow_url(user_profile.realm, [r["email"] for r in disp_recipient
                                                           if r["email"] != user_profile.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        else:
            # Stream message: disp_recipient is the stream name.
            assert isinstance(disp_recipient, Text)
            header = u"%s > %s" % (disp_recipient, message.topic_name())
            stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
            topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
            header_html = u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
                stream_link, disp_recipient, topic_link, message.subject)
        return {"plain": header,
                "html": header_html,
                "stream_message": message.recipient.type_name() == "stream"}

    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                   "plain":"header",
    #                   "html":"htmlheader"
    #                 }
    #       "senders":[
    #          {
    #             "sender":"sender_name",
    #             "content":[
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #             ]
    #          }
    #       ]
    #    },
    # ]

    # Chronological order, so consecutive messages can be collapsed.
    messages.sort(key=lambda message: message.pub_date)

    for message in messages:
        header = message_header(user_profile, message)

        # If we want to collapse into the previous recipient block
        if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
            sender = sender_string(message)
            sender_block = messages_to_render[-1]['senders']

            # Same message sender, collapse again
            if sender_block[-1]['sender'] == sender:
                sender_block[-1]['content'].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {'header': header,
                               'senders': [build_sender_payload(message)]}

            messages_to_render.append(recipient_block)

    return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
    # type: (UserProfile, List[Message], int) -> None
    """
    Send a reminder email to a user if she's missed some PMs by being offline.

    The email will have its reply to address set to a limited used email
    address that will send a zulip message to the correct recipient. This
    allows the user to respond to missed PMs, huddles, and @-mentions directly
    from the email.

    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of Message objects to remind about; they should
    all have the same recipient and subject
    `message_count` is the total number of missed messages for this
    (recipient, subject) pair

    :raises ValueError: if missed_messages spans more than one
        (recipient, subject) pair
    """
    from zerver.context_processors import common_context
    # Disabled missedmessage emails internally
    if not user_profile.enable_offline_email_notifications:
        return

    recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
    if len(recipients) != 1:
        raise ValueError(
            'All missed_messages must have the same recipient and subject %r' %
            recipients
        )

    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    template_payload = common_context(user_profile)
    template_payload.update({
        'name': user_profile.full_name,
        'messages': build_message_list(user_profile, missed_messages),
        'message_count': message_count,
        'reply_warning': False,
        'mention': missed_messages[0].recipient.type == Recipient.STREAM,
        'reply_to_zulip': True,
        'unsubscribe_link': unsubscribe_link,
    })

    # Replies to this email are routed back into Zulip via a one-off
    # missed-message address.
    headers = {}
    from zerver.lib.email_mirror import create_missed_message_address
    address = create_missed_message_address(user_profile, missed_messages[0])
    headers['Reply-To'] = address

    senders = set(m.sender.full_name for m in missed_messages)
    sender_str = ", ".join(senders)
    plural_messages = 's' if len(missed_messages) > 1 else ''
    subject = "Missed Zulip%s from %s" % (plural_messages, sender_str)
    from_email = 'Zulip <%s>' % (settings.NOREPLY_EMAIL_ADDRESS,)
    if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
        # If this setting is enabled, you can reply to the Zulip
        # missed message emails directly back to the original sender.
        # However, one must ensure the Zulip server is in the SPF
        # record for the domain, or there will be spam/deliverability
        # problems.
        headers['Sender'] = from_email
        sender = missed_messages[0].sender
        from_email = '"%s" <%s>' % (sender_str, sender.email)

    text_content = loader.render_to_string('zerver/missed_message_email.txt', template_payload)
    html_content = loader.render_to_string('zerver/missed_message_email.html', template_payload)

    msg = EmailMultiAlternatives(subject, text_content, from_email, [user_profile.email],
                                 headers = headers)
    msg.attach_alternative(html_content, "text/html")
    msg.send()

    # Record when we last reminded this user, so reminders can be throttled.
    user_profile.last_reminder = timezone.now()
    user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
    # type: (int, Iterable[Dict[str, Any]]) -> None
    """Group a user's missed-message events by (recipient, topic) and send
    one reminder email per group via do_send_missedmessage_events_reply_in_zulip.

    :param user_profile_id: id of the user to notify
    :param missed_email_events: dicts each carrying a 'message_id' key
    """
    message_ids = [event.get('message_id') for event in missed_email_events]

    user_profile = get_user_profile_by_id(user_profile_id)
    if not receives_offline_notifications(user_profile):
        return

    # Only consider messages the user has not read yet.
    messages = [um.message for um in UserMessage.objects.filter(user_profile=user_profile,
                                                                message__id__in=message_ids,
                                                                flags=~UserMessage.flags.read)]
    if not messages:
        return

    messages_by_recipient_subject = defaultdict(list)  # type: Dict[Tuple[int, Text], List[Message]]
    for msg in messages:
        messages_by_recipient_subject[(msg.recipient_id, msg.topic_name())].append(msg)

    # Counts are taken before context messages are mixed in below, so they
    # reflect only the actually-missed messages.
    message_count_by_recipient_subject = {
        recipient_subject: len(msgs)
        for recipient_subject, msgs in messages_by_recipient_subject.items()
    }

    for msg_list in messages_by_recipient_subject.values():
        # For stream conversations, add surrounding context around the
        # earliest missed message.
        msg = min(msg_list, key=lambda msg: msg.pub_date)
        if msg.recipient.type == Recipient.STREAM:
            msg_list.extend(get_context_for_message(msg))

    # Send an email per recipient subject pair
    for recipient_subject, msg_list in messages_by_recipient_subject.items():
        # De-duplicate: context messages may overlap the missed ones.
        unique_messages = {m.id: m for m in msg_list}
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_recipient_subject[recipient_subject],
        )
@uses_mandrill
def clear_followup_emails_queue(email, mail_client=None):
    # type: (Text, Optional[mandrill.Mandrill]) -> None
    """
    Clear out queued emails that would otherwise be sent to a specific
    email address.

    `email` is a string representing the recipient email
    `mail_client` is filled in by the @uses_mandrill decorator; when it is
    None, locally scheduled (ScheduledJob) emails are removed instead of
    Mandrill-queued ones.
    """
    # SMTP mail delivery implementation
    if not mail_client:
        items = ScheduledJob.objects.filter(type=ScheduledJob.EMAIL, filter_string__iexact = email)
        items.delete()
        return

    # Mandrill implementation: cancel every message scheduled for this address.
    for email_message in mail_client.messages.list_scheduled(to=email):
        result = mail_client.messages.cancel_scheduled(id=email_message["_id"])
        if result.get("status") == "error":
            print(result.get("name"), result.get("error"))
    return
def log_digest_event(msg):
    # type: (Text) -> None
    """Record a digest-related event in the digest log file."""
    import logging

    # basicConfig attaches the file handler on first use; later calls are
    # no-ops, so repeating it here is safe.
    logging.basicConfig(level=logging.INFO, filename=settings.DIGEST_LOG_PATH)
    logging.info(msg)
@uses_mandrill
def send_future_email(recipients, email_html, email_text, subject,
                      delay=datetime.timedelta(0), sender=None,
                      tags=None, mail_client=None):
    # type: (List[Dict[str, Any]], Text, Text, Text, datetime.timedelta, Optional[Dict[str, Text]], Optional[Iterable[Text]], Optional[mandrill.Mandrill]) -> None
    """
    Sends email via Mandrill, with optional delay

    'mail_client' is filled in by the decorator

    :param recipients: list of dicts with at least an 'email' key (and
        optionally 'name'), in the shape Mandrill expects for 'to'.
    :param delay: how long to wait before delivery; must be a timedelta.
    :param sender: optional {'email': ..., 'name': ...}; defaults to the
        noreply address.
    :param tags: optional Mandrill tags (default: no tags).
    :raises TypeError: if delay is not a datetime.timedelta
    :raises ValueError: in DEVELOPMENT mode for non-zulip.com recipients
    """
    # BUGFIX: the old default tags=[] was a shared mutable default argument;
    # use a None sentinel instead.
    if tags is None:
        tags = []

    # When sending real emails while testing locally, don't accidentally send
    # emails to non-zulip.com users.
    if settings.DEVELOPMENT and \
       settings.EMAIL_BACKEND != 'django.core.mail.backends.console.EmailBackend':
        for recipient in recipients:
            email = recipient.get("email")
            if get_user_profile_by_email(email).realm.domain != "zulip.com":
                raise ValueError("digest: refusing to send emails to non-zulip.com users.")

    # message = {"from_email": "othello@zulip.com",
    #            "from_name": "Othello",
    #            "html": "<p>hello</p> there",
    #            "tags": ["signup-reminders"],
    #            "to": [{'email':"acrefoot@zulip.com", 'name': "thingamajig"}]
    #            }

    # SMTP mail delivery implementation
    if not mail_client:
        if sender is None:
            # This may likely be overridden by settings.DEFAULT_FROM_EMAIL
            sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
        for recipient in recipients:
            email_fields = {'email_html': email_html,
                            'email_subject': subject,
                            'email_text': email_text,
                            'recipient_email': recipient.get('email'),
                            'recipient_name': recipient.get('name'),
                            'sender_email': sender['email'],
                            'sender_name': sender['name']}
            ScheduledJob.objects.create(type=ScheduledJob.EMAIL, filter_string=recipient.get('email'),
                                        data=ujson.dumps(email_fields),
                                        scheduled_timestamp=timezone.now() + delay)
        return

    # Mandrill implementation
    if sender is None:
        sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}

    message = {'from_email': sender['email'],
               'from_name': sender['name'],
               'to': recipients,
               'subject': subject,
               'html': email_html,
               'text': email_text,
               'tags': tags,
               }
    if not isinstance(delay, datetime.timedelta):
        raise TypeError("specified delay is of the wrong type: %s" % (type(delay),))
    # Ignore any delay smaller than one minute: it's cheaper to just send
    # the message immediately.
    # BUGFIX: 'async' is a reserved word in Python 3, so the Mandrill keyword
    # argument must be passed through **kwargs instead of async=False.
    if delay < datetime.timedelta(minutes=1):
        results = mail_client.messages.send(message=message, ip_pool="Main Pool",
                                            **{'async': False})
    else:
        send_time = (timezone.now() + delay).__format__("%Y-%m-%d %H:%M:%S")
        results = mail_client.messages.send(message=message, ip_pool="Main Pool",
                                            send_at=send_time, **{'async': False})
    problems = [result for result in results if (result['status'] in ('rejected', 'invalid'))]

    if problems:
        for problem in problems:
            if problem["status"] == "rejected":
                if problem["reject_reason"] == "hard-bounce":
                    # A hard bounce means the address doesn't exist or the
                    # recipient mail server is completely blocking
                    # delivery. Don't try to send further emails.
                    if "digest-emails" in tags:
                        from zerver.lib.actions import do_change_enable_digest_emails
                        bounce_email = problem["email"]
                        user_profile = get_user_profile_by_email(bounce_email)
                        do_change_enable_digest_emails(user_profile, False)
                        log_digest_event("%s\nTurned off digest emails for %s" % (
                            str(problems), bounce_email))
                    continue
                elif problem["reject_reason"] == "soft-bounce":
                    # A soft bounce is temporary; let it try to resolve itself.
                    continue
            raise Exception(
                "While sending email (%s), encountered problems with these recipients: %r"
                % (subject, problems))
    return
def send_local_email_template_with_delay(recipients, template_prefix,
                                         template_payload, delay,
                                         tags=None, sender=None):
    # type: (List[Dict[str, Any]], Text, Dict[str, Text], datetime.timedelta, Optional[Iterable[Text]], Optional[Dict[str, Text]]) -> None
    """Render a local template set and schedule the email for delayed sending.

    Renders <template_prefix>.html / .txt / .subject with template_payload and
    hands the result to send_future_email with the given delay.

    Bug fix: the previous signature used a mutable default (tags=[]) shared
    across calls and a dict default that read settings at import time; both
    are now constructed per call, preserving the old effective defaults.
    """
    if tags is None:
        tags = []
    if sender is None:
        sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
    html_content = loader.render_to_string(template_prefix + ".html", template_payload)
    text_content = loader.render_to_string(template_prefix + ".txt", template_payload)
    subject = loader.render_to_string(template_prefix + ".subject", template_payload).strip()
    send_future_email(recipients,
                      html_content,
                      text_content,
                      subject,
                      delay=delay,
                      sender=sender,
                      tags=tags)
def enqueue_welcome_emails(email, name):
    # type: (Text, Text) -> None
    """Schedule the day-1 and day-2 onboarding followup emails for a new user."""
    from zerver.context_processors import common_context
    if settings.WELCOME_EMAIL_SENDER is not None:
        sender = settings.WELCOME_EMAIL_SENDER  # type: Dict[str, Text]
    else:
        sender = {'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'}
    user_profile = get_user_profile_by_email(email)
    template_payload = common_context(user_profile)
    template_payload.update({
        'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
        'unsubscribe_link': one_click_unsubscribe_link(user_profile, "welcome")
    })
    recipients = [{'email': email, 'name': name}]
    # Both followups share the payload, sender and tags; only the template
    # and the delay differ.
    followups = (("zerver/emails/followup/day1", datetime.timedelta(hours=1)),
                 ("zerver/emails/followup/day2", datetime.timedelta(days=1)))
    for template_prefix, delay in followups:
        send_local_email_template_with_delay(recipients,
                                             template_prefix,
                                             template_payload,
                                             delay,
                                             tags=["followup-emails"],
                                             sender=sender)
def convert_html_to_markdown(html):
    # type: (Text) -> Text
    """Convert an HTML fragment to markdown via the html2text CLI tool.

    On Linux, the tool installs as html2markdown, and there's a command called
    html2text that does something totally different. On OSX, the tool installs
    as html2text.

    Raises OSError if neither converter binary is installed.  (Previously this
    crashed with a confusing NameError on the unbound Popen handle.)
    """
    commands = ["html2markdown", "html2text"]
    p = None
    for command in commands:
        try:
            # A body width of 0 means do not try to wrap the text for us.
            p = subprocess.Popen(
                [command, "--body-width=0"], stdout=subprocess.PIPE,
                stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
            break
        except OSError:
            continue
    if p is None:
        raise OSError("Could not find an HTML-to-markdown converter; tried: %s"
                      % ", ".join(commands))
    markdown = p.communicate(input=html.encode('utf-8'))[0].decode('utf-8').strip()
    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form ``, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `` into
    # `[image.png](http://foo.com/image.png)`.
    return re.sub(u"!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)",
                  u"[\\2](\\1/\\2)", markdown)
|
isht3/zulip
|
zerver/lib/notifications.py
|
Python
|
apache-2.0
| 22,548
|
[
"VisIt"
] |
a3f1c82ce685e7dca8a1e342f96efcbe5e727a8d99dd51d7ff25e84b222b1f5b
|
"""
Convenience routines for performing common operations.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os
from zeroinstall import SafeException
DontUseGUI = object()
def should_use_gui(use_gui):
	"""Decide whether the GUI front-end should be used.

	use_gui may be False (never), None (use it if available) or anything
	else (require it).  Returns True/False, or raises SafeException when the
	GUI is required but cannot be used.
	"""
	if use_gui is False:
		return False
	# None means "optional": fall back silently; anything else is mandatory.
	gui_required = use_gui is not None
	if not os.environ.get('DISPLAY', None):
		if gui_required:
			raise SafeException("Can't use GUI because $DISPLAY is not set")
		return False
	from zeroinstall.gui import main
	if main.gui_is_available(use_gui):
		return True
	if gui_required:
		raise SafeException("No GUI available")
	return False
|
linuxmidhun/0install
|
zeroinstall/helpers.py
|
Python
|
lgpl-2.1
| 684
|
[
"VisIt"
] |
8efb9674afbc24f91470b681fe47f9e69e188cd5451ecf6782998d5bcdaebf58
|
""" Frontend to MySQL DB AccountingDB
"""
__RCSID__ = "$Id$"
import datetime
import time
import threading
import random
from DIRAC.Core.Base.DB import DB
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Utilities import List, ThreadSafe, Time, DEncode
from DIRAC.Core.Utilities.Plotting.TypeLoader import TypeLoader
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
gSynchro = ThreadSafe.Synchronizer()
class AccountingDB(DB):
  def __init__(self, name='Accounting/AccountingDB', readOnly=False):
    """Create the accounting DB frontend.

    :param name: CS location of the DB instance
    :param readOnly: if True, no tables are created and writes are refused
    """
    DB.__init__(self, 'AccountingDB', name)
    self.maxBucketTime = 604800  # 1 w
    self.autoCompact = False
    self.__readOnly = readOnly
    self.__doingCompaction = False
    self.__oldBucketMethod = False
    # Epoch of the last loadPendingRecords run; acts as a 1h re-entry guard
    self.__doingPendingLockTime = 0
    self.__deadLockRetries = 2
    self.__queuedRecordsLock = ThreadSafe.Synchronizer()
    self.__queuedRecordsToInsert = []
    # In-memory description of all registered types and their bucket lengths
    self.dbCatalog = {}
    self.dbBucketsLength = {}
    # Per-type, per-key cache of value -> key-table id lookups
    self.__keysCache = {}
    # Thread pool used to drain the IN (queue) tables in parallel
    maxParallelInsertions = self.getCSOption("ParallelRecordInsertions", 10)
    self.__threadPool = ThreadPool(1, maxParallelInsertions)
    self.__threadPool.daemonize()
    self.catalogTableName = _getTableName("catalog", "Types")
    self._createTables({
        self.catalogTableName: {
            'Fields': {
                'name': "VARCHAR(64) UNIQUE NOT NULL",
                'keyFields': "VARCHAR(255) NOT NULL",
                'valueFields': "VARCHAR(255) NOT NULL",
                'bucketsLength': "VARCHAR(255) NOT NULL",
            },
            'PrimaryKey': 'name'
        }
    })
    self.__loadCatalogFromDB()
    gMonitor.registerActivity("registeradded",
                              "Register added",
                              "Accounting",
                              "entries",
                              gMonitor.OP_ACUM)
    gMonitor.registerActivity("insertiontime",
                              "Record insertion time",
                              "Accounting",
                              "seconds",
                              gMonitor.OP_MEAN)
    gMonitor.registerActivity("querytime",
                              "Records query time",
                              "Accounting",
                              "seconds",
                              gMonitor.OP_MEAN)
    # Daily compaction runs at ~02:xx with randomized minute/second to avoid
    # all instances compacting simultaneously
    self.__compactTime = datetime.time(hour=2,
                                       minute=random.randint(0, 59),
                                       second=random.randint(0, 59))
    lcd = Time.dateTime()
    # NOTE(review): datetime.replace returns a new object, so this call has
    # no effect on lcd — confirm whether the result was meant to be assigned
    lcd.replace(hour=self.__compactTime.hour + 1,
                minute=0,
                second=0)
    self.__lastCompactionEpoch = Time.toEpoch(lcd)
    self.__registerTypes()
def __loadTablesCreated(self):
result = self._query("show tables")
if not result['OK']: # pylint: disable=invalid-sequence-index
return result
return S_OK([f[0] for f in result['Value']]) # pylint: disable=invalid-sequence-index
def autoCompactDB(self):
self.autoCompact = True
th = threading.Thread(target=self.__periodicAutoCompactDB)
th.setDaemon(1)
th.start()
def __periodicAutoCompactDB(self):
while self.autoCompact:
nct = Time.dateTime()
if nct.hour >= self.__compactTime.hour:
nct = nct + datetime.timedelta(days=1)
nct = nct.replace(hour=self.__compactTime.hour,
minute=self.__compactTime.minute,
second=self.__compactTime.second)
self.log.info("Next db compaction will be at %s" % nct)
sleepTime = Time.toEpoch(nct) - Time.toEpoch()
time.sleep(sleepTime)
self.compactBuckets()
  def __registerTypes(self):
    """
    Register all types

    For every (setup, accounting class) combination, reconcile the loaded
    type definition with what is already in the catalog, then (re)register
    it so all its tables exist.
    """
    retVal = gConfig.getSections("/DIRAC/Setups")
    if not retVal['OK']:
      return S_ERROR("Can't get a list of setups: %s" % retVal['Message'])
    setupsList = retVal['Value']
    objectsLoaded = TypeLoader().getTypes()
    # Load the files
    for pythonClassName in sorted(objectsLoaded):
      typeClass = objectsLoaded[pythonClassName]
      for setup in setupsList:
        typeName = "%s_%s" % (setup, pythonClassName)
        typeDef = typeClass().getDefinition()
        #dbTypeName = "%s_%s" % ( setup, typeName )
        definitionKeyFields, definitionAccountingFields, bucketsLength = typeDef[1:]
        # If already defined check the similarities
        if typeName in self.dbCatalog:
          # On any mismatch the DB-stored definition wins; the code-side
          # definition is only logged as changed
          bucketsLength.sort()
          if bucketsLength != self.dbBucketsLength[typeName]:
            bucketsLength = self.dbBucketsLength[typeName]
            self.log.warn("Bucket length has changed for type %s" % typeName)
          keyFields = [f[0] for f in definitionKeyFields]
          if keyFields != self.dbCatalog[typeName]['keys']:
            keyFields = self.dbCatalog[typeName]['keys']
            self.log.error("Definition fields have changed", "Type %s" % typeName)
          valueFields = [f[0] for f in definitionAccountingFields]
          if valueFields != self.dbCatalog[typeName]['values']:
            valueFields = self.dbCatalog[typeName]['values']
            self.log.error("Accountable fields have changed", "Type %s" % typeName)
        # Try to re register to check all the tables are there
        retVal = self.registerType(typeName, definitionKeyFields,
                                   definitionAccountingFields, bucketsLength)
        if not retVal['OK']:
          self.log.error("Can't register type", "%s: %s" % (typeName, retVal['Message']))
        # If it has been properly registered, update info
        elif retVal['Value']:
          # Set the timespan
          self.dbCatalog[typeName]['dataTimespan'] = typeClass().getDataTimespan()
          self.dbCatalog[typeName]['definition'] = {'keys': definitionKeyFields,
                                                    'values': definitionAccountingFields}
    return S_OK()
def __loadCatalogFromDB(self):
retVal = self._query(
"SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName)
if not retVal['OK']:
raise Exception(retVal['Message'])
for typesEntry in retVal['Value']:
typeName = typesEntry[0]
keyFields = List.fromChar(typesEntry[1], ",")
valueFields = List.fromChar(typesEntry[2], ",")
bucketsLength = DEncode.decode(typesEntry[3])[0]
self.__addToCatalog(typeName, keyFields, valueFields, bucketsLength)
def getWaitingRecordsLifeTime(self):
"""
Get the time records can live in the IN tables without no retry
"""
return self.getCSOption("RecordMaxWaitingTime", 86400)
def markAllPendingRecordsAsNotTaken(self):
"""
Mark all records to be processed as not taken
NOTE: ONLY EXECUTE THIS AT THE BEGINNING OF THE DATASTORE SERVICE!
"""
self.log.always("Marking all records to be processed as not taken")
for typeName in self.dbCatalog:
sqlTableName = _getTableName("in", typeName)
result = self._update("UPDATE `%s` SET taken=0" % sqlTableName)
if not result['OK']:
return result
return S_OK()
  def loadPendingRecords(self):
    """
    Load all records pending to insertion and generate threaded jobs

    Picks up untaken (or stale-taken) rows from every type's IN table,
    marks them taken, and queues them for insertion in bundles through
    the thread pool.  Guarded so it runs at most once per hour.
    """
    gSynchro.lock()
    try:
      now = time.time()
      # Re-entry guard: skip if a run started less than an hour ago
      if now - self.__doingPendingLockTime <= 3600:
        return S_OK()
      self.__doingPendingLockTime = now
    finally:
      gSynchro.unlock()
    self.log.info("[PENDING] Loading pending records for insertion")
    pending = 0
    now = Time.toEpoch()
    recordsPerSlot = self.getCSOption("RecordsPerSlot", 100)
    for typeName in self.dbCatalog:
      self.log.info("[PENDING] Checking %s" % typeName)
      pendingInQueue = self.__threadPool.pendingJobs()
      # Never allow more than 3000 queued jobs, and take at most 100 slots
      # per type per pass
      emptySlots = max(0, 3000 - pendingInQueue)
      self.log.info("[PENDING] %s in the queue, %d empty slots" % (pendingInQueue, emptySlots))
      if emptySlots < 1:
        continue
      emptySlots = min(100, emptySlots)
      sqlTableName = _getTableName("in", typeName)
      sqlFields = ['id'] + self.dbCatalog[typeName]['typeFields']
      # Stale rows (taken too long ago) are picked up again
      sqlCond = "WHERE taken = 0 or TIMESTAMPDIFF( SECOND, takenSince, UTC_TIMESTAMP() ) > %s" % self.getWaitingRecordsLifeTime(
      )
      result = self._query("SELECT %s FROM `%s` %s ORDER BY id ASC LIMIT %d" % (
          ", ".join(["`%s`" % f for f in sqlFields]), sqlTableName, sqlCond, emptySlots * recordsPerSlot))
      if not result['OK']:
        self.log.error("[PENDING] Error when trying to get pending records",
                       "for %s : %s" % (typeName, result['Message']))
        return result
      self.log.info("[PENDING] Got %s pending records for type %s" % (len(result['Value']), typeName))
      dbData = result['Value']
      idList = [str(r[0]) for r in dbData]
      # If nothing to do, continue
      if not idList:
        continue
      result = self._update(
          "UPDATE `%s` SET taken=1, takenSince=UTC_TIMESTAMP() WHERE id in (%s)" %
          (sqlTableName, ", ".join(idList)))
      if not result['OK']:
        self.log.error(
            "[PENDING] Error when trying set state to waiting records", "for %s : %s" %
            (typeName, result['Message']))
        # Release the re-entry guard so a later run can retry
        self.__doingPendingLockTime = 0
        return result
      # Group them in groups of 10
      recordsToProcess = []
      for record in dbData:
        pending += 1
        iD = record[0]
        # Row layout: id, <key/value fields...>, startTime, endTime
        startTime = record[-2]
        endTime = record[-1]
        valuesList = list(record[1:-2])
        recordsToProcess.append((iD, typeName, startTime, endTime, valuesList, now))
        if len(recordsToProcess) % recordsPerSlot == 0:
          self.__threadPool.generateJobAndQueueIt(self.__insertFromINTable,
                                                  args=(recordsToProcess, ))
          recordsToProcess = []
      # Flush the last partial bundle
      if recordsToProcess:
        self.__threadPool.generateJobAndQueueIt(self.__insertFromINTable,
                                                args=(recordsToProcess, ))
    self.log.info("[PENDING] Got %s records requests for all types" % pending)
    self.__doingPendingLockTime = 0
    return S_OK()
def __addToCatalog(self, typeName, keyFields, valueFields, bucketsLength):
"""
Add type to catalog
"""
self.log.verbose("Adding to catalog type %s" % typeName, "with length %s" % str(bucketsLength))
self.dbCatalog[typeName] = {'keys': keyFields, 'values': valueFields,
'typeFields': [], 'bucketFields': [], 'dataTimespan': 0}
self.dbCatalog[typeName]['typeFields'].extend(keyFields)
self.dbCatalog[typeName]['typeFields'].extend(valueFields)
self.dbCatalog[typeName]['bucketFields'] = list(self.dbCatalog[typeName]['typeFields'])
self.dbCatalog[typeName]['typeFields'].extend(['startTime', 'endTime'])
self.dbCatalog[typeName]['bucketFields'].extend(['entriesInBucket', 'startTime', 'bucketLength'])
self.dbBucketsLength[typeName] = bucketsLength
# ADRI: TEST COMPACT BUCKETS
#self.dbBucketsLength[ typeName ] = [ ( 31104000, 3600 ) ]
def changeBucketsLength(self, typeName, bucketsLength):
gSynchro.lock()
try:
if typeName not in self.dbCatalog:
return S_ERROR("%s is not a valid type name" % typeName)
bucketsLength.sort()
bucketsEncoding = DEncode.encode(bucketsLength)
retVal = self._update(
"UPDATE `%s` set bucketsLength = '%s' where name = '%s'" % (
self.catalogTableName,
bucketsEncoding,
typeName
)
)
if not retVal['OK']:
return retVal
self.dbBucketsLength[typeName] = bucketsLength
finally:
gSynchro.unlock()
return self.regenerateBuckets(typeName)
@gSynchro
def registerType(self, name, definitionKeyFields, definitionAccountingFields, bucketsLength):
"""
Register a new type
"""
gMonitor.registerActivity("registerwaiting:%s" % name,
"Records waiting for insertion for %s" % " ".join(name.split("_")),
"Accounting",
"records",
gMonitor.OP_MEAN)
gMonitor.registerActivity("registeradded:%s" % name,
"Register added for %s" % " ".join(name.split("_")),
"Accounting",
"entries",
gMonitor.OP_ACUM)
result = self.__loadTablesCreated()
if not result['OK']:
return result
tablesInThere = result['Value']
keyFieldsList = []
valueFieldsList = []
for key in definitionKeyFields:
keyFieldsList.append(key[0])
for value in definitionAccountingFields:
valueFieldsList.append(value[0])
for field in definitionKeyFields:
if field in valueFieldsList:
return S_ERROR("Key field %s is also in the list of value fields" % field)
for field in definitionAccountingFields:
if field in keyFieldsList:
return S_ERROR("Value field %s is also in the list of key fields" % field)
for bucket in bucketsLength:
if not isinstance(bucket, tuple):
return S_ERROR("Length of buckets should be a list of tuples")
if len(bucket) != 2:
return S_ERROR("Length of buckets should have 2d tuples")
updateDBCatalog = True
if name in self.dbCatalog:
updateDBCatalog = False
tables = {}
for key in definitionKeyFields:
keyTableName = _getTableName("key", name, key[0])
if keyTableName not in tablesInThere:
self.log.info("Table for key %s has to be created" % key[0])
tables[keyTableName] = {'Fields': {'id': 'INTEGER NOT NULL AUTO_INCREMENT',
'value': '%s NOT NULL' % key[1]
},
'UniqueIndexes': {'valueindex': ['value']},
'PrimaryKey': 'id'
}
# Registering type
fieldsDict = {}
bucketFieldsDict = {}
inbufferDict = {'id': 'BIGINT NOT NULL AUTO_INCREMENT'}
bucketIndexes = {'startTimeIndex': ['startTime'], 'bucketLengthIndex': ['bucketLength']}
uniqueIndexFields = ['startTime']
for field in definitionKeyFields:
bucketIndexes["%sIndex" % field[0]] = [field[0]]
uniqueIndexFields.append(field[0])
fieldsDict[field[0]] = "INTEGER NOT NULL"
bucketFieldsDict[field[0]] = "INTEGER NOT NULL"
inbufferDict[field[0]] = field[1] + " NOT NULL"
for field in definitionAccountingFields:
fieldsDict[field[0]] = field[1] + " NOT NULL"
bucketFieldsDict[field[0]] = "DECIMAL(30,10) NOT NULL"
inbufferDict[field[0]] = field[1] + " NOT NULL"
fieldsDict['startTime'] = "INT UNSIGNED NOT NULL"
fieldsDict['endTime'] = "INT UNSIGNED NOT NULL"
bucketFieldsDict['entriesInBucket'] = "DECIMAL(30,10) NOT NULL"
bucketFieldsDict['startTime'] = "INT UNSIGNED NOT NULL"
inbufferDict['startTime'] = "INT UNSIGNED NOT NULL"
inbufferDict['endTime'] = "INT UNSIGNED NOT NULL"
inbufferDict['taken'] = "TINYINT(1) DEFAULT 1 NOT NULL"
inbufferDict['takenSince'] = "DATETIME NOT NULL"
bucketFieldsDict['bucketLength'] = "MEDIUMINT UNSIGNED NOT NULL"
uniqueIndexFields.append('bucketLength')
bucketTableName = _getTableName("bucket", name)
if bucketTableName not in tablesInThere:
tables[bucketTableName] = {'Fields': bucketFieldsDict,
'UniqueIndexes': {'UniqueConstraint': uniqueIndexFields}
}
typeTableName = _getTableName("type", name)
if typeTableName not in tablesInThere:
tables[typeTableName] = {'Fields': fieldsDict}
inTableName = _getTableName("in", name)
if inTableName not in tablesInThere:
tables[inTableName] = {'Fields': inbufferDict,
'PrimaryKey': 'id'
}
if self.__readOnly:
if tables:
self.log.notice("ReadOnly mode: Skipping create of tables for %s. Removing from memory catalog" % name)
self.log.verbose("Skipping creation of tables %s" % ", ".join([tn for tn in tables]))
try:
self.dbCatalog.pop(name)
except KeyError:
pass
else:
self.log.notice("ReadOnly mode: %s is OK" % name)
return S_OK(not updateDBCatalog)
if tables:
retVal = self._createTables(tables)
if not retVal['OK']:
self.log.error("Can't create type", "%s: %s" % (name, retVal['Message']))
return S_ERROR("Can't create type %s: %s" % (name, retVal['Message']))
if updateDBCatalog:
bucketsLength.sort()
bucketsEncoding = DEncode.encode(bucketsLength)
self.insertFields(self.catalogTableName,
['name', 'keyFields', 'valueFields', 'bucketsLength'],
[name, ",".join(keyFieldsList), ",".join(valueFieldsList), bucketsEncoding])
self.__addToCatalog(name, keyFieldsList, valueFieldsList, bucketsLength)
self.log.info("Registered type %s" % name)
return S_OK(True)
def getRegisteredTypes(self):
"""
Get list of registered types
"""
retVal = self._query("SELECT `name`, `keyFields`, `valueFields`, `bucketsLength` FROM `%s`" % self.catalogTableName)
if not retVal['OK']:
return retVal
typesList = []
for typeInfo in retVal['Value']:
typesList.append([typeInfo[0],
List.fromChar(typeInfo[1]),
List.fromChar(typeInfo[2]),
DEncode.decode(typeInfo[3])
]
)
return S_OK(typesList)
  def getKeyValues(self, typeName, condDict, connObj=False):
    """
    Get all values for a given key field in a type

    :param typeName: registered type to inspect
    :param condDict: {keyName: [values]} restricting which bucket rows are
                     considered; keys not belonging to the type are ignored
    :param connObj: optional DB connection to reuse
    :return: S_OK({keyName: [values]}) or S_ERROR
    """
    keyValuesDict = {}
    keyTables = []
    sqlCond = []
    mainTable = "`%s`" % _getTableName("bucket", typeName)
    typeKeysList = self.dbCatalog[typeName]['keys']
    # Build join + filter conditions from the requested restrictions
    for keyName in condDict:
      if keyName in typeKeysList:
        keyTable = "`%s`" % _getTableName("key", typeName, keyName)
        if keyTable not in keyTables:
          keyTables.append(keyTable)
          # Join the key lookup table to the bucket table by id
          sqlCond.append("%s.id = %s.`%s`" % (keyTable, mainTable, keyName))
        for value in condDict[keyName]:
          sqlCond.append("%s.value = %s" % (keyTable, self._escapeString(value)['Value']))
    # For each key of the type, collect its distinct values under the
    # restrictions built above
    for keyName in typeKeysList:
      keyTable = "`%s`" % _getTableName("key", typeName, keyName)
      allKeyTables = keyTables
      if keyTable not in allKeyTables:
        # Copy so the base join list is not polluted for the next key
        allKeyTables = list(keyTables)
        allKeyTables.append(keyTable)
      cmd = "SELECT DISTINCT %s.value FROM %s" % (keyTable, ", ".join(allKeyTables))
      if sqlCond:
        sqlValueLink = "%s.id = %s.`%s`" % (keyTable, mainTable, keyName)
        cmd += ", %s WHERE %s AND %s" % (mainTable, sqlValueLink, " AND ".join(sqlCond))
      retVal = self._query(cmd, conn=connObj)
      if not retVal['OK']:
        return retVal
      keyValuesDict[keyName] = [r[0] for r in retVal['Value']]
    return S_OK(keyValuesDict)
@gSynchro
def deleteType(self, typeName):
"""
Deletes a type
"""
if self.__readOnly:
return S_ERROR("ReadOnly mode enabled. No modification allowed")
if typeName not in self.dbCatalog:
return S_ERROR("Type %s does not exist" % typeName)
self.log.info("Deleting type", typeName)
tablesToDelete = []
for keyField in self.dbCatalog[typeName]['keys']:
tablesToDelete.append("`%s`" % _getTableName("key", typeName, keyField))
tablesToDelete.insert(0, "`%s`" % _getTableName("type", typeName))
tablesToDelete.insert(0, "`%s`" % _getTableName("bucket", typeName))
tablesToDelete.insert(0, "`%s`" % _getTableName("in", typeName))
retVal = self._query("DROP TABLE %s" % ", ".join(tablesToDelete))
if not retVal['OK']:
return retVal
retVal = self._update("DELETE FROM `%s` WHERE name='%s'" % (_getTableName("catalog", "Types"), typeName))
del self.dbCatalog[typeName]
return S_OK()
def __getIdForKeyValue(self, typeName, keyName, keyValue, conn=False):
"""
Finds id number for value in a key table
"""
retVal = self._escapeString(keyValue)
if not retVal['OK']:
return retVal
keyValue = retVal['Value']
retVal = self._query("SELECT `id` FROM `%s` WHERE `value`=%s" % (_getTableName("key", typeName, keyName),
keyValue), conn=conn)
if not retVal['OK']:
return retVal
if len(retVal['Value']) > 0:
return S_OK(retVal['Value'][0][0])
return S_ERROR("Key id %s for value %s does not exist although it shoud" % (keyName, keyValue))
  def __addKeyValue(self, typeName, keyName, keyValue):
    """
    Adds a key value to a key table if not existant

    Returns S_OK(id) of the (possibly newly inserted) value, caching
    lookups per type and key.
    """
    # Cast to string just in case
    # NOTE(review): basestring is Python 2 only — confirm this module is
    # not expected to run under Python 3
    if not isinstance(keyValue, basestring):
      keyValue = str(keyValue)
    # No more than 64 chars for keys
    if len(keyValue) > 64:
      keyValue = keyValue[:64]
    # Look into the cache
    if typeName not in self.__keysCache:
      self.__keysCache[typeName] = {}
    typeCache = self.__keysCache[typeName]
    if keyName not in typeCache:
      typeCache[keyName] = {}
    keyCache = typeCache[keyName]
    if keyValue in keyCache:
      return S_OK(keyCache[keyValue])
    # Retrieve key
    keyTable = _getTableName("key", typeName, keyName)
    retVal = self.__getIdForKeyValue(typeName, keyName, keyValue)
    if retVal['OK']:
      keyCache[keyValue] = retVal['Value']
      return retVal
    # Key is not in there
    retVal = self._getConnection()
    if not retVal['OK']:
      return retVal
    connection = retVal['Value']
    self.log.info("Value %s for key %s didn't exist, inserting" % (keyValue, keyName))
    # A concurrent insert of the same value is tolerated: "Duplicate key"
    # errors are ignored and the id re-read below
    retVal = self.insertFields(keyTable, ['id', 'value'], [0, keyValue], connection)
    if not retVal['OK'] and retVal['Message'].find("Duplicate key") == -1:
      return retVal
    result = self.__getIdForKeyValue(typeName, keyName, keyValue, connection)
    if not result['OK']:
      return result
    keyCache[keyValue] = result['Value']
    return result
def calculateBucketLengthForTime(self, typeName, now, when):
"""
Get the expected bucket time for a moment in time
"""
for granuT in self.dbBucketsLength[typeName]:
nowBucketed = now - now % granuT[1]
dif = max(0, nowBucketed - when)
if dif <= granuT[0]:
return granuT[1]
return self.maxBucketTime
def calculateBuckets(self, typeName, startTime, endTime, nowEpoch=False):
"""
Magic function for calculating buckets between two times and
the proportional part for each bucket
"""
if not nowEpoch:
nowEpoch = int(Time.toEpoch(Time.dateTime()))
bucketTimeLength = self.calculateBucketLengthForTime(typeName, nowEpoch, startTime)
currentBucketStart = startTime - startTime % bucketTimeLength
if startTime == endTime:
return [(currentBucketStart,
1,
bucketTimeLength)]
buckets = []
totalLength = endTime - startTime
while currentBucketStart < endTime:
start = max(currentBucketStart, startTime)
end = min(currentBucketStart + bucketTimeLength, endTime)
proportion = float(end - start) / totalLength
buckets.append((currentBucketStart,
proportion,
bucketTimeLength))
currentBucketStart += bucketTimeLength
bucketTimeLength = self.calculateBucketLengthForTime(typeName, nowEpoch, currentBucketStart)
return buckets
def __insertInQueueTable(self, typeName, startTime, endTime, valuesList):
sqlFields = ['id', 'taken', 'takenSince'] + self.dbCatalog[typeName]['typeFields']
sqlValues = ['0', '0', 'UTC_TIMESTAMP()'] + valuesList + [startTime, endTime]
if len(sqlFields) != len(sqlValues):
numRcv = len(valuesList) + 2
numExp = len(self.dbCatalog[typeName]['typeFields'])
return S_ERROR("Fields mismatch for record %s. %s fields and %s expected" % (typeName,
numRcv,
numExp))
retVal = self.insertFields(
_getTableName("in", typeName),
sqlFields,
sqlValues
)
if not retVal['OK']:
return retVal
return S_OK(retVal['lastRowId'])
def insertRecordBundleThroughQueue(self, recordsToQueue):
if self.__readOnly:
return S_ERROR("ReadOnly mode enabled. No modification allowed")
recordsToProcess = []
now = Time.toEpoch()
for record in recordsToQueue:
typeName, startTime, endTime, valuesList = record
result = self.__insertInQueueTable(typeName, startTime, endTime, valuesList)
if not result['OK']:
return result
iD = result['Value']
recordsToProcess.append((iD, typeName, startTime, endTime, valuesList, now))
return S_OK()
def insertRecordThroughQueue(self, typeName, startTime, endTime, valuesList):
"""
Insert a record in the intable to be really insterted afterwards
"""
if self.__readOnly:
return S_ERROR("ReadOnly mode enabled. No modification allowed")
self.log.info(
"Adding record to queue",
"for type %s\n [%s -> %s]" %
(typeName,
Time.fromEpoch(startTime),
Time.fromEpoch(endTime)))
if typeName not in self.dbCatalog:
return S_ERROR("Type %s has not been defined in the db" % typeName)
result = self.__insertInQueueTable(typeName, startTime, endTime, valuesList)
if not result['0K']:
return result
return S_OK()
  def __insertFromINTable(self, recordTuples):
    """
    Do the real insert and delete from the in buffer table

    Runs in a thread-pool job.  Each tuple is
    (id, typeName, startTime, endTime, valuesList, insertionEpoch).
    """
    self.log.verbose("Received bundle to process", "of %s elements" % len(recordTuples))
    for record in recordTuples:
      iD, typeName, startTime, endTime, valuesList, insertionEpoch = record
      result = self.insertRecordDirectly(typeName, startTime, endTime, valuesList)
      if not result['OK']:
        # On failure, release the row (taken=0) so a later pass retries it
        self._update("UPDATE `%s` SET taken=0 WHERE id=%s" % (_getTableName("in", typeName), iD))
        self.log.error("Can't insert row", result['Message'])
        continue
      # Inserted for real: remove the row from the queue table
      result = self._update("DELETE FROM `%s` WHERE id=%s" % (_getTableName("in", typeName), iD))
      if not result['OK']:
        self.log.error("Can't delete row from the IN table", result['Message'])
      gMonitor.addMark("insertiontime", Time.toEpoch() - insertionEpoch)
  def insertRecordDirectly(self, typeName, startTime, endTime, valuesList):
    """
    Add an entry to the type contents

    Translates key values into key-table ids, inserts the raw record into
    the type table, then bucketizes it inside a transaction.
    """
    if self.__readOnly:
      return S_ERROR("ReadOnly mode enabled. No modification allowed")
    gMonitor.addMark("registeradded", 1)
    gMonitor.addMark("registeradded:%s" % typeName, 1)
    self.log.info("Adding record", "for type %s\n [%s -> %s]" %
                  (typeName, Time.fromEpoch(startTime), Time.fromEpoch(endTime)))
    if typeName not in self.dbCatalog:
      return S_ERROR("Type %s has not been defined in the db" % typeName)
    # Discover key indexes
    for keyPos in range(len(self.dbCatalog[typeName]['keys'])):
      keyName = self.dbCatalog[typeName]['keys'][keyPos]
      keyValue = valuesList[keyPos]
      retVal = self.__addKeyValue(typeName, keyName, keyValue)
      if not retVal['OK']:
        return retVal
      self.log.verbose("Value %s for key %s has id %s" % (keyValue, keyName, retVal['Value']))
      # Replace the raw key value with its integer id in place
      valuesList[keyPos] = retVal['Value']
    insertList = list(valuesList)
    insertList.append(startTime)
    insertList.append(endTime)
    retVal = self._getConnection()
    if not retVal['OK']:
      return retVal
    connObj = retVal['Value']
    try:
      # Raw record goes into the type table outside the transaction
      retVal = self.insertFields(
          _getTableName("type", typeName),
          self.dbCatalog[typeName]['typeFields'],
          insertList,
          conn=connObj
      )
      if not retVal['OK']:
        return retVal
      # HACK: One more record to split in the buckets to be able to count total entries
      valuesList.append(1)
      # Bucket updates are transactional: roll back on any failure
      retVal = self.__startTransaction(connObj)
      if not retVal['OK']:
        return retVal
      retVal = self.__splitInBuckets(typeName, startTime, endTime, valuesList, connObj=connObj)
      if not retVal['OK']:
        self.__rollbackTransaction(connObj)
        return retVal
      return self.__commitTransaction(connObj)
    finally:
      # Always release the connection, whatever the outcome
      connObj.close()
def deleteRecord(self, typeName, startTime, endTime, valuesList):
"""
Add an entry to the type contents
"""
if self.__readOnly:
return S_ERROR("ReadOnly mode enabled. No modification allowed")
self.log.info(
"Deleting record record",
"for type %s\n [%s -> %s]" %
(typeName,
Time.fromEpoch(startTime),
Time.fromEpoch(endTime)))
if typeName not in self.dbCatalog:
return S_ERROR("Type %s has not been defined in the db" % typeName)
sqlValues = []
sqlValues.extend(valuesList)
# Discover key indexes
for keyPos in range(len(self.dbCatalog[typeName]['keys'])):
keyName = self.dbCatalog[typeName]['keys'][keyPos]
keyValue = sqlValues[keyPos]
retVal = self.__addKeyValue(typeName, keyName, keyValue)
if not retVal['OK']:
return retVal
self.log.verbose("Value %s for key %s has id %s" % (keyValue, keyName, retVal['Value']))
sqlValues[keyPos] = retVal['Value']
sqlCond = []
mainTable = _getTableName("type", typeName)
sqlValues.extend([startTime, endTime])
numKeyFields = len(self.dbCatalog[typeName]['keys'])
numValueFields = len(self.dbCatalog[typeName]['values'])
for i in range(len(sqlValues)):
needToRound = False
if i >= numKeyFields and i - numKeyFields < numValueFields:
vIndex = i - numKeyFields
if self.dbCatalog[typeName]['definition']['values'][vIndex][1].find("FLOAT") > -1:
needToRound = True
if needToRound:
compVal = ["`%s`.`%s`" % (mainTable, self.dbCatalog[typeName]['typeFields'][i]),
"%f" % sqlValues[i]]
compVal = ["CEIL( %s * 1000 )" % v for v in compVal]
compVal = "ABS( %s ) <= 1 " % " - ".join(compVal)
else:
sqlCond.append("`%s`.`%s`=%s" % (mainTable,
self.dbCatalog[typeName]['typeFields'][i],
sqlValues[i]))
retVal = self._getConnection()
if not retVal['OK']:
return retVal
connObj = retVal['Value']
retVal = self.__startTransaction(connObj)
if not retVal['OK']:
return retVal
retVal = self._update("DELETE FROM `%s` WHERE %s" % (mainTable, " AND ".join(sqlCond)),
conn=connObj)
if not retVal['OK']:
return retVal
numInsertions = retVal['Value']
# Deleted from type, now the buckets
# HACK: One more record to split in the buckets to be able to count total entries
if numInsertions == 0:
return S_OK(0)
sqlValues.append(1)
retVal = self.__deleteFromBuckets(typeName, startTime, endTime, sqlValues, numInsertions, connObj=connObj)
if not retVal['OK']:
self.__rollbackTransaction(connObj)
return retVal
retVal = self.__commitTransaction(connObj)
if not retVal['OK']:
self.__rollbackTransaction(connObj)
return retVal
return S_OK(numInsertions)
def __splitInBuckets(self, typeName, startTime, endTime, valuesList, connObj=False):
"""
Bucketize a record
"""
# Calculate amount of buckets
buckets = self.calculateBuckets(typeName, startTime, endTime)
# Separate key values from normal values
numKeys = len(self.dbCatalog[typeName]['keys'])
keyValues = valuesList[:numKeys]
valuesList = valuesList[numKeys:]
self.log.verbose("Splitting entry", " in %s buckets" % len(buckets))
return self.__writeBuckets(typeName, buckets, keyValues, valuesList, connObj=connObj)
def __deleteFromBuckets(self, typeName, startTime, endTime, valuesList, numInsertions, connObj=False):
"""
DeBucketize a record
"""
# Calculate amount of buckets
buckets = self.calculateBuckets(typeName, startTime, endTime, self.__lastCompactionEpoch)
# Separate key values from normal values
numKeys = len(self.dbCatalog[typeName]['keys'])
keyValues = valuesList[:numKeys]
valuesList = valuesList[numKeys:]
self.log.verbose("Deleting bucketed entry", "from %s buckets" % len(buckets))
for bucketInfo in buckets:
bucketStartTime = bucketInfo[0]
bucketProportion = bucketInfo[1]
bucketLength = bucketInfo[2]
for _i in range(max(1, self.__deadLockRetries)):
retVal = self.__extractFromBucket(typeName,
bucketStartTime,
bucketLength,
keyValues,
valuesList, bucketProportion * numInsertions, connObj=connObj)
if not retVal['OK']:
# If failed because of dead lock try restarting
if retVal['Message'].find("try restarting transaction"):
continue
return retVal
# If OK, break loop
if retVal['OK']:
break
return S_OK()
  def getBucketsDef(self, typeName):
    """Return the buckets-length definition (list of (timeSpan, bucketLength) tuples) for a type."""
    return self.dbBucketsLength[typeName]
  def __generateSQLConditionForKeys(self, typeName, keyValues):
    """
    Generate sql condition for buckets, values are indexes to real values

    Returns an " AND "-joined condition string matching each key column of
    the bucket table against its id.
    """
    realCondList = []
    for keyPos in range(len(self.dbCatalog[typeName]['keys'])):
      keyField = self.dbCatalog[typeName]['keys'][keyPos]
      keyValue = keyValues[keyPos]
      retVal = self._escapeString(keyValue)
      if not retVal['OK']:
        # NOTE(review): on escape failure this returns the S_ERROR dict,
        # while callers concatenate the result into SQL strings — confirm
        # whether this path can actually be reached
        return retVal
      keyValue = retVal['Value']
      realCondList.append("`%s`.`%s` = %s" % (_getTableName("bucket", typeName), keyField, keyValue))
    return " AND ".join(realCondList)
def __getBucketFromDB(self, typeName, startTime, bucketLength, keyValues, connObj=False):
"""
Get a bucket from the DB
"""
tableName = _getTableName("bucket", typeName)
sqlFields = []
for valueField in self.dbCatalog[typeName]['values']:
sqlFields.append("`%s`.`%s`" % (tableName, valueField))
sqlFields.append("`%s`.`entriesInBucket`" % (tableName))
cmd = "SELECT %s FROM `%s`" % (", ".join(sqlFields), _getTableName("bucket", typeName))
cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % (
tableName,
startTime,
tableName,
bucketLength
)
cmd += self.__generateSQLConditionForKeys(typeName, keyValues)
return self._query(cmd, conn=connObj)
def __extractFromBucket(self, typeName, startTime, bucketLength, keyValues, bucketValues, proportion, connObj=False):
"""
Update a bucket when coming from the raw insert
"""
tableName = _getTableName("bucket", typeName)
cmd = "UPDATE `%s` SET " % tableName
sqlValList = []
for pos in range(len(self.dbCatalog[typeName]['values'])):
valueField = self.dbCatalog[typeName]['values'][pos]
value = bucketValues[pos]
fullFieldName = "`%s`.`%s`" % (tableName, valueField)
sqlValList.append("%s=GREATEST(0,%s-(%s*%s))" % (fullFieldName, fullFieldName, value, proportion))
sqlValList.append("`%s`.`entriesInBucket`=GREATEST(0,`%s`.`entriesInBucket`-(%s*%s))" % (
tableName,
tableName,
bucketValues[-1],
proportion
))
cmd += ", ".join(sqlValList)
cmd += " WHERE `%s`.`startTime`='%s' AND `%s`.`bucketLength`='%s' AND " % (
tableName,
startTime,
tableName,
bucketLength
)
cmd += self.__generateSQLConditionForKeys(typeName, keyValues)
return self._update(cmd, conn=connObj)
def __writeBuckets(self, typeName, buckets, keyValues, valuesList, connObj=False):
""" Insert or update a bucket
"""
# tableName = _getTableName( "bucket", typeName )
# INSERT PART OF THE QUERY
sqlFields = ['`startTime`', '`bucketLength`', '`entriesInBucket`']
for keyPos in range(len(self.dbCatalog[typeName]['keys'])):
sqlFields.append("`%s`" % self.dbCatalog[typeName]['keys'][keyPos])
sqlUpData = ["`entriesInBucket`=`entriesInBucket`+VALUES(`entriesInBucket`)"]
for valPos in range(len(self.dbCatalog[typeName]['values'])):
valueField = "`%s`" % self.dbCatalog[typeName]['values'][valPos]
sqlFields.append(valueField)
sqlUpData.append("%s=%s+VALUES(%s)" % (valueField, valueField, valueField))
valuesGroups = []
for bucketInfo in buckets:
bStartTime = bucketInfo[0]
bProportion = bucketInfo[1]
bLength = bucketInfo[2]
sqlValues = [bStartTime, bLength, "(%s*%s)" % (valuesList[-1], bProportion)]
for keyPos in range(len(self.dbCatalog[typeName]['keys'])):
sqlValues.append(keyValues[keyPos])
for valPos in range(len(self.dbCatalog[typeName]['values'])):
# value = valuesList[ valPos ]
sqlValues.append("(%s*%s)" % (valuesList[valPos], bProportion))
valuesGroups.append("( %s )" % ",".join(str(val) for val in sqlValues))
cmd = "INSERT INTO `%s` ( %s ) " % (_getTableName("bucket", typeName), ", ".join(sqlFields))
cmd += "VALUES %s " % ", ".join(valuesGroups)
cmd += "ON DUPLICATE KEY UPDATE %s" % ", ".join(sqlUpData)
for _i in range(max(1, self.__deadLockRetries)):
result = self._update(cmd, conn=connObj)
if not result['OK']:
# If failed because of dead lock try restarting
if result['Message'].find("try restarting transaction"):
continue
return result
# If OK, break loopo
if result['OK']:
return result
return S_ERROR("Cannot update bucket: %s" % result['Message'])
def __checkFieldsExistsInType(self, typeName, fields, tableType):
"""
Check wether a list of fields exist for a given typeName
"""
missing = []
tableFields = self.dbCatalog[typeName]['%sFields' % tableType]
for key in fields:
if key not in tableFields:
missing.append(key)
return missing
def __checkIncomingFieldsForQuery(self, typeName, selectFields, condDict, groupFields, orderFields, tableType):
missing = self.__checkFieldsExistsInType(typeName, selectFields[1], tableType)
if missing:
return S_ERROR("Value keys %s are not defined" % ", ".join(missing))
missing = self.__checkFieldsExistsInType(typeName, condDict, tableType)
if missing:
return S_ERROR("Condition keys %s are not defined" % ", ".join(missing))
if groupFields:
missing = self.__checkFieldsExistsInType(typeName, groupFields[1], tableType)
if missing:
return S_ERROR("Group fields %s are not defined" % ", ".join(missing))
if orderFields:
missing = self.__checkFieldsExistsInType(typeName, orderFields[1], tableType)
if missing:
return S_ERROR("Order fields %s are not defined" % ", ".join(missing))
return S_OK()
def retrieveRawRecords(self, typeName, startTime, endTime, condDict, orderFields, connObj=False):
"""
Get RAW data from the DB
"""
if typeName not in self.dbCatalog:
return S_ERROR("Type %s not defined" % typeName)
selectFields = [["%s", "%s"], ["startTime", "endTime"]]
for tK in ('keys', 'values'):
for key in self.dbCatalog[typeName][tK]:
selectFields[0].append("%s")
selectFields[1].append(key)
selectFields[0] = ", ".join(selectFields[0])
return self.__queryType(typeName, startTime, endTime, selectFields,
condDict, False, orderFields, "type")
  def retrieveBucketedData(
          self,
          typeName,
          startTime,
          endTime,
          selectFields,
          condDict,
          groupFields,
          orderFields,
          connObj=False):
    """
    Get bucketed (pre-aggregated) data from the DB.

    Parameters:
      - typeName -> accounting type name
      - startTime & endTime -> int
          epoch bounds of the requested window
      - selectFields: tuple containing a string and a list of fields:
          ( "SUM(%s), %s/%s", ( "field1name", "field2name", "field3name" ) )
      - condDict -> conditions for the query
          key -> name of the field
          value -> list of possible values
      - groupFields -> fields to group by, same (format, fields) shape:
          ( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )
      - orderFields -> fields to order by, same (format, fields) shape:
          ( "%s, %s, %s", ( "field1name", "field2name", "field3name" ) )

    Returns the S_OK/S_ERROR result of the underlying query.
    """
    if typeName not in self.dbCatalog:
      return S_ERROR("Type %s is not defined" % typeName)
    # Wall-clock timer for the monitoring mark emitted at the end
    startQueryEpoch = time.time()
    if len(selectFields) < 2:
      return S_ERROR("selectFields has to be a list containing a string and a list of fields")
    # Reject queries referencing fields unknown to the bucket table
    retVal = self.__checkIncomingFieldsForQuery(typeName, selectFields, condDict, groupFields, orderFields, "bucket")
    if not retVal['OK']:
      return retVal
    # Align startTime down to the boundary of the bucket size in force
    # for that age of data, so whole buckets are selected
    nowEpoch = Time.toEpoch(Time.dateTime())
    bucketTimeLength = self.calculateBucketLengthForTime(typeName, nowEpoch, startTime)
    startTime = startTime - startTime % bucketTimeLength
    result = self.__queryType(
        typeName,
        startTime,
        endTime,
        selectFields,
        condDict,
        groupFields,
        orderFields,
        "bucket",
        connObj=connObj
    )
    # Record how long the query took for the monitoring system
    gMonitor.addMark("querytime", Time.toEpoch() - startQueryEpoch)
    return result
  def __queryType(
          self,
          typeName,
          startTime,
          endTime,
          selectFields,
          condDict,
          groupFields,
          orderFields,
          tableType,
          connObj=False):
    """
    Execute a query over a main table (raw "type" table or "bucket" table).

    Builds the full SELECT: resolves key fields through their key tables
    (joining on the id columns), applies the time window, the value
    conditions, and the requested GROUP BY / ORDER BY clauses.
    selectFields / groupFields / orderFields are (formatString, fieldList)
    pairs; tableType is "type" or "bucket".
    """
    tableName = _getTableName(tableType, typeName)
    cmd = "SELECT"
    # JOIN conditions linking key columns (ids) to their key tables
    sqlLinkList = []
    # Check if groupFields and orderFields are in ( "%s", ( field1, ) ) form
    if groupFields:
      try:
        groupFields[0] % tuple(groupFields[1])
        # We can have the case when we have multiple grouping and the fields in the select does not much the group by conditions
        # for example: selectFields = ('%s, %s, %s, SUM(%s)', ['Site', 'startTime', 'bucketLength', 'entriesInBucket'])
        # groupFields = ('%s, %s', ['startTime', 'Site'])
        # in this case the correct query must be: select Site, startTime, bucketlength, sum(entriesInBucket) from xxxx where yyy Group by Site, startTime, bucketlength
        #
        # When we have multiple grouping then we must have all the fields in Group by. This is from mysql 5.7.
        # We have fields which are not in the groupFields and it is in selectFields
        if 'bucketLength' in selectFields[1]:
          groupFields = list(groupFields)
          groupFields[0] = "%s, %s" % (groupFields[0], "%s")
          # NOTE(review): this appends lowercase 'bucketlength' while the
          # membership test above uses 'bucketLength'; MySQL column names
          # are case-insensitive in practice, but confirm this is intended.
          # Also note this mutates the caller's groupFields[1] list in place.
          groupFields[1].append('bucketlength')
          groupFields = tuple(groupFields)
      except TypeError as e:
        return S_ERROR("Cannot format properly group string: %s" % repr(e))
    if orderFields:
      try:
        orderFields[0] % tuple(orderFields[1])
      except TypeError as e:
        return S_ERROR("Cannot format properly order string: %s" % repr(e))
    # Calculate fields to retrieve: key fields are read from their key
    # tables (human-readable value), plain fields from the main table
    realFieldList = []
    for rawFieldName in selectFields[1]:
      keyTable = _getTableName("key", typeName, rawFieldName)
      if rawFieldName in self.dbCatalog[typeName]['keys']:
        realFieldList.append("`%s`.`value`" % keyTable)
        List.appendUnique(sqlLinkList, "`%s`.`%s` = `%s`.`id`" % (tableName,
                                                                  rawFieldName,
                                                                  keyTable))
      else:
        realFieldList.append("`%s`.`%s`" % (tableName, rawFieldName))
    try:
      cmd += " %s" % selectFields[0] % tuple(realFieldList)
    except TypeError as e:
      return S_ERROR("Error generating select fields string: %s" % repr(e))
    # Calculate tables needed: a key table is joined in only if that key
    # is referenced anywhere in the query
    sqlFromList = ["`%s`" % tableName]
    for key in self.dbCatalog[typeName]['keys']:
      if key in condDict or key in selectFields[1] \
              or (groupFields and key in groupFields[1]) \
              or (orderFields and key in orderFields[1]):
        sqlFromList.append("`%s`" % _getTableName("key", typeName, key))
    cmd += " FROM %s" % ", ".join(sqlFromList)
    # Calculate time conditions
    sqlTimeCond = []
    if startTime:
      if tableType == 'bucket':
        # HACK because MySQL and UNIX do not start epoch at the same time
        startTime = startTime + 3600
        # Snap to the start of the bucket containing startTime
        startTime = self.calculateBuckets(typeName, startTime, startTime)[0][0]
      sqlTimeCond.append("`%s`.`startTime` >= %s" % (tableName, startTime))
    if endTime:
      if tableType == "bucket":
        # Buckets only have a startTime column; raw records have endTime
        endTimeSQLVar = "startTime"
        endTime = endTime + 3600
        endTime = self.calculateBuckets(typeName, endTime, endTime)[0][0]
      else:
        endTimeSQLVar = "endTime"
      sqlTimeCond.append("`%s`.`%s` <= %s" % (tableName, endTimeSQLVar, endTime))
    cmd += " WHERE %s" % " AND ".join(sqlTimeCond)
    # Calculate conditions: each condDict entry becomes an OR group of
    # escaped equality tests (against the key table for key fields)
    sqlCondList = []
    for keyName in condDict:
      sqlORList = []
      if keyName in self.dbCatalog[typeName]['keys']:
        List.appendUnique(sqlLinkList, "`%s`.`%s` = `%s`.`id`" % (tableName,
                                                                  keyName,
                                                                  _getTableName("key", typeName, keyName)
                                                                  ))
      if not isinstance(condDict[keyName], (list, tuple)):
        condDict[keyName] = [condDict[keyName]]
      for keyValue in condDict[keyName]:
        retVal = self._escapeString(keyValue)
        if not retVal['OK']:
          return retVal
        keyValue = retVal['Value']
        if keyName in self.dbCatalog[typeName]['keys']:
          sqlORList.append("`%s`.`value` = %s" % (_getTableName("key", typeName, keyName), keyValue))
        else:
          sqlORList.append("`%s`.`%s` = %s" % (tableName, keyName, keyValue))
      sqlCondList.append("( %s )" % " OR ".join(sqlORList))
    if sqlCondList:
      cmd += " AND %s" % " AND ".join(sqlCondList)
    # Calculate grouping and sorting: rewrite raw field names into fully
    # qualified column references (key fields point at their key tables).
    # NOTE(review): this mutates the caller's preGenFields[1] lists in place.
    for preGenFields in (groupFields, orderFields):
      if preGenFields:
        for i in range(len(preGenFields[1])):
          field = preGenFields[1][i]
          if field in self.dbCatalog[typeName]['keys']:
            List.appendUnique(sqlLinkList, "`%s`.`%s` = `%s`.`id`" % (tableName,
                                                                      field,
                                                                      _getTableName("key", typeName, field)
                                                                      ))
            if preGenFields[0] != "%s":
              # The default grouping was changed
              preGenFields[1][i] = "`%s`.Value" % _getTableName("key", typeName, field)
            else:
              # The default grouping is maintained
              preGenFields[1][i] = "`%s`.`%s`" % (tableName, field)
          elif field in ['bucketLength', 'entriesInBucket']:  # these are not in the dbCatalog
            preGenFields[1][i] = "`%s`.`%s`" % (tableName, field)
    if sqlLinkList:
      cmd += " AND %s" % " AND ".join(sqlLinkList)
    if groupFields:
      cmd += " GROUP BY %s" % (groupFields[0] % tuple(groupFields[1]))
    if orderFields:
      cmd += " ORDER BY %s" % (orderFields[0] % tuple(orderFields[1]))
    self.log.verbose(cmd)
    return self._query(cmd, conn=connObj)
def compactBuckets(self, typeFilter=False):
"""
Compact buckets for all defined types
"""
if self.__readOnly:
return S_ERROR("ReadOnly mode enabled. No modification allowed")
gSynchro.lock()
try:
if self.__doingCompaction:
return S_OK()
self.__doingCompaction = True
finally:
gSynchro.unlock()
slow = True
for typeName in self.dbCatalog:
if typeFilter and typeName.find(typeFilter) == -1:
self.log.info("[COMPACT] Skipping %s" % typeName)
continue
if self.dbCatalog[typeName]['dataTimespan'] > 0:
self.log.info("[COMPACT] Deleting records older that timespan for type %s" % typeName)
self.__deleteRecordsOlderThanDataTimespan(typeName)
self.log.info("[COMPACT] Compacting %s" % typeName)
if slow:
self.__slowCompactBucketsForType(typeName)
else:
self.__compactBucketsForType(typeName)
self.log.info("[COMPACT] Compaction finished")
self.__lastCompactionEpoch = int(Time.toEpoch())
gSynchro.lock()
try:
if self.__doingCompaction:
self.__doingCompaction = False
finally:
gSynchro.unlock()
return S_OK()
def __selectForCompactBuckets(self, typeName, timeLimit, bucketLength, nextBucketLength, connObj=False):
"""
Nasty SQL query to get ideal buckets using grouping by date calculations and adding value contents
"""
tableName = _getTableName("bucket", typeName)
selectSQL = "SELECT "
sqlSelectList = []
for field in self.dbCatalog[typeName]['keys']:
sqlSelectList.append("`%s`.`%s`" % (tableName, field))
for field in self.dbCatalog[typeName]['values']:
sqlSelectList.append("SUM( `%s`.`%s` )" % (tableName, field))
sqlSelectList.append("SUM( `%s`.`entriesInBucket` )" % (tableName))
sqlSelectList.append("MIN( `%s`.`startTime` )" % tableName)
sqlSelectList.append("MAX( `%s`.`startTime` )" % tableName)
selectSQL += ", ".join(sqlSelectList)
selectSQL += " FROM `%s`" % tableName
selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % (tableName, timeLimit)
selectSQL += " `%s`.`bucketLength` = %s" % (tableName, bucketLength)
# MAGIC bucketing
sqlGroupList = [_bucketizeDataField("`%s`.`startTime`" % tableName, nextBucketLength)]
for field in self.dbCatalog[typeName]['keys']:
sqlGroupList.append("`%s`.`%s`" % (tableName, field))
selectSQL += " GROUP BY %s" % ", ".join(sqlGroupList)
return self._query(selectSQL, conn=connObj)
def __deleteForCompactBuckets(self, typeName, timeLimit, bucketLength, connObj=False):
"""
Delete compacted buckets
"""
tableName = _getTableName("bucket", typeName)
deleteSQL = "DELETE FROM `%s` WHERE " % tableName
deleteSQL += "`%s`.`startTime` < '%s' AND " % (tableName, timeLimit)
deleteSQL += "`%s`.`bucketLength` = %s" % (tableName, bucketLength)
return self._update(deleteSQL, conn=connObj)
def __compactBucketsForType(self, typeName):
"""
Compact all buckets for a given type
"""
nowEpoch = Time.toEpoch()
#retVal = self.__startTransaction( connObj )
# if not retVal[ 'OK' ]:
# return retVal
for bPos in range(len(self.dbBucketsLength[typeName]) - 1):
self.log.info("[COMPACT] Query %d of %d" % (bPos + 1, len(self.dbBucketsLength[typeName]) - 1))
secondsLimit = self.dbBucketsLength[typeName][bPos][0]
bucketLength = self.dbBucketsLength[typeName][bPos][1]
timeLimit = (nowEpoch - nowEpoch % bucketLength) - secondsLimit
nextBucketLength = self.dbBucketsLength[typeName][bPos + 1][1]
self.log.info(
"[COMPACT] Compacting data newer that %s with bucket size %s" %
(Time.fromEpoch(timeLimit), bucketLength))
# Retrieve the data
retVal = self.__selectForCompactBuckets(typeName, timeLimit, bucketLength, nextBucketLength)
if not retVal['OK']:
#self.__rollbackTransaction( connObj )
return retVal
bucketsData = retVal['Value']
self.log.info("[COMPACT] Got %d records to compact" % len(bucketsData))
if len(bucketsData) == 0:
continue
retVal = self.__deleteForCompactBuckets(typeName, timeLimit, bucketLength)
if not retVal['OK']:
#self.__rollbackTransaction( connObj )
return retVal
self.log.info(
"[COMPACT] Compacting %s records %s seconds size for %s" %
(len(bucketsData), bucketLength, typeName))
# Add data
for record in bucketsData:
startTime = record[-2]
endTime = record[-1]
valuesList = record[:-2]
retVal = self.__splitInBuckets(typeName, startTime, endTime, valuesList)
if not retVal['OK']:
#self.__rollbackTransaction( connObj )
self.log.error("[COMPACT] Error while compacting data for record", "%s: %s" % (typeName, retVal['Value']))
self.log.info("[COMPACT] Finished compaction %d of %d" % (bPos, len(self.dbBucketsLength[typeName]) - 1))
# return self.__commitTransaction( connObj )
return S_OK()
def __slowCompactBucketsForType(self, typeName):
"""
Compact all buckets for a given type
"""
nowEpoch = Time.toEpoch()
for bPos in range(len(self.dbBucketsLength[typeName]) - 1):
self.log.info("[COMPACT] Query %d of %d" % (bPos, len(self.dbBucketsLength[typeName]) - 1))
secondsLimit = self.dbBucketsLength[typeName][bPos][0]
bucketLength = self.dbBucketsLength[typeName][bPos][1]
timeLimit = (nowEpoch - nowEpoch % bucketLength) - secondsLimit
self.log.info("[COMPACT] Compacting data newer that %s with bucket size %s for %s" % (
Time.fromEpoch(timeLimit),
bucketLength,
typeName))
querySize = 10000
previousRecordsSelected = querySize
totalCompacted = 0
while previousRecordsSelected == querySize:
# Retrieve the data
self.log.info("[COMPACT] Retrieving buckets to compact newer that %s with size %s" % (
Time.fromEpoch(timeLimit),
bucketLength))
roundStartTime = time.time()
result = self.__selectIndividualForCompactBuckets(typeName, timeLimit, bucketLength,
querySize)
if not result['OK']:
#self.__rollbackTransaction( connObj )
return result
bucketsData = result['Value']
previousRecordsSelected = len(bucketsData)
selectEndTime = time.time()
self.log.info("[COMPACT] Got %d buckets (%d done) (took %.2f secs)" % (previousRecordsSelected,
totalCompacted,
selectEndTime - roundStartTime))
if len(bucketsData) == 0:
break
result = self.__deleteIndividualForCompactBuckets(typeName, bucketsData)
if not result['OK']:
#self.__rollbackTransaction( connObj )
return result
bucketsData = result['Value']
deleteEndTime = time.time()
self.log.info("[COMPACT] Deleted %s out-of-bounds buckets (took %.2f secs)" % (len(bucketsData),
deleteEndTime - selectEndTime))
# Add data
for record in bucketsData:
startTime = record[-2]
endTime = record[-2] + record[-1]
valuesList = record[:-2]
retVal = self.__splitInBuckets(typeName, startTime, endTime, valuesList)
if not retVal['OK']:
self.log.error("[COMPACT] Error while compacting data for buckets", "%s: %s" % (typeName, retVal['Value']))
totalCompacted += len(bucketsData)
insertElapsedTime = time.time() - deleteEndTime
self.log.info("[COMPACT] Records compacted (took %.2f secs, %.2f secs/bucket)" %
(insertElapsedTime, insertElapsedTime / len(bucketsData)))
self.log.info("[COMPACT] Finised compaction %d of %d" % (bPos, len(self.dbBucketsLength[typeName]) - 1))
# return self.__commitTransaction( connObj )
return S_OK()
def __selectIndividualForCompactBuckets(self, typeName, timeLimit, bucketLength, querySize, connObj=False):
"""
Nasty SQL query to get ideal buckets using grouping by date calculations and adding value contents
"""
tableName = _getTableName("bucket", typeName)
selectSQL = "SELECT "
sqlSelectList = []
for field in self.dbCatalog[typeName]['keys']:
sqlSelectList.append("`%s`.`%s`" % (tableName, field))
for field in self.dbCatalog[typeName]['values']:
sqlSelectList.append("`%s`.`%s`" % (tableName, field))
sqlSelectList.append("`%s`.`entriesInBucket`" % (tableName))
sqlSelectList.append("`%s`.`startTime`" % tableName)
sqlSelectList.append("`%s`.bucketLength" % (tableName))
selectSQL += ", ".join(sqlSelectList)
selectSQL += " FROM `%s`" % tableName
selectSQL += " WHERE `%s`.`startTime` < '%s' AND" % (tableName, timeLimit)
selectSQL += " `%s`.`bucketLength` = %s" % (tableName, bucketLength)
# MAGIC bucketing
selectSQL += " LIMIT %d" % querySize
return self._query(selectSQL, conn=connObj)
def __deleteIndividualForCompactBuckets(self, typeName, bucketsData, connObj=False):
"""
Delete compacted buckets
"""
tableName = _getTableName("bucket", typeName)
keyFields = self.dbCatalog[typeName]['keys']
deleteQueryLimit = 50
deletedBuckets = []
for bLimit in range(0, len(bucketsData), deleteQueryLimit):
delCondsSQL = []
for record in bucketsData[bLimit: bLimit + deleteQueryLimit]:
condSQL = []
for iPos in range(len(keyFields)):
field = keyFields[iPos]
condSQL.append("`%s`.`%s` = %s" % (tableName, field, record[iPos]))
condSQL.append("`%s`.`startTime` = %d" % (tableName, record[-2]))
condSQL.append("`%s`.`bucketLength` = %d" % (tableName, record[-1]))
delCondsSQL.append("(%s)" % " AND ".join(condSQL))
delSQL = "DELETE FROM `%s` WHERE %s" % (tableName, " OR ".join(delCondsSQL))
result = self._update(delSQL, conn=connObj)
if not result['OK']:
self.log.error("Cannot delete individual records for compaction", result['Message'])
else:
deletedBuckets.extend(bucketsData[bLimit: bLimit + deleteQueryLimit])
return S_OK(deletedBuckets)
def __deleteRecordsOlderThanDataTimespan(self, typeName):
"""
IF types define dataTimespan, then records older than datatimespan seconds will be deleted
automatically
"""
dataTimespan = self.dbCatalog[typeName]['dataTimespan'] + self.dbBucketsLength[typeName][-1][1]
if dataTimespan < 86400 * 30:
return
for table, field in ((_getTableName("type", typeName), 'endTime'),
(_getTableName("bucket", typeName), 'startTime')):
self.log.info("[COMPACT] Deleting old records for table %s" % table)
deleteLimit = 100000
deleted = deleteLimit
while deleted >= deleteLimit:
sqlCmd = "DELETE FROM `%s` WHERE %s < UNIX_TIMESTAMP()-%d LIMIT %d" % (table, field, dataTimespan, deleteLimit)
result = self._update(sqlCmd)
if not result['OK']:
self.log.error("[COMPACT] Cannot delete old records", "Table: %s Timespan: %s Error: %s" % (
table,
dataTimespan,
result['Message']
))
break
self.log.info("[COMPACT] Deleted %d records for %s table" % (result['Value'], table))
deleted = result['Value']
time.sleep(1)
  def regenerateBuckets(self, typeName):
    """
    Drop all buckets for *typeName* and rebuild them from the raw records.

    Builds one pair of SQL queries per configured bucket size range
    (records fitting in a single bucket are pre-aggregated with GROUP BY;
    records spanning buckets are fetched individually), plus one query for
    records falling between ranges, then feeds every returned row back
    through __splitInBuckets.

    :return: S_OK() or the first S_ERROR encountered
    """
    if self.__readOnly:
      return S_ERROR("ReadOnly mode enabled. No modification allowed")
    # Delete old entries if any
    if self.dbCatalog[typeName]['dataTimespan'] > 0:
      self.log.info("[REBUCKET] Deleting records older that timespan for type %s" % typeName)
      self.__deleteRecordsOlderThanDataTimespan(typeName)
      self.log.info("[REBUCKET] Done deleting old records")
    rawTableName = _getTableName("type", typeName)
    #retVal = self.__startTransaction( connObj )
    # if not retVal[ 'OK' ]:
    # return retVal
    self.log.info("[REBUCKET] Deleting buckets for %s" % typeName)
    retVal = self._update("DELETE FROM `%s`" % _getTableName("bucket", typeName))
    if not retVal['OK']:
      return retVal
    # Generate the common part of the query
    # SELECT fields
    startTimeTableField = "`%s`.startTime" % rawTableName
    endTimeTableField = "`%s`.endTime" % rawTableName
    # Select strings and sum select strings
    sqlSUMSelectList = []
    sqlSelectList = []
    for field in self.dbCatalog[typeName]['keys']:
      sqlSUMSelectList.append("`%s`.`%s`" % (rawTableName, field))
      sqlSelectList.append("`%s`.`%s`" % (rawTableName, field))
    for field in self.dbCatalog[typeName]['values']:
      # Aggregated variant sums values; plain variant reads them directly
      sqlSUMSelectList.append("SUM( `%s`.`%s` )" % (rawTableName, field))
      sqlSelectList.append("`%s`.`%s`" % (rawTableName, field))
    sumSelectString = ", ".join(sqlSUMSelectList)
    selectString = ", ".join(sqlSelectList)
    # Grouping fields
    sqlGroupList = []
    for field in self.dbCatalog[typeName]['keys']:
      sqlGroupList.append("`%s`.`%s`" % (rawTableName, field))
    groupingString = ", ".join(sqlGroupList)
    # List to contain all queries
    sqlQueries = []
    dateInclusiveConditions = []
    # Any key column works for COUNT(); the first one is used
    countedField = "`%s`.`%s`" % (rawTableName, self.dbCatalog[typeName]['keys'][0])
    lastTime = Time.toEpoch()
    # Iterate for all ranges, walking backwards in time from now
    for iRange in range(len(self.dbBucketsLength[typeName])):
      bucketTimeSpan = self.dbBucketsLength[typeName][iRange][0]
      bucketLength = self.dbBucketsLength[typeName][iRange][1]
      startRangeTime = lastTime - bucketTimeSpan
      endRangeTime = lastTime
      lastTime -= bucketTimeSpan
      # SQL expressions flooring start/end times to the bucket boundary
      bucketizedStart = _bucketizeDataField(startTimeTableField, bucketLength)
      bucketizedEnd = _bucketizeDataField(endTimeTableField, bucketLength)
      timeSelectString = "MIN(%s), MAX(%s)" % (startTimeTableField,
                                               endTimeTableField)
      # Is the last bucket?
      if iRange == len(self.dbBucketsLength[typeName]) - 1:
        whereString = "%s <= %d" % (endTimeTableField,
                                    endRangeTime)
      else:
        whereString = "%s > %d AND %s <= %d" % (
            startTimeTableField,
            startRangeTime,
            endTimeTableField,
            endRangeTime)
      # Record starts and ends in the same bucket slot
      sameBucketCondition = "(%s) = (%s)" % (bucketizedStart, bucketizedEnd)
      # Records that fit in a bucket: pre-aggregate them with GROUP BY
      sqlQuery = "SELECT %s, %s, COUNT(%s) FROM `%s` WHERE %s AND %s GROUP BY %s, %s" % (
          timeSelectString,
          sumSelectString,
          countedField,
          rawTableName,
          whereString,
          sameBucketCondition,
          groupingString,
          bucketizedStart)
      sqlQueries.append(sqlQuery)
      # Records that fit in more than one bucket: fetch individually (count 1)
      sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE %s AND NOT %s" % (startTimeTableField,
                                                                         endTimeTableField,
                                                                         selectString,
                                                                         rawTableName,
                                                                         whereString,
                                                                         sameBucketCondition
                                                                         )
      sqlQueries.append(sqlQuery)
      dateInclusiveConditions.append("( %s )" % whereString)
    # Query for records that are in between two ranges
    sqlQuery = "SELECT %s, %s, %s, 1 FROM `%s` WHERE NOT %s" % (
        startTimeTableField,
        endTimeTableField,
        selectString,
        rawTableName,
        " AND NOT ".join(dateInclusiveConditions)
    )
    sqlQueries.append(sqlQuery)
    self.log.info("[REBUCKET] Retrieving data for rebuilding buckets for type %s..." % (typeName))
    queryNum = 0
    for sqlQuery in sqlQueries:
      self.log.info("[REBUCKET] Executing query #%s..." % queryNum)
      queryNum += 1
      retVal = self._query(sqlQuery)
      if not retVal['OK']:
        self.log.error("[REBUCKET] Can't retrieve data for rebucketing", retVal['Message'])
        return retVal
      rawData = retVal['Value']
      self.log.info("[REBUCKET] Retrieved %s records" % len(rawData))
      rebucketedRecords = 0
      startQuery = time.time()
      startBlock = time.time()
      numRecords = len(rawData)
      # Every row is (startTime, endTime, keys..., values..., count)
      for entry in rawData:
        startT = entry[0]
        endT = entry[1]
        values = entry[2:]
        retVal = self.__splitInBuckets(typeName, startT, endT, values)
        if not retVal['OK']:
          return retVal
        rebucketedRecords += 1
        # Progress / ETA report every 1000 rebucketed records
        if rebucketedRecords % 1000 == 0:
          queryAvg = rebucketedRecords / float(time.time() - startQuery)
          blockAvg = 1000 / float(time.time() - startBlock)
          startBlock = time.time()
          perDone = 100 * rebucketedRecords / float(numRecords)
          expectedEnd = str(datetime.timedelta(seconds=int((numRecords - rebucketedRecords) / blockAvg)))
          self.log.info("[REBUCKET] Rebucketed %.2f%% %s (%.2f r/s block %.2f r/s query | ETA %s )..." %
                        (perDone, typeName, blockAvg, queryAvg, expectedEnd))
    return S_OK()
  def __startTransaction(self, connObj):
    """Begin an SQL transaction on the given connection."""
    return self._query("START TRANSACTION", conn=connObj)
  def __commitTransaction(self, connObj):
    """Commit the current SQL transaction on the given connection."""
    return self._query("COMMIT", conn=connObj)
  def __rollbackTransaction(self, connObj):
    """Roll back the current SQL transaction on the given connection."""
    return self._query("ROLLBACK", conn=connObj)
def _bucketizeDataField(dataField, bucketLength):
  """Return the SQL expression flooring *dataField* down to a multiple of *bucketLength*."""
  return "{field} - ( {field} % {length} )".format(field=dataField, length=bucketLength)
def _getTableName(tableType, typeName, keyName=None):
  """
  Generate an accounting table name.

  Without *keyName*: "ac_<tableType>_<typeName>".
  With *keyName* (only valid for tableType "key"):
  "ac_key_<typeName>_<keyName>"; any other tableType raises.
  """
  if keyName:
    if tableType != "key":
      raise Exception("Call to _getTableName with tableType as key but with no keyName")
    return "ac_%s_%s_%s" % (tableType, typeName, keyName)
  return "ac_%s_%s" % (tableType, typeName)
|
arrabito/DIRAC
|
AccountingSystem/DB/AccountingDB.py
|
Python
|
gpl-3.0
| 66,426
|
[
"DIRAC"
] |
1fc92955bdd050ab14ee6b97c21ed86cf9fcdda925c6ec77d3369760c4bc4e27
|
# -*- coding: utf-8 -*-
# This is generated code - do not edit
# NOTE(review): fixed one accelerator typo below ('Shift &Right' used '%'
# instead of '&'); the same fix should be applied to the source catalog.
encoding = 'utf-8'
dict = {
    ' of ': ' van ',
    '&About...': '&Info...',
    '&Close Document': 'Document &sluiten',
    '&Comment Region': '&Commentaar toevoegen',
    '&Delete Window': '&Venster verwijderen',
    '&Describe Action': 'Actie beschrijven',
    '&Describe Key': 'Toets beschrijven',
    '&Execute Action': 'Actie uitvoeren',
    '&Execute Macro': 'Macro uitvoeren',
    '&Folding': '&Samenvouwen',
    '&Line Numbers': '&Regelnummers',
    '&Line Wrapping': '&Regelafbraak',
    '&New Window': '&Nieuw venster',
    '&Open Sample Graphviz dot file': '&Open Graphviz dot file voorbeeld',
    '&Open Sample Python': '&Open Python-voorbeeld',
    '&Preferences...': '&Voorkeuren...',
    '&Revert': '&Terugdraaien',
    '&Save...': '&Opslaan...',
    '&Show Key Bindings': '&Toon Toetstoewijzingen',
    '&Show Toolbars': '&Toon Werkbalken',
    '&Tabify': '&Voeg tabs in',
    '&Uncomment Region': '&Verwijder commentaar',
    '&Untabify': '&Verwijder tabs',
    '&Word Count': '&Woorden tellen',
    '&Wrap Words': '&Breek woorden af',
    '. Do you wish to continue?': '. Wilt u doorgaan?',
    'Abort': 'Onderbreek',
    'About this program': 'Over dit programma',
    'Act on the marked buffers according to their flags': 'Verwerk de gemarkeerde buffers volgens hun attributen',
    'Actions': 'Acties',
    'Add ChangeLog Entry': 'Maak nieuw ChangeLog item',
    'Add new ChangeLog entry to the top of the ChangeLog': 'Plaats nieuw ChangeLog item bovenaan het ChangeLog',
    'Attributes': 'Attributen',
    'Background': 'Achtergrond',
    'Bad input': 'Foute invoer',
    'Cancel': 'Annuleren',
    'Cancel Minibuffer': 'Annuleer Minibuffer',
    'Capitalize': 'Elk woord met hoofdletter beginnen',
    'Case': 'Hoofd-/kleine letter',
    'Clear Flags': 'Verwijder attributen',
    'Clear Playlist': 'Afspeellijst leegmaken',
    'Clear all flags from the selected item(s)': 'Verwijder alle attributen van geselecteerd(e) item(s)',
    'Close Tab': 'Tabblad sluiten',
    'Close the current tab': 'Sluit het huidige tabblad',
    'Color': 'Kleur',
    'Contributions by:': 'Bijdragen van:',
    'Copy': 'Kopi\xc3\xabren',
    'Cut': 'Knippen',
    'Debug': 'Fouten zoeken',
    'Decrease Volume': 'Volume verlagen',
    'Decrease the volume': 'Verlaag het volume',
    'Delete Playlist Entry': 'Verwijder afspeellijst-item',
    'Delete current window': 'Verwijder huidig venster',
    'Delete selected songs from playlist': 'Verwijder geselecteerde nummers uit afspeellijst',
    'Describe an action by name': 'Beschrijf een actie op basis van naam',
    'Display a list of all buffers': 'Geef lijst weer van alle buffers',
    'Documents': 'Documenten',
    'Downcase': 'Kleine letters',
    'E&xit': '&Afsluiten',
    'EOL Characters': 'Regeleindekarakters',
    'Edit': 'Bewerken',
    'Enter a hex color value': 'Voer een hexadecimale kleurwaarde in',
    'Execute an action by name': 'Voer een actie uit op basis van naam',
    'Export': 'Exporteren',
    'Fast test of the progress bar': 'Snelle test van de voortgangsbalk',
    'File': 'Bestand',
    'Fill Paragraph': 'Vul alinea',
    'Find...': 'Zoeken...',
    'Floating Point': 'Drijvende komma (Floating point)',
    'Focal Plane View': 'Focaal vlak Weergave',
    'Font': 'Lettertype',
    'Font Settings': 'Lettertype-instellingen',
    'Foreground': 'Voorgrond',
    'Garbage Objects': 'Garbage objecten',
    'General': 'Algemeen',
    'Goto Band': 'Ga naar Band',
    'Goto Line...': 'Ga naar regel...',
    'Goto Offset...': 'Ga naar Offset...',
    'Goto a line in the text': 'Ga naar een regel in de tekst',
    'Goto an offset': 'Ga naar een offset',
    'Hangman': 'Galgje',
    'Hello World Action': 'Hello World Actie',
    'Image View': 'Beeldweergave',
    'Increase Volume': 'Volume verhogen',
    'Increase the volume': 'Verhoog het volume',
    'Indent the next line following a return': 'Spring volgende regel na een return in',
    'Input:': 'Invoer:',
    "Insert 'Hello, world' at the current cursor position": "Voer 'Hello, world' in op huidige cursorpositie",
    'Line Endings': 'Regeleinden',
    'List All Documents': 'Toon lijst van alle documenten',
    'Login': 'Aanmelden',
    'Major Mode': 'Major modus',
    'Mark for Deletion': 'Markeer voor verwijderen',
    'Mark for Deletion and Move Backwards': 'Markeer voor verwijderen en ga verder naar achteren',
    'Mark for Display': 'Markeer voor weergave',
    'Mark for Display and Move Backwards': 'Markeer voor weergave en ga verder naar achteren',
    'Mark for Save': 'Markeer voor opslaan',
    'Mark for Save and Move Backwards': 'Markeer voor opslaan en ga verder naar achteren',
    'Mark the selected buffer for deletion': 'Markeer geselecteerde buffer voor verwijderen',
    'Mark the selected buffer for deletion and move to the previous item': 'Markeer de geselecteerde buffer voor verwijderen en ga verder naar het vorige item',
    'Mark the selected buffer to be displayed': 'Markeer geselecteerde buffer voor weergave',
    'Mark the selected buffer to be displayed and move to the previous item': 'Markeer geselecteerde buffer voor weergave en ga verder naar vorige item',
    'Mark the selected buffer to be saved': 'Markeer geselecteerde buffer voor opslaan',
    'Median Filter': 'Mediaanfilter',
    'Minor Modes': 'Minor modi',
    'Modes': 'Modi',
    'Move the selection to the next item in the list': 'Verplaats de selectie naar het volgende item in de lijst',
    'Move the selection to the previous item in the list': 'Verplaats de selectie naar het vorige item in de lijst',
    'Move to Next Item': 'Ga verder naar volgende item',
    'Move to Previous Item': 'Ga naar vorige item',
    'Mute': 'Dempen',
    'Mute the volume': 'Demp het volume',
    'New': 'Nieuw',
    'New Tab': 'Nieuw tabblad',
    'New plain text file': 'Nieuw platte tekst bestand',
    'Next Band': 'Volgende Band',
    'Next Song': 'Volgende nummer',
    'Not a numeric expression': 'Geen numerieke expressie',
    'Not an integer expression': 'Geen integer expressie',
    'Open': 'Openen',
    'Open File Using Minibuffer...': 'Bestand openen met minibuffer...',
    'Open File...': 'Bestand openen...',
    'Open Recent': 'Recent geopend',
    'Open URL Using Minibuffer...': 'Open URL met minibuffer...',
    'Open a Hex Editor': 'Open een hex-editor',
    'Open a file': 'Bestand openen',
    'Open a file using URL name completion': 'Open een bestand met URL-naamvoltooiing',
    'Open a file using filename completion': 'Open een bestand met automatische bestandsnaamvoltooiing',
    'Open a new tab': 'Nieuw tabblad openen',
    'Open a new window': 'Nieuw venster openen',
    'Open a sample Graphviz file': 'Open een Graphviz-voorbeeldbestand',
    'Open a sample Python file': 'Open een Python-voorbeeldbestand',
    'Open an Image Viewer': 'Open een Image Viewer-voorbeeldbestand',
    'Open an MPD server through a URL': 'Open een MPD-server via een URL',
    "Open the STC Style Editor to edit the current mode's text display": 'Open de STC-Stijleditor om de huidige tekstweergave te veranderen',
    'Open the wxPython widget inspector': 'Open de wxPython widget inspector',
    'Paste': 'Plakken',
    'Paste at Column': 'Plakken in kolom',
    'Play/Pause Song': 'Afspelen/Pauzeren Nummer',
    'Plugins': 'Invoegtoepassingen',
    'Preferences, settings, and configurations...': 'Voorkeuren, instellingen en configuraties...',
    'Prev Band': 'Vorige Band',
    'Prev Song': 'Vorig Nummer',
    'Preview': 'Voorvertoning',
    'Previous Song': 'Vorig Nummer',
    'Project Homepage': 'Project website',
    'Quit the program': 'Sluit het programma',
    'Record Format...': 'Opnameformaat...',
    'Redo': 'Herhalen',
    'Refresh': 'Vernieuwen',
    'Refresh the current view to show any changes': 'Vernieuw de huidige weergave om veranderingen weer te geven',
    'Reindent': 'Opnieuw inspringen',
    'Remove all songs from the current playlist': 'Verwijder alle nummers van de huidige afspeellijst',
    'Replace Buffer': 'Vervang Buffer',
    'Replace...': 'Vervangen...',
    'Report a bug': 'Rapporteer een bug',
    'Rescan the filesystem and update the MPD database': 'Herscan het bestandssysteem en update de MPD-database',
    'Restart Game': 'Herstart Spel',
    'Revert to last saved version': 'Keer terug naar laatst opgeslagen versie',
    'Run': 'Uitvoeren',
    'Run this script through the interpreter': 'Verwerk dit script door de interpreter',
    'Run with Args': 'Voer uit met argumenten',
    'Running Jobs': 'Lopende taken',
    'Same Major Mode': 'Zelfde Major-modus',
    'Samples': 'Voorbeelden',
    'Save &As...': 'Opslaan &Als...',
    'Save Styles': 'Bewaar stijlen',
    'Save or Delete Marked Buffers': 'Opslaan of Verwijderen Gemarkeerde Buffers',
    'Save the current file': 'Huidige bestand opslaan',
    'Save to URL Using Minibuffer...': 'Opslaan naar URL met minibuffer',
    'Search for a string in the text': 'Zoek naar een tekenreeks in de tekst',
    'Select All': 'Alles selecteren',
    'Select Rect': 'Selecteer vierhoek',
    'Select rectangular region': 'Selecteer vierhoekig gebied',
    'Set the preview file type': 'Stel voorbeeldlettertype in',
    'Shift &Left': 'Verschuif &Links',
    'Shift &Right': 'Verschuif &Rechts',
    'Show Buffer': 'Toon Buffer',
    'Show Hex Digits': 'Toon Hexadecimale Cijfers',
    'Show Line Style': 'Toon Regelstijl',
    'Show Pixel Values': 'Toon Pixelwaarden',
    'Show Record Numbers': 'Toon Recordnummers',
    'Show the buffer in a new tab': 'Toon de buffer in een nieuw tabblad',
    'Show the buffer in place of this tab': 'Toon de buffer in plaats van dit tabblad',
    'Show the styling information of the current line': 'Toon de stijlinformatie van de huidige regel',
    'Show uncollectable objects': 'Toon onverzamelbare objecten',
    'Sidebars': 'Zijbalken',
    'Size': 'Grootte',
    'Slow test of the progress bar': 'Trage test van de voorgangsbalk',
    'Some styles have been changed would you like to save before exiting?': 'Bepaalde stijlen zijn gewijzigd. Wil je de wijzigingen opslaan voor afsluiten?',
    'Sort Order': 'Sorteervolgorde',
    'Start a blank new style': 'Open een nieuwe stijl',
    'Start a hangman game': 'Start een galgjespel',
    'Stop the currently running script': 'Stop het huidig draaiende script',
    'Style Editor': 'Stijleditor',
    'Style Tags': 'Stijlmarkeringen',
    'Style Theme': 'Stijlthema',
    'Syntax Files': 'Syntaxbestanden',
    'Tests': 'Testen',
    'Text': 'Tekst',
    'Text Styles...': 'Tekststijlen...',
    'Text file': 'Tekstbestand',
    'Tools': 'Extra',
    'Transform': 'Transformeren',
    'Undo': 'Ongedaan maken',
    'Upcase': 'Hoofdletters',
    'View': 'Weergave',
    'View Direction': 'Leesrichting',
    'Window': 'Venster',
    'Write to a new URL using name completion': 'Schrijf naar een nieuwe URL met gebruikmaking van naamvoltoo\xc3\xafng',
    'Zoom In': 'Inzoomen',
    'Zoom Out': 'Uitzoomen',
    'Zoom in (magnify) image': 'Zoom in (vergroot) op afbeelding',
    'Zoom out (demagnify) image': 'Zoom uit (verklein) op afbeelding',
    'bold': 'vetgedrukt',
    'eol': 'einde regel',
    'hangman': 'galgje',
    'italic': 'cursief',
    'restart-game': 'herstart-spel',
    'underline': 'onderstrepen',
    'unknown': 'onbekend',
}
|
robmcmullen/peppy
|
peppy/i18n/nl.py
|
Python
|
gpl-2.0
| 10,493
|
[
"Elk"
] |
6a1254bd41c493609545a7c1d3783c7ac5fce474a84eb2565c4525d06874e969
|
# BOLD monitoring example to demonstrate the two-input model
#
# please note: This example is just intended to demonstrate the coupling
# between the source and input variables. The couplings used in this script
# are not based on biological constraints.
#
# More details can be found in the recent article: TODO
#
# author: Helge Uelo Dinkelbach, Oliver Maith
from ANNarchy import *
from ANNarchy.extensions.bold import *
import matplotlib.pyplot as plt
# Two populations of 100 izhikevich neurons
pop0 = Population(100, neuron=Izhikevich)
pop1 = Population(100, neuron=Izhikevich)
# Set noise to create some baseline activity
pop0.noise = 5.0; pop1.noise = 5.0
# Compute mean firing rate in Hz on 100ms window
pop0.compute_firing_rate(window=100.0)
pop1.compute_firing_rate(window=100.0)
# Create required monitors (start=False: recording is switched on manually
# after the ramp-up phase below)
mon_pop0 = Monitor(pop0, ["r"], start=False)
mon_pop1 = Monitor(pop1, ["r"], start=False)
m_bold = BoldMonitor(
    populations = [pop0, pop1], # recorded populations
    bold_model = balloon_two_inputs(), # BOLD model to use
    # mean firing rate as source variable coupled to the input variable I_CBF
    # membrane potential as source variable coupled to the input variable I_CMRO2
    mapping={'I_CBF': 'r','I_CMRO2': 'v'},
    normalize_input=2000, # time window to compute the baseline
    recorded_variables=["I_CBF", "I_CMRO2", "BOLD"]
)
# Compile and initialize the network
compile()
# Ramp up time (not recorded)
simulate(1000)
# Start recording
mon_pop0.start()
mon_pop1.start()
m_bold.start()
# we manipulate the noise for the half of the neurons
simulate(5000) # 5s with low noise
pop0.noise = 7.5
simulate(5000) # 5s with higher noise (one population)
pop0.noise = 5
simulate(10000) # 10s with low noise
# retrieve the recordings (mean firing rate averaged over neurons)
mean_fr1 = np.mean(mon_pop0.get("r"), axis=1)
mean_fr2 = np.mean(mon_pop1.get("r"), axis=1)
If_data = m_bold.get("I_CBF")
Ir_data = m_bold.get("I_CMRO2")
bold_data = m_bold.get("BOLD")
# An example evaluation, which consists of:
# A) the mean firing activity
# B) the recorded activity which serves as input to BOLD
# C) the resulting BOLD signal
plt.figure(figsize=(20,6))
grid = plt.GridSpec(1, 3, left=0.05, right=0.95)
# A) mean firing rate of both populations
ax1 = plt.subplot(grid[0, 0])
ax1.plot(mean_fr1, label="pop0")
ax1.plot(mean_fr2, label="pop1")
plt.legend()
ax1.set_ylabel("average mean firing rate [Hz]", fontweight="bold", fontsize=18)
# B) BOLD input signals
ax2 = plt.subplot(grid[0, 1])
ax2.plot(If_data, label='I_CBF')
ax2.plot(Ir_data, label='I_CMRO2')
ax2.set_ylabel("BOLD input variables", fontweight="bold", fontsize=18)
ax2.legend()
# C) resulting BOLD signal, scaled to percent
ax3 = plt.subplot(grid[0, 2])
ax3.plot(bold_data*100.0)
ax3.set_ylabel("BOLD [%]", fontweight="bold", fontsize=18)
# x-axis labels as seconds (simulation step is 1 ms, so 1000 steps = 1 s)
for ax in [ax1, ax2, ax3]:
    ax.set_xticks(np.arange(0,21,2)*1000)
    ax.set_xticklabels(np.arange(0,21,2))
    ax.set_xlabel("time [s]", fontweight="bold", fontsize=18)
plt.show()
|
ANNarchy/ANNarchy
|
examples/bold_monitor/BOLD_two_inputs.py
|
Python
|
gpl-2.0
| 3,003
|
[
"NEURON"
] |
c02037a89b851eae13bde93ce1def93d7b16751c43e1709934b79cf40f7733e2
|
################################################################################
# Copyright (C) 2011-2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Functions for plotting nodes.
Functions
=========
.. currentmodule:: bayespy.plot
.. autosummary::
:toctree: generated/
pdf
contour
plot
hinton
gaussian_mixture_2d
Plotters
========
.. autosummary::
:toctree: generated/
Plotter
PDFPlotter
ContourPlotter
HintonPlotter
FunctionPlotter
GaussianTimeseriesPlotter
CategoricalMarkovChainPlotter
"""
import os, sys
############################################################################
# A STUPID WORKAROUND FOR A MATPLOTLIB 1.4.0 BUG RELATED TO INTERACTIVE MODE
# See: https://github.com/matplotlib/matplotlib/issues/3505
import __main__
# Only stand-alone scripts have a __file__ attribute on __main__; plain
# interactive shells do not, and are deliberately left untouched.
if hasattr(__main__, '__file__'):
    sys.ps1 = ('WORKAROUND FOR A BUG #3505 IN MATPLOTLIB.\n'
               'IF YOU SEE THIS MESSAGE, TRY MATPLOTLIB!=1.4.0.')
# This workaround does not work on Python shell, only on stand-alone scripts
# and IPython. A better solution: require MPL!=1.4.0.
#############################################################################
import numpy as np
import scipy.sparse as sp
import scipy
from scipy import special
import matplotlib.pyplot as plt
from matplotlib import animation
#from matplotlib.pyplot import *
from bayespy.inference.vmp.nodes.categorical import CategoricalMoments
from bayespy.inference.vmp.nodes.gaussian import (GaussianMoments,
GaussianWishartMoments)
from bayespy.inference.vmp.nodes.beta import BetaMoments
from bayespy.inference.vmp.nodes.beta import DirichletMoments
from bayespy.inference.vmp.nodes.bernoulli import BernoulliMoments
from bayespy.inference.vmp.nodes.categorical import CategoricalMoments
from bayespy.inference.vmp.nodes.gamma import GammaMoments
from bayespy.inference.vmp.nodes.node import Node, Moments
from bayespy.utils import (misc,
random,
linalg)
# Users can use pyplot via this module
import matplotlib
# Public aliases so callers can reach matplotlib and pyplot through this
# module without importing them separately.
mpl = matplotlib
pyplot = plt
def interactive(function):
    """A decorator for forcing functions to use the interactive mode.

    The wrapped function runs with matplotlib interactive mode enabled; the
    previous interactive state is restored afterwards.

    Parameters
    ----------
    function : callable
        The function to be decorated

    Returns
    -------
    callable
        The wrapped function, with the original name and docstring preserved.
    """
    # Local import keeps this fix self-contained in the function.
    from functools import wraps

    @wraps(function)  # preserve __name__/__doc__ of the decorated function
    def new_function(*args, **kwargs):
        was_interactive = mpl.is_interactive()
        if not was_interactive:
            # Enable interactive mode only when it was off.
            mpl.interactive(True)
        retval = function(*args, **kwargs)
        if not was_interactive:
            # Restore the original (non-interactive) state.
            mpl.interactive(False)
        return retval
    return new_function
def _subplots(plotfunc, *args, fig=None, kwargs=None):
    """Create a collection of subplots

    Each subplot is created with the same plotting function.

    Inputs are given as pairs:

    (x, 3), (y, 2), ...

    where x,y,... are the input arrays and 3,2,... are the ndim
    parameters. The last ndim axes of each array are interpreted as a
    single element to the plotting function.

    All high-level plotting functions should wrap low-level plotting
    functions with this function in order to generate subplots for
    plates.
    """
    if kwargs is None:
        kwargs = {}
    if fig is None:
        fig = plt.gcf()
    # Parse shape and plates of each input array: the trailing ndim axes are
    # the "element" shape, the leading axes are the plates.
    shapes = [np.shape(x)[-n:] if n > 0 else ()
              for (x,n) in args]
    plates = [np.shape(x)[:-n] if n > 0 else np.shape(x)
              for (x,n) in args]
    # Get the full grid shape of the subplots
    broadcasted_plates = misc.broadcasted_shape(*plates)
    # Subplot indexing layout: odd plate axes go to columns, even ones to
    # rows (counted from the end), hence the [-2::-2] / [-1::-2] strides.
    M = np.prod(broadcasted_plates[-2::-2])
    N = np.prod(broadcasted_plates[-1::-2])
    # Per-axis stride of the flat subplot index for each plate axis.
    strides_subplot = [np.prod(broadcasted_plates[(j+2)::2]) * N
                       if ((len(broadcasted_plates)-j) % 2) == 0 else
                       np.prod(broadcasted_plates[(j+2)::2])
                       for j in range(len(broadcasted_plates))]
    # Plot each subplot
    for ind in misc.nested_iterator(broadcasted_plates):
        # Get the list of inputs for this subplot (broadcast-aware indexing)
        broadcasted_args = []
        for n in range(len(args)):
            i = misc.safe_indices(ind, plates[n])
            broadcasted_args.append(args[n][0][i])
        # Plot the subplot using the given function; the flat subplot index
        # is the dot product of the plate index with the strides.
        ind_subplot = np.einsum('i,i', ind, strides_subplot)
        axes = fig.add_subplot(M, N, ind_subplot+1)
        plotfunc(*broadcasted_args, axes=axes, **kwargs)
def pdf(Z, x, *args, name=None, axes=None, fig=None, **kwargs):
    """
    Plot probability density function of a scalar variable.

    Parameters
    ----------
    Z : node or function
        Stochastic node or log pdf function
    x : array
        Grid points
    """
    # TODO: Make it possible to plot a plated variable using _subplots function.
    # Resolve the target axes: reuse the current axes only when neither axes
    # nor figure were given; otherwise draw into a fresh subplot.
    if axes is None and fig is None:
        target_axes = plt.gca()
    else:
        target_fig = plt.gcf() if fig is None else fig
        target_axes = target_fig.add_subplot(111)
    # Z may be a node (with a logpdf method) or a plain log-pdf callable.
    try:
        log_density = Z.logpdf(x)
    except AttributeError:
        log_density = Z(x)
    density = np.exp(log_density)
    lines = target_axes.plot(x, density, *args, **kwargs)
    label = name
    if label is None:
        try:
            label = Z.name
        except AttributeError:
            pass
    if label:
        target_axes.set_title(r'$q(%s)$' % (label))
        target_axes.set_xlabel(r'$%s$' % (label))
    return lines
def contour(Z, x, y, n=None, axes=None, fig=None, **kwargs):
    """
    Plot 2-D probability density function of a 2-D variable.

    Parameters
    ----------
    Z : node or function
        Stochastic node or log pdf function
    x : array
        Grid points on x axis
    y : array
        Grid points on y axis
    n : int, optional
        Number of contour levels (evenly spaced between 0 and the maximum
        density, endpoints excluded); matplotlib chooses when omitted.
    """
    # TODO: Make it possible to plot a plated variable using _subplots function.
    # Use current axes only when neither axes nor fig is given; otherwise a
    # fresh subplot is created (even if axes was passed — mirrors pdf()).
    if axes is None and fig is None:
        axes = plt.gca()
    else:
        if fig is None:
            fig = plt.gcf()
        axes = fig.add_subplot(111)
    # Build the 2-D evaluation grid from the 1-D coordinate arrays.
    XY = misc.grid(x, y)
    # Z may be a node (logpdf method) or a plain log-pdf callable.
    try:
        lpdf = Z.logpdf(XY)
    except AttributeError:
        lpdf = Z(XY)
    p = np.exp(lpdf)
    # Reshape flat grid evaluations back into the (len(x), len(y)) grid.
    shape = (np.size(x), np.size(y))
    X = np.reshape(XY[:,0], shape)
    Y = np.reshape(XY[:,1], shape)
    P = np.reshape(p, shape)
    if n is not None:
        # n interior levels between 0 and max(P), excluding both endpoints.
        levels = np.linspace(0, np.amax(P), num=n+2)[1:-1]
        return axes.contour(X, Y, P, levels, **kwargs)
    else:
        return axes.contour(X, Y, P, **kwargs)
def plot_gaussian_mc(X, scale=2, **kwargs):
    """
    Plot Gaussian Markov chain as a 1-D function

    Parameters
    ----------
    X : node
        Node with Gaussian Markov chain moments.
    scale : float
        Number of standard deviations shown as the error band (default 2).
    """
    # Delegate to the Gaussian timeseries plotter; axis=-2 selects the time
    # axis of the Markov-chain moments.
    timeseries_gaussian(X, axis=-2, scale=scale, **kwargs)
def plot_bernoulli(X, axis=-1, scale=2, **kwargs):
    """
    Plot Bernoulli node as a 1-D function (posterior mean, no error band).
    """
    node = X._convert(BernoulliMoments)
    mean = node.get_moments()[0]
    # No second moment is used, so the error argument is None.
    return _timeseries_mean_and_error(mean, None, axis=axis, **kwargs)
def plot_gaussian(X, axis=-1, scale=2, **kwargs):
    """
    Plot Gaussian node as a 1-D function with an error band.

    Parameters
    ----------
    X : node
        Node with Gaussian moments.
    axis : int
        The index of the time axis.
    scale : float
        Number of standard deviations in the error band.
    """
    node = X._convert(GaussianMoments)
    moments = node.get_moments()
    mean = moments[0]
    # Diagonal of the second moment gives elementwise <x^2>.
    second = misc.get_diag(moments[1], ndim=len(node.dims[0]))
    # std dev from var = <x^2> - <x>^2, scaled to the requested band width.
    deviation = scale * np.sqrt(second - mean ** 2)
    return _timeseries_mean_and_error(mean, deviation, axis=axis, **kwargs)
def plot(Y, axis=-1, scale=2, center=False, **kwargs):
    """
    Plot a variable or an array as 1-D function with errorbars

    Plain numeric input is plotted directly; nodes are dispatched to the
    Bernoulli or Gaussian plotters if convertible, otherwise plotted from
    their mean and variance.
    """
    if misc.is_numeric(Y):
        return _timeseries_mean_and_error(Y, None, axis=axis, center=center, **kwargs)

    if isinstance(Y, Node):
        # Try Bernoulli plotting
        try:
            Y = Y._convert(BernoulliMoments)
        except BernoulliMoments.NoConverterError:
            pass
        else:
            return plot_bernoulli(Y, axis=axis, scale=scale, center=center, **kwargs)

        # Try Gaussian plotting
        try:
            Y = Y._convert(GaussianMoments)
        except GaussianMoments.NoConverterError:
            pass
        else:
            return plot_gaussian(Y, axis=axis, scale=scale, center=center, **kwargs)

    # Fallback: any object exposing get_mean_and_variance().
    (mu, var) = Y.get_mean_and_variance()
    std = np.sqrt(var)

    return _timeseries_mean_and_error(mu, std,
                                      axis=axis,
                                      scale=scale,
                                      center=center,
                                      **kwargs)
# Some backward compatibility
# These wrappers keep the old `timeseries*` API alive; they only flip the
# default of `center` to True before delegating to the `plot*` functions.
def timeseries_gaussian_mc(*args, center=True, **kwargs):
    return plot_gaussian_mc(*args, center=center, **kwargs)


def timeseries_gaussian(*args, center=True, **kwargs):
    return plot_gaussian(*args, center=center, **kwargs)


# Old alias for the Gaussian timeseries plotter.
timeseries_normal = timeseries_gaussian


def timeseries(*args, center=True, **kwargs):
    return plot(*args, center=center, **kwargs)
def _timeseries_mean_and_error(y, std, *args, axis=-1, center=True, fig=None, **kwargs):
    """Plot mean (and optional error band) timeseries, one subplot per plate.

    The time axis is `axis`; remaining axes become a grid of subplots.
    """
    # TODO/FIXME: You must multiply by ones(plates) in order to plot
    # broadcasted plates properly
    if fig is None:
        fig = plt.gcf()
    y = np.atleast_1d(y)
    shape = list(np.shape(y))
    # Get and remove the length of the time axis
    T = shape.pop(axis)
    # Move time axis to first
    y = np.rollaxis(y, axis)
    if std is not None:
        std = np.rollaxis(std, axis)
    # Flatten all plate axes into one: (T, number-of-subplots).
    y = np.reshape(y, (T, -1))
    if std is not None:
        std = np.reshape(std, (T, -1))
    # Remove 1s
    shape = [s for s in shape if s > 1]
    # Calculate number of rows and columns (pad to at least 2-D).
    shape = misc.multiply_shapes(shape, (1,1))
    if len(shape) > 2:
        raise Exception("Can plot only in 2 dimensions (rows and columns)")
    (M, N) = shape
    # Prefer plotting to rows
    if M == 1:
        M = N
        N = 1
    # Plot each timeseries
    ax0 = fig.add_subplot(M, N, 1)
    for i in range(M*N):
        if i > 0:
            # Share x axis between all subplots
            ax = fig.add_subplot(M, N, i+1, sharex=ax0)
        else:
            ax = ax0
        # Autoscale the axes to data and use tight y and x axes
        ax.autoscale(enable=True, tight=True)
        ax.set_ylim(auto=True)
        if i < (M-1)*N:
            # Remove x tick labels from other than the last row
            plt.setp(ax.get_xticklabels(), visible=False)
        # NOTE(review): errorplot is defined elsewhere in this module.
        if std is None:
            errorplot(y=y[:,i], axes=ax, **kwargs)
        else:
            if len(args) > 0:
                raise Exception("Can't handle extra arguments")
            errorplot(y=y[:,i], error=std[:,i], axes=ax, **kwargs)
        if center:
            # Center the zero level on y-axis
            ylim = ax.get_ylim()
            vmax = np.max(np.abs(ylim))
            ax.set_ylim([-vmax, vmax])
    # Remove height space between subplots
    fig.subplots_adjust(hspace=0)
def _blob(axes, x, y, area, colour):
    """
    Draw a filled square blob with the given area (< 1) centred at (x, y).
    """
    half = np.sqrt(area) / 2
    corner_x = np.array([x - half, x + half, x + half, x - half])
    corner_y = np.array([y - half, y - half, y + half, y + half])
    axes.fill(corner_x, corner_y, colour, edgecolor=colour)
def _rectangle(axes, x, y, width, height, **kwargs):
    """Add an axis-aligned rectangle patch centred at (x, y)."""
    # Shift from centre coordinates to matplotlib's lower-left corner.
    left = x - width/2
    bottom = y - height/2
    patch = plt.Rectangle((left, bottom), width, height, **kwargs)
    axes.add_patch(patch)
    return
def gaussian_mixture_2d(X, alpha=None, scale=2, fill=False, axes=None, **kwargs):
    """
    Plot Gaussian mixture as ellipses in 2-D

    Parameters
    ----------
    X : Mixture node
    alpha : Dirichlet-like node (optional)
       Probabilities for the clusters
    scale : float (optional)
       Scale for the covariance ellipses (by default, 2)
    fill : bool (optional)
       Fill the ellipses (by default, False)
    axes : matplotlib axes (optional)
       Axes to draw into (current axes by default)
    """
    if axes is None:
        axes = plt.gca()

    # Mean and precision of the mixture components.
    mu_Lambda = X.parents[1]._convert(GaussianWishartMoments)

    (mu, _, Lambda, _) = mu_Lambda.get_moments()
    mu = np.linalg.solve(Lambda, mu)

    if len(mu_Lambda.plates) != 1:
        raise NotImplementedError("Not yet implemented for more plates")

    K = mu_Lambda.plates[0]

    width = np.zeros(K)
    height = np.zeros(K)
    angle = np.zeros(K)
    for k in range(K):
        m = mu[k]
        L = Lambda[k]
        # Eigendecompose the precision; 1/sqrt(eigenvalue) gives the std dev
        # along each principal axis, so 2/sqrt(.) is the full ellipse extent.
        (u, W) = scipy.linalg.eigh(L)
        u[0] = np.sqrt(1/u[0])
        u[1] = np.sqrt(1/u[1])
        width[k] = 2*u[0]
        height[k] = 2*u[1]
        angle[k] = np.arctan(W[0,1] / W[0,0])

    angle = 180 * angle / np.pi
    mode_height = 1 / (width * height)

    # Use cluster probabilities to adjust alpha channel
    if alpha is not None:
        # Compute the normalized probabilities in a numerically stable way
        logsum_p = misc.logsumexp(alpha.u[0], axis=-1, keepdims=True)
        logp = alpha.u[0] - logsum_p
        p = np.exp(logp)
        # Visibility is based on cluster mode peak height
        visibility = mode_height * p
        visibility /= np.amax(visibility)
    else:
        visibility = np.ones(K)

    for k in range(K):
        ell = mpl.patches.Ellipse(mu[k], scale*width[k], scale*height[k],
                                  angle=(180+angle[k]),
                                  fill=fill,
                                  alpha=visibility[k],
                                  **kwargs)
        axes.add_artist(ell)

    plt.axis('equal')

    # If observed, plot the data too
    if np.any(X.observed):
        # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        mask = np.array(X.observed) * np.ones(X.plates, dtype=bool)
        y = X.u[0][mask]
        plt.plot(y[:,0], y[:,1], 'r.')

    return
def _hinton(W, error=None, vmax=None, square=True, axes=None):
    """
    Draws a Hinton diagram for visualizing a weight matrix.

    Temporarily disables matplotlib interactive mode if it is on,
    otherwise this takes forever.

    Originally copied from
    http://wiki.scipy.org/Cookbook/Matplotlib/HintonDiagrams
    """
    if axes is None:
        axes = plt.gca()
    W = misc.atleast_nd(W, 2)
    (height, width) = W.shape
    if not vmax:
        #vmax = 2**np.ceil(np.log(np.max(np.abs(W)))/np.log(2))
        # Scale so that the largest value (plus error, if any) fills a cell.
        if error is not None:
            vmax = np.max(np.abs(W) + error)
        else:
            vmax = np.max(np.abs(W))
    # Gray background covering the whole grid.
    axes.fill(0.5+np.array([0,width,width,0]),
              0.5+np.array([0,0,height,height]),
              'gray')
    axes.axis('off')
    if square:
        axes.axis('equal')
    axes.invert_yaxis()
    for x in range(width):
        for y in range(height):
            # Cell centres are at 1-based coordinates.
            _x = x+1
            _y = y+1
            w = W[y,x]
            _w = np.abs(w)
            # White squares for positive values, black for non-positive.
            if w > 0:
                _c = 'white'
            else:
                _c = 'black'
            if error is not None:
                e = error[y,x]
                if e < 0:
                    print(e, _w, vmax)
                    raise Exception("BUG? Negative error")
                if _w + e > vmax:
                    print(e, _w, vmax)
                    raise Exception("BUG? Value+error greater than max")
                # Unfilled outline shows value+error extent.
                _rectangle(axes,
                           _x,
                           _y,
                           min(1, np.sqrt((_w+e)/vmax)),
                           min(1, np.sqrt((_w+e)/vmax)),
                           edgecolor=_c,
                           fill=False)
            # Filled blob area is proportional to |value|/vmax.
            _blob(axes, _x, _y, min(1, _w/vmax), _c)
def matrix(A, axes=None):
    """Show a matrix as an image with a symmetric diverging colormap."""
    if axes is None:
        axes = plt.gca()
    data = np.atleast_2d(A)
    # Symmetric color range centred on zero.
    bound = np.max(np.abs(data))
    return axes.imshow(data,
                       interpolation='nearest',
                       cmap='RdBu_r',
                       vmin=-bound,
                       vmax=bound)
def new_matrix(A, vmax=None):
    """Unfinished matrix visualization (currently a no-op placeholder)."""
    data = np.atleast_2d(A)
    if vmax is None:
        vmax = np.max(np.abs(data))
    (n_rows, n_cols) = np.shape(data)
    # NOTE(review): cell drawing was never implemented; kept as a stub.
    for _ in range(n_rows):
        for _ in range(n_cols):
            pass
def gaussian_hinton(X, rows=None, cols=None, scale=1, fig=None):
    """
    Plot the Hinton diagram of a Gaussian node

    `rows`/`cols` pick which axes of the node become subplot rows and
    columns; `scale` sets how many standard deviations the error outline
    shows (0 disables the error outline).
    """
    if fig is None:
        fig = plt.gcf()
    # Get mean and second moment
    X = X._convert(GaussianMoments)
    (x, xx) = X.get_moments()
    ndim = len(X.dims[0])
    shape = X.get_shape(0)
    size = len(X.get_shape(0))
    # Compute standard deviation
    xx = misc.get_diag(xx, ndim=ndim)
    std = np.sqrt(xx - x**2)
    # Force explicit elements when broadcasting
    x = x * np.ones(shape)
    std = std * np.ones(shape)
    # NaN marks "not chosen yet"; resolved after squeezing below.
    if rows is None:
        rows = np.nan
    if cols is None:
        cols = np.nan
    # Preprocess the axes to 0,...,ndim
    if rows < 0:
        rows += size
    if cols < 0:
        cols += size
    if rows < 0 or rows >= size:
        raise ValueError("Row axis invalid")
    if cols < 0 or cols >= size:
        raise ValueError("Column axis invalid")
    # Remove non-row and non-column axes that have length 1
    squeezed_shape = list(shape)
    for i in reversed(range(len(shape))):
        if shape[i] == 1 and i != rows and i != cols:
            squeezed_shape.pop(i)
            # Keep the row/column axis indices consistent after popping.
            if i < cols:
                cols -= 1
            if i < rows:
                rows -= 1
    x = np.reshape(x, squeezed_shape)
    std = np.reshape(std, squeezed_shape)
    # Pad to at least 2-D so a row/column layout always exists.
    if np.ndim(x) < 2:
        cols += 2 - np.ndim(x)
        rows += 2 - np.ndim(x)
        x = np.atleast_2d(x)
        std = np.atleast_2d(std)
    size = np.ndim(x)
    # Default choices: use the last axes, avoiding a rows/cols clash.
    if np.isnan(cols):
        if rows != size - 1:
            cols = size - 1
        else:
            cols = size - 2
    if np.isnan(rows):
        if cols != size - 1:
            rows = size - 1
        else:
            rows = size - 2
    # Put the row and column axes to the end
    axes = [i for i in range(size) if i not in (rows, cols)] + [rows, cols]
    x = np.transpose(x, axes=axes)
    std = np.transpose(std, axes=axes)
    vmax = np.max(np.abs(x) + scale*std)
    if scale == 0:
        _subplots(_hinton, (x, 2), fig=fig, kwargs=dict(vmax=vmax))
    else:
        def plotfunc(z, e, **kwargs):
            return _hinton(z, error=e, **kwargs)
        _subplots(plotfunc, (x, 2), (scale*std, 2), fig=fig, kwargs=dict(vmax=vmax))
def _hinton_figure(x, rows=None, cols=None, fig=None, square=True):
    """
    Plot the Hinton diagram of a plain array.

    Mirrors gaussian_hinton but for raw values: no moments, no error band
    (scale and std are fixed to 0, so the error branch below is dead code).
    """
    scale = 0
    std = 0
    if fig is None:
        fig = plt.gcf()
    # Get mean and second moment
    shape = np.shape(x)
    size = np.ndim(x)
    # NaN marks "not chosen yet"; resolved after squeezing below.
    if rows is None:
        rows = np.nan
    if cols is None:
        cols = np.nan
    # Preprocess the axes to 0,...,ndim
    if rows < 0:
        rows += size
    if cols < 0:
        cols += size
    if rows < 0 or rows >= size:
        raise ValueError("Row axis invalid")
    if cols < 0 or cols >= size:
        raise ValueError("Column axis invalid")
    # Remove non-row and non-column axes that have length 1
    squeezed_shape = list(shape)
    for i in reversed(range(len(shape))):
        if shape[i] == 1 and i != rows and i != cols:
            squeezed_shape.pop(i)
            # Keep the row/column axis indices consistent after popping.
            if i < cols:
                cols -= 1
            if i < rows:
                rows -= 1
    x = np.reshape(x, squeezed_shape)
    size = np.ndim(x)
    # Default choices: use the last axes, avoiding a rows/cols clash.
    if np.isnan(cols):
        if rows != size - 1:
            cols = size - 1
        else:
            cols = size - 2
    if np.isnan(rows):
        if cols != size - 1:
            rows = size - 1
        else:
            rows = size - 2
    # Put the row and column axes to the end
    if np.ndim(x) >= 2:
        axes = [i for i in range(size) if i not in (rows, cols)] + [rows, cols]
        x = np.transpose(x, axes=axes)
        #std = np.transpose(std, axes=axes)
    vmax = np.max(np.abs(x) + scale*std)
    kw = dict(vmax=vmax, square=square)
    if scale == 0:
        _subplots(_hinton, (x, 2), fig=fig, kwargs=kw)
    else:
        # Unreachable while scale is hard-coded to 0 above.
        def plotfunc(z, e, **kwargs):
            return _hinton(z, error=e, **kwargs)
        _subplots(plotfunc, (x, 2), (scale*std, 2), fig=fig, kwargs=kw)
# For backwards compatibility:
# `gaussian_array` is the old public name of `gaussian_hinton`.
gaussian_array = gaussian_hinton
def timeseries_categorical_mc(Z, fig=None):
    """Plot a categorical Markov chain as a grid of Hinton diagrams."""
    if fig is None:
        fig = plt.gcf()
    # Make sure that the node is categorical
    node = Z._convert(CategoricalMoments)
    # Expectations, broadcast explicitly to the full shape.
    probs = node._message_to_child()[0] * np.ones(node.get_shape(0))
    # Compute the subplot layout
    probs = misc.atleast_nd(probs, 4)
    if np.ndim(probs) != 4:
        raise ValueError("Can not plot arrays with over 4 axes")
    n_rows = np.shape(probs)[0]
    n_cols = np.shape(probs)[1]
    # One Hinton diagram per leading-axes pair.
    for r in range(n_rows):
        for c in range(n_cols):
            ax = fig.add_subplot(n_rows, n_cols, r*n_cols + c + 1)
            _hinton(probs[r, c].T, vmax=1.0, square=False, axes=ax)
def gamma_hinton(alpha, square=True, **kwargs):
    """
    Plot a gamma distributed random variable as a Hinton diagram
    """
    # Make sure that the node is gamma-like.
    node = alpha._convert(GammaMoments)
    # First moment, broadcast explicitly over the plates.
    values = node.get_moments()[0] * np.ones(node.plates)
    # Plot Hinton diagram
    return _hinton_figure(values, square=square, **kwargs)
def beta_hinton(P, square=True):
    """
    Plot a beta distributed random variable as a Hinton diagram
    """
    # Make sure that the node is beta
    node = P._convert(BetaMoments)
    # Compute exp( <log p> ) and broadcast explicitly over the plates.
    prob = np.exp(node._message_to_child()[0][..., 0]) * np.ones(node.plates)
    # Plot Hinton diagram
    return _hinton(prob, vmax=1.0, square=square)
def dirichlet_hinton(P, square=True):
    """
    Plot a Dirichlet distributed random variable as a Hinton diagram
    """
    # Make sure that the node is Dirichlet-like.
    node = P._convert(DirichletMoments)
    # Compute exp( <log p> ); the trailing axis holds the category weights.
    prob = np.exp(node._message_to_child()[0]) * np.ones(node.plates + (1,))
    # Plot Hinton diagram
    return _hinton(prob, vmax=1.0, square=square)
def bernoulli_hinton(Z, square=True):
    """
    Plot a Bernoulli distributed random variable as a Hinton diagram
    """
    # Make sure that the node is Bernoulli
    node = Z._convert(BernoulliMoments)
    # <Z>, broadcast explicitly over the plates.
    prob = node._message_to_child()[0] * np.ones(node.plates)
    # Plot Hinton diagram
    return _hinton(prob, vmax=1.0, square=square)
def categorical_hinton(Z, square=True):
    """
    Plot a categorically distributed random variable as a Hinton diagram
    """
    # Make sure that the node is categorical
    Z = Z._convert(CategoricalMoments)
    # Get <Z>
    z = Z._message_to_child()[0]
    # Explicit broadcasting (keep the trailing category axis)
    z = z * np.ones(Z.plates+(1,))
    # Plot Hinton diagram; squeeze drops singleton plate axes
    return _hinton(np.squeeze(z), vmax=1.0, square=square)
def hinton(X, **kwargs):
    r"""
    Draw the Hinton diagram of a node

    The node is converted to the most specific supported moments type
    (Gaussian, gamma, beta, Dirichlet, Bernoulli or categorical, tried in
    that order) and plotted with the corresponding specialized Hinton
    plotter.  The accepted keyword arguments therefore depend on the node
    type.  If no conversion applies, the raw array is plotted.

    Parameters
    ----------
    X : node
    """
    if hasattr(X, "_convert"):
        # Ordered dispatch table: the first moments class that X can be
        # converted to decides which specialized plotter is used.
        dispatch = (
            (GaussianMoments, gaussian_hinton),
            (GammaMoments, gamma_hinton),
            (BetaMoments, beta_hinton),
            (DirichletMoments, dirichlet_hinton),
            (BernoulliMoments, bernoulli_hinton),
            (CategoricalMoments, categorical_hinton),
        )
        for moments_class, plotter in dispatch:
            try:
                converted = X._convert(moments_class)
            except Moments.NoConverterError:
                continue
            return plotter(converted, **kwargs)
    return _hinton_figure(X, **kwargs)
class Plotter():
    r"""
    Wrapper for plotting functions and base class for node plotters
    The purpose of this class is to collect all the parameters needed by a
    plotting function and provide a callable interface which needs only the node
    as the input.
    Plotter instances are callable objects that plot a given node using a
    specified plotting function.
    Parameters
    ----------
    plotter : function
        Plotting function to use
    args : defined by the plotting function
        Additional inputs needed by the plotting function
    kwargs : defined by the plotting function
        Additional keyword arguments supported by the plotting function
    Examples
    --------
    First, create a gamma variable:
    >>> import numpy as np
    >>> from bayespy.nodes import Gamma
    >>> x = Gamma(4, 5)
    The probability density function can be plotted as:
    >>> import bayespy.plot as bpplt
    >>> bpplt.pdf(x, np.linspace(0.1, 10, num=100)) # doctest: +ELLIPSIS
    [<matplotlib.lines.Line2D object at 0x...>]
    However, this can be problematic when one needs to provide a
    plotting function for the inference engine as the inference engine
    gives only the node as input. Thus, we need to create a simple
    plotter wrapper:
    >>> p = bpplt.Plotter(bpplt.pdf, np.linspace(0.1, 10, num=100))
    Now, this callable object ``p`` needs only the node as the input:
    >>> p(x) # doctest: +ELLIPSIS
    [<matplotlib.lines.Line2D object at 0x...>]
    Thus, it can be given to the inference engine to use as a plotting function:
    >>> x = Gamma(4, 5, plotter=p)
    >>> x.plot() # doctest: +ELLIPSIS
    [<matplotlib.lines.Line2D object at 0x...>]
    """
    def __init__(self, plotter, *args, **kwargs):
        # Store the plotting function together with its extra arguments so
        # the node can later be plotted with a single call.
        self._args = args
        self._kwargs = kwargs
        self._plotter = plotter
    def __call__(self, X, fig=None):
        """
        Plot the node using the specified plotting function
        Parameters
        ----------
        X : node
            The plotted node
        fig : figure, optional
            Figure to plot into; forwarded to the plotting function
        """
        return self._plotter(X, *self._args, fig=fig, **self._kwargs)
class PDFPlotter(Plotter):
    r"""
    Plotter of probability density function of a scalar node
    Parameters
    ----------
    x_grid : array
        Numerical grid on which the density function is computed and
        plotted
    See also
    --------
    pdf
    """
    def __init__(self, x_grid, **kwargs):
        # Bind the pdf plotting function with its fixed evaluation grid.
        super().__init__(pdf, x_grid, **kwargs)
class ContourPlotter(Plotter):
    r"""
    Plotter of probability density function of a two-dimensional node
    Parameters
    ----------
    x1_grid : array
        Grid for the first dimension
    x2_grid : array
        Grid for the second dimension
    See also
    --------
    contour
    """
    def __init__(self, x1_grid, x2_grid, **kwargs):
        # Bind the contour plotting function with its fixed grids.
        super().__init__(contour, x1_grid, x2_grid, **kwargs)
class HintonPlotter(Plotter):
    r"""
    Plotter of the Hinton diagram of a node
    See also
    --------
    hinton
    """
    def __init__(self, **kwargs):
        # Keyword arguments are forwarded to the hinton dispatcher.
        super().__init__(hinton, **kwargs)
class FunctionPlotter(Plotter):
    r"""
    Plotter of a node as a 1-dimensional function
    See also
    --------
    plot
    """
    def __init__(self, **kwargs):
        # Keyword arguments are forwarded to the plot function.
        super().__init__(plot, **kwargs)
class GaussianMarkovChainPlotter(Plotter):
    r"""
    Plotter of a Gaussian Markov chain as a timeseries
    """
    def __init__(self, **kwargs):
        # Keyword arguments are forwarded to timeseries_gaussian_mc.
        super().__init__(timeseries_gaussian_mc, **kwargs)
class GaussianTimeseriesPlotter(Plotter):
    r"""
    Plotter of a Gaussian node as a timeseries
    """
    def __init__(self, **kwargs):
        # Keyword arguments are forwarded to timeseries_gaussian.
        super().__init__(timeseries_gaussian, **kwargs)
class GaussianHintonPlotter(Plotter):
    r"""
    Plotter of a Gaussian node as a Hinton diagram
    """
    def __init__(self, **kwargs):
        # gaussian_array is an alias of gaussian_hinton.
        super().__init__(gaussian_array, **kwargs)
class CategoricalMarkovChainPlotter(Plotter):
    r"""
    Plotter of a Categorical timeseries
    """
    def __init__(self, **kwargs):
        # Keyword arguments are forwarded to timeseries_categorical_mc.
        super().__init__(timeseries_categorical_mc, **kwargs)
def matrix_animation(A, filename=None, fps=25, fig=None, **kwargs):
    """
    Animate a sequence of matrices as images.

    The first axis of ``A`` is the frame index.  Returns the created
    animation object; use save_animation to write it to disk.
    """
    # NOTE(review): `filename` is accepted but unused here — TODO confirm
    # whether it should be forwarded to save_animation.
    if fig is None:
        fig = plt.gcf()
    axes = fig.add_subplot(111)
    A = np.atleast_3d(A)
    # Symmetric color range so zero maps to the middle of the colormap
    vmax = np.max(np.abs(A))
    x = axes.imshow(A[0],
                    interpolation='nearest',
                    cmap='RdBu_r',
                    vmin=-vmax,
                    vmax=vmax,
                    **kwargs)
    s = axes.set_title('t = %d' % 0)
    def animate(nframe):
        # Update title text and image data for this frame
        s.set_text('t = %d' % nframe)
        x.set_array(A[nframe])
        return (x, s)
    anim = animation.FuncAnimation(fig, animate,
                                   frames=np.shape(A)[0],
                                   interval=1000/fps,
                                   blit=False,
                                   repeat=False)
    return anim
def save_animation(anim, filename, fps=25, bitrate=5000, fig=None):
    """
    Save an animation to a video file using the ffmpeg file writer.
    """
    # A bug in numpy/matplotlib causes this not to work in python3.3:
    # https://github.com/matplotlib/matplotlib/issues/1891
    #
    # So the following command does not work currently..
    #
    # anim.save(filename, fps=fps)
    if fig is None:
        fig = plt.gcf()
    writer = animation.FFMpegFileWriter(fps=fps, bitrate=bitrate)
    writer.setup(fig, filename, 100)
    anim.save(filename,
              fps=fps,
              writer=writer,
              bitrate=bitrate)
    return
def binary_matrix(A, axes=None):
    """
    Plot a matrix as a black-and-white image: True entries black, False
    entries white.

    NOTE(review): assumes ``A`` is a boolean array — TODO confirm; a
    non-boolean array would be used as a fancy index here.
    """
    if axes is None:
        axes = plt.gca()
    A = np.atleast_2d(A)
    # RGB image: one color triplet per matrix cell
    G = np.zeros(np.shape(A) + (3,))
    G[A] = [0,0,0]
    G[np.logical_not(A)] = [1,1,1]
    axes.imshow(G, interpolation='nearest')
def gaussian_mixture_logpdf(x, w, mu, Sigma):
    """
    Compute the log-density of a Gaussian mixture at the given points.

    Parameters
    ----------
    x : array, shape (N, D)
        Evaluation points
    w : array, shape (K,)
        Mixture weights
    mu : array, shape (K, D)
        Cluster means
    Sigma : array, shape (K, D, D)
        Cluster covariance matrices

    Returns
    -------
    array, shape (N,)
        Log-density of the mixture at each point
    """
    # Dimensionality
    D = np.shape(x)[-1]
    # Cholesky decomposition of the covariance matrix
    U = linalg.chol(Sigma)
    # Reshape x:
    # Shape(x) = (N, 1, D)
    x = np.expand_dims(x, axis=-2)
    # (x-mu) and (x-mu)'*inv(Sigma)*(x-mu):
    # Shape(v) = (N, K, D)
    # Shape(z) = (N, K)
    v = x - mu
    z = np.einsum('...i,...i', v, linalg.chol_solve(U, v))
    # Log-determinant of Sigma:
    # Shape(ldet) = (K,)
    ldet = linalg.chol_logdet(U)
    # Compute log pdf for each cluster:
    # Shape(lpdf) = (N, K)
    lpdf = misc.gaussian_logpdf(z, 0, 0, ldet, D)
    # BUG FIX: the original fell off the end and returned None, and never
    # used the mixture weights.  Combine the per-cluster log-densities
    # with the weights via a numerically stable log-sum-exp over the
    # cluster axis.
    joint = np.log(w) + lpdf
    peak = np.amax(joint, axis=-1, keepdims=True)
    return np.squeeze(peak, axis=-1) + np.log(np.sum(np.exp(joint - peak), axis=-1))
def matrixplot(A, colorbar=False, axes=None):
    """
    Plot a (possibly sparse) matrix as an image.
    """
    if axes is None:
        axes = plt.gca()
    # Densify sparse input, since imshow needs a dense array
    if sp.issparse(A):
        A = A.toarray()
    axes.imshow(A, interpolation='nearest')
    if colorbar:
        plt.colorbar(ax=axes)
def contourplot(x1, x2, y, colorbar=False, filled=True, axes=None):
    """Draw a 2D contour plot.

    ``x1`` and ``x2`` are 1D coordinate vectors and ``y`` holds the
    function values; ``y.size`` must equal ``x1.size * x2.size``.
    """
    if axes is None:
        axes = plt.gca()
    # Reshape the flat values onto the (x2, x1) grid
    Y = np.reshape(y, (len(x2), len(x1)))
    draw = axes.contourf if filled else axes.contour
    draw(x1, x2, Y)
    if colorbar:
        plt.colorbar(ax=axes)
def errorplot(y=None, error=None, x=None, lower=None, upper=None,
              color=(0,0,0,1), fillcolor=(0,0,0,0.4), axes=None, **kwargs):
    """
    Plot a function with a shaded error region around it.

    The shaded band is ``[y - lower, y + upper]``.  If only ``error`` is
    given, it is used symmetrically; if only one of ``lower``/``upper``
    is given, it is mirrored to the other side.
    """
    if axes is None:
        axes = plt.gca()
    # Default inputs
    if x is None:
        x = np.arange(np.size(y))
    # Parse errors (lower=lower/error/upper, upper=upper/error/lower)
    if lower is None:
        if error is not None:
            lower = error
        elif upper is not None:
            lower = upper
    if upper is None:
        if error is not None:
            upper = error
        elif lower is not None:
            upper = lower
    # Plot errors
    if (lower is not None) and (upper is not None):
        l = y - lower
        u = y + upper
        axes.fill_between(x,
                          l,
                          u,
                          facecolor=fillcolor,
                          edgecolor=(0, 0, 0, 0),
                          linewidth=1,
                          interpolate=True)
    # Plot function
    axes.plot(x, y, color=color, **kwargs)
def plotmatrix(X):
    """
    Creates a matrix of marginal plots.
    On diagonal, are marginal plots of each variable. Off-diagonal plot (i,j)
    shows the joint marginal density of x_i and x_j.
    """
    # Delegates entirely to the node's own plotmatrix implementation.
    return X.plotmatrix()
def _pdf_t(mu, s2, nu, axes=None, scale=4, color='k'):
    """
    Plot the pdf of a scalar Student-t distribution with location `mu`,
    scale `s2` and degrees of freedom `nu`.
    """
    if axes is None:
        axes = plt.gca()
    s = np.sqrt(s2)
    # Evaluate on +/- `scale` standard deviations around the location
    x = np.linspace(mu-scale*s, mu+scale*s, num=100)
    y2 = (x-mu)**2 / s2
    lpdf = random.t_logpdf(y2, np.log(s2), nu, 1)
    p = np.exp(lpdf)
    return axes.plot(x, p, color=color)
def _pdf_gamma(a, b, axes=None, scale=4, color='k'):
    """
    Plot the pdf of a gamma distribution with shape `a` and rate `b`
    (mean a/b).
    """
    if axes is None:
        axes = plt.gca()
    if np.size(a) != 1 or np.size(b) != 1:
        raise ValueError("Parameters must be scalars")
    # Evaluate on [mean - scale*std, mean + scale*std], clipped at zero
    mean = a/b
    v = scale*np.sqrt(a/b**2)
    m = max(0, mean-v)
    n = mean + v
    x = np.linspace(m, n, num=100)
    logx = np.log(x)
    lpdf = random.gamma_logpdf(b*x,
                               logx,
                               a*logx,
                               a*np.log(b),
                               special.gammaln(a))
    p = np.exp(lpdf)
    return axes.plot(x, p, color=color)
def _contour_t(mu, Cov, nu, axes=None, scale=4, transpose=False, colors='k'):
    """
    Plot contours of a 2-D Student-t density with location `mu`, scale
    matrix `Cov` and degrees of freedom `nu`.
    """
    if axes is None:
        axes = plt.gca()
    if np.shape(mu) != (2,) or np.shape(Cov) != (2,2) or np.shape(nu) != ():
        print(np.shape(mu), np.shape(Cov), np.shape(nu))
        raise ValueError("Only 2-d t-distribution allowed")
    if transpose:
        # Swap the two axes of the distribution
        mu = mu[[1,0]]
        Cov = Cov[np.ix_([1,0],[1,0])]
    s = np.sqrt(np.diag(Cov))
    x0 = np.linspace(mu[0]-scale*s[0], mu[0]+scale*s[0], num=100)
    x1 = np.linspace(mu[1]-scale*s[1], mu[1]+scale*s[1], num=100)
    X0X1 = misc.grid(x0, x1)
    Y = X0X1 - mu
    L = linalg.chol(Cov)
    logdet_Cov = linalg.chol_logdet(L)
    # Quadratic form (x-mu)' inv(Cov) (x-mu) for each grid point
    Z = linalg.chol_solve(L, Y)
    Z = linalg.inner(Y, Z, ndim=1)
    lpdf = random.t_logpdf(Z, logdet_Cov, nu, 2)
    p = np.exp(lpdf)
    shape = (np.size(x0), np.size(x1))
    X0 = np.reshape(X0X1[:,0], shape)
    X1 = np.reshape(X0X1[:,1], shape)
    P = np.reshape(p, shape)
    return axes.contour(X0, X1, P, colors=colors)
def _contour_gaussian_gamma(mu, s2, a, b, axes=None, transpose=False):
    """
    Plot contours of a joint Gaussian-gamma distribution.

    Not implemented; placeholder kept for API symmetry with the other
    private contour helpers.
    """
    pass
|
SalemAmeen/bayespy
|
bayespy/plot.py
|
Python
|
mit
| 35,411
|
[
"Gaussian"
] |
9a927c2ed137b318bbe293d4a1a36c52a1f4e8b23bc514b59e18f6332fc8e841
|
# Copyright 2005-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Author(s): Brian Harring (ferringb@gentoo.org)
import os as _os
import sys
from portage.cache import template
from portage import os
from portage.proxy.lazyimport import lazyimport
lazyimport(globals(),
'portage.exception:PortageException',
'portage.util:apply_permissions,ensure_dirs',
)
del lazyimport
if sys.hexversion >= 0x3000000:
	# pylint: disable=W0622
	# Python 3 has no separate `long` type; alias it to `int` so the
	# mtime handling below can keep calling long().
	long = int
class FsBased(template.database):
	"""template wrapping fs needed options, and providing _ensure_access as a way to
	attempt to ensure files have the specified owners/perms"""
	def __init__(self, *args, **config):
		# Pop gid/perms out of the config so the base class never sees
		# them; -1 means "leave unchanged".
		for x, y in (("gid", -1), ("perms", -1)):
			if x in config:
				# Since Python 3.4, chown requires int type (no proxies).
				setattr(self, "_" + x, int(config[x]))
				del config[x]
			else:
				setattr(self, "_"+x, y)
		super(FsBased, self).__init__(*args, **config)
		if self.label.startswith(os.path.sep):
			# normpath, keeping exactly one leading separator.
			self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
	def _ensure_access(self, path, mtime=-1):
		"""returns true or false if it's able to ensure that path is properly chmod'd and chowned.
		if mtime is specified, attempts to ensure that's correct also"""
		try:
			apply_permissions(path, gid=self._gid, mode=self._perms)
			if mtime != -1:
				mtime=long(mtime)
				os.utime(path, (mtime, mtime))
		except (PortageException, EnvironmentError):
			# Best-effort: report failure instead of raising.
			return False
		return True
	def _ensure_dirs(self, path=None):
		"""with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
		if path:
			path = os.path.dirname(path)
			base = self.location
		else:
			path = self.location
			base='/'
		# Walk the path component by component, creating and chmodding
		# only the directories that did not exist yet.
		for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
			base = os.path.join(base,dir)
			if ensure_dirs(base):
				# We only call apply_permissions if ensure_dirs created
				# a new directory, so as not to interfere with
				# permissions of existing directories.
				mode = self._perms
				if mode == -1:
					mode = 0
				mode |= 0o755
				apply_permissions(base, mode=mode, gid=self._gid)
	def _prune_empty_dirs(self):
		# Collect all subdirectories, then try to rmdir them in reverse
		# (bottom-up) order; non-empty ones simply fail and are skipped.
		all_dirs = []
		for parent, dirs, files in os.walk(self.location):
			for x in dirs:
				all_dirs.append(_os.path.join(parent, x))
		while all_dirs:
			try:
				_os.rmdir(all_dirs.pop())
			except OSError:
				pass
def gen_label(base, label):
	"""Generate a unique label when the supplied label looks like a path.

	A label without a path separator is returned unchanged.  A path-like
	label is normalized and suffixed with a hash of the normalized path so
	that distinct paths yield distinct labels.  (``base`` is accepted for
	API compatibility but not used.)
	"""
	if os.path.sep not in label:
		return label
	# Drop surrounding quoting, then rebuild the path without leading or
	# trailing separators.
	cleaned = label.strip("\"").strip("'")
	normalized = os.path.join(*(cleaned.rstrip(os.path.sep).split(os.path.sep)))
	tail = os.path.split(normalized)[1]
	return "%s-%X" % (tail, abs(hash(normalized)))
|
ptisserand/portage
|
pym/portage/cache/fs_template.py
|
Python
|
gpl-2.0
| 2,810
|
[
"Brian"
] |
69e384bd650b475d75dee78eed9a4e1612967b7b8b2bfd7b033393ad6a520484
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
JahnTeller distortion analysis.
"""
import os
import sys
import warnings
from typing import Any, Dict, Optional, Tuple, Union, cast
import numpy as np
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.local_env import (
LocalStructOrderParams,
get_neighbors_of_site_with_index,
)
from pymatgen.core.periodic_table import Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class JahnTellerAnalyzer:
    """
    Will attempt to classify if structure *may* be Jahn-Teller active.
    Class currently uses datafile of hard-coded common Jahn-Teller
    active ions.
    If structure is annotated with magnetic moments, will estimate
    if structure may be high-spin or low-spin.
    Class aims for more false-positives than false-negatives.
    """
    def __init__(self):
        """
        Init for JahnTellerAnalyzer.
        """
        # Orbital occupations for each d-electron count and spin state,
        # plus the experimentally typical ("default") spin state.  The
        # trailing comments note the expected Jahn-Teller strength.
        self.spin_configs = {
            "oct": { # key is number of d electrons
                0: {"high": {"e_g": 0, "t_2g": 0}, "default": "high"},
                1: {"high": {"e_g": 0, "t_2g": 1}, "default": "high"}, # weak J-T
                2: {"high": {"e_g": 0, "t_2g": 2}, "default": "high"}, # weak
                3: {"high": {"e_g": 0, "t_2g": 3}, "default": "high"}, # no J-T
                4: {
                    "high": {"e_g": 1, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 4},
                    "default": "high",
                }, # strong high, weak low
                5: {
                    "high": {"e_g": 2, "t_2g": 3},
                    "low": {"e_g": 0, "t_2g": 5},
                    "default": "low",
                }, # no high, weak low
                6: {
                    "high": {"e_g": 2, "t_2g": 4},
                    "low": {"e_g": 0, "t_2g": 6},
                    "default": "high",
                }, # weak high, no low
                7: {
                    "high": {"e_g": 2, "t_2g": 5},
                    "low": {"e_g": 1, "t_2g": 6},
                    "default": "low",
                }, # weak high, strong low
                8: {"high": {"e_g": 2, "t_2g": 6}, "default": "high"}, # no
                9: {"high": {"e_g": 3, "t_2g": 6}, "default": "high"}, # strong
                10: {"high": {"e_g": 4, "t_2g": 6}, "default": "high"},
            },
            "tet": { # no low spin observed experimentally in tetrahedral, all weak J-T
                0: {"high": {"e": 0, "t_2": 0}, "default": "high"},
                1: {"high": {"e": 1, "t_2": 0}, "default": "high"},
                2: {"high": {"e": 2, "t_2": 0}, "default": "high"},
                3: {"high": {"e": 2, "t_2": 1}, "default": "high"},
                4: {"high": {"e": 2, "t_2": 2}, "default": "high"},
                5: {"high": {"e": 2, "t_2": 3}, "default": "high"},
                6: {"high": {"e": 3, "t_2": 3}, "default": "high"},
                7: {"high": {"e": 4, "t_2": 3}, "default": "high"},
                8: {"high": {"e": 4, "t_2": 4}, "default": "high"},
                9: {"high": {"e": 4, "t_2": 5}, "default": "high"},
                10: {"high": {"e": 4, "t_2": 6}, "default": "high"},
            },
        }
    def get_analysis_and_structure(
        self,
        structure: Structure,
        calculate_valences: bool = True,
        guesstimate_spin: bool = False,
        op_threshold: float = 0.1,
    ) -> Tuple[Dict, Structure]:
        """Obtain an analysis of a given structure and if it may be Jahn-Teller
        active or not. This is a heuristic, and may give false positives and
        false negatives (false positives are preferred).
        Args:
            structure: input structure
            calculate_valences: whether to attempt to calculate valences or not, structure
                should have oxidation states to perform analysis (Default value = True)
            guesstimate_spin: whether to guesstimate spin state from magnetic moments
                or not, use with caution (Default value = False)
            op_threshold: threshold for order parameter above which to consider site
                to match an octahedral or tetrahedral motif, since Jahn-Teller structures
                can often be
                quite distorted, this threshold is smaller than one might expect
        Returns:
            analysis of structure, with key 'strength' which may be 'none', 'strong',
            'weak', or 'unknown' (Default value = 0.1) and decorated structure
        """
        structure = structure.get_primitive_structure()
        if calculate_valences:
            bva = BVAnalyzer()
            structure = bva.get_oxi_state_decorated_structure(structure)
        # no point testing multiple equivalent sites, doesn't make any difference to analysis
        # but makes returned
        symmetrized_structure = SpacegroupAnalyzer(structure).get_symmetrized_structure()
        # to detect structural motifs of a given site
        op = LocalStructOrderParams(["oct", "tet"])
        # dict of site index to the Jahn-Teller analysis of that site
        jt_sites = []
        non_jt_sites = []
        for indices in symmetrized_structure.equivalent_indices:
            idx = indices[0]
            site = symmetrized_structure[idx]
            # only interested in sites with oxidation states
            if isinstance(site.specie, Species) and site.specie.element.is_transition_metal:
                # get motif around site: pick whichever of oct/tet fits
                # best, provided its order parameter clears the threshold
                order_params = op.get_order_parameters(symmetrized_structure, idx)
                if order_params[0] > order_params[1] and order_params[0] > op_threshold:
                    motif = "oct"
                    motif_order_parameter = order_params[0]
                elif order_params[1] > op_threshold:
                    motif = "tet"
                    motif_order_parameter = order_params[1]
                else:
                    motif = "unknown"
                    motif_order_parameter = None
                if motif in ["oct", "tet"]:
                    motif = cast(Literal["oct", "tet"], motif) # mypy needs help
                    # guess spin of metal ion
                    if guesstimate_spin and "magmom" in site.properties:
                        # estimate if high spin or low spin
                        magmom = site.properties["magmom"]
                        spin_state = self._estimate_spin_state(site.specie, motif, magmom)
                    else:
                        spin_state = "unknown"
                    magnitude = self.get_magnitude_of_effect_from_species(site.specie, spin_state, motif)
                    if magnitude != "none":
                        ligands = get_neighbors_of_site_with_index(structure, idx, approach="min_dist", delta=0.15)
                        ligand_bond_lengths = [ligand.distance(structure[idx]) for ligand in ligands]
                        ligands_species = list({str(ligand.specie) for ligand in ligands})
                        ligand_bond_length_spread = max(ligand_bond_lengths) - min(ligand_bond_lengths)
                        def trim(f):
                            """
                            Avoid storing to unreasonable precision, hurts readability.
                            """
                            return float(f"{f:.4f}")
                        # to be Jahn-Teller active, all ligands have to be the same
                        if len(ligands_species) == 1:
                            jt_sites.append(
                                {
                                    "strength": magnitude,
                                    "motif": motif,
                                    "motif_order_parameter": trim(motif_order_parameter),
                                    "spin_state": spin_state,
                                    "species": str(site.specie),
                                    "ligand": ligands_species[0],
                                    "ligand_bond_lengths": [trim(length) for length in ligand_bond_lengths],
                                    "ligand_bond_length_spread": trim(ligand_bond_length_spread),
                                    "site_indices": indices,
                                }
                            )
                        # store reasons for not being J-T active
                        else:
                            non_jt_sites.append(
                                {
                                    "site_indices": indices,
                                    "strength": "none",
                                    "reason": "Not Jahn-Teller active for this electronic configuration.",
                                }
                            )
                    else:
                        non_jt_sites.append(
                            {
                                "site_indices": indices,
                                "strength": "none",
                                "reason": f"motif is {motif}",
                            }
                        )
        # perform aggregation of all sites
        if jt_sites:
            analysis = {"active": True} # type: Dict[str, Any]
            # if any site could exhibit 'strong' Jahn-Teller effect
            # then mark whole structure as strong
            strong_magnitudes = [site["strength"] == "strong" for site in jt_sites]
            if any(strong_magnitudes):
                analysis["strength"] = "strong"
            else:
                analysis["strength"] = "weak"
            analysis["sites"] = jt_sites
            return analysis, structure
        return {"active": False, "sites": non_jt_sites}, structure
def get_analysis(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Dict:
"""
Convenience method, uses get_analysis_and_structure method.
Obtain an analysis of a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
analysis of structure, with key 'strength' which may be 'none', 'strong',
'weak', or 'unknown' (Default value = 0.1)
"""
return self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)[0]
def is_jahn_teller_active(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> bool:
"""
Convenience method, uses get_analysis_and_structure method.
Check if a given structure and if it may be Jahn-Teller
active or not. This is a heuristic, and may give false positives and
false negatives (false positives are preferred).
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
boolean, True if might be Jahn-Teller active, False if not
"""
active = False
try:
analysis = self.get_analysis(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
active = analysis["active"]
except Exception as e:
warnings.warn(f"Error analyzing {structure.composition.reduced_formula}: {e}")
return active
def tag_structure(
self,
structure: Structure,
calculate_valences: bool = True,
guesstimate_spin: bool = False,
op_threshold: float = 0.1,
) -> Structure:
"""
Convenience method, uses get_analysis_and_structure method.
Add a "possible_jt_active" site property on Structure.
Args:
structure: input structure
calculate_valences: whether to attempt to calculate valences or not, structure
should have oxidation states to perform analysis (Default value = True)
guesstimate_spin: whether to guesstimate spin state from magnetic moments
or not, use with caution (Default value = False)
op_threshold: threshold for order parameter above which to consider site
to match an octahedral or tetrahedral motif, since Jahn-Teller structures
can often be
quite distorted, this threshold is smaller than one might expect
Returns:
Decorated Structure, will be in primitive setting.
"""
try:
analysis, structure = self.get_analysis_and_structure(
structure,
calculate_valences=calculate_valences,
guesstimate_spin=guesstimate_spin,
op_threshold=op_threshold,
)
jt_sites = [False] * len(structure)
if analysis["active"]:
for site in analysis["sites"]:
for index in site["site_indices"]:
jt_sites[index] = True
structure.add_site_property("possible_jt_active", jt_sites)
return structure
except Exception as e:
warnings.warn(f"Error analyzing {structure.composition.reduced_formula}: {e}")
return structure
@staticmethod
def _get_number_of_d_electrons(species: Species) -> float:
"""
Get number of d electrons of a species.
Args:
species: Species object
Returns: Number of d electrons.
"""
# TODO: replace with more generic Hund's rule algorithm?
# taken from get_crystal_field_spin
elec = species.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(f"Invalid element {species.symbol} for crystal field calculation.")
nelectrons = int(elec[-1][2] + elec[-2][2] - species.oxi_state)
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(f"Invalid oxidation state {species.oxi_state} for element {species.symbol}")
return nelectrons
    def get_magnitude_of_effect_from_species(self, species: Union[str, Species], spin_state: str, motif: str) -> str:
        """
        Get magnitude of Jahn-Teller effect from provided species, spin state and motif.
        Args:
            species: e.g. Fe2+
            spin_state: "high" or "low"
            motif: "oct" or "tet"
        Returns: "none", "weak" or "strong"
        """
        magnitude = "none"
        sp = get_el_sp(species)
        # has to be Species; we need to know the oxidation state
        if isinstance(sp, Species) and sp.element.is_transition_metal:
            d_electrons = self._get_number_of_d_electrons(sp)
            if motif in self.spin_configs:
                # Fall back to the typical ("default") spin state for this
                # motif and d-electron count when the requested one has no
                # tabulated configuration.
                if spin_state not in self.spin_configs[motif][d_electrons]:
                    spin_state = self.spin_configs[motif][d_electrons]["default"]
                spin_config = self.spin_configs[motif][d_electrons][spin_state]
                magnitude = JahnTellerAnalyzer.get_magnitude_of_effect_from_spin_config(motif, spin_config)
            else:
                warnings.warn("No data for this species.")
        return magnitude
@staticmethod
def get_magnitude_of_effect_from_spin_config(motif: str, spin_config: Dict[str, float]) -> str:
"""
Roughly, the magnitude of Jahn-Teller distortion will be:
* in octahedral environments, strong if e_g orbitals
unevenly occupied but weak if t_2g orbitals unevenly
occupied
* in tetrahedral environments always weaker
Args:
motif: "oct" or "tet"
spin_config: dict of 'e' (e_g) and 't' (t2_g)
with number of electrons in each state
Returns: "none", "weak" or "strong"
"""
magnitude = "none"
if motif == "oct":
e_g = spin_config["e_g"]
t_2g = spin_config["t_2g"]
if (e_g % 2 != 0) or (t_2g % 3 != 0):
magnitude = "weak"
if e_g % 2 == 1:
magnitude = "strong"
elif motif == "tet":
e = spin_config["e"]
t_2 = spin_config["t_2"]
if (e % 3 != 0) or (t_2 % 2 != 0):
magnitude = "weak"
return magnitude
    @staticmethod
    def _estimate_spin_state(
        species: Union[str, Species], motif: Literal["oct", "tet"], known_magmom: float
    ) -> Literal["undefined", "low", "high", "unknown"]:
        """Simple heuristic to estimate spin state. If magnetic moment
        is sufficiently close to that predicted for a given spin state,
        we assign it that state. If we only have data for one spin
        state then that's the one we use (e.g. we assume all tetrahedral
        complexes are high-spin, since this is typically the case).
        Args:
            species: str or Species
            motif ("oct" | "tet"): Tetrahedron or octahedron crystal site coordination
            known_magmom: magnetic moment in Bohr magnetons
        Returns:
            "undefined" (if only one spin state possible), "low", "high" or "unknown"
        """
        # Spin-only moments expected for each candidate spin state
        mu_so_high = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="high")
        mu_so_low = JahnTellerAnalyzer.mu_so(species, motif=motif, spin_state="low")
        if mu_so_high == mu_so_low:
            return "undefined" # undefined or only one spin state possible
        if mu_so_high is None:
            return "low"
        if mu_so_low is None:
            return "high"
        diff = mu_so_high - mu_so_low
        # WARNING! this heuristic has not been robustly tested or benchmarked
        # using 'diff*0.25' as arbitrary measure, if known magmom is
        # too far away from expected value, we don't try to classify it
        if known_magmom > mu_so_high or abs(mu_so_high - known_magmom) < diff * 0.25:
            return "high"
        if known_magmom < mu_so_low or abs(mu_so_low - known_magmom) < diff * 0.25:
            return "low"
        return "unknown"
    @staticmethod
    def mu_so(
        species: Union[str, Species], motif: Literal["oct", "tet"], spin_state: Literal["high", "low"]
    ) -> Optional[float]:
        """Calculates the spin-only magnetic moment for a
        given species. Only supports transition metals.
        Args:
            species: Species
            motif ("oct" | "tet"): Tetrahedron or octahedron crystal site coordination
            spin_state ("low" | "high"): Whether the species is in a high or low spin state
        Returns:
            float: Spin-only magnetic moment in Bohr magnetons or None if
            species crystal field not defined
        """
        try:
            sp = get_el_sp(species)
            n = sp.get_crystal_field_spin(coordination=motif, spin_config=spin_state)
            # calculation spin-only magnetic moment for this number of unpaired spins
            return np.sqrt(n * (n + 2))
        except AttributeError:
            # get_crystal_field_spin raises AttributeError when the
            # crystal field is not defined for this species
            return None
|
vorwerkc/pymatgen
|
pymatgen/analysis/magnetism/jahnteller.py
|
Python
|
mit
| 20,898
|
[
"CRYSTAL",
"pymatgen"
] |
9a8bf630645b1b12a6671b8b15061d030e37dcedd7586d7a02721a299d160ad4
|
"""Alignment with SNAP: http://snap.cs.berkeley.edu/
"""
import os
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.ngsalign import novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
    """Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
    TODO: Use streaming with new development version of SNAP to feed into
    structural variation preparation de-duplication.
    """
    pair_file = pair_file if pair_file else ""
    out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
    assert not data.get("align_split"), "Split alignments not supported with SNAP"
    snap = config_utils.get_program("snap", data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    # NOTE(review): `resources` is currently unused — confirm before removing.
    resources = config_utils.get_resources("snap", data["config"])
    rg_info = novoalign.get_rg_info(names)
    # BAM inputs carry their own pairing information; for fastq inputs,
    # pairing is signalled by the presence of a second file.
    is_paired = bam.is_paired(fastq_file) if fastq_file.endswith(".bam") else pair_file
    if not utils.file_exists(out_file):
        with postalign.tobam_cl(data, out_file, is_paired) as (tobam_cl, tx_out_file):
            cmd_name = "paired" if is_paired else "single"
            # SNAP writes SAM to stdout, which is piped into the BAM
            # conversion/sorting command supplied by tobam_cl.
            cmd = ("{snap} {cmd_name} {index_dir} {fastq_file} {pair_file} "
                   "-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
            do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
    data["work_bam"] = out_file
    return data
def align_bam(bam_file, index_dir, names, align_dir, data):
    """Align from a BAM input by delegating to ``align`` with no pair file."""
    return align(bam_file, None, index_dir, names, align_dir, data)
# Optional galaxy location file. Falls back on remap_index_fn if not found.
galaxy_location_file = "snap_indices.loc"
def remap_index_fn(ref_file):
    """Locate the SNAP index directory for a sequence reference file.

    The standard layout keeps a ``snap`` directory as a sibling of the
    directory that contains ``ref_file``.
    """
    seq_dir = os.path.dirname(ref_file)
    snap_dir = os.path.join(seq_dir, os.pardir, "snap")
    # Fail loudly if the expected directory is missing.
    assert os.path.isdir(snap_dir), snap_dir
    return snap_dir
|
gifford-lab/bcbio-nextgen
|
bcbio/ngsalign/snap.py
|
Python
|
mit
| 2,111
|
[
"Galaxy"
] |
58edf1719a611e4e1e257f1ca03f371a05c30453e5e2abd76d7b572e8678d2a9
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implement AST convertor to Elastic Search DSL."""
from invenio.base.globals import cfg
from invenio_query_parser.ast import (
AndOp, KeywordOp, OrOp,
NotOp, Keyword, Value,
SingleQuotedValue,
DoubleQuotedValue,
RegexValue, RangeOp,
ValueQuery, EmptyQuery,
GreaterOp, GreaterEqualOp,
LowerOp, LowerEqualOp
)
from invenio_query_parser.visitor import make_visitor
class ElasticSearchDSL(object):
    """Implement visitor to create Elastic Search DSL."""
    visitor = make_visitor()
    # pylint: disable=W0613,E0102
    def __init__(self):
        """Load the configured keyword-to-fields mapping.

        The mapping is a dictionary from Invenio keywords to lists of
        elasticsearch fields, e.g.
        ``{"author": ["author.last_name", "author.first_name"]}``.
        """
        self.keyword_dict = cfg['SEARCH_ELASTIC_KEYWORD_MAPPING']
def map_keyword_to_fields(self, keyword):
"""Convert keyword to keyword list for searches
Map keyword to elasticsearch fields if needed
"""
if self.keyword_dict:
res = self.keyword_dict.get(keyword)
return res if res else [str(keyword)]
return [str(keyword)]
@visitor(AndOp)
def visit(self, node, left, right):
return {'bool': {'must': [left, right]}}
@visitor(OrOp)
def visit(self, node, left, right):
return {'bool': {'should': [left, right]}}
@visitor(NotOp)
def visit(self, node, op):
return {'bool': {'must_not': [op]}}
@visitor(KeywordOp)
def visit(self, node, left, right):
if callable(right):
return right(left)
raise RuntimeError("Not supported second level operation.")
@visitor(ValueQuery)
def visit(self, node, op):
return op(['_all'])
@visitor(Keyword)
def visit(self, node):
return self.map_keyword_to_fields(node.value)
@visitor(Value)
def visit(self, node):
return lambda keyword: {
'multi_match': {
'query': node.value,
'fields': keyword
}
}
@visitor(SingleQuotedValue)
def visit(self, node):
return lambda keyword: {
'multi_match': {
'query': node.value,
'type': 'phrase',
'fields': keyword
}
}
@visitor(DoubleQuotedValue)
def visit(self, node):
def _f(keyword):
if (len(keyword) > 1):
return {"bool":
{"should": [{"term": {k: str(node.value)}}
for k in keyword]}}
else:
return {'term': {keyword[0]: node.value}}
return _f
@visitor(RegexValue)
def visit(self, node):
def _f(keyword):
if len(keyword) > 1:
res = {"bool": {"should": []}}
res["bool"]["should"] = [{'regexp': {k: node.value}}
for k in keyword]
elif keyword[0] != "_all":
res = {'regexp': {keyword[0]: node.value}}
else:
raise RuntimeError("Not supported regex search for all fields")
return res
return _f
@visitor(RangeOp)
def visit(self, node, left, right):
condition = {}
if left:
condition['gte'] = left(None)["multi_match"]["query"]
if right:
condition['lte'] = right(None)["multi_match"]["query"]
def _f(keyword):
if len(keyword) > 1:
res = {"bool": {"should": []}}
res["bool"]["should"] = [{'range': {k: condition}}
for k in keyword]
else:
res = {'range': {keyword[0]: condition}}
return res
return _f
@visitor(EmptyQuery)
def visit(self, node):
return {
"match_all": {}
}
@staticmethod
def _operators(node, condition):
def _f(keyword):
if len(keyword) > 1:
res = {"bool": {"should": []}}
res["bool"]["should"] = [{'range': {k: condition}}
for k in keyword]
else:
res = {'range': {keyword[0]: condition}}
return res
return _f
@visitor(GreaterOp)
def visit(self, node, value_fn):
condition = {"gt": value_fn(None)["multi_match"]["query"]}
return self._operators(node, condition)
@visitor(LowerOp)
def visit(self, node, value_fn):
condition = {"lt": value_fn(None)["multi_match"]["query"]}
return self._operators(node, condition)
@visitor(GreaterEqualOp)
def visit(self, node, value_fn):
condition = {"gte": value_fn(None)["multi_match"]["query"]}
return self._operators(node, condition)
@visitor(LowerEqualOp)
def visit(self, node, value_fn):
condition = {"lte": value_fn(None)["multi_match"]["query"]}
return self._operators(node, condition)
# pylint: enable=W0612,E0102
|
chokribr/invenio
|
invenio/modules/search/walkers/elasticsearch.py
|
Python
|
gpl-2.0
| 5,833
|
[
"VisIt"
] |
88e9bb668e4f8e882cd73756964a62ff218f69b321a4ad7bf7723f395637c923
|
import io
import pickle
import tempfile
import typing as t
from contextlib import contextmanager
from copy import copy
from copy import deepcopy
import pytest
from werkzeug import datastructures as ds
from werkzeug import http
from werkzeug.exceptions import BadRequestKeyError
class TestNativeItermethods:
    """Check that multi-aware keys/values/items iterators stay in sync."""

    def test_basic(self):
        class StupidDict:
            # Minimal stand-in exposing multi-aware iterator methods.
            def keys(self, multi=1):
                return iter(["a", "b", "c"] * multi)

            def values(self, multi=1):
                return iter([1, 2, 3] * multi)

            def items(self, multi=1):
                pairs = zip(self.keys(multi=multi), self.values(multi=multi))
                return iter(pairs)

        d = StupidDict()
        base_keys = ["a", "b", "c"]
        base_values = [1, 2, 3]
        base_items = list(zip(base_keys, base_values))
        # multi=1 (the default) and multi=2 both repeat consistently.
        assert list(d.keys()) == base_keys
        assert list(d.values()) == base_values
        assert list(d.items()) == base_items
        assert list(d.keys(2)) == base_keys * 2
        assert list(d.values(2)) == base_values * 2
        assert list(d.items(2)) == base_items * 2
class _MutableMultiDictTests:
    """Shared test suite for mutable MultiDict implementations.

    Subclasses set ``storage_class`` to the concrete type under test.
    """

    # Concrete MultiDict subclass under test; assigned by subclasses.
    storage_class: t.Type["ds.MultiDict"]

    def test_pickle(self):
        """Round-trip instances through every supported pickle protocol."""
        cls = self.storage_class

        def create_instance(module=None):
            # Optionally fake the defining module so the pickle payload
            # references a different import path for the class.
            if module is None:
                d = cls()
            else:
                old = cls.__module__
                cls.__module__ = module
                d = cls()
                cls.__module__ = old
            d.setlist(b"foo", [1, 2, 3, 4])
            d.setlist(b"bar", b"foo bar baz".split())
            return d

        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            d = create_instance()
            s = pickle.dumps(d, protocol)
            ud = pickle.loads(s)
            assert type(ud) == type(d)
            assert ud == d
            alternative = pickle.dumps(create_instance("werkzeug"), protocol)
            assert pickle.loads(alternative) == d
            # Mutating the unpickled copy must not affect equality checks.
            ud[b"newkey"] = b"bla"
            assert ud != d

    def test_multidict_dict_interop(self):
        """dict() conversion keeps only the first value per key."""
        # https://github.com/pallets/werkzeug/pull/2043
        md = self.storage_class([("a", 1), ("a", 2)])
        assert dict(md)["a"] != [1, 2]
        assert dict(md)["a"] == 1
        assert dict(md) == {**md} == {"a": 1}

    def test_basic_interface(self):
        """Exercise the full mutable MultiDict API surface."""
        md = self.storage_class()
        assert isinstance(md, dict)
        mapping = [
            ("a", 1),
            ("b", 2),
            ("a", 2),
            ("d", 3),
            ("a", 1),
            ("a", 3),
            ("d", 4),
            ("c", 3),
        ]
        md = self.storage_class(mapping)
        # simple getitem gives the first value
        assert md["a"] == 1
        assert md["c"] == 3
        with pytest.raises(KeyError):
            md["e"]
        assert md.get("a") == 1
        # list getitem
        assert md.getlist("a") == [1, 2, 1, 3]
        assert md.getlist("d") == [3, 4]
        # do not raise if key not found
        assert md.getlist("x") == []
        # simple setitem overwrites all values
        md["a"] = 42
        assert md.getlist("a") == [42]
        # list setitem
        md.setlist("a", [1, 2, 3])
        assert md["a"] == 1
        assert md.getlist("a") == [1, 2, 3]
        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist("a", l1)
        del l1[:]
        assert md["a"] == 1
        # setdefault, setlistdefault
        assert md.setdefault("u", 23) == 23
        assert md.getlist("u") == [23]
        del md["u"]
        md.setlist("u", [-1, -2])
        # delitem
        del md["u"]
        with pytest.raises(KeyError):
            md["u"]
        del md["d"]
        assert md.getlist("d") == []
        # keys, values, items, lists
        # (each call repeated to prove the views are re-iterable)
        assert list(sorted(md.keys())) == ["a", "b", "c"]
        assert list(sorted(md.keys())) == ["a", "b", "c"]
        assert list(sorted(md.values())) == [1, 2, 3]
        assert list(sorted(md.values())) == [1, 2, 3]
        assert list(sorted(md.items())) == [("a", 1), ("b", 2), ("c", 3)]
        assert list(sorted(md.items(multi=True))) == [
            ("a", 1),
            ("a", 2),
            ("a", 3),
            ("b", 2),
            ("c", 3),
        ]
        assert list(sorted(md.items())) == [("a", 1), ("b", 2), ("c", 3)]
        assert list(sorted(md.items(multi=True))) == [
            ("a", 1),
            ("a", 2),
            ("a", 3),
            ("b", 2),
            ("c", 3),
        ]
        assert list(sorted(md.lists())) == [("a", [1, 2, 3]), ("b", [2]), ("c", [3])]
        assert list(sorted(md.lists())) == [("a", [1, 2, 3]), ("b", [2]), ("c", [3])]
        # copy method
        c = md.copy()
        assert c["a"] == 1
        assert c.getlist("a") == [1, 2, 3]
        # copy method 2
        c = copy(md)
        assert c["a"] == 1
        assert c.getlist("a") == [1, 2, 3]
        # deepcopy method
        c = md.deepcopy()
        assert c["a"] == 1
        assert c.getlist("a") == [1, 2, 3]
        # deepcopy method 2
        c = deepcopy(md)
        assert c["a"] == 1
        assert c.getlist("a") == [1, 2, 3]
        # update with a multidict
        od = self.storage_class([("a", 4), ("a", 5), ("y", 0)])
        md.update(od)
        assert md.getlist("a") == [1, 2, 3, 4, 5]
        assert md.getlist("y") == [0]
        # update with a regular dict
        md = c
        od = {"a": 4, "y": 0}
        md.update(od)
        assert md.getlist("a") == [1, 2, 3, 4]
        assert md.getlist("y") == [0]
        # pop, poplist, popitem, popitemlist
        assert md.pop("y") == 0
        assert "y" not in md
        assert md.poplist("a") == [1, 2, 3, 4]
        assert "a" not in md
        assert md.poplist("missing") == []
        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [("b", 2), ("c", 3)]
        popped = md.popitemlist()
        assert popped in [("b", [2]), ("c", [3])]
        # type conversion
        md = self.storage_class({"a": "4", "b": ["2", "3"]})
        assert md.get("a", type=int) == 4
        assert md.getlist("b", type=int) == [2, 3]
        # repr
        md = self.storage_class([("a", 1), ("a", 2), ("b", 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)
        # add and getlist
        md.add("c", "42")
        md.add("c", "23")
        assert md.getlist("c") == ["42", "23"]
        md.add("c", "blah")
        # values that fail the type conversion are silently dropped
        assert md.getlist("c", type=int) == [42, 23]
        # setdefault
        md = self.storage_class()
        md.setdefault("x", []).append(42)
        md.setdefault("x", []).append(23)
        assert md["x"] == [42, 23]
        # to dict
        md = self.storage_class()
        md["foo"] = 42
        md.add("bar", 1)
        md.add("bar", 2)
        assert md.to_dict() == {"foo": 42, "bar": 1}
        assert md.to_dict(flat=False) == {"foo": [42], "bar": [1, 2]}
        # popitem from empty dict
        with pytest.raises(KeyError):
            self.storage_class().popitem()
        with pytest.raises(KeyError):
            self.storage_class().popitemlist()
        # key errors are of a special type
        with pytest.raises(BadRequestKeyError):
            self.storage_class()[42]
        # setlist works
        md = self.storage_class()
        md["foo"] = 42
        md.setlist("foo", [1, 2])
        assert md.getlist("foo") == [1, 2]
class _ImmutableDictTests:
    """Shared test suite for immutable dict implementations.

    Subclasses set ``storage_class`` to the concrete type under test.
    """

    # Concrete immutable dict subclass under test; assigned by subclasses.
    storage_class: t.Type[dict]

    def test_follows_dict_interface(self):
        """Read-only dict operations behave like a plain dict."""
        cls = self.storage_class
        data = {"foo": 1, "bar": 2, "baz": 3}
        d = cls(data)
        assert d["foo"] == 1
        assert d["bar"] == 2
        assert d["baz"] == 3
        assert sorted(d.keys()) == ["bar", "baz", "foo"]
        assert "foo" in d
        assert "foox" not in d
        assert len(d) == 3

    def test_copies_are_mutable(self):
        """copy() yields a mutable dict; copy.copy keeps the same object."""
        cls = self.storage_class
        immutable = cls({"a": 1})
        with pytest.raises(TypeError):
            immutable.pop("a")
        mutable = immutable.copy()
        mutable.pop("a")
        assert "a" in immutable
        assert mutable is not immutable
        # Immutable objects may be returned as-is from copy().
        assert copy(immutable) is immutable

    def test_dict_is_hashable(self):
        """Instances are hashable and usable as set members."""
        cls = self.storage_class
        immutable = cls({"a": 1, "b": 2})
        immutable2 = cls({"a": 2, "b": 2})
        x = {immutable}
        assert immutable in x
        assert immutable2 not in x
        x.discard(immutable)
        assert immutable not in x
        assert immutable2 not in x
        x.add(immutable2)
        assert immutable not in x
        assert immutable2 in x
        x.add(immutable)
        assert immutable in x
        assert immutable2 in x
class TestImmutableTypeConversionDict(_ImmutableDictTests):
    """Run the shared immutable-dict suite against ImmutableTypeConversionDict."""

    storage_class = ds.ImmutableTypeConversionDict
class TestImmutableMultiDict(_ImmutableDictTests):
    """Run the shared immutable-dict suite against ImmutableMultiDict."""

    storage_class = ds.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        """Hashing accounts for all values per key, not just the first."""
        cls = self.storage_class
        immutable = cls({"a": [1, 2], "b": 2})
        immutable2 = cls({"a": [1], "b": 2})
        x = {immutable}
        assert immutable in x
        assert immutable2 not in x
        x.discard(immutable)
        assert immutable not in x
        assert immutable2 not in x
        x.add(immutable2)
        assert immutable not in x
        assert immutable2 in x
        x.add(immutable)
        assert immutable in x
        assert immutable2 in x
class TestImmutableDict(_ImmutableDictTests):
    """Run the shared immutable-dict suite against ImmutableDict."""

    storage_class = ds.ImmutableDict
class TestImmutableOrderedMultiDict(_ImmutableDictTests):
    """Run the shared immutable-dict suite against ImmutableOrderedMultiDict."""

    storage_class = ds.ImmutableOrderedMultiDict

    def test_ordered_multidict_is_hashable(self):
        """Same items in a different insertion order must hash differently."""
        a = self.storage_class([("a", 1), ("b", 1), ("a", 2)])
        b = self.storage_class([("a", 1), ("a", 2), ("b", 1)])
        assert hash(a) != hash(b)
class TestMultiDict(_MutableMultiDictTests):
    """Run the shared mutable suite against MultiDict, plus specifics."""

    storage_class = ds.MultiDict

    def test_multidict_pop(self):
        """pop() returns the first value and removes all values for the key."""
        def make_d():
            return self.storage_class({"foo": [1, 2, 3, 4]})

        d = make_d()
        assert d.pop("foo") == 1
        assert not d
        d = make_d()
        assert d.pop("foo", 32) == 1
        assert not d
        d = make_d()
        assert d.pop("foos", 32) == 32
        assert d
        with pytest.raises(KeyError):
            d.pop("foos")

    def test_multidict_pop_raise_badrequestkeyerror_for_empty_list_value(self):
        """A key mapped to an empty list behaves as missing for pop()."""
        mapping = [("a", "b"), ("a", "c")]
        md = self.storage_class(mapping)
        md.setlistdefault("empty", [])
        with pytest.raises(KeyError):
            md.pop("empty")

    def test_multidict_popitem_raise_badrequestkeyerror_for_empty_list_value(self):
        """popitem() on a dict holding only an empty-list key raises."""
        mapping = []
        md = self.storage_class(mapping)
        md.setlistdefault("empty", [])
        with pytest.raises(BadRequestKeyError):
            md.popitem()

    def test_setlistdefault(self):
        """setlistdefault stores and returns the default list for a new key."""
        md = self.storage_class()
        assert md.setlistdefault("u", [-1, -2]) == [-1, -2]
        assert md.getlist("u") == [-1, -2]
        assert md["u"] == -1

    def test_iter_interfaces(self):
        """keys(), iteration, and listvalues() line up with lists()."""
        mapping = [
            ("a", 1),
            ("b", 2),
            ("a", 2),
            ("d", 3),
            ("a", 1),
            ("a", 3),
            ("d", 4),
            ("c", 3),
        ]
        md = self.storage_class(mapping)
        assert list(zip(md.keys(), md.listvalues())) == list(md.lists())
        assert list(zip(md, md.listvalues())) == list(md.lists())
        assert list(zip(md.keys(), md.listvalues())) == list(md.lists())

    def test_getitem_raise_badrequestkeyerror_for_empty_list_value(self):
        """A key mapped to an empty list behaves as missing for __getitem__."""
        mapping = [("a", "b"), ("a", "c")]
        md = self.storage_class(mapping)
        md.setlistdefault("empty", [])
        with pytest.raises(KeyError):
            md["empty"]
class TestOrderedMultiDict(_MutableMultiDictTests):
    """Run the shared mutable suite against OrderedMultiDict, plus ordering."""

    storage_class = ds.OrderedMultiDict

    def test_ordered_interface(self):
        """Insertion order is preserved across all views and mutations."""
        cls = self.storage_class

        d = cls()
        assert not d
        d.add("foo", "bar")
        assert len(d) == 1
        d.add("foo", "baz")
        # len counts distinct keys, not values
        assert len(d) == 1
        assert list(d.items()) == [("foo", "bar")]
        assert list(d) == ["foo"]
        assert list(d.items(multi=True)) == [("foo", "bar"), ("foo", "baz")]
        del d["foo"]
        assert not d
        assert len(d) == 0
        assert list(d) == []

        d.update([("foo", 1), ("foo", 2), ("bar", 42)])
        d.add("foo", 3)
        assert d.getlist("foo") == [1, 2, 3]
        assert d.getlist("bar") == [42]
        assert list(d.items()) == [("foo", 1), ("bar", 42)]
        expected = ["foo", "bar"]
        assert list(d.keys()) == expected
        assert list(d) == expected
        assert list(d.keys()) == expected
        # multi=True interleaves values in original insertion order
        assert list(d.items(multi=True)) == [
            ("foo", 1),
            ("foo", 2),
            ("bar", 42),
            ("foo", 3),
        ]
        assert len(d) == 2
        assert d.pop("foo") == 1
        assert d.pop("blafasel", None) is None
        assert d.pop("blafasel", 42) == 42
        assert len(d) == 1
        assert d.poplist("bar") == [42]
        assert not d
        assert d.get("missingkey") is None

        d.add("foo", 42)
        d.add("foo", 23)
        d.add("bar", 2)
        d.add("foo", 42)
        assert d == ds.MultiDict(d)
        # NOTE: shadows the builtin ``id`` locally (kept as in original)
        id = self.storage_class(d)
        assert d == id
        d.add("foo", 2)
        assert d != id

        d.update({"blah": [1, 2, 3]})
        assert d["blah"] == 1
        assert d.getlist("blah") == [1, 2, 3]

        # setlist works
        d = self.storage_class()
        d["foo"] = 42
        d.setlist("foo", [1, 2])
        assert d.getlist("foo") == [1, 2]
        with pytest.raises(BadRequestKeyError):
            d.pop("missing")
        with pytest.raises(BadRequestKeyError):
            d["missing"]

        # popping
        d = self.storage_class()
        d.add("foo", 23)
        d.add("foo", 42)
        d.add("foo", 1)
        assert d.popitem() == ("foo", 23)
        with pytest.raises(BadRequestKeyError):
            d.popitem()
        assert not d

        d.add("foo", 23)
        d.add("foo", 42)
        d.add("foo", 1)
        assert d.popitemlist() == ("foo", [23, 42, 1])

        with pytest.raises(BadRequestKeyError):
            d.popitemlist()

        # Unhashable
        d = self.storage_class()
        d.add("foo", 23)
        pytest.raises(TypeError, hash, d)

    def test_iterables(self):
        """CombinedMultiDict views merge the wrapped dicts (re-iterable)."""
        a = ds.MultiDict((("key_a", "value_a"),))
        b = ds.MultiDict((("key_b", "value_b"),))
        ab = ds.CombinedMultiDict((a, b))

        assert sorted(ab.lists()) == [("key_a", ["value_a"]), ("key_b", ["value_b"])]
        assert sorted(ab.listvalues()) == [["value_a"], ["value_b"]]
        assert sorted(ab.keys()) == ["key_a", "key_b"]

        assert sorted(ab.lists()) == [("key_a", ["value_a"]), ("key_b", ["value_b"])]
        assert sorted(ab.listvalues()) == [["value_a"], ["value_b"]]
        assert sorted(ab.keys()) == ["key_a", "key_b"]

    def test_get_description(self):
        """BadRequestKeyError hides the key unless show_exception is set."""
        data = ds.OrderedMultiDict()

        with pytest.raises(BadRequestKeyError) as exc_info:
            data["baz"]

        assert "baz" not in exc_info.value.get_description()
        exc_info.value.show_exception = True
        assert "baz" in exc_info.value.get_description()

        with pytest.raises(BadRequestKeyError) as exc_info:
            data.pop("baz")

        exc_info.value.show_exception = True
        assert "baz" in exc_info.value.get_description()
        exc_info.value.args = ()
        assert "baz" not in exc_info.value.get_description()
class TestTypeConversionDict:
    """Tests for TypeConversionDict's ``get(..., type=...)`` behavior."""

    storage_class = ds.TypeConversionDict

    def test_value_conversion(self):
        conv = self.storage_class(foo="1")
        assert conv.get("foo", type=int) == 1

    def test_return_default_when_conversion_is_not_possible(self):
        conv = self.storage_class(foo="bar")
        # "bar" is not an int, so the default wins.
        assert conv.get("foo", default=-1, type=int) == -1

    def test_propagate_exceptions_in_conversion(self):
        conv = self.storage_class(foo="bar")
        lookup = {"a": 1}
        # Only ValueError/TypeError fall back to the default; other
        # exceptions from the converter propagate.
        with pytest.raises(KeyError):
            conv.get("foo", type=lambda value: lookup[value])
class TestCombinedMultiDict:
    """Tests for the read-only CombinedMultiDict wrapper."""

    storage_class = ds.CombinedMultiDict

    def test_basic_interface(self):
        """Lookups span all wrapped dicts in order; writes are rejected."""
        d1 = ds.MultiDict([("foo", "1")])
        d2 = ds.MultiDict([("bar", "2"), ("bar", "3")])
        d = self.storage_class([d1, d2])

        # lookup
        assert d["foo"] == "1"
        assert d["bar"] == "2"
        assert d.getlist("bar") == ["2", "3"]

        assert sorted(d.items()) == [("bar", "2"), ("foo", "1")]
        assert sorted(d.items(multi=True)) == [("bar", "2"), ("bar", "3"), ("foo", "1")]
        assert "missingkey" not in d
        assert "foo" in d

        # type lookup
        assert d.get("foo", type=int) == 1
        assert d.getlist("bar", type=int) == [2, 3]

        # get key errors for missing stuff
        with pytest.raises(KeyError):
            d["missing"]

        # make sure that they are immutable
        with pytest.raises(TypeError):
            d["foo"] = "blub"

        # copies are mutable
        d = d.copy()
        d["foo"] = "blub"

        # make sure lists merges
        md1 = ds.MultiDict((("foo", "bar"), ("foo", "baz")))
        md2 = ds.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        assert list(x.lists()) == [("foo", ["bar", "baz", "blafasel"])]

        # make sure dicts are created properly
        assert x.to_dict() == {"foo": "bar"}
        assert x.to_dict(flat=False) == {"foo": ["bar", "baz", "blafasel"]}

    def test_length(self):
        """len() counts distinct keys across the live wrapped dicts."""
        d1 = ds.MultiDict([("foo", "1")])
        d2 = ds.MultiDict([("bar", "2")])
        assert len(d1) == len(d2) == 1
        d = self.storage_class([d1, d2])
        assert len(d) == 2
        # Mutating a wrapped dict is reflected in the combined view.
        d1.clear()
        assert len(d1) == 0
        assert len(d) == 1
class TestHeaders:
    """Tests for the Headers datastructure (case-insensitive, ordered)."""

    storage_class = ds.Headers

    def test_basic_interface(self):
        """Add/set/lookup, wsgi/string conversion, extended add."""
        headers = self.storage_class()
        headers.add("Content-Type", "text/plain")
        headers.add("X-Foo", "bar")
        # lookups are case-insensitive
        assert "x-Foo" in headers
        assert "Content-type" in headers

        # newlines in values are rejected (header injection)
        with pytest.raises(ValueError):
            headers.add("X-Example", "foo\r\n bar")

        headers["Content-Type"] = "foo/bar"
        assert headers["Content-Type"] == "foo/bar"
        assert len(headers.getlist("Content-Type")) == 1

        # list conversion
        assert headers.to_wsgi_list() == [("Content-Type", "foo/bar"), ("X-Foo", "bar")]
        assert str(headers) == "Content-Type: foo/bar\r\nX-Foo: bar\r\n\r\n"
        assert str(self.storage_class()) == "\r\n"

        # extended add: keyword args become quoted header parameters
        headers.add("Content-Disposition", "attachment", filename="foo")
        assert headers["Content-Disposition"] == "attachment; filename=foo"

        headers.add("x", "y", z='"')
        assert headers["x"] == r'y; z="\""'

        # string conversion
        headers.add("a", 1)
        assert headers["a"] == "1"

    def test_defaults_and_conversion(self):
        """Defaults, setdefault, type conversion, list-like slicing."""
        # defaults
        headers = self.storage_class(
            [
                ("Content-Type", "text/plain"),
                ("X-Foo", "bar"),
                ("X-Bar", "1"),
                ("X-Bar", "2"),
            ]
        )
        assert headers.getlist("x-bar") == ["1", "2"]
        assert headers.get("x-Bar") == "1"
        assert headers.get("Content-Type") == "text/plain"

        assert headers.setdefault("X-Foo", "nope") == "bar"
        assert headers.setdefault("X-Bar", "nope") == "1"
        assert headers.setdefault("X-Baz", "quux") == "quux"
        assert headers.setdefault("X-Baz", "nope") == "quux"
        headers.pop("X-Baz")

        # newlines are not allowed in values
        with pytest.raises(ValueError):
            self.storage_class([("X-Example", "foo\r\n bar")])

        # type conversion
        assert headers.get("x-bar", type=int) == 1
        assert headers.getlist("x-bar", type=int) == [1, 2]

        # list like operations
        assert headers[0] == ("Content-Type", "text/plain")
        assert headers[:1] == self.storage_class([("Content-Type", "text/plain")])
        del headers[:2]
        del headers[-1]
        assert headers == self.storage_class([("X-Bar", "1")])

    def test_copying(self):
        """copy() is independent of the original."""
        a = self.storage_class([("foo", "bar")])
        b = a.copy()
        a.add("foo", "baz")
        assert a.getlist("foo") == ["bar", "baz"]
        assert b.getlist("foo") == ["bar"]

    def test_popping(self):
        headers = self.storage_class([("a", 1)])
        # headers object expect string values. If a non string value
        # is passed, it tries converting it to a string
        assert headers.pop("a") == "1"
        assert headers.pop("b", "2") == "2"

        with pytest.raises(KeyError):
            headers.pop("c")

    def test_set_arguments(self):
        """set() with keyword args replaces and formats parameters."""
        a = self.storage_class()
        a.set("Content-Disposition", "useless")
        a.set("Content-Disposition", "attachment", filename="foo")
        assert a["Content-Disposition"] == "attachment; filename=foo"

    def test_reject_newlines(self):
        """Every write path rejects CR/LF in values and options."""
        h = self.storage_class()

        for variation in "foo\nbar", "foo\r\nbar", "foo\rbar":
            with pytest.raises(ValueError):
                h["foo"] = variation
            with pytest.raises(ValueError):
                h.add("foo", variation)
            with pytest.raises(ValueError):
                h.add("foo", "test", option=variation)
            with pytest.raises(ValueError):
                h.set("foo", variation)
            with pytest.raises(ValueError):
                h.set("foo", "test", option=variation)

    def test_slicing(self):
        # there's nothing wrong with these being native strings
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set("X-Foo-Poo", "bleh")
        h.set("Content-Type", "application/whocares")
        h.set("X-Forwarded-For", "192.168.0.123")
        h[:] = [(k, v) for k, v in h if k.startswith("X-")]
        assert list(h) == [("X-Foo-Poo", "bleh"), ("X-Forwarded-For", "192.168.0.123")]

    def test_bytes_operations(self):
        """Byte keys/values are latin-1 coerced; as_bytes returns bytes."""
        h = self.storage_class()
        h.set("X-Foo-Poo", "bleh")
        h.set("X-Whoops", b"\xff")
        h.set(b"X-Bytes", b"something")

        assert h.get("x-foo-poo", as_bytes=True) == b"bleh"
        assert h.get("x-whoops", as_bytes=True) == b"\xff"
        assert h.get("x-bytes") == "something"

    def test_extend(self):
        """extend() appends values for existing keys."""
        h = self.storage_class([("a", "0"), ("b", "1"), ("c", "2")])
        h.extend(ds.Headers([("a", "3"), ("a", "4")]))
        assert h.getlist("a") == ["0", "3", "4"]
        h.extend(b=["5", "6"])
        assert h.getlist("b") == ["1", "5", "6"]
        h.extend({"c": "7", "d": ["8", "9"]}, c="10")
        assert h.getlist("c") == ["2", "7", "10"]
        assert h.getlist("d") == ["8", "9"]

        # at most one positional mapping argument is accepted
        with pytest.raises(TypeError):
            h.extend({"x": "x"}, {"x": "x"})

    def test_update(self):
        """update() replaces values for existing keys."""
        h = self.storage_class([("a", "0"), ("b", "1"), ("c", "2")])
        h.update(ds.Headers([("a", "3"), ("a", "4")]))
        assert h.getlist("a") == ["3", "4"]
        h.update(b=["5", "6"])
        assert h.getlist("b") == ["5", "6"]
        h.update({"c": "7", "d": ["8", "9"]})
        assert h.getlist("c") == ["7"]
        assert h.getlist("d") == ["8", "9"]
        h.update({"c": "10"}, c="11")
        assert h.getlist("c") == ["11"]

        with pytest.raises(TypeError):
            h.extend({"x": "x"}, {"x": "x"})

    def test_setlist(self):
        """setlist replaces in place; an empty list removes the key."""
        h = self.storage_class([("a", "0"), ("b", "1"), ("c", "2")])
        h.setlist("b", ["3", "4"])
        assert h[1] == ("b", "3")
        assert h[-1] == ("b", "4")
        h.setlist("b", [])
        assert "b" not in h
        h.setlist("d", ["5"])
        assert h["d"] == "5"

    def test_setlistdefault(self):
        """setlistdefault keeps existing values, inserts missing keys."""
        h = self.storage_class([("a", "0"), ("b", "1"), ("c", "2")])
        assert h.setlistdefault("a", ["3"]) == ["0"]
        assert h.setlistdefault("d", ["4", "5"]) == ["4", "5"]

    def test_to_wsgi_list(self):
        h = self.storage_class()
        h.set("Key", "Value")
        for key, value in h.to_wsgi_list():
            assert key == "Key"
            assert value == "Value"

    def test_to_wsgi_list_bytes(self):
        h = self.storage_class()
        h.set(b"Key", b"Value")
        for key, value in h.to_wsgi_list():
            assert key == "Key"
            assert value == "Value"

    def test_equality(self):
        # test equality, given keys are case insensitive
        h1 = self.storage_class()
        h1.add("X-Foo", "foo")
        h1.add("X-Bar", "bah")
        h1.add("X-Bar", "humbug")

        h2 = self.storage_class()
        h2.add("x-foo", "foo")
        h2.add("x-bar", "bah")
        h2.add("x-bar", "humbug")

        assert h1 == h2
class TestEnvironHeaders:
    """Tests for the read-only view of headers in a WSGI environ."""

    storage_class = ds.EnvironHeaders

    def test_basic_interface(self):
        # this happens in multiple WSGI servers because they
        # use a vary naive way to convert the headers;
        broken_env = {
            "HTTP_CONTENT_TYPE": "text/html",
            "CONTENT_TYPE": "text/html",
            "HTTP_CONTENT_LENGTH": "0",
            "CONTENT_LENGTH": "0",
            "HTTP_ACCEPT": "*",
            "wsgi.version": (1, 0),
        }
        headers = self.storage_class(broken_env)
        assert headers
        # the duplicated HTTP_CONTENT_* keys must not double-count
        assert len(headers) == 3
        assert sorted(headers) == [
            ("Accept", "*"),
            ("Content-Length", "0"),
            ("Content-Type", "text/html"),
        ]
        assert not self.storage_class({"wsgi.version": (1, 0)})
        assert len(self.storage_class({"wsgi.version": (1, 0)})) == 0
        assert 42 not in headers

    def test_skip_empty_special_vars(self):
        """Empty CONTENT_TYPE/CONTENT_LENGTH are treated as absent."""
        env = {"HTTP_X_FOO": "42", "CONTENT_TYPE": "", "CONTENT_LENGTH": ""}
        headers = self.storage_class(env)
        assert dict(headers) == {"X-Foo": "42"}

        env = {"HTTP_X_FOO": "42", "CONTENT_TYPE": "", "CONTENT_LENGTH": "0"}
        headers = self.storage_class(env)
        assert dict(headers) == {"X-Foo": "42", "Content-Length": "0"}

    def test_return_type_is_str(self):
        """Values come back as str, not bytes."""
        headers = self.storage_class({"HTTP_FOO": "\xe2\x9c\x93"})
        assert headers["Foo"] == "\xe2\x9c\x93"
        assert next(iter(headers)) == ("Foo", "\xe2\x9c\x93")

    def test_bytes_operations(self):
        foo_val = "\xff"
        h = self.storage_class({"HTTP_X_FOO": foo_val})
        assert h.get("x-foo", as_bytes=True) == b"\xff"
        assert h.get("x-foo") == "\xff"
class TestHeaderSet:
    """Tests for the case-insensitive HeaderSet container."""

    storage_class = ds.HeaderSet

    def test_basic_interface(self):
        header_set = self.storage_class()
        header_set.add("foo")
        header_set.add("bar")
        # Membership and find() are case-insensitive.
        assert "Bar" in header_set
        assert header_set.find("foo") == 0
        assert header_set.find("BAR") == 1
        assert header_set.find("baz") < 0
        # discard() tolerates missing entries; find() reflects removal.
        header_set.discard("missing")
        header_set.discard("foo")
        assert header_set.find("foo") < 0
        assert header_set.find("bar") == 0

        with pytest.raises(IndexError):
            header_set.index("missing")

        assert header_set.index("bar") == 0
        assert header_set
        header_set.clear()
        assert not header_set
class TestImmutableList:
    """Tests for the hashable ImmutableList wrapper."""

    storage_class = ds.ImmutableList

    def test_list_hashable(self):
        source = (1, 2, 3, 4)
        wrapped = self.storage_class(source)
        # Hash matches the equivalent tuple, but a list never compares
        # equal to a tuple.
        assert hash(source) == hash(wrapped)
        assert source != wrapped
def make_call_asserter(func=None):
    """Utility to assert a certain number of function calls.

    :param func: Additional callback for each function call.

    .. code-block:: python

        assert_calls, func = make_call_asserter()
        with assert_calls(2):
            func()
            func()
    """
    call_count = [0]

    @contextmanager
    def asserter(count, msg=None):
        # Reset, run the block, then verify how many calls were made.
        call_count[0] = 0
        yield
        assert call_count[0] == count

    def wrapped(*args, **kwargs):
        call_count[0] += 1
        return func(*args, **kwargs) if func is not None else None

    return asserter, wrapped
class TestCallbackDict:
    """Tests that CallbackDict fires on_update only for real writes."""

    storage_class = ds.CallbackDict

    def test_callback_dict_reads(self):
        """Read-only and no-op methods must not trigger the callback."""
        assert_calls, func = make_call_asserter()
        initial = {"a": "foo", "b": "bar"}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, "callback triggered by read-only method"):
            # read-only methods
            dct["a"]
            dct.get("a")
            pytest.raises(KeyError, lambda: dct["x"])
            assert "a" in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, "callback triggered without modification"):
            # methods that may write but don't
            dct.pop("z", None)
            dct.setdefault("a")

    def test_callback_dict_writes(self):
        """Each mutating call fires exactly once; failed ones don't fire."""
        assert_calls, func = make_call_asserter()
        initial = {"a": "foo", "b": "bar"}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(8, "callback not triggered by write method"):
            # always-write methods
            dct["z"] = 123
            dct["z"] = 123  # must trigger again
            del dct["z"]
            dct.pop("b", None)
            dct.setdefault("x")
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, "callback triggered by failed del"):
            pytest.raises(KeyError, lambda: dct.__delitem__("x"))
        with assert_calls(0, "callback triggered by failed pop"):
            pytest.raises(KeyError, lambda: dct.pop("x"))
class TestCacheControl:
    """Tests for the request/response Cache-Control datastructures."""

    def test_repr(self):
        cache_control = ds.RequestCacheControl([("max-age", "0"), ("private", "True")])
        assert repr(cache_control) == "<RequestCacheControl max-age='0' private='True'>"

    def test_set_none(self):
        cache_control = ds.ResponseCacheControl([("max-age", "0")])
        # An unset directive reads as None, and assigning None keeps it unset.
        assert cache_control.no_cache is None
        cache_control.no_cache = None
        assert cache_control.no_cache is None
class TestContentSecurityPolicy:
    """Tests for the ContentSecurityPolicy header datastructure."""

    def test_construct(self):
        csp = ds.ContentSecurityPolicy([("font-src", "'self'"), ("media-src", "*")])
        assert csp.font_src == "'self'"
        assert csp.media_src == "*"
        directives = [directive.strip() for directive in csp.to_header().split(";")]
        assert "font-src 'self'" in directives
        assert "media-src *" in directives

    def test_properties(self):
        csp = ds.ContentSecurityPolicy()
        # Attribute assignment maps to dashed directive names in the header.
        csp.default_src = "* 'self' quart.com"
        csp.img_src = "'none'"
        directives = [directive.strip() for directive in csp.to_header().split(";")]
        assert "default-src * 'self' quart.com" in directives
        assert "img-src 'none'" in directives
class TestAccept:
    """Tests for the quality-sorted Accept header datastructure."""

    storage_class = ds.Accept

    def test_accept_basic(self):
        """Indexing, quality lookup, header rendering and best_match."""
        accept = self.storage_class(
            [("tinker", 0), ("tailor", 0.333), ("soldier", 0.667), ("sailor", 1)]
        )
        # check __getitem__ on indices (entries are sorted by quality)
        assert accept[3] == ("tinker", 0)
        assert accept[2] == ("tailor", 0.333)
        assert accept[1] == ("soldier", 0.667)
        # BUG FIX: was ``assert accept[0], ("sailor", 1)`` — the comma made
        # the tuple the assert *message*, so only truthiness was checked.
        assert accept[0] == ("sailor", 1)
        # check __getitem__ on string
        assert accept["tinker"] == 0
        assert accept["tailor"] == 0.333
        assert accept["soldier"] == 0.667
        assert accept["sailor"] == 1
        assert accept["spy"] == 0
        # check quality method
        assert accept.quality("tinker") == 0
        assert accept.quality("tailor") == 0.333
        assert accept.quality("soldier") == 0.667
        assert accept.quality("sailor") == 1
        assert accept.quality("spy") == 0
        # check __contains__
        assert "sailor" in accept
        assert "spy" not in accept
        # check index method
        assert accept.index("tinker") == 3
        assert accept.index("tailor") == 2
        assert accept.index("soldier") == 1
        assert accept.index("sailor") == 0
        with pytest.raises(ValueError):
            accept.index("spy")
        # check find method
        assert accept.find("tinker") == 3
        assert accept.find("tailor") == 2
        assert accept.find("soldier") == 1
        assert accept.find("sailor") == 0
        assert accept.find("spy") == -1
        # check to_header method
        assert accept.to_header() == "sailor,soldier;q=0.667,tailor;q=0.333,tinker;q=0"
        # check best_match method
        assert (
            accept.best_match(["tinker", "tailor", "soldier", "sailor"], default=None)
            == "sailor"
        )
        assert (
            accept.best_match(["tinker", "tailor", "soldier"], default=None)
            == "soldier"
        )
        assert accept.best_match(["tinker", "tailor"], default=None) == "tailor"
        # q=0 means explicitly unacceptable
        assert accept.best_match(["tinker"], default=None) is None
        assert accept.best_match(["tinker"], default="x") == "x"

    def test_accept_wildcard(self):
        """A wildcard with q=0 must not match anything."""
        accept = self.storage_class([("*", 0), ("asterisk", 1)])
        assert "*" in accept
        assert accept.best_match(["asterisk", "star"], default=None) == "asterisk"
        assert accept.best_match(["star"], default=None) is None

    def test_accept_keep_order(self):
        """Ties in quality are broken by the caller's preference order."""
        accept = self.storage_class([("*", 1)])
        assert accept.best_match(["alice", "bob"]) == "alice"
        assert accept.best_match(["bob", "alice"]) == "bob"
        accept = self.storage_class([("alice", 1), ("bob", 1)])
        assert accept.best_match(["alice", "bob"]) == "alice"
        assert accept.best_match(["bob", "alice"]) == "bob"

    def test_accept_wildcard_specificity(self):
        """Specific entries beat the wildcard regardless of order."""
        accept = self.storage_class([("asterisk", 0), ("star", 0.5), ("*", 1)])
        assert accept.best_match(["star", "asterisk"], default=None) == "star"
        assert accept.best_match(["asterisk", "star"], default=None) == "star"
        assert accept.best_match(["asterisk", "times"], default=None) == "times"
        assert accept.best_match(["asterisk"], default=None) is None

    def test_accept_equal_quality(self):
        """``best`` returns the first entry on equal quality."""
        accept = self.storage_class([("a", 1), ("b", 1)])
        assert accept.best == "a"
class TestMIMEAccept:
    """Table-driven tests for MIME type matching with wildcards."""

    @pytest.mark.parametrize(
        ("values", "matches", "default", "expect"),
        [
            # type wildcard matches a concrete subtype
            ([("text/*", 1)], ["text/html"], None, "text/html"),
            # no match falls back to the default
            ([("text/*", 1)], ["image/png"], "text/plain", "text/plain"),
            ([("text/*", 1)], ["image/png"], None, None),
            # exact entries beat */* regardless of candidate order
            (
                [("*/*", 1), ("text/html", 1)],
                ["image/png", "text/html"],
                None,
                "text/html",
            ),
            (
                [("*/*", 1), ("text/html", 1)],
                ["image/png", "text/plain"],
                None,
                "image/png",
            ),
            (
                [("*/*", 1), ("text/html", 1), ("image/*", 1)],
                ["image/png", "text/html"],
                None,
                "text/html",
            ),
            (
                [("*/*", 1), ("text/html", 1), ("image/*", 1)],
                ["text/plain", "image/png"],
                None,
                "image/png",
            ),
            # media type parameters participate in matching
            (
                [("text/html", 1), ("text/html; level=1", 1)],
                ["text/html;level=1"],
                None,
                "text/html;level=1",
            ),
        ],
    )
    def test_mime_accept(self, values, matches, default, expect):
        accept = ds.MIMEAccept(values)
        match = accept.best_match(matches, default=default)
        assert match == expect
class TestLanguageAccept:
    """Table-driven tests for language tag fallback matching."""

    @pytest.mark.parametrize(
        ("values", "matches", "default", "expect"),
        (
            # region-specific accept falls back to the bare language
            ([("en-us", 1)], ["en"], None, "en"),
            # underscore and dash separators are interchangeable
            ([("en", 1)], ["en_US"], None, "en_US"),
            ([("en-GB", 1)], ["en-US"], None, None),
            ([("de_AT", 1), ("de", 0.9)], ["en"], None, None),
            ([("de_AT", 1), ("de", 0.9), ("en-US", 0.8)], ["de", "en"], None, "de"),
            ([("de_AT", 0.9), ("en-US", 1)], ["en"], None, "en"),
            ([("en-us", 1)], ["en-us"], None, "en-us"),
            ([("en-us", 1)], ["en-us", "en"], None, "en-us"),
            ([("en-GB", 1)], ["en-US", "en"], "en-US", "en"),
            ([("de_AT", 1)], ["en-US", "en"], "en-US", "en-US"),
            # fallback only strips the region, never the primary tag
            ([("aus-EN", 1)], ["aus"], None, "aus"),
            ([("aus", 1)], ["aus-EN"], None, "aus-EN"),
        ),
    )
    def test_best_match_fallback(self, values, matches, default, expect):
        accept = ds.LanguageAccept(values)
        best = accept.best_match(matches, default=default)
        assert best == expect
class TestFileStorage:
    """Behavioral checks for ``FileStorage`` proxying and saving."""

    storage_class = ds.FileStorage

    def test_mimetype_always_lowercase(self):
        """The reported mimetype is normalized to lower case."""
        storage = self.storage_class(content_type="APPLICATION/JSON")
        assert storage.mimetype == "application/json"

    @pytest.mark.parametrize("data", [io.StringIO("one\ntwo"), io.BytesIO(b"one\ntwo")])
    def test_bytes_proper_sentinel(self, data):
        """Line iteration terminates for both text and byte streams."""
        storage = self.storage_class(data)
        last = -1
        for last, _line in enumerate(storage):
            # guard against iterating forever on a wrong readline sentinel
            assert last < 2
        assert last == 1

    @pytest.mark.parametrize("stream", (tempfile.SpooledTemporaryFile, io.BytesIO))
    def test_proxy_can_access_stream_attrs(self, stream):
        """``SpooledTemporaryFile`` doesn't implement some of
        ``IOBase``. Ensure that ``FileStorage`` can still access the
        attributes from the backing file object.

        https://github.com/pallets/werkzeug/issues/1344
        https://github.com/python/cpython/pull/3249
        """
        proxied = self.storage_class(stream=stream())
        for name in ("fileno", "writable", "readable", "seekable"):
            assert hasattr(proxied, name)

    def test_save_to_pathlib_dst(self, tmp_path):
        """``save`` accepts a ``pathlib.Path`` destination."""
        src = tmp_path / "src.txt"
        src.write_text("test")
        dst = tmp_path / "dst.txt"
        with src.open("rb") as f:
            self.storage_class(f).save(dst)
        assert dst.read_text() == "test"

    def test_save_to_bytes_io(self):
        """``save`` accepts an in-memory byte buffer as destination."""
        buf = io.BytesIO()
        self.storage_class(io.BytesIO(b"one\ntwo")).save(buf)
        assert buf.getvalue() == b"one\ntwo"

    def test_save_to_file(self, tmp_path):
        """``save`` accepts an already-open binary file object."""
        path = tmp_path / "file.data"
        with path.open("wb") as dst:
            self.storage_class(io.BytesIO(b"one\ntwo")).save(dst)
        with path.open("rb") as src:
            assert src.read() == b"one\ntwo"
@pytest.mark.parametrize("ranges", ([(0, 1), (-5, None)], [(5, None)]))
def test_range_to_header(ranges):
    """A ``Range`` rendered to a header parses back to the same ranges.

    Fix: the range unit was misspelled ``"byes"``; use the standard
    ``"bytes"`` unit (RFC 7233), consistent with
    ``test_range_validates_ranges`` below.
    """
    header = ds.Range("bytes", ranges).to_header()
    r = http.parse_range_header(header)
    assert r.ranges == ranges
@pytest.mark.parametrize(
    "ranges", ([(0, 0)], [(None, 1)], [(1, 0)], [(0, 1), (-5, 10)])
)
def test_range_validates_ranges(ranges):
    # Every parametrized value is malformed: an empty span, a missing start
    # with an explicit end, an end before the start, or a suffix range mixed
    # with an absolute one. Construction must reject them.
    with pytest.raises(ValueError):
        ds.Range("bytes", ranges)
|
pallets/werkzeug
|
tests/test_datastructures.py
|
Python
|
bsd-3-clause
| 39,436
|
[
"TINKER"
] |
d96f4fe973f781eb660c69695e4808bf4729d762dd5323af3b29c43372af54c7
|
from eventlet import hubs
from eventlet.support import greenlets as greenlet
__all__ = ['Event']
class NOT_USED:
    # Sentinel type: ``Event._result`` holds this singleton until a value
    # (or exception) has been sent, distinguishing "no result yet" from a
    # legitimate ``None`` result.
    def __repr__(self):
        return 'NOT_USED'

# Replace the class with its single instance; code below compares with
# ``is NOT_USED`` (identity), never equality.
NOT_USED = NOT_USED()
class Event(object):
    """An abstraction where an arbitrary number of coroutines
    can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ
    in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and
    are the basis for how
    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
    is implemented.

    >>> from eventlet import event
    >>> import eventlet
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = eventlet.spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    # _result holds the sent value, or the NOT_USED sentinel before send().
    _result = None
    # _exc holds the exception info tuple to raise in waiters, or None.
    _exc = None

    def __init__(self):
        # Greenlets currently blocked in wait(); send() wakes them via the hub.
        self._waiters = set()
        self.reset()

    def __str__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset(self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready(self):
        """ Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one."""
        return self._result is not NOT_USED

    def has_exception(self):
        # True once send_exception() (or send() with exc) has been called.
        return self._exc is not None

    def has_result(self):
        # True once a normal (non-exception) result has been sent.
        return self._result is not NOT_USED and self._exc is None

    def poll(self, notready=None):
        # Non-blocking variant of wait(): returns *notready* if nothing has
        # been sent yet. Note it re-raises if an exception was sent.
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to calls the function in except or finally)
    def poll_exception(self, notready=None):
        # Like poll(), but only "ready" when an exception was sent.
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result(self, notready=None):
        # Like poll(), but only "ready" when a normal result was sent.
        if self.has_result():
            return self.wait()
        return notready

    def wait(self):
        """Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to
        :meth:`send`.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def wait_on():
        ...    retval = evt.wait()
        ...    print "waited for", retval
        >>> _ = eventlet.spawn(wait_on)
        >>> evt.send('result')
        >>> eventlet.sleep(0)
        waited for result

        Returns immediately if the event has already
        occurred.

        >>> evt.wait()
        'result'
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            # Not sent yet: register as a waiter and yield to the hub. The
            # hub switches back into us (from _do_send) with the result as
            # the switch value, which becomes our return value.
            self._waiters.add(current)
            try:
                return hubs.get_hub().switch()
            finally:
                # Always deregister, whether woken normally or thrown into.
                self._waiters.discard(current)
        # Already sent: deliver synchronously without touching the hub.
        if self._exc is not None:
            current.throw(*self._exc)
        return self._result

    def send(self, result=None, exc=None):
        """Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def waiter():
        ...     print 'about to wait'
        ...     result = evt.wait()
        ...     print 'waited for', result
        >>> _ = eventlet.spawn(waiter)
        >>> eventlet.sleep(0)
        about to wait
        >>> evt.send('a')
        >>> eventlet.sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops')
        Traceback (most recent call last):
        ...
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        # Normalize a bare exception (or exc type) into a throw()-style tuple.
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        if self._waiters:
            # Wake waiters on the next hub iteration rather than switching
            # now: send() must never unschedule the calling greenthread.
            # A snapshot (copy) is passed so waiters added later are not
            # woken by this call.
            hub.schedule_call_global(
                0, self._do_send, self._result, self._exc, self._waiters.copy())

    def _do_send(self, result, exc, waiters):
        # Runs inside the hub: hand the result (or raise the exception) to
        # each waiter that is still registered. The membership check skips
        # greenlets that timed out or were killed in the meantime.
        while waiters:
            waiter = waiters.pop()
            if waiter in self._waiters:
                if exc is None:
                    waiter.switch(result)
                else:
                    waiter.throw(*exc)

    def send_exception(self, *args):
        """Same as :meth:`send`, but sends an exception to waiters."""
        # the arguments and the same as for greenlet.throw
        return self.send(None, args)
|
JeremyGrosser/python-eventlet
|
eventlet/event.py
|
Python
|
mit
| 5,664
|
[
"VisIt"
] |
b3b91a2fd1f9abf1487e866bc9f1854d5b499f688125ced2064128cf564bb468
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# .. _salt-fingering-example:
#
# .. py:currentmodule:: dolfin_adjoint
#
# Generalised stability analysis of double-diffusive salt fingering
# =================================================================
#
# .. sectionauthor:: Patrick E. Farrell <patrick.farrell@maths.ox.ac.uk>
#
# This demo solves example 4.2 of :cite:`farrell2012c`.
#
# Background
# **********
#
# In the ocean, the diffusivity coefficient of temperature is approximately two
# orders of magnitude larger than the diffusivity coefficient of salinity.
# Suppose warm salty water lies above colder, less salty water. If a parcel of
# warm salty water sinks downwards into the colder region, the heat of the
# parcel will diffuse away much faster than its salt, thus making the parcel
# denser, and causing it to sink further. Similarly, if a parcel of cold, less
# salty water rises into the warmer region, it will gain heat from its
# surroundings much faster than it will gain salinity, making the parcel more
# buoyant. This phenomenon is referred to as ''salt fingering''
# :cite:`stern1960` and has been observed in many real-world oceanographic
# contexts :cite:`turner1985`.
#
# Ozgokmen and Esenkov :cite:`ozgokmen1998b` used a numerical model to
# investigate asymmetry in the growth of salt fingers caused by nonlinearities
# in the equation of state. In this work, we investigate the stability of the
# proposed configuration to small perturbations. Generalised stability theory
# is an extension of asymptotic linear stability theory to finite time horizons,
# and requires computing the singular value decomposition of the model
# *propagator*, whose action requires the solution of the tangent linear and
# adjoint models.
#
# Problem definition
# ******************
#
# The equations describing the system are the two-dimensional
# vorticity-streamfunction formulation of the time-dependent Navier--Stokes
# equations, coupled to two advection equations for temperature and salinity:
#
# .. math::
# \frac{\partial \zeta}{\partial t} + \nabla^{\perp} \psi \cdot \nabla \zeta &= \frac{\textrm{Ra}}{\textrm{Pr}}\left(\frac{\partial T}{\partial x} - \frac{1}{R_{\rho}^0} \frac{\partial S}{\partial x}\right) + \nabla^2 \zeta, \\
# \frac{\partial T}{\partial t} + \nabla^{\perp} \psi \cdot \nabla T &= \frac{1}{\textrm{Pr}} \nabla^2 T, \\
# \frac{\partial S}{\partial t} + \nabla^{\perp} \psi \cdot \nabla S &= \frac{1}{\textrm{Sc}} \nabla^2 S, \\
# \nabla^2 \psi &= \zeta,
#
# where :math:`\zeta` is the vorticity, :math:`\psi` is the streamfunction,
# :math:`T` is the temperature, :math:`S` is the salinity, and :math:`\textrm{Ra}`,
# :math:`\textrm{Sc}`, :math:`\textrm{Pr}` and :math:`{R_{\rho}^0}` are nondimensional parameters.
# Periodic boundary conditions are applied on the left and right boundaries.
# The configuration consists of two well-mixed layers (i.e., of homogeneous
# temperature and salinity) separated by an interface. To activate the
# instability, :cite:`ozgokmen1998b` add a sinusoidal perturbation to the initial
# salinity field.
#
# Implementation
# **************
#
# We start our implementation by importing the :py:mod:`dolfin` and
# :py:mod:`dolfin_adjoint` modules
from dolfin import *
from dolfin_adjoint import *
# Next we create a 50 x 50 regular mesh of the rectangle :math:`[0, 1] \times
# [0, 2]`. This mesh is quite coarse so that the demo runs in approximately ten
# minutes; for production computations, this might be run at 300 x 300 or 500 x
# 500.
# NOTE(review): RectangleMesh(x0, y0, x1, y1, nx, ny) is the legacy DOLFIN
# signature used by this demo; newer DOLFIN releases take Point arguments.
mesh = RectangleMesh(0, 0, 1, 2, 50, 50)
# Computing the singular value decomposition of the propagator requires many
# actions of the propagator, the operator that maps perturbations in the input
# to perturbations in the output at some finite time later. (The propagator is
# typically dense, and so the SVD is computed matrix-free.) Each action requires
# the solution of the tangent linear and adjoint systems. Since the same
# equations are solved over and over for each action, dolfin-adjoint can
# optionally cache the LU factorizations to greatly speed up subsequent
# propagator actions.
parameters["adjoint"]["cache_factorizations"] = True
# Here we enforce the periodic boundary conditions that map the right-hand
# boundary to the left-hand boundary. The :py:func:`inside` function indicates
# which boundary is to be mapped *to* (here the left); the :py:func:`map`
# function maps from the right-hand boundary to the left-hand boundary.
class PeriodicBoundary(SubDomain):
    """Periodic map identifying the right edge (x = 1) with the left (x = 0)."""

    def inside(self, x, on_boundary):
        # The target of the periodic map is the left-hand edge.
        return on_boundary and x[0] == 0.0

    def map(self, x, y):
        # Shift a point on the right-hand edge one unit left; keep its height.
        y[0] = x[0] - 1
        y[1] = x[1]

pbc = PeriodicBoundary()
# Now we declare our function spaces. Since the vorticity-streamfunction
# formulation no longer has a divergence constraint, we can use piecewise linear
# Galerkin finite elements for every prognostic field, without concern for
# inf-sup stability conditions.
V = FunctionSpace(mesh, "CG", 1, constrained_domain=pbc)  # vorticity
P = FunctionSpace(mesh, "CG", 1, constrained_domain=pbc)  # streamfunction
T = FunctionSpace(mesh, "CG", 1, constrained_domain=pbc)  # temperature
S = FunctionSpace(mesh, "CG", 1, constrained_domain=pbc)  # salinity
Z = MixedFunctionSpace([V, P, T, S])
# We impose that the streamfunction is zero on the top and bottom.
streamfunction_bc_top = DirichletBC(Z.sub(1), 0.0, "on_boundary && near(x[1], 2.0)")
streamfunction_bc_bot = DirichletBC(Z.sub(1), 0.0, "on_boundary && near(x[1], 0.0)")
bcs = [streamfunction_bc_top, streamfunction_bc_bot]
# Set parameters for the timestepping (implicit midpoint) and
# values of the nondimensional parameters (Ra, Pr, Sc, R_rho^0 from the
# problem definition above).
dt = Constant(0.001)   # timestep size
endT = 0.05            # final time
theta = 0.5            # theta = 0.5 gives implicit midpoint
Ra = Constant(1*10**6)
Pr = Constant(7)
Sc = Constant(700)
Rrho = Constant(1.8)
# Now we configure the initial conditions of :cite:`ozgokmen1998b`.
# Since we want to investigate the stability of perturbations to
# salinity, we will configure the model so that it propagates a
# scalar field called "InitialSalinity" to a scalar field called
# "FinalSalinity". Therefore the steps involved in setting up the
# initial condition are:
#
# 1. Project the initial salinity field to the salinity function space
# 2. Project that field and the initial conditions for vorticity and
# temperature into the mixed function space, while simultaneously
# solving for the streamfunction.
def get_ic():
    """Build the initial mixed state (vorticity, streamfunction, T, S).

    The salinity IC is interpolated as a standalone Function named
    "InitialSalinity" so that dolfin-adjoint treats the model as a map
    from this field to "FinalSalinity". The streamfunction is solved for
    simultaneously while the other fields are projected into Z.
    """
    class InitialSalinity(Expression):
        def eval(self, values, x):
            # salinity initial condition: salty on top, fresh on the bottom, and a wavy
            # interface in between
            if x[1] > 1.1 + 0.016*cos(10*pi*x[0]):
                values[0] = 1.0
            elif x[1] < 0.9 + 0.016*cos(10*pi*x[0]):
                values[0] = 0.0
            else:
                # linear ramp across the perturbed interface
                values[0] = 5*(x[1]-0.016*cos(10*pi*x[0])) - 4.5

    class InitialTemperature(Expression):
        def eval(self, values, x):
            # temperature initial condition: warm on top, cool on bottom
            if x[1] > 1.1:
                values[0] = 1.0
            elif x[1] < 0.9:
                values[0] = 0.0
            else:
                # linear ramp across the (unperturbed) interface
                values[0] = 5*x[1] - 4.5

    # Named so dolfin-adjoint can identify it as the propagator input.
    salinity_ic = interpolate(InitialSalinity(), S, name="InitialSalinity")
    zeta = Constant(0)  # initially at rest
    t = InitialTemperature()
    s = salinity_ic

    z_test = TestFunction(Z)
    (zeta_test, p_test, t_test, s_test) = split(z_test)
    z = Function(Z, name="State")
    (zeta_trial, p_trial, t_trial, s_trial) = split(z)

    # project zeta, t, s; solve for the streamfunction p
    # (the grad-grad block is the Poisson operator for p, with -<p_test, zeta>
    # as its right-hand side, i.e. solving nabla^2 psi = zeta).
    a = (inner(zeta_test, zeta_trial)*dx +
         inner(t_test, t_trial)*dx +
         inner(s_test, s_trial)*dx +
         inner(grad(p_test), grad(p_trial))*dx)
    L = (inner(zeta_test, zeta)*dx +
         inner(t_test, t)*dx +
         inner(s_test, s)*dx -
         inner(p_test, zeta)*dx)
    F = a - L
    solve(F == 0, z, bcs, solver_parameters={"newton_solver": {"linear_solver": "lu"}})
    return z
#
# .. image:: salinity-ic.png
# :scale: 100
# :align: center
# Finally, once we have the mixed function state (zeta, p, t, s) at the end of
# the run, we need to project out the salinity. dolfin-adjoint considers whole
# functions, not parts of mixed function spaces, and hence the final salinity
# component must be projected to the salinity space to ensure that the model is
# seen as a map from the initial salinity to the final salinity.
def project_salinity(z_final):
    """Project the salinity component of the mixed state onto the space S.

    dolfin-adjoint tracks whole functions, not slices of mixed spaces, so
    the final salinity is projected out as a standalone Function named
    "FinalSalinity" — the output endpoint of the propagator.
    """
    return project(split(z_final)[-1], S, name="FinalSalinity")
# The main loop of the forward model. Compute the initial conditions, advance
# the equations forward in time, and then compute the final salinity.
def main():
    """Run the forward model: set ICs, timestep to endT, project salinity.

    NOTE(review): this function has no explicit return (it yields None);
    the __main__ block binds ``z = main()`` but never uses it — presumably
    the ``project_salinity`` call is what records "FinalSalinity" on the
    dolfin-adjoint tape. Confirm before adding a return value.
    """
    # This function takes the theta-weighted average of the old
    # and new values at a timestep. This is used in the timestepping
    # later.
    def cn(old, new):
        return (1-theta)*old + theta*new

    # Define the :math:`\nabla^\perp` operator (the 2D equivalent of
    # the cross product) and advection flux operators.
    def grad_perp(field):
        x = grad(field)
        return as_vector([-x[1], x[0]])

    def J(test, stream, tracer):
        # weak-form advection of `tracer` by the velocity grad_perp(stream)
        return -inner(grad(test), tracer*(grad_perp(stream)))*dx

    z_old = get_ic()
    (zeta_old, p_old, t_old, s_old) = split(z_old)
    store(z_old, time=0.0)

    z_test = TestFunction(Z)
    (zeta_test, p_test, t_test, s_test) = split(z_test)
    z = Function(Z, name="NextState")
    (zeta, p, t, s) = split(z)

    # theta-averaged fields for the diffusive terms (implicit midpoint)
    t_cn = cn(t_old, t)
    s_cn = cn(s_old, s)
    zeta_cn = cn(zeta_old, zeta)

    time = 0.0
    while time < endT:
        # Coupled residual: vorticity transport with buoyancy forcing,
        # temperature and salinity advection-diffusion, and the Poisson
        # equation linking streamfunction and vorticity.
        F = (inner((zeta - zeta_old)/dt, zeta_test)*dx
             + (1-theta)* J(zeta_test, p_old, zeta_old)
             + (theta) * J(zeta_test, p, zeta)
             - Ra*(1.0/Pr) * inner(zeta_test, grad(t_cn)[0] - (1.0/Rrho)*grad(s_cn)[0])*dx
             + inner(grad(zeta_test), grad(zeta_cn))*dx
             + inner((t - t_old)/dt, t_test)*dx
             + (1-theta)* J(t_test, p_old, t_old)
             + (theta) * J(t_test, p, t)
             + (1.0/Pr) * inner(grad(t_test), grad(t_cn))*dx
             + inner((s - s_old)/dt, s_test)*dx
             + (1-theta)* J(s_test, p_old, s_old)
             + (theta) * J(s_test, p, s)
             + (1.0/Sc) * inner(grad(s_test), grad(s_cn))*dx
             + inner(grad(p_test), grad(p))*dx
             + inner(p_test, zeta)*dx)
        solve(F == 0, z, bcs=bcs, J=derivative(F, z), solver_parameters=
              {"newton_solver": {"maximum_iterations": 20, "linear_solver": "mumps"}})
        z_old.assign(z)
        time += float(dt)
        store(z_old, time=time)

    # Project out the final salinity (see project_salinity's docstring).
    s = project_salinity(z_old)
# I/O functions for the forward and stability runs. First, define a function to
# perform the I/O during the forward run. These PVD files store the forward
# simulation results for visualisation in paraview.
zeta_pvd = File("results/velocity.pvd")
p_pvd = File("results/streamfunction.pvd")
t_pvd = File("results/temperature.pvd")
s_pvd = File("results/salinity.pvd")

def store(z, time):
    """Write the four components of the mixed state to the PVD files above."""
    # Only rank 0 prints the progress message; the File writes themselves
    # are performed on every rank.
    if MPI.rank(mpi_comm_world()) == 0:
        info_blue("Storing variables at t=%s" % time)
    # NOTE(review): the first component of Z is the vorticity, but it is
    # renamed "Velocity"/"u" and written to velocity.pvd — confirm whether
    # this labelling is intentional.
    (u, p, t, s) = z.split()
    u.rename("Velocity", "u")
    p.rename("Pressure", "p")
    t.rename("Temperature", "t")
    s.rename("Salinity", "s")
    zeta_pvd << (u, time)
    p_pvd << (p, time)
    t_pvd << (t, time)
    s_pvd << (s, time)
# Next, the I/O function for the output of the generalised stability analysis
# (gst stands for generalised stability theory).
s_in_pvd = File("results/gst_input_s.pvd")
s_out_pvd = File("results/gst_output_s.pvd")

def store_gst(z, io, i):
    """Write the i-th singular vector to PVD and XDMF files.

    ``io`` selects whether ``z`` is an input ("input") or an output
    ("output") singular vector of the propagator; any other value is a
    no-op.
    """
    if io == "input":
        z.rename("SalinityIn%d" % i, "gst_in_%d" % i)
        pvd, path = s_in_pvd, "results/gst_input_%s.xdmf" % i
    elif io == "output":
        z.rename("SalinityOut%d" % i, "gst_out_%d" % i)
        pvd, path = s_out_pvd, "results/gst_output_%s.xdmf" % i
    else:
        return
    pvd << (z, float(i))
    File(path) << z
if __name__ == "__main__":
    # First, run the forward model, building the graph:
    z = main()
    # Now take the singular value decomposition of the propagator that maps
    # perturbations to initial salinity forwards in time to perturbations in final
    # salinity. This requires that libadjoint was compiled with support for SLEPc:
    gst = compute_gst("InitialSalinity", "FinalSalinity", nsv=2)
    # Now fetch the results of the SVD. ncv is the number of converged
    # singular triplets actually available.
    for i in range(gst.ncv):
        (sigma, u, v) = gst.get_gst(i, return_vectors=True)
        # NOTE: Python 2 print statement — this demo predates Python 3.
        print "Singular value: ", sigma
        store_gst(v, "input", i)
        store_gst(u, "output", i)
# The example code can be found in ``examples/salt-fingering`` in the ``dolfin-adjoint``
# source tree, and executed as follows:
# .. code-block:: bash
# $ mpiexec -n 4 python salt-fingering.py
# ...
# 1 EPS nconv=2 Values (Errors) 1.13047e+06GST calculation took 17 multiplications of L^*L.
# GST calculation took 17 multiplications of L^*L.
# Singular value: 1063.23627036
# Singular value: 1062.77728405
# The fact that the singular values are greater than 1 indicates that the system
# is unstable to the perturbations identified.
# This image shows the leading initial perturbation and the arising final perturbation.
# The perturbation selectively promotes the growth of some fingers, and retards the
# growth of others.
# .. image:: salinity-combined.png
# :scale: 100
# :align: center
# .. rubric:: References
# .. bibliography:: /documentation/salt-fingering/salt-fingering.bib
# :cited:
# :labelprefix: 6E-
|
pf4d/dolfin-adjoint
|
examples/salt-fingering/salt-fingering.py
|
Python
|
lgpl-3.0
| 13,660
|
[
"ParaView"
] |
b1e62a39f40b2d175a60fb96270ae57fd107c5a9f0b4b1ba3ecfe70bba44e51f
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from peacock.Input.ParamsTable import ParamsTable
from peacock.utils import Testing, InputTesting
from peacock.Input.ParameterInfo import ParameterInfo
from peacock.Input.BlockInfo import BlockInfo
from PyQt5.QtWidgets import QFileDialog, QApplication
from mock import patch
class Tests(Testing.PeacockTester):
    """GUI tests for ``ParamsTable``: renaming, adding, removing and editing
    parameters, plus file pickers and watched-block combo updates."""

    qapp = QApplication([])

    def setUp(self):
        super(Tests, self).setUp()
        self.table = None
        self.changed = 0
        # Fix: initialize the counter that commentsChanged() increments;
        # previously the first call would raise AttributeError.
        self.comments_changed = 0
        self.block_list_requested = 0
        self.block_children = ["child0", "child1", "child2"]

    def commentsChanged(self):
        """Slot counting comment-change notifications."""
        self.comments_changed += 1

    def createParam(self, name, value="", cpp_type="string", options=None, required=False, user_added=False, basic_type="String"):
        """Build a ParameterInfo with the given attributes.

        Fix: ``options`` previously defaulted to a mutable ``[]`` shared
        across calls; use ``None`` and substitute a fresh list instead.
        """
        p = ParameterInfo(None, name)
        p.value = value
        p.cpp_type = cpp_type
        p.basic_type = basic_type
        p.options = [] if options is None else options
        p.required = required
        p.user_added = user_added
        return p

    def needBlockList(self, w, blocks):
        """Slot answering a table's request for children of watched blocks."""
        self.block_list_requested += 1
        for b in blocks:
            w.setWatchedBlockList(b, self.block_children)

    def onChanged(self):
        """Slot counting generic change notifications."""
        self.changed += 1

    def createTable(self, params):
        """Build a ParamsTable for a /Foo block holding *params*."""
        b = BlockInfo(None, "/Foo")
        for p in params:
            b.addParameter(p)
        # VariableName params watch these block paths for children
        tmap = {"VariableName": ["/Variables", "/AuxVariables"]}
        t = ParamsTable(b, params, tmap)
        t.resize(480, 480)
        t.addName("some name")
        t.addName("some name") # shouldn't be a problem
        t.addUserParam("user_param")
        t.needBlockList.connect(lambda paths: self.needBlockList(t, paths))
        t.changed.connect(self.onChanged)
        t.updateWatchers()
        if params:
            self.assertEqual(self.block_list_requested, 1)
        t.show()
        return t

    def createParams(self):
        """A representative mix of parameter types (note: no p6)."""
        params = []
        options = ["option_0", "option_1", "option_2"]
        params.append(self.createParam("p0"))
        params.append(self.createParam("p1", value="some val", required=True))
        params.append(self.createParam("p2", cpp_type="FileName"))
        params.append(self.createParam("p3", cpp_type="FileNameNoExtension"))
        params.append(self.createParam("p4", cpp_type="MeshFileName"))
        params.append(self.createParam("p5", options=options))
        params.append(self.createParam("p7", cpp_type="vector", options=options, basic_type="Array"))
        params.append(self.createParam("p8"))
        params.append(self.createParam("p9", cpp_type="VariableName"))
        params.append(self.createParam("p10", cpp_type="vector<VariableName>", basic_type="Array"))
        return params

    def testEmpty(self):
        """A table with no parameters has no rows and tolerates watch updates."""
        b = BlockInfo(None, "/Foo")
        t = ParamsTable(b, [], {})
        t.needBlockList.connect(lambda paths: self.needBlockList(t, paths))
        self.assertEqual(t.rowCount(), 0)
        t.setWatchedBlockList("/Bar", [])

    def testParamRename(self):
        """Renames only reach the block on save() and are undone by reset()."""
        t = self.createTable(self.createParams())
        row = t.findRow("user_param")
        self.assertEqual(t.block.getParamInfo("user_param"), None)
        InputTesting.changeTableCell(t, "user_param", 0, "new_param")
        new_row = t.findRow("new_param")
        self.assertEqual(row, new_row)
        t.save()
        self.assertNotEqual(t.block.getParamInfo("new_param"), None)
        InputTesting.changeTableCell(t, "new_param", 0, "new_param1")
        new_row = t.findRow("new_param1")
        self.assertEqual(row, new_row)
        # the block still holds the last saved name
        self.assertNotEqual(t.block.getParamInfo("new_param"), None)
        t.reset()
        self.assertNotEqual(t.block.getParamInfo("new_param"), None)
        new_row = t.findRow("new_param1")
        self.assertEqual(new_row, -1)
        new_row = t.findRow("new_param")
        self.assertEqual(new_row, row)

    def testParamRemoved(self):
        """Removing a user param takes effect on save() and reverts on reset()."""
        t = self.createTable(self.createParams())
        t.save()
        count_before = t.rowCount()
        row = t.findRow("user_param")
        InputTesting.clickTableButton(t, "user_param", 2)
        self.assertEqual(t.rowCount(), count_before - 1)
        # block unchanged until save()
        self.assertNotEqual(t.block.getParamInfo("user_param"), None)
        new_row = t.findRow("user_param")
        self.assertEqual(new_row, -1)
        t.reset()
        self.assertNotEqual(t.block.getParamInfo("user_param"), None)
        new_row = t.findRow("user_param")
        self.assertEqual(new_row, row)
        InputTesting.clickTableButton(t, "user_param", 2)
        t.save()
        self.assertEqual(t.block.getParamInfo("user_param"), None)

    def testParamAdded(self):
        """Added user params appear at the bottom; save() commits, reset() drops."""
        t = self.createTable(self.createParams())
        t.save()
        count_before = t.rowCount()
        t.addUserParam("new_param")
        count_after = t.rowCount()
        self.assertEqual(count_before+1, count_after)
        row = t.findRow("new_param")
        self.assertEqual(row, count_after-1)
        self.assertEqual(t.block.getParamInfo("new_param"), None)
        t.reset()
        row = t.findRow("new_param")
        self.assertEqual(row, -1)
        count_after = t.rowCount()
        self.assertEqual(count_before, count_after)
        t.addUserParam("new_param")
        t.save()
        self.assertNotEqual(t.block.getParamInfo("new_param"), None)

    def getItem(self, t, param, col):
        """Return the QTableWidgetItem for (param row, col)."""
        row = t.findRow(param)
        item = t.item(row, col)
        return item

    def changeParam(self, t, param, col, new_val, final_value=None, button=False):
        """Edit a cell/combo/button, save, and verify the block's value.

        col 1: value cell, col 2: value widget (combo or file button),
        col 3: comments cell. *final_value* overrides the expected value
        (e.g. for multi-select combos that accumulate selections).
        """
        if col == 1 or col == 3:
            InputTesting.changeTableCell(t, param, col, new_val)
        elif col == 2:
            if button:
                InputTesting.clickTableButton(t, param, col)
            else:
                InputTesting.changeTableCombo(t, param, col, new_val)
        t.save()
        p = t.block.getParamInfo(param)
        self.assertNotEqual(p, None)
        if final_value is None:
            final_value = new_val
        if col == 1 or col == 2:
            self.assertEqual(p.value, final_value)
        else:
            self.assertEqual(p.comments, final_value)

    def testParamChanged(self):
        """Value edits via cell and combo, including accumulating vectors."""
        t = self.createTable(self.createParams())
        self.changeParam(t, "p0", 1, "new_value")
        self.changeParam(t, "p5", 2, "option_1")
        self.changeParam(t, "p5", 2, "option_2")
        # vector params accumulate selections
        self.changeParam(t, "p7", 2, "option_1")
        self.changeParam(t, "p7", 2, "option_2", "option_1 option_2")
        self.changeParam(t, "p7", 2, "option_0", "option_1 option_2 option_0")
        self.changeParam(t, "p9", 2, "child1")
        self.changeParam(t, "p9", 2, "child2")
        self.changeParam(t, "p10", 2, "child1")
        self.changeParam(t, "p10", 2, "child2", "child1 child2")

    def testParamComments(self):
        """Comment edits in column 3 are saved to the parameter."""
        t = self.createTable(self.createParams())
        self.changeParam(t, "p0", 3, "some comments")
        self.changeParam(t, "p0", 3, "more comments")
        self.changeParam(t, "p1", 3, "f")

    @patch.object(QFileDialog, "getOpenFileName")
    def testFiles(self, mock_file):
        """File-picker buttons write the chosen path (cancel leaves it empty)."""
        mock_file.return_value = (None, None)  # user cancelled
        t = self.createTable(self.createParams())
        self.changeParam(t, "p2", 2, "", button=True)
        mock_file.return_value = ("foo", "filter")
        self.changeParam(t, "p2", 2, "foo", button=True)
        mock_file.return_value = ("bar", "filter")
        self.changeParam(t, "p2", 2, "bar", button=True)
        mock_file.return_value = ("foo", "filter")
        self.changeParam(t, "p3", 2, "foo", button=True)
        mock_file.return_value = ("bar", "filter")
        self.changeParam(t, "p3", 2, "bar", button=True)

    def testWatchers(self):
        """The VariableName combo tracks the watched blocks' children."""
        t = self.createTable(self.createParams())
        row = t.findRow("p9")
        combo = t.cellWidget(row, 2)
        # 3 children x 2 watched paths + 1 blank entry
        self.assertEqual(combo.count(), 7)
        self.block_children = []
        t.updateWatchers()
        self.assertEqual(combo.count(), 1)
if __name__ == '__main__':
    # Run this module's tests through peacock's test harness.
    Testing.run_tests()
|
nuclear-wizard/moose
|
python/peacock/tests/input_tab/ParamsTable/test_ParamsTable.py
|
Python
|
lgpl-2.1
| 8,324
|
[
"MOOSE"
] |
589435f89ac4d73afc483c504da112e0cd96a311711936214a33f7a256edf403
|
from nose.plugins.attrib import attr
from unittest import skipIf
import tempfile
import os
from openmoltools import utils
import numpy as np
import mdtraj as md
from distutils.spawn import find_executable
import tarfile
import pickle
import os
import numpy as np
@skipIf(find_executable('obabel') is None, 'You need obabel installed to run this test')
def _tester_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb(charge_method="bcc"):
    """Yield one nose test per FreeSolv molecule/directory combination.

    Extracts the FreeSolv tarball into a temp directory and, for each
    molecule in both the GAFF and SYBYL mol2 sets, yields a tagged test
    that runs ``utils.test_molecule`` with *charge_method*.
    """
    with utils.enter_temp_directory():
        tar_filename = utils.get_data_filename("chemicals/freesolv/freesolve_v0.3.tar.bz2")
        tar = tarfile.open(tar_filename, mode="r:bz2")
        tar.extractall()
        tar.close()
        # NOTE(review): Python 2 style — the file handle is never closed and
        # the pickle is opened in text mode; under Python 3 this needs
        # ``with open(..., "rb")``.
        database = pickle.load(open("./v0.3/database.pickle"))
        for key in database:
            for directory in ["mol2files_gaff", "mol2files_sybyl"]:
                gaff_filename = os.path.abspath("./v0.3/%s/%s.mol2" % (directory, key))
                cmd = """sed -i "s/<0>/LIG/" %s""" % gaff_filename
                os.system(cmd)  # Have to remove the <0> because it leads to invalid XML in the forcefield files.
                t_gaff = md.load(gaff_filename)
                # NOTE(review): the lambda closes over the loop variables
                # gaff_filename/charge_method late-bound; if the consumer
                # collects all yielded tests before running them, every
                # lambda would see the last values — confirm against the
                # nose runner's lazy iteration before "fixing".
                with utils.enter_temp_directory():
                    yield utils.tag_description(lambda : utils.test_molecule("LIG", gaff_filename, charge_method=charge_method), "Testing freesolv %s %s with charge model %s" % (directory, key, charge_method))
@attr("slow")
def test_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb():
    """Full FreeSolv comparison using the default AM1-BCC charge model."""
    # NOTE(review): the helper is a generator function; calling it without
    # iterating produces no test cases by itself — confirm how the nose
    # runner collects these.
    _tester_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb(charge_method="bcc")
# Faster variant: passing charge_method=None skips the (slow) AM1-BCC
# charge derivation step.
def test_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb_nobcc():
    _tester_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb(charge_method=None)
|
jchodera/openmoltools
|
openmoltools/tests/test_freesolv.py
|
Python
|
gpl-2.0
| 1,799
|
[
"MDTraj"
] |
f5254b102356136a998b4fb0f47e1bec31f2f27615540dec05c7ac411128550a
|
# -*- coding: utf-8 -*-
import numpy as np
import abel
import matplotlib.pyplot as plt
# Compare the linbasex and hansenlaw inverse Abel transforms on a
# velocity-map image, plotting speed distributions and anisotropy (beta).
IM = np.loadtxt("data/VMI_art1.txt.bz2")

legendre_orders = [0, 2, 4]  # Legendre polynomial orders
proj_angles = np.arange(0, np.pi/2, np.pi/10)  # projection angles in steps of pi/10 (18 degrees)
radial_step = 1  # pixel grid
smoothing = 1  # smoothing 1/e-width for Gaussian convolution smoothing
threshold = 0.2  # threshold for normalization of higher order Newton spheres
clip=0  # clip first vectors (smallest Newton spheres) to avoid singularities

# linbasex method - center ensures image has odd square shape
#                 - speed and anisotropy parameters evaluated by method
LIM = abel.Transform(IM, method='linbasex', center='convolution',
                     center_options=dict(square=True),
                     transform_options=dict(basis_dir=None,
                         proj_angles=proj_angles, radial_step=radial_step,
                         smoothing=smoothing, threshold=threshold, clip=clip,
                         return_Beta=True, verbose=True))

# hansenlaw method - speed and anisotropy parameters evaluated by integration
HIM = abel.Transform(IM, method="hansenlaw", center='convolution',
                     center_options=dict(square=True),
                     angular_integration=True)

# alternative derivation of anisotropy parameters via integration
# over selected radial ranges (pixels)
rrange = [(20, 50), (60, 80), (85, 100), (125, 155), (185, 205), (220, 240)]
Beta, Amp, rr, intensity, theta =\
    abel.tools.vmi.radial_integration(HIM.transform, radial_ranges=rrange)

# 2x4 layout: two transformed images on the left, speed distribution in the
# middle (wide), beta parameter on the right.
plt.figure(figsize=(12, 6))
ax0 = plt.subplot2grid((2,4), (0,0))
ax3 = plt.subplot2grid((2,4), (1,0))
ax1 = plt.subplot2grid((2,4), (0,1), colspan=2, rowspan=2)
ax2 = plt.subplot2grid((2,4), (0,3), sharex=ax1, rowspan=2)

ax0.imshow(LIM.transform, vmin=0, vmax=LIM.transform.max()*2/3)
ax0.set_aspect('equal')
ax0.axis('off')
ax0.invert_yaxis()
ax0.set_title("linbasex")

ax3.imshow(HIM.transform, vmin=0, vmax=HIM.transform[200:].max()*1/5)
ax3.axis('off')
#ax3.axis(xmin=750, xmax=850, ymin=420, ymax=620)
ax3.invert_yaxis()
ax3.set_aspect('equal')
ax3.set_title("hansenlaw")

# normalized speed distributions from both methods
ax1.plot(LIM.radial, LIM.Beta[0], 'r-', label='linbasex')
ax1.plot(HIM.angular_integration[0],
         HIM.angular_integration[1]/HIM.angular_integration[1].max(),
         'b-', label='hansenlaw')
ax1.legend(loc=0, labelspacing=0.1, frameon=False, numpoints=1, fontsize=10)
# NOTE(review): 100/np.pi rescales the angles for display in the title;
# a radians-to-degrees conversion would be 180/np.pi — confirm intent.
proj_angles *= 100/np.pi
ax1.set_title("Beta0 norm an={} un={} inc={} sig={} th={}".
              format(proj_angles.astype(int), legendre_orders,
                     radial_step, smoothing, threshold), fontsize=10)
ax1.axis(ymin=-0.1, ymax=1.2)
ax1.set_xlabel("radial coordinate (pixels)")

# anisotropy parameter: linbasex curve vs hansenlaw ranged integration
ax2.plot(LIM.radial, LIM.Beta[1], 'r-', label='linbasex')
beta = np.transpose(Beta)
ax2.errorbar(x=rr, y=beta[0], yerr=beta[1], color='b', lw=2, fmt='o',
             label='hansenlaw')
ax2.set_title(r"$\beta$-parameter (Beta2 norm)", fontsize=10)
ax2.legend(loc=0, labelspacing=0.1, frameon=False, numpoints=1, fontsize=10)
ax2.axis(xmax=300, ymin=-1.0, ymax=1.0)
ax2.set_xlabel("radial coordinate (pixels)")

plt.savefig("plot_example_linbasex_hansenlaw.png", dpi=100)
plt.show()
|
stggh/PyAbel
|
examples/example_linbasex_hansenlaw.py
|
Python
|
mit
| 3,185
|
[
"Gaussian"
] |
0b43f3529a01f6d1399211c027f61c1170806f8ac23ee0df368c673b25ad69fe
|
"""
Quantum ESPRESSO basic parser
Author: Evgeny Blokhin
TODO: check ibrav settings, parsing might be wrong
"""
from __future__ import division
import os
import datetime, time
from numpy import dot, array, transpose, linalg
from tilde.parsers import Output
from tilde.core.electron_structure import Ebands
from ase import Atoms
from ase.data import chemical_symbols
from ase.units import Bohr, Rydberg
class QuantumESPRESSO(Output):
    """Basic parser for Quantum ESPRESSO (PWSCF) text output.

    Fills the generic tilde ``Output`` container with the crystal structure,
    total energy, exchange-correlation setup, band eigenvalues and run
    metadata found in a PWSCF log file.
    """
    def __init__(self, filename):
        Output.__init__(self, filename)
        cur_folder = os.path.dirname(filename)
        self.related_files.append(filename)
        # tilde-internal numeric codes: framework id, run status, ansatz type
        self.info['framework'] = 0x4
        self.info['finished'] = 0x1   # flipped to 0x2 below once a termination/timing marker is seen
        self.info['ansatz'] = 0x2
        # Map of PWSCF functional short names to tilde descriptions.
        # taken from trunk/Modules/funct.f90
        xc_internal_map = {
            "pw" : {'name': "PW_LDA", 'type': [0x1], 'setup': ["sla+pw+nogx+nogc" ] },
            "pz" : {'name': "PZ_LDA", 'type': [0x1], 'setup': ["sla+pz+nogx+nogc" ] },
            "bp" : {'name': "Becke-Perdew grad.corr.", 'type': [0x2], 'setup': ["b88+p86+nogx+nogc" ] },
            "pw91" : {'name': "PW91", 'type': [0x2], 'setup': ["sla+pw+ggx+ggc" ] },
            "blyp" : {'name': "BLYP", 'type': [0x2], 'setup': ["sla+b88+lyp+blyp" ] },
            "pbe" : {'name': "PBE", 'type': [0x2], 'setup': ["sla+pw+pbx+pbc", "sla+pw+pbe+pbe"] },
            "revpbe" : {'name': "revPBE", 'type': [0x2], 'setup': ["sla+pw+rpb+pbc", "sla+pw+rpb+pbe"] },
            "pw86pbe" : {'name': "PW86+PBE", 'type': [0x2], 'setup': ["sla+pw+pw86+pbc", "sla+pw+pw86+pbe"] },
            "b86bpbe" : {'name': "B86b+PBE", 'type': [0x2], 'setup': ["sla+pw+b86b+pbc", "sla+pw+b86b+pbe"] },
            "pbesol" : {'name': "PBEsol", 'type': [0x2], 'setup': ["sla+pw+psx+psc" ] },
            "q2d" : {'name': "PBEQ2D", 'type': [0x2], 'setup': ["sla+pw+q2dx+q2dc" ] },
            "hcth" : {'name': "HCTH/120", 'type': [0x2], 'setup': ["nox+noc+hcth+hcth" ] },
            "olyp" : {'name': "OLYP", 'type': [0x2], 'setup': ["nox+lyp+optx+blyp" ] },
            "wc" : {'name': "Wu-Cohen", 'type': [0x2], 'setup': ["sla+pw+wcx+pbc", "sla+pw+wcx+pbe"] },
            "sogga" : {'name': "SOGGA", 'type': [0x2], 'setup': ["sla+pw+sox+pbc", "sla+pw+sox+pbe"] },
            "optbk88" : {'name': "optB88", 'type': [0x2], 'setup': ["sla+pw+obk8+p86" ] },
            "optb86b" : {'name': "optB86", 'type': [0x2], 'setup': ["sla+pw+ob86+p86" ] },
            "ev93" : {'name': "Engel-Vosko", 'type': [0x2], 'setup': ["sla+pw+evx+nogc" ] },
            "tpss" : {'name': "TPSS", 'type': [0x3], 'setup': ["sla+pw+tpss+tpss" ] },
            "m06l" : {'name': "M06L", 'type': [0x3], 'setup': ["nox+noc+m6lx+m6lc" ] },
            "tb09" : {'name': "TB09", 'type': [0x3], 'setup': ["sla+pw+tb09+tb09" ] },
            "pbe0" : {'name': "PBE0", 'type': [0x2, 0x4], 'setup': ["pb0x+pw+pb0x+pbc", "pb0x+pw+pb0x+pbe"] },
            "hse" : {'name': "HSE06", 'type': [0x2, 0x4], 'setup': ["sla+pw+hse+pbc", "sla+pw+hse+pbe"] },
            "b3lyp" : {'name': "B3LYP", 'type': [0x2, 0x4], 'setup': ["b3lp+vwn+b3lp+b3lp" ] },
            "gaupbe" : {'name': "Gau-PBE", 'type': [0x2, 0x4], 'setup': ["sla+pw+gaup+pbc", "sla+pw+gaup+pbe"] },
            "vdw-df" : {'name': "vdW-DF", 'type': [0x2, 0x7], 'setup': ["sla+pw+rpb+vdw1" ] },
            "vdw-df2" : {'name': "vdW-DF2", 'type': [0x2, 0x7], 'setup': ["sla+pw+rw86+vdw2" ] },
            "vdw-df-c09" : {'name': "vdW-DF-C09", 'type': [0x2, 0x7], 'setup': ["sla+pw+c09x+vdw1" ] },
            "vdw-df2-c09" : {'name': "vdW-DF2-C09", 'type': [0x2, 0x7], 'setup': ["sla+pw+c09x+vdw2" ] },
            "vdw-df-cx" : {'name': "vdW-DF-cx", 'type': [0x2, 0x7], 'setup': ["sla+pw+cx13+vdW1" ] },
            "vdw-df-obk8" : {'name': "vdW-DF-obk8", 'type': [0x2, 0x7], 'setup': ["sla+pw+obk8+vdw1" ] },
            "vdw-df-ob86" : {'name': "vdW-DF-ob86", 'type': [0x2, 0x7], 'setup': ["sla+pw+ob86+vdw1" ] },
            "vdw-df2-b86r" : {'name': "vdW-DF2-B86R", 'type': [0x2, 0x7], 'setup': ["sla+pw+b86r+vdw2" ] },
            "rvv10" : {'name': "rVV10", 'type': [0x2, 0x7], 'setup': ["sla+pw+rw86+pbc+vv10", "sla+pw+rw86+pbe+vv10"] },
            "hf" : {'name': "Hartree-Fock", 'type': [0x5], 'setup': ["hf+noc+nogx+nogc" ] },
            "vdw-df3" : {'name': "vdW-DF3", 'type': [0x2, 0x7], 'setup': ["sla+pw+rw86+vdw3" ] },
            "vdw-df4" : {'name': "vdW-DF4", 'type': [0x2, 0x7], 'setup': ["sla+pw+rw86+vdw4" ] },
            "gaup" : {'name': "Gau-PBE", 'type': [0x2, 0x4], 'setup': ["sla+pw+gaup+pbc", "sla+pw+gaup+pbe"] },
        }
        self.data = open(filename).readlines()
        # accumulators for the structure; alat is the lattice parameter (Angstrom after * Bohr)
        atomic_data, cell_data, pos_data, symbol_data, alat = None, [], [], [], 0
        e_last = None   # highest occupied level / Fermi energy once found
        kpts, eigs_columns, tot_k = [], [], 0
        # NOTE(review): several branches advance "n" manually to consume
        # multi-line sections; the outer range() loop still revisits those
        # lines afterwards. This looks harmless because consumed data lines
        # do not match any branch keyword -- confirm on edge-case outputs.
        for n in range(len(self.data)):
            cur_line = self.data[n]
            if "This run was terminated on" in cur_line:
                self.info['finished'] = 0x2
            elif " Program PWSCF" in cur_line and " starts " in cur_line:
                # extract the version string between "Program PWSCF" and "starts"
                ver_str = cur_line.strip().replace('Program PWSCF', '')
                ver_str = ver_str[ : ver_str.find(' starts ') ].strip()
                if ver_str.startswith("v."): ver_str = ver_str[2:]
                self.info['prog'] = ver_str
            elif cur_line.startswith(" celldm"):
                # celldm(1) is the lattice parameter in Bohr; only take the first occurrence
                if not alat:
                    alat = float(cur_line.split()[1]) * Bohr
                    if not alat: alat = 1   # guard against celldm(1) = 0
            elif cur_line.startswith(" crystal axes:"):
                # three cell vectors follow, in units of alat
                cell_data = [x.split()[3:6] for x in self.data[n + 1:n + 4]]
                cell_data = array([[float(col) for col in row] for row in cell_data])
            elif cur_line.startswith(" site n."):
                # initial atomic positions table; parse it only once
                if len(pos_data): continue
                while True:
                    n += 1
                    next_line = self.data[n].split()
                    if not next_line: break
                    pos_data.append([float(x) for x in next_line[-4:-1]])
                    # strip digits and suffixes from the species label to get a chemical symbol
                    symbol = next_line[1].strip('0123456789').split('_')[0]
                    if not symbol in chemical_symbols and len(symbol) > 1: symbol = symbol[:-1]
                    symbol_data.append(symbol)
                pos_data = array(pos_data)*alat
                atomic_data = Atoms(symbol_data, pos_data, cell=cell_data*alat, pbc=(1,1,1))
            elif "CELL_PARAMETERS" in cur_line:
                # updated cell from a variable-cell run; the for-else fires
                # only when all three vectors were read without a blank line
                for i in range(3):
                    n += 1
                    next_line = self.data[n].split()
                    if not next_line: break
                    cell_data[i][:] = list(map(float, next_line))
                else:
                    mult = 1
                    if "bohr" in cur_line: mult = Bohr
                    elif "alat" in cur_line: mult = alat
                    atomic_data.set_cell(cell_data*mult, scale_atoms=True)
            elif "ATOMIC_POSITIONS" in cur_line:
                # updated positions from a relaxation step; unit is given in parentheses
                coord_flag = cur_line.split('(')[-1].strip()
                for i in range(len(pos_data)):
                    n += 1
                    next_line = self.data[n].split()
                    pos_data[i][:] = list(map(float, next_line[1:4]))
                if not atomic_data: continue
                if coord_flag=='alat)':
                    atomic_data.set_positions(pos_data*alat)
                elif coord_flag=='bohr)':
                    atomic_data.set_positions(pos_data*Bohr)
                elif coord_flag=='angstrom)':
                    atomic_data.set_positions(pos_data)
                else:
                    # crystal (fractional) coordinates
                    atomic_data.set_scaled_positions(pos_data)
            elif cur_line.startswith("! total energy"):
                # converged total energy, Ry -> eV via ase.units.Rydberg
                self.info['energy'] = float(cur_line.split()[-2]) * Rydberg
            elif " Exchange-correlation" in cur_line:
                # parse the functional only once
                if self.info['H']: continue
                xc_str = cur_line.split('=')[-1].strip()
                xc_parts = xc_str[ : xc_str.find("(") ].split()
                if len(xc_parts) == 1: xc_parts = xc_parts[0].split('+')
                if len(xc_parts) < 4: xc_parts = [ '+'.join(xc_parts) ]
                xc_parts = [x.lower().strip("-'\"") for x in xc_parts]
                if len(xc_parts) == 1:
                    # single short name, e.g. "pbe"
                    try:
                        self.info['H'] = xc_internal_map[xc_parts[0]]['name']
                        self.info['H_types'].extend( xc_internal_map[xc_parts[0]]['type'] )
                    except KeyError:
                        self.info['H'] = xc_parts[0]
                else:
                    # four-component setup string, e.g. "sla+pw+pbx+pbc"
                    xc_parts = '+'.join(xc_parts)
                    match = [ i for i in list(xc_internal_map.values()) if xc_parts in i['setup'] ]
                    if match:
                        self.info['H'] = match[0]['name']
                        self.info['H_types'].extend( match[0]['type'] )
                    else:
                        self.info['H'] = xc_parts
            elif "PWSCF :" in cur_line:
                # final timing line; parse the wall-clock duration into hours
                if "WALL" in cur_line or "wall" in cur_line:
                    d = cur_line.split("CPU")[-1].replace("time", "").replace(",", "")
                    # keep only the largest trailing unit present (s/m/h)
                    if d.find("s") > 0: d = d[ : d.find("s") + 1 ]
                    elif d.find("m") > 0: d = d[ : d.find("m") + 1 ]
                    elif d.find("h") > 0: d = d[ : d.find("h") + 1 ]
                    d = d.strip().replace(" ", "")
                    # build a strptime format matching the units that occur
                    fmt = ""
                    if 's' in d: fmt = "%S.%fs"
                    if 'm' in d: fmt = "%Mm" + fmt
                    if 'h' in d: fmt = "%Hh" + fmt
                    if 'd' in d: fmt = "%dd" + fmt # FIXME for months!
                    d = time.strptime(d, fmt)
                    # to comply with python 2.6
                    td = datetime.timedelta(days=d.tm_mday, hours=d.tm_hour, minutes=d.tm_min, seconds=d.tm_sec)
                    self.info['duration'] = "%2.2f" % ( (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 3.6e9 )
                    self.info['finished'] = 0x2
            elif "End of self-consistent calculation" in cur_line or "End of band structure calculation" in cur_line:
                # collect eigenvalues per k-point until a terminator or an error
                e_last = None
                kpts, eigs_columns, tot_k = [], [], 0
                eigs_collect, eigs_failed = False, False
                eigs_spin_warning = False
                if not atomic_data: eigs_failed = True
                while not eigs_failed:
                    n += 1
                    next_line = self.data[n]
                    if eigs_collect:
                        # accumulating eigenvalue rows for the current k-point
                        next_line = next_line.split()
                        if next_line:
                            try: eigs_columns[-1] += list(map(float, next_line))
                            except ValueError: eigs_failed = True
                        else: eigs_collect = False
                        continue
                    if "Ry" in next_line or "CPU" in next_line:
                        eigs_failed = True
                    elif " k =" in next_line:
                        # fixed-width k-point coordinates: 3 x 7 characters
                        tot_k += 1
                        coords = next_line.strip().replace("k =", "")[:21]
                        try: kpts.append(list(map(float, [coords[0:7], coords[7:14], coords[14:21]])))
                        except ValueError: eigs_failed = True
                        eigs_collect = True
                        eigs_columns.append([])
                        n += 1   # skip the blank line before the eigenvalues
                    elif "highest occupied level" in next_line:
                        e_last = float(next_line.split()[-1])
                        break
                    elif "highest occupied, lowest unoccupied" in next_line:
                        e_last = float(next_line.split()[-2])
                        break
                    elif "Fermi energy" in next_line:
                        e_last = float(next_line.split()[-2])
                        break
                    elif " SPIN UP " in next_line or " SPIN DOWN " in next_line:
                        self.info['spin'] = True
                        eigs_spin_warning = True
        # Only the last set is taken
        if kpts and eigs_columns:
            if eigs_spin_warning:
                self.warning('Attention! Spin states are currently not supported! Only spin down projection is considered.') # FIXME
                tot_k /= 2   # each k-point was counted once per spin channel
            self.info['k'] = str(tot_k) + ' pts/BZ'
            if e_last is None:
                self.warning('Warning: highest occupied state not found!')
            else:
                if not eigs_failed:
                    # build the band-structure object: abscissa is the cumulative
                    # path length in reciprocal space, stripes are bands shifted
                    # so that e_last becomes the zero level
                    band_obj = {'ticks': [], 'abscissa': [], 'stripes': []}
                    d = 0.0
                    bz_vec_ref = [0, 0, 0]
                    k_shape = linalg.inv( atomic_data.cell ).transpose()
                    for k in kpts:
                        bz_vec_cur = dot( k, k_shape )
                        bz_vec_dir = list(map(sum, list(zip(bz_vec_cur, bz_vec_ref))))
                        bz_vec_ref = bz_vec_cur
                        d += linalg.norm( bz_vec_dir )
                        band_obj['abscissa'].append(d)
                    band_obj['stripes'] = (transpose(eigs_columns) - e_last).tolist()
                    self.electrons['bands'] = Ebands(band_obj)
                else: self.warning('Error: incorrect bands data!')
        if atomic_data: self.structures.append(atomic_data)
        # Try to locate the corresponding input file next to the output.
        # NB we have absolutely no guarantee this input fits --- is there a better solution?
        first_check = os.path.join(cur_folder, filename.replace('.' + filename.split('.')[-1], '') + '.in')
        if os.path.exists(first_check):
            self.related_files.append(first_check)
            self.info['input'] = open(first_check).read()
        else:
            candidates = []
            for i in os.listdir(cur_folder):
                if i.endswith(".in") or i.endswith(".inp") or i.endswith(".input"):
                    candidates.append(i)
            if not candidates: self.warning('No input found!')
            elif len(candidates) > 1: self.warning('Ambiguous inputs found: %s' % (", ".join(candidates)))
            else:
                self.related_files.append(os.path.join(cur_folder, candidates[0]))
                self.info['input'] = open(os.path.join(cur_folder, candidates[0])).read()

    @staticmethod
    def fingerprints(test_string):
        """Return True if *test_string* looks like PWSCF output, else False."""
        if ("pwscf" in test_string or "PWSCF" in test_string) and " Current dimensions of program " in test_string:
            return True
        return False
|
tilde-lab/tilde
|
tilde/parsers/QuantumESPRESSO/QuantumESPRESSO.py
|
Python
|
mit
| 15,615
|
[
"ASE",
"CRYSTAL",
"Quantum ESPRESSO"
] |
5d2b1508e1e41edf8506220b376c341897b00261c8319e478cc231c2cfdc0bc5
|
import unittest
import netCDF4
import os
test_ncdump="""netcdf ubyte {
dimensions:
d = 2 ;
variables:
byte ub(d) ;
ub:_Unsigned = "true" ;
byte sb(d) ;
// global attributes:
:_Format = "classic" ;
}
"""
test_ncdump2="""netcdf ubyte {
dimensions:
d = 2 ;
variables:
byte ub(d) ;
ub:_Unsigned = "true" ;
byte sb(d) ;
// global attributes:
:_Format = "classic" ;
data:
ub = 0, -1 ;
sb = -128, 127 ;
}
"""
class Test_CDL(unittest.TestCase):
    """Round-trip tests for netCDF4 CDL export/import (tocdl / fromcdl)."""

    def setUp(self):
        # Dump the reference dataset (including data) to a CDL text file.
        dataset = netCDF4.Dataset('ubyte.nc')
        dataset.tocdl(outfile='ubyte.cdl', data=True)
        dataset.close()

    def test_tocdl(self):
        # Header-only and data dumps must match the reference CDL text
        # (variables flagged _Unsigned are treated as unsigned integers).
        dataset = netCDF4.Dataset('ubyte.nc')
        header_dump = dataset.tocdl()
        data_dump = dataset.tocdl(data=True)
        assert header_dump == test_ncdump
        assert data_dump == test_ncdump2
        dataset.close()

    def test_fromcdl(self):
        # Rebuilding a dataset from CDL must reproduce the original contents.
        rebuilt = netCDF4.Dataset.fromcdl('ubyte.cdl', ncfilename='ubyte2.nc')
        reference = netCDF4.Dataset('ubyte.nc')
        assert rebuilt.variables.keys() == reference.variables.keys()
        assert rebuilt.filepath() == 'ubyte2.nc'
        assert rebuilt.dimensions.keys() == reference.dimensions.keys()
        assert len(rebuilt.dimensions['d']) == len(reference.dimensions['d'])
        assert (rebuilt['ub'][:] == reference['ub'][:]).all()
        assert (rebuilt['sb'][:] == reference['sb'][:]).all()
        rebuilt.close()
        reference.close()
        os.remove('ubyte2.nc')

    def tearDown(self):
        # Remove the temporary files
        os.remove('ubyte.cdl')
if __name__ == '__main__':
unittest.main()
|
Unidata/netcdf4-python
|
test/tst_cdl.py
|
Python
|
mit
| 1,517
|
[
"NetCDF"
] |
32eb8336df4fe1b75a87ab1a10bc6df8442c40fda168ec68409f9dc30ce38544
|
# $Id$
#
# Copyright (C) 2015 Novartis Institute of BioMedical Research
# All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
""" This is a rough coverage test of the python wrapper for FilterCatalogs
it is intended to be shallow but broad.
"""
from __future__ import print_function
import doctest, unittest, os
import pickle
from rdkit import RDConfig
from rdkit.RDLogger import logger
logger = logger()
from rdkit import Chem
from rdkit.Chem import rdfiltercatalog
from rdkit.Chem import FilterCatalog, rdMolDescriptors
from rdkit.Chem.FilterCatalog import FilterCatalogParams
from rdkit.Chem.FilterCatalog import FilterMatchOps
from rdkit import DataStructs
def load_tests(loader, tests, ignore):
    """unittest hook: append the rdfiltercatalog doctests to the suite."""
    doc_suite = doctest.DocTestSuite(rdfiltercatalog)
    tests.addTests(doc_suite)
    return tests
class TestCase(unittest.TestCase):
    """Shallow-but-broad coverage of the FilterCatalog Python wrapper.

    Each test exercises one area of the API: entries and matchers,
    boolean match operators, the built-in PAINS/ZINC catalogs,
    exclusion lists, count bounds, Python-defined matchers and the
    functional-group hierarchy helpers.
    """

    def setUp(self):
        pass

    def test0FilterCatalogEntry(self):
        # A SmartsMatcher without a pattern is invalid until one is set.
        matcher = FilterCatalog.SmartsMatcher("Aromatic carbon chain")
        self.assertTrue(not matcher.IsValid())
        pat = Chem.MolFromSmarts("c:c:c:c:c")
        matcher.SetPattern(pat)
        matcher.SetMinCount(1)
        entry = FilterCatalog.FilterCatalogEntry("Bar", matcher)
        if FilterCatalog.FilterCatalogCanSerialize():
            pickle = entry.Serialize()
        else:
            pickle = None
        self.assertTrue(entry.GetDescription() == "Bar")
        self.assertTrue(matcher.GetMinCount() == 1)
        # default max count is unbounded (2**32 - 1)
        self.assertTrue(matcher.GetMaxCount() == 2**32 - 1)
        self.assertTrue(matcher.IsValid())
        entry.SetDescription("Foo")
        self.assertTrue(entry.GetDescription() == "Foo")
        mol = Chem.MolFromSmiles("c1ccccc1")
        self.assertTrue(matcher.HasMatch(mol))
        # constructing from a pattern alone yields an unnamed matcher
        matcher = FilterCatalog.SmartsMatcher(pat)
        self.assertEqual(str(matcher), "Unnamed SmartsMatcher")
        self.assertTrue(matcher.GetMinCount() == 1)
        self.assertTrue(matcher.HasMatch(mol))
        matches = matcher.GetMatches(mol)
        # an empty ExclusionList matches nothing it excludes
        matcher = FilterCatalog.ExclusionList()
        matcher.SetExclusionPatterns([matcher])
        self.assertTrue(not matcher.HasMatch(mol))
        #pat = Chem.MolFromSmarts("c:c:c:c:c")
        #entry.SetOnPattern(pat)
        #entry.SetOffPatterns([pat,pat,pat])
        #self.assertTrue(not entry.HasMatch(pat))

    def test1FilterMatchOps(self):
        # And/Or/Not combinators over matchers.
        mol = Chem.MolFromSmiles("c1ccccc1")
        pat = Chem.MolFromSmarts("c:c:c:c:c")
        matcher = FilterCatalog.SmartsMatcher("Five aromatic carbons", pat)
        self.assertTrue(matcher.GetMinCount() == 1)
        self.assertTrue(matcher.HasMatch(mol))
        matches = matcher.GetMatches(mol)
        matcher2 = FilterCatalog.ExclusionList()
        matcher2.SetExclusionPatterns([matcher])
        self.assertTrue(not matcher2.HasMatch(mol))
        and_match = FilterMatchOps.And(matcher, matcher2)
        self.assertTrue(not and_match.HasMatch(mol))
        not_match = FilterMatchOps.Not(and_match)
        self.assertTrue(not_match.HasMatch(mol))
        or_match = FilterMatchOps.Or(matcher, matcher2)
        self.assertTrue(or_match.HasMatch(mol))
        print(and_match)
        print(or_match)
        print(not_match)

    def test2FilterCatalogTest(self):
        # Built-in PAINS catalogs: expected entry counts and a known hit each.
        tests = ((FilterCatalogParams.FilterCatalogs.PAINS_A, 16),
                 (FilterCatalogParams.FilterCatalogs.PAINS_B, 55),
                 (FilterCatalogParams.FilterCatalogs.PAINS_C, 409),
                 (FilterCatalogParams.FilterCatalogs.PAINS, 409 + 16 + 55))
        for catalog_idx, num in tests:
            params = FilterCatalog.FilterCatalogParams()
            print("*" * 44)
            print("Testing:", catalog_idx, int(catalog_idx))
            self.assertTrue(params.AddCatalog(catalog_idx))
            catalog1 = FilterCatalog.FilterCatalog(params)
            # exercise both native serialization and pickling when available
            if FilterCatalog.FilterCatalogCanSerialize():
                pkl = catalog1.Serialize()
                catalog2 = FilterCatalog.FilterCatalog(pkl)
                catalog3 = pickle.loads(pickle.dumps(catalog1))
                catalogs = [catalog1, catalog2, catalog3]
            else:
                catalogs = [catalog1]
                self.failUnlessRaises(RuntimeError, lambda: pickle.dumps(catalog1))
            catalogs.append(FilterCatalog.FilterCatalog(catalog_idx))
            for index, catalog in enumerate(catalogs):
                self.assertEqual(catalog.GetNumEntries(), num)
                if catalog_idx in [FilterCatalogParams.FilterCatalogs.PAINS_A,
                                   FilterCatalogParams.FilterCatalogs.PAINS]:
                    # http://chemistrycompass.com/chemsearch/58909/
                    mol = Chem.MolFromSmiles("O=C(Cn1cnc2c1c(=O)n(C)c(=O)n2C)N/N=C/c1c(O)ccc2c1cccc2")
                    entry = catalog.GetFirstMatch(mol)
                    for key in entry.GetPropList():
                        if key == "Reference":
                            self.assertEquals(
                                entry.GetProp(key), "Baell JB, Holloway GA. New Substructure Filters for "
                                "Removal of Pan Assay Interference Compounds (PAINS) "
                                "from Screening Libraries and for Their Exclusion in "
                                "Bioassays. J Med Chem 53 (2010) 2719D40. "
                                "doi:10.1021/jm901137j.")
                        elif key == "Scope":
                            self.assertEquals(entry.GetProp(key), "PAINS filters (family A)")
                    self.assertEqual(entry.GetDescription(), "hzone_phenol_A(479)")
                    result = catalog.GetMatches(mol)
                    self.assertEquals(len(result), 1)
                    for entry in result:
                        for filtermatch in entry.GetFilterMatches(mol):
                            self.assertEquals(str(filtermatch.filterMatch), "hzone_phenol_A(479)")
                            atomPairs = [tuple(x) for x in filtermatch.atomPairs]
                            self.assertEquals(atomPairs, [(0, 23), (1, 22), (2, 20), (3, 19), (4, 25), (5, 24),
                                                          (6, 18), (7, 17), (8, 16), (9, 21)])
                elif catalog_idx == FilterCatalogParams.FilterCatalogs.PAINS_B:
                    mol = Chem.MolFromSmiles("FC(F)(F)Oc1ccc(NN=C(C#N)C#N)cc1")  # CHEMBL457504
                    entry = catalog.GetFirstMatch(mol)
                    self.assertTrue(entry)
                    self.assertEquals(entry.GetDescription(), "cyano_imine_B(17)")
                elif catalog_idx == FilterCatalogParams.FilterCatalogs.PAINS_C:
                    mol = Chem.MolFromSmiles("O=C1C2OC2C(=O)c3cc4CCCCc4cc13")  # CHEMBL476649
                    entry = catalog.GetFirstMatch(mol)
                    self.assertTrue(entry)
                    self.assertEquals(entry.GetDescription(), "keto_keto_gamma(5)")

    def test3ExclusionFilter(self):
        # An ExclusionList inverts matching for the patterns it holds.
        mol = Chem.MolFromSmiles("c1ccccc1")
        pat = Chem.MolFromSmarts("c:c:c:c:c")
        matcher = FilterCatalog.SmartsMatcher("Five aromatic carbons", pat)
        self.assertTrue(matcher.GetMinCount() == 1)
        self.assertTrue(matcher.HasMatch(mol))
        matches = matcher.GetMatches(mol)
        exclusionFilter = FilterCatalog.ExclusionList()
        exclusionFilter.AddPattern(matcher)
        self.assertFalse(exclusionFilter.HasMatch(mol))
        matches2 = exclusionFilter.GetMatches(mol)
        self.assertTrue(matches)
        self.assertFalse(matches2)

    def test4CountTests(self):
        # min/max count bounds on a SmartsMatcher.
        matcher = FilterCatalog.SmartsMatcher("Carbon", "[#6]", 0, 2)
        m = Chem.MolFromSmiles("N")
        self.assertTrue(matcher.HasMatch(m))
        m = Chem.MolFromSmiles("C")
        self.assertTrue(matcher.HasMatch(m))
        m = Chem.MolFromSmiles("CC")
        self.assertTrue(matcher.HasMatch(m))
        m = Chem.MolFromSmiles("CCC")
        self.assertFalse(matcher.HasMatch(m))
        matcher = FilterCatalog.SmartsMatcher("Carbon", "[#6]", 1, 2)
        m = Chem.MolFromSmiles("N")
        self.assertFalse(matcher.HasMatch(m))

    def testZinc(self):
        # The ZINC catalog flags a 41-carbon chain but not CN repeats.
        params = FilterCatalog.FilterCatalogParams(FilterCatalogParams.FilterCatalogs.ZINC)
        catalog = FilterCatalog.FilterCatalog(params)
        self.assertTrue(catalog.GetNumEntries())
        m = Chem.MolFromSmiles("C" * 41)
        entry = catalog.GetFirstMatch(m)
        self.assertTrue(entry.GetDescription(), "Non-Hydrogen_atoms")
        m = Chem.MolFromSmiles("CN" * 20)
        entry = catalog.GetFirstMatch(m)
        self.assertEquals(catalog.GetFirstMatch(m), None)

    def testSmartsMatcherAPI(self):
        # complementary count windows plus Not() must partition the inputs
        sm = FilterCatalog.SmartsMatcher("Too many carbons", "[#6]", 40 + 1)
        sm2 = FilterCatalog.SmartsMatcher("ok # carbons", "[#6]", 0, 40)
        sm3 = FilterCatalog.FilterMatchOps.Not(sm2)
        m = Chem.MolFromSmiles("C" * 40)
        self.assertFalse(sm.HasMatch(m))
        self.assertTrue(sm2.HasMatch(m))
        self.assertFalse(sm3.HasMatch(m))
        m = Chem.MolFromSmiles("C" * 41)
        self.assertTrue(sm.HasMatch(m))
        self.assertFalse(sm2.HasMatch(m))
        self.assertTrue(sm3.HasMatch(m))

    def testAddEntry(self):
        # smoke test: adding an entry and releasing references must not crash
        sm = FilterCatalog.SmartsMatcher("Too many carbons", "[#6]", 40 + 1)
        entry = FilterCatalog.FilterCatalogEntry("Bar", sm)
        fc = FilterCatalog.FilterCatalog()
        fc.AddEntry(entry)
        del entry
        del fc

    def testRemoveEntry(self):
        # removing an entry shrinks the catalog by one and drops exactly
        # one entry with the removed description
        params = FilterCatalog.FilterCatalogParams(FilterCatalogParams.FilterCatalogs.ZINC)
        catalog = FilterCatalog.FilterCatalog(params)
        entry = catalog.GetEntryWithIdx(10)
        desc = entry.GetDescription()
        count = 0
        descs = set([catalog.GetEntryWithIdx(i).GetDescription()
                     for i in range(catalog.GetNumEntries())])
        for i in range(catalog.GetNumEntries()):
            if catalog.GetEntryWithIdx(i).GetDescription() == desc:
                count += 1
        print("Count", count)
        sz = catalog.GetNumEntries()
        print("*" * 44)
        self.assertTrue(catalog.RemoveEntry(entry))
        del entry
        self.assertTrue(catalog.GetNumEntries() == sz - 1)
        descs2 = set([catalog.GetEntryWithIdx(i).GetDescription()
                      for i in range(catalog.GetNumEntries())])
        print(descs - descs2)
        newcount = 0
        for i in range(catalog.GetNumEntries()):
            if catalog.GetEntryWithIdx(i).GetDescription() == desc:
                newcount += 1
        self.assertEquals(count, newcount + 1)

    def testPyFilter(self):
        # A FilterMatcher implemented in Python plugs into the C++ combinators.
        class MyFilterMatcher(FilterCatalog.FilterMatcher):

            def IsValid(self):
                return True

            def HasMatch(self, mol):
                return True

            def GetMatches(self, mol, vect):
                v = FilterCatalog.MatchTypeVect()
                v.append(FilterCatalog.IntPair(1, 1))
                match = FilterCatalog.FilterMatch(self, v)
                vect.append(match)
                return True

        func = MyFilterMatcher("FilterMatcher")
        self.assertEquals(func.GetName(), "FilterMatcher")
        mol = Chem.MolFromSmiles("c1ccccc1")
        self.assertEquals(func.HasMatch(mol), True)
        or_match = FilterMatchOps.Or(func, func)
        self.assertEquals([[tuple(x) for x in filtermatch.atomPairs]
                           for filtermatch in or_match.GetMatches(mol)], [[(1, 1)], [(1, 1)]])
        not_match = FilterMatchOps.Not(func)
        print(not_match)
        self.assertEquals(not_match.HasMatch(mol), False)
        # test memory
        del func
        self.assertEquals(not_match.HasMatch(mol), False)
        self.assertEquals([[tuple(x) for x in filtermatch.atomPairs]
                           for filtermatch in not_match.GetMatches(mol)], [])
        entry = FilterCatalog.FilterCatalogEntry("Bar", MyFilterMatcher("FilterMatcher"))
        fc = FilterCatalog.FilterCatalog()
        fc.AddEntry(entry)
        catalogEntry = fc.GetFirstMatch(mol)
        print(catalogEntry.GetDescription())

    def testMWFilter(self):
        # A stateful Python matcher: flags molecules outside a weight window.
        class MWFilter(FilterCatalog.FilterMatcher):

            def __init__(self, minMw, maxMw):
                FilterCatalog.FilterMatcher.__init__(self, "MW violation")
                self.minMw = minMw
                self.maxMw = maxMw

            def IsValid(self):
                return True

            def HasMatch(self, mol):
                mw = rdMolDescriptors.CalcExactMolWt(mol)
                return not self.minMw <= mw <= self.maxMw

        entry = FilterCatalog.FilterCatalogEntry("MW Violation", MWFilter(100, 500))
        fc = FilterCatalog.FilterCatalog()
        fc.AddEntry(entry)
        self.assertTrue(entry.GetDescription() == "MW Violation")
        mol = Chem.MolFromSmiles("c1ccccc1")
        catalogEntry = fc.GetFirstMatch(mol)

    def testFilterHierarchyMatcher(self):
        # Build a halogen classification tree and check which leaves fire.
        # test
        root = FilterCatalog.FilterHierarchyMatcher()
        sm = h = FilterCatalog.SmartsMatcher("Halogen", "[$([F,Cl,Br,I]-!@[#6]);!$([F,Cl,Br,I]"
                                             "-!@C-!@[F,Cl,Br,I]);!$([F,Cl,Br,I]-[C,S]"
                                             "(=[O,S,N]))]", 1)
        root.SetPattern(sm)

        def hierarchy(matcher):
            # wrap a matcher in a hierarchy node, preserving its name
            node = FilterCatalog.FilterHierarchyMatcher(matcher)
            self.assertEquals(matcher.GetName(), node.GetName())
            return node

        sm = FilterCatalog.SmartsMatcher("Halogen.Aromatic", "[F,Cl,Br,I;$(*-!@c)]")
        root.AddChild(hierarchy(sm))
        sm = FilterCatalog.SmartsMatcher("Halogen.NotFluorine", "[$([Cl,Br,I]-!@[#6]);!$([Cl,Br,I]"
                                         "-!@C-!@[F,Cl,Br,I]);!$([Cl,Br,I]-[C,S]"
                                         "(=[O,S,N]))]")
        node = hierarchy(sm)
        halogen_notf_children = [
            hierarchy(x)
            for x in [
                FilterCatalog.SmartsMatcher(
                    "Halogen.NotFluorine.Aliphatic", "[$([Cl,Br,I]-!@C);!$([Cl,Br,I]"
                    "-!@C-!@[F,Cl,Br,I]);!$([Cl,Br,I]-[C,S](=[O,S,N]))]"), FilterCatalog.SmartsMatcher(
                    "Halogen.NotFluorine.Aromatic", "[$([Cl,Br,I]-!@c)]")
            ]
        ]
        for child in halogen_notf_children:
            node.AddChild(child)
        root.AddChild(node)
        sm = FilterCatalog.SmartsMatcher("Halogen.Bromine",
                                         "[Br;$([Br]-!@[#6]);!$([Br]-!@C-!@[F,Cl,Br,I])"
                                         ";!$([Br]-[C,S](=[O,S,N]))]", 1)
        node = hierarchy(sm)
        halogen_bromine_children = [
            hierarchy(x)
            for x in [
                FilterCatalog.SmartsMatcher(
                    "Halogen.Bromine.Aliphatic", "[Br;$(Br-!@C);!$(Br-!@C-!@[F,Cl,Br,I]);"
                    "!$(Br-[C,S](=[O,S,N]))]"), FilterCatalog.SmartsMatcher(
                    "Halogen.Bromine.Aromatic", "[Br;$(Br-!@c)]"), FilterCatalog.SmartsMatcher(
                    "Halogen.Bromine.BromoKetone", "[Br;$(Br-[CH2]-C(=O)-[#6])]")
            ]
        ]
        for child in halogen_bromine_children:
            node.AddChild(child)
        root.AddChild(node)
        # only the deepest matching nodes are reported per branch
        m = Chem.MolFromSmiles("CCl")
        assert h.HasMatch(m)
        res = root.GetMatches(m)
        self.assertEquals(len(res), 1)
        self.assertEquals([match.filterMatch.GetName() for match in res],
                          ['Halogen.NotFluorine.Aliphatic'])
        m = Chem.MolFromSmiles("c1ccccc1Cl")
        assert h.HasMatch(m)
        res = root.GetMatches(m)
        self.assertEquals(len(res), 2)
        m = Chem.MolFromSmiles("c1ccccc1Br")
        assert h.HasMatch(m)
        res = root.GetMatches(m)
        self.assertEquals(len(res), 3)
        self.assertEquals([match.filterMatch.GetName() for match in res],
                          ['Halogen.Aromatic', 'Halogen.NotFluorine.Aromatic',
                           'Halogen.Bromine.Aromatic'])
        m = Chem.MolFromSmiles("c1ccccc1F")
        assert h.HasMatch(m)
        res = root.GetMatches(m)
        self.assertEquals(len(res), 1)
        self.assertEquals([match.filterMatch.GetName() for match in res], ['Halogen.Aromatic'])
        m = Chem.MolFromSmiles("CBr")
        assert h.HasMatch(m)
        res = root.GetMatches(m)
        self.assertEquals([match.filterMatch.GetName() for match in res],
                          ['Halogen.NotFluorine.Aliphatic', 'Halogen.Bromine.Aliphatic'])

    def testFunctionalGroupHierarchy(self):
        # Built-in functional-group hierarchy, with and without serialization.
        fc = FilterCatalog.GetFunctionalGroupHierarchy()
        matches = [(Chem.MolFromSmiles("CCl"), ['Halogen.Aliphatic', 'Halogen.NotFluorine.Aliphatic']),
                   (Chem.MolFromSmiles("c1ccccc1Cl"),
                    ['Halogen.Aromatic', 'Halogen.NotFluorine.Aromatic']),
                   (Chem.MolFromSmiles("c1ccccc1F"), ['Halogen.Aromatic']), (
                       Chem.MolFromSmiles("CBr"), ['Halogen.Aliphatic', 'Halogen.NotFluorine.Aliphatic',
                                                   'Halogen.Bromine.Aliphatic'])]
        catalogs = [fc]
        if FilterCatalog.FilterCatalogCanSerialize():
            pickle = fc.Serialize()
            fc2 = FilterCatalog.FilterCatalog(pickle)
            catalogs.append(fc2)
        for fc in catalogs:
            # test GetMatches API
            for mol, res in matches:
                entries = list(fc.GetMatches(mol))
                for entry in entries:
                    hits = [match.filterMatch.GetName() for match in entry.GetFilterMatches(mol)]
                    self.assertEquals(res, hits)
            # test GetFilterMatches API
            for mol, res in matches:
                self.assertEquals(res, [match.filterMatch.GetName() for match in fc.GetFilterMatches(mol)])

    def testFlattenedFunctionalGroupHierarchy(self):
        # Flattened hierarchy: every ancestor label is reported, optionally
        # with normalized (lower-case) names.
        queryDefs = FilterCatalog.GetFlattenedFunctionalGroupHierarchy()
        items = sorted(queryDefs.items())
        matches = [(Chem.MolFromSmiles("CCl"), ['Halogen', 'Halogen.Aliphatic', 'Halogen.NotFluorine',
                                                'Halogen.NotFluorine.Aliphatic']),
                   (Chem.MolFromSmiles("c1ccccc1Cl"),
                    ['Halogen', 'Halogen.Aromatic', 'Halogen.NotFluorine',
                     'Halogen.NotFluorine.Aromatic']), (Chem.MolFromSmiles("c1ccccc1F"),
                                                        ['Halogen', 'Halogen.Aromatic']),
                   (Chem.MolFromSmiles("CBr"), ['Halogen',
                                                'Halogen.Aliphatic',
                                                'Halogen.Bromine',
                                                'Halogen.Bromine.Aliphatic',
                                                'Halogen.NotFluorine',
                                                'Halogen.NotFluorine.Aliphatic', ])]
        # test the normalized groups
        for mol, res in matches:
            hits = [name for name, pat in items if mol.HasSubstructMatch(pat)]
            self.assertEquals(hits, res)
        queryDefs = FilterCatalog.GetFlattenedFunctionalGroupHierarchy(normalized=True)
        items = sorted(queryDefs.items())
        matches = [(Chem.MolFromSmiles("CCl"), ['halogen', 'halogen.aliphatic', 'halogen.notfluorine',
                                                'halogen.notfluorine.aliphatic']),
                   (Chem.MolFromSmiles("c1ccccc1Cl"),
                    ['halogen', 'halogen.aromatic', 'halogen.notfluorine',
                     'halogen.notfluorine.aromatic']), (Chem.MolFromSmiles("c1ccccc1F"),
                                                        ['halogen', 'halogen.aromatic']),
                   (Chem.MolFromSmiles("CBr"), ['halogen',
                                                'halogen.aliphatic',
                                                'halogen.bromine',
                                                'halogen.bromine.aliphatic',
                                                'halogen.notfluorine',
                                                'halogen.notfluorine.aliphatic', ])]
        for mol, res in matches:
            hits = [name for name, pat in items if mol.HasSubstructMatch(pat)]
            self.assertEquals(hits, res)
if __name__ == '__main__':
unittest.main()
|
rvianello/rdkit
|
Code/GraphMol/FilterCatalog/Wrap/rough_test.py
|
Python
|
bsd-3-clause
| 20,042
|
[
"RDKit"
] |
558b6081df2d543882da25bbca550e20d1c880a51ed41b1f5ec0f7fe52d72ef9
|
"""
Single Bubble Model: Droplet simulations
=========================================
Use the ``TAMOC`` `single_bubble_model` to simulate the trajectory of a light
oil droplet rising through the water column. This script demonstrates the
typical steps involved in running the single bubble model.
It uses the ambient data stored in the file `../test/output/test_bm54.nc`,
created by the `test_ambient` module. Please make sure all tests have
passed before running this script or modify the script to use a different
source of ambient data.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import seawater
from tamoc import single_bubble_model
import numpy as np
if __name__ == '__main__':

    # Open an ambient profile object from the netCDF dataset
    nc = '../../test/output/test_bm54.nc'
    bm54 = ambient.Profile(nc, chem_names='all')
    bm54.close_nc()

    # Initialize a single_bubble_model.Model object with this data
    sbm = single_bubble_model.Model(bm54)

    # Create a light oil droplet particle to track
    composition = ['benzene', 'toluene', 'ethylbenzene']
    drop = dbm.FluidParticle(composition, fp_type=1.)

    # Set the mole fractions of each component at release.
    mol_frac = np.array([0.4, 0.3, 0.3])

    # Specify the remaining particle initial conditions
    de = 0.02            # initial droplet diameter (presumably m -- confirm)
    z0 = 1000.           # release depth (presumably m -- confirm)
    T0 = 273.15 + 30.    # release temperature in K (30 deg C)

    # Simulate the trajectory through the water column and plot the results
    sbm.simulate(drop, z0, de, mol_frac, T0, K_T=1, fdis=1e-8, delta_t=10.)
    sbm.post_process()

    # Save the simulation to a netCDF file.
    # NOTE(review): the description strings below mention "./drops.py" while
    # the output files are "./drop.*" -- confirm the intended script name.
    sbm.save_sim('./drop.nc', '../../test/output/test_bm54.nc',
                 'Results of ./drops.py script')

    # Save the data for importing into Matlab
    sbm.save_txt('./drop.txt', '../../test/output/test_bm54.nc',
                 'Results of ./drops.py script')
|
socolofs/tamoc
|
bin/sbm/drop.py
|
Python
|
mit
| 2,035
|
[
"NetCDF"
] |
f516a16c8fb5bc50c270754b2d2925c72757a63258f90f6484e47f59b8445435
|
#! /usr/bin/env python
import sys
import os
import glob
import re
import yaml
from collections import namedtuple
def expandOsPath(path):
    """Expand shell variables and a leading "~" in a path.

    Arguments:
    - `path`: path string
    """
    vars_expanded = os.path.expandvars(path)
    return os.path.expanduser(vars_expanded)
def genFilesWithPattern(pathList, Pattern):
    """Glob for files matching ``Pattern`` under the path built from ``pathList``.

    The components of `pathList` are joined with `Pattern`, expanded
    (user "~" and shell variables) and globbed.

    Arguments:
    - `pathList`: list of path components; left unmodified
    - `Pattern`: glob pattern like config["input_files"]

    Returns the (possibly empty) list of matching paths.
    """
    # Bug fix: the original did pathList.append(Pattern), mutating the
    # caller's list so repeated calls accumulated stale patterns.
    # Join a fresh list instead, leaving the argument untouched.
    components = list(pathList) + [Pattern]
    Files = glob.glob(expandOsPath(os.path.join(*components)))
    return Files
def parse_bowtie1_log(s):
    """
    Parse a bowtie1 alignment log.

    Arguments:
    - `s`: iterable of log lines (e.g. from readlines()).

    Returns a namedtuple with string fields `total_reads`,
    `unique_mapped_reads` and `suppressed_multiple_mapped_reads`.
    Any field whose line is absent from the log is reported as "NA";
    the original raised UnboundLocalError on malformed/empty logs.
    """
    # Raw strings avoid invalid-escape warnings for \# \( \) on Python 3.
    total_pattern = re.compile(r"""\#\s+reads\s+processed:\s(?P<total_reads>.+)\s*""",  # total_reads
                               re.VERBOSE)
    unique_mapped_pattern = re.compile(r"""\#\s+reads\s+with\s+at\s+least\s+one\s+reported\s+alignment:\s+(?P<unique_mapped_reads>\S+)\s+\(\S+\)""",  # unique_mapped_reads
                                       re.VERBOSE)
    multiple_mapped_pattern = re.compile(r"""\#\s+reads\s+with\s+alignments\s+suppressed\s+due\s+to\s+-m:\s+(?P<multiple_mapped_reads>\d+)\s+\(\S+\)""",  # multiple_mapped_reads
                                         re.VERBOSE)
    # Default to "NA" so a missing line cannot crash the summary script;
    # "NA" matches what the writer emits for absent stats.
    total_reads = "NA"
    unique_mapped_reads = "NA"
    multiple_mapped_reads = "NA"
    for line in s:
        match = total_pattern.match(line)
        if match:
            total_reads = match.group("total_reads")
        match = unique_mapped_pattern.match(line)
        if match:
            unique_mapped_reads = match.group("unique_mapped_reads")
        match = multiple_mapped_pattern.match(line)
        if match:
            multiple_mapped_reads = match.group("multiple_mapped_reads")
    res = namedtuple('res', ['total_reads', 'unique_mapped_reads', 'suppressed_multiple_mapped_reads'])
    return res(total_reads=total_reads,
               unique_mapped_reads=unique_mapped_reads,
               suppressed_multiple_mapped_reads=multiple_mapped_reads)
def parse_bowtie2_log(s):
    """
    Parse a bowtie2 alignment log.

    Arguments:
    - `s`: iterable of log lines (e.g. from readlines()).

    Returns a namedtuple with string fields `total_reads`,
    `unique_mapped_reads` and `multiple_mapped_reads`.
    Any field whose line is absent from the log is reported as "NA";
    the original raised UnboundLocalError on malformed/empty logs.
    """
    total_pattern = re.compile(r"""(?P<total_reads>\d+)\s+reads;\s+of\s+these:""",  # total_reads
                               re.VERBOSE)
    unique_mapped_pattern = re.compile(r"""\s*(?P<unique_mapped_reads>\d+)\s+\(\S+\).+exactly\s+1\s+time""",  # unique_mapped_reads
                                       re.VERBOSE)
    multiple_mapped_pattern = re.compile(r"""\s+(?P<multiple_mapped_reads>\d+)\s+\(\S+\).+aligned\s+>1\s+times""",  # multiple_mapped_reads
                                         re.VERBOSE)
    # Alternative one-line summary ("N lines... H headers, U unique, M multi,
    # X unmapped."); headers are subtracted to get the read count.
    combined_pattern = re.compile(r"""(\d+)\slines\.+\s(\d+)\sheaders\,\s(\d+)\sunique\,\s(\d+)\smulti\,\s(\d+)\sunmapped\.""")
    # Default to "NA" so a missing line cannot crash the summary script.
    total_reads = "NA"
    unique_mapped_reads = "NA"
    multiple_mapped_reads = "NA"
    for line in s:
        match = total_pattern.match(line)
        if match:
            total_reads = match.group("total_reads")
        match = unique_mapped_pattern.match(line)
        if match:
            unique_mapped_reads = match.group("unique_mapped_reads")
        match = multiple_mapped_pattern.match(line)
        if match:
            multiple_mapped_reads = match.group("multiple_mapped_reads")
        match = combined_pattern.match(line)
        if match:
            total_reads = str(int(match.group(1)) - int(match.group(2)))
            unique_mapped_reads = match.group(3)
            multiple_mapped_reads = match.group(4)
    res = namedtuple('res', ['total_reads', 'unique_mapped_reads', 'multiple_mapped_reads'])
    return res(total_reads=total_reads,
               unique_mapped_reads=unique_mapped_reads,
               multiple_mapped_reads=multiple_mapped_reads)
def parse_rmdup_log(s):
    """
    Parse a `samtools rmdup` log.

    Arguments:
    - `s`: iterable of log lines (e.g. from readlines()).

    Returns a namedtuple with string field `dup_reads`, the number of
    duplicate reads removed, or "NA" when no matching line is found
    (the original raised UnboundLocalError in that case).
    """
    pattern = re.compile(r'\[bam_rmdupse_core\]\s+(?P<dup_reads>\d+)\s/\s\d+', re.VERBOSE)
    dup_reads = "NA"
    for line in s:
        match = pattern.match(line)
        if match:
            dup_reads = match.group("dup_reads")
    res = namedtuple('res', ['dup_reads'])
    return res(dup_reads=dup_reads)
def parse_phantomPeak_log(s):
    """
    Parse a phantomPeak (SPP cross-correlation) log for quality metrics.

    Arguments:
    - `s`: iterable of log lines (e.g. from readlines()).

    Returns a namedtuple with string fields `NSC` and `RSC`; either is
    "NA" when its line is absent (the original raised UnboundLocalError).

    The leftover debug `print` statements (marked "#eddamend") were
    removed: they were Python-2-only syntax and polluted stdout of the
    summary script.
    """
    NSC_pattern = re.compile(r'.*\(NSC\)\s*(?P<NSC>[+-]*\d*\.\d*).+', re.VERBOSE)
    RSC_pattern = re.compile(r'.*\(RSC\)\s*(?P<RSC>[+-]*\d*\.\d*).+', re.VERBOSE)
    NSC = "NA"
    RSC = "NA"
    for line in s:
        match = NSC_pattern.match(line)
        if match:
            NSC = match.group("NSC")
        match = RSC_pattern.match(line)
        if match:
            RSC = match.group("RSC")
    res = namedtuple('res', ['NSC', 'RSC'])
    return res(NSC=NSC, RSC=RSC)
def getSummaryFiles(input_type, config, search_paths):
    """
    Get all summary files of a given type under the project data folder
    and under each of the listed sub-folders.

    input_type: file-name suffix, e.g. ".alignment.log".
    config: configuration dict loaded from the pipeline yaml.
    search_paths: sub-directories of the data folder to search as well.

    Returns a list of matching file paths.
    """
    pattern = "*" + input_type
    base = [config["project_dir"], config["data_dir"]]
    # Fresh list per call so the helper's internals can't leak between
    # searches; first the data folder itself, then each sub-folder.
    found = genFilesWithPattern(list(base), pattern)
    for sub in search_paths:
        found.extend(genFilesWithPattern(base + [sub], pattern))
    return found
def getFileId(file_basename):
    """
    Remove any known summary-file suffix from `file_basename` to obtain
    the sample/file id.

    Arguments:
    - `file_basename`: base name of a summary log file.

    Returns the name with all recognized suffixes stripped; a name with
    no recognized suffix is returned unchanged.
    """
    # The original assigned a shorter `suffixes` list that was immediately
    # overwritten (dead code); only the complete list is kept.
    suffixes = ['.fastq.alignment.log', '.fq.alignment.log', '.gz.alignment.log', '.bam.rmdup.log', '_rmdup.bam.phantomPeak.log']
    for suffix in suffixes:
        file_basename = file_basename.replace(suffix, '')
    return file_basename
## Search subdirectories under data folder.
#search_paths = ["fastq", "rmdup"]
search_paths = ["Log"]
## Used for final results: maps sample/file id -> {stat name: value}.
summary_dict = {}
## Load the same config yaml file of the pipeline.
## NOTE(review): yaml.load without an explicit Loader can construct
## arbitrary objects; yaml.safe_load would be preferable if the config
## file is ever untrusted.
config_name = sys.argv[1]
config_f = open(config_name, "r")
config = yaml.load(config_f)
config_f.close()
if config["aligner"] == "bowtie":
    ## To be used in debug
    # input_files = {".alignment.log":("total_reads", "unique_mapped_reads")}
    ## Summary files used for summarizing.
    ## NOTE(review): ("dup_reads") is a plain string, not a 1-tuple;
    ## harmless because the dict values (summary_types) are never used below.
    input_files = {
        ".alignment.log":("total_reads", "unique_mapped_reads", "suppressed_multiple_mapped_reads"),
        ".rmdup.log":("dup_reads"),
        ".phantomPeak.log":("NSC", "RSC")
    }
    ## Decide the parser here by a dict (suffix -> parser function).
    parser_dict = {
        ".alignment.log": parse_bowtie1_log,
        ".rmdup.log": parse_rmdup_log,
        ".phantomPeak.log": parse_phantomPeak_log
    }
    ## Used to assign the output field order in the output file.
    output_header = [
        "sample",
        "total_reads",
        "unique_mapped_reads",
        "suppressed_multiple_mapped_reads",
        "dup_reads",
        "NSC",
        "RSC"]
elif config["aligner"] == "bowtie2":
    ## to be used in debug
    # input_files = {".alignment.log":("total_reads", "unique_mapped_reads", "multiple_mapped_reads")}
    ## Summary files used for summarizing.
    input_files = {
        ".alignment.log":("total_reads", "unique_mapped_reads", "multiple_mapped_reads"),
        ".rmdup.log":("dup_reads"),
        ".phantomPeak.log":("NSC", "RSC")
    }
    ## Decide the parser here by a dict (suffix -> parser function).
    parser_dict = {
        ".alignment.log": parse_bowtie2_log,
        ".rmdup.log": parse_rmdup_log,
        ".phantomPeak.log": parse_phantomPeak_log
    }
    ## Used to assign the output field order in the output file.
    output_header = [
        "sample",
        "total_reads",
        "unique_mapped_reads",
        "multiple_mapped_reads",
        "dup_reads",
        "NSC",
        "RSC"]
## Scan the files to summarize the pipeline.
for input_type, summary_types in input_files.items():
    summary_files = getSummaryFiles(input_type, config, search_paths)
    if len(summary_files) != 0:
        for summary_file in summary_files:
            file_id = getFileId(os.path.basename(summary_file))
            # Collapse the ".uniqmapped" marker so every log of a sample maps
            # to the same id.  NOTE(review): the unescaped '.' matches any
            # character; presumably only literal ".uniqmapped" occurs -- verify.
            file_id = re.sub(r'.uniqmapped', r'', file_id)
            if file_id not in summary_dict:
                summary_dict[file_id] = {'sample':file_id}
            # NOTE(review): file() is the Python-2 built-in open; this script
            # targets Python 2.
            input_file = file(summary_file)
            lines = input_file.readlines()
            input_file.close()
            ## Here the value of the dict is the parser function!
            res = parser_dict[input_type](lines)
            ## Unpack the parser's namedtuple into the per-sample dict,
            ## extending the header with any field not already listed.
            for i in range(len(res._fields)):
                if res._fields[i] not in output_header:
                    output_header.append(res._fields[i])
                summary_dict[file_id][res._fields[i]] = res[i]
## Output to file; the column order is decided by output_header.
output_file = file(config["project_name"]+"_summary_stats.txt", "w")
header_line = "\t".join(output_header) + "\n"
output_file.write(header_line)
for sample in summary_dict.keys():
    output_list = []
    for stat in output_header:
        # Stats missing for this sample are written as "NA".
        if stat in summary_dict[sample]:
            output_list.append(summary_dict[sample][stat])
        else:
            output_list.append("NA")
    line = "\t".join(output_list) + "\n"
    output_file.write(line)
output_file.close()
|
shenlab-sinai/chip-seq_preprocess
|
project/script/results_parser.py
|
Python
|
gpl-2.0
| 8,835
|
[
"Bowtie"
] |
da79b03f4af65a920445547b17ea2f8ad591ce1adeb4b9c837e5f1a276411569
|
"""
Acceptance tests for Studio related to the container page.
The container page is used both for display units, and for
displaying containers within units.
"""
from nose.plugins.attrib import attr
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.component_editor import ComponentEditorView
from ...pages.studio.html_component_editor import HtmlComponentEditorView
from ...pages.studio.utils import add_discussion, drag
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.staff_view import StaffPage
import datetime
from bok_choy.promise import Promise, EmptyPromise
from base_studio_test import ContainerBase
class NestedVerticalTest(ContainerBase):
    """
    Base fixture for the container-page tests below: builds a course with a
    "Test Container" vertical holding Group A, Group Empty and Group B, and
    records the display names / indices the subclasses assert against.
    """
    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with nested verticals.
        """
        # Display names referenced in the subclasses' expected orderings.
        self.container_title = ""
        self.group_a = "Group A"
        self.group_b = "Group B"
        self.group_empty = "Group Empty"
        self.group_a_item_1 = "Group A Item 1"
        self.group_a_item_2 = "Group A Item 2"
        self.group_b_item_1 = "Group B Item 1"
        self.group_b_item_2 = "Group B Item 2"
        # Drag-handle indices, in document order of the container page.
        self.group_a_handle = 0
        self.group_a_item_1_handle = 1
        self.group_a_item_2_handle = 2
        self.group_empty_handle = 3
        self.group_b_handle = 4
        self.group_b_item_1_handle = 5
        self.group_b_item_2_handle = 6
        # Action-menu indices for duplicate/delete on Group A's items.
        self.group_a_item_1_action_index = 0
        self.group_a_item_2_action_index = 1
        # Label templates used by the duplicate / add-component tests.
        self.duplicate_label = "Duplicate of '{0}'"
        self.discussion_label = "Discussion"
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('vertical', 'Test Container').add_children(
                            XBlockFixtureDesc('vertical', 'Group A').add_children(
                                XBlockFixtureDesc('html', self.group_a_item_1),
                                XBlockFixtureDesc('html', self.group_a_item_2)
                            ),
                            XBlockFixtureDesc('vertical', 'Group Empty'),
                            XBlockFixtureDesc('vertical', 'Group B').add_children(
                                XBlockFixtureDesc('html', self.group_b_item_1),
                                XBlockFixtureDesc('html', self.group_b_item_2)
                            )
                        )
                    )
                )
            )
        )
@attr('shard_1')
class DragAndDropTest(NestedVerticalTest):
    """
    Tests of reordering within the container page.
    """
    def drag_and_verify(self, source, target, expected_ordering):
        """
        Drag the element at handle index `source` onto `target`, then verify
        the page ordering against `expected_ordering`.
        """
        # Plain one-argument lambda: the original `lambda (container):` used
        # Python-2-only tuple-parameter syntax (removed by PEP 3113); this
        # form behaves identically and also parses under Python 3.
        self.do_action_and_verify(
            lambda container: drag(container, source, target, 40),
            expected_ordering
        )
    def test_reorder_in_group(self):
        """
        Drag Group A Item 2 before Group A Item 1.
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2, self.group_a_item_1]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_2_handle, self.group_a_item_1_handle, expected_ordering)
    def test_drag_to_top(self):
        """
        Drag Group A Item 1 to top level (outside of Group A).
        """
        expected_ordering = [{self.container_title: [self.group_a_item_1, self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_a_item_1_handle, self.group_a_handle, expected_ordering)
    def test_drag_into_different_group(self):
        """
        Drag Group B Item 1 into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_b_item_1, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_item_1_handle, self.group_a_item_1_handle, expected_ordering)
    def test_drag_group_into_group(self):
        """
        Drag Group B into Group A (first element).
        """
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty]},
                             {self.group_a: [self.group_b, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.drag_and_verify(self.group_b_handle, self.group_a_item_1_handle, expected_ordering)
    def test_drag_after_addition(self):
        """
        Add some components and then verify that drag and drop still works.
        """
        group_a_menu = 0
        def add_new_components_and_rearrange(container):
            # Add a discussion component to Group A (the original comment
            # said "video", but add_discussion adds a discussion block).
            add_discussion(container, group_a_menu)
            # Duplicate the first item in Group A
            container.duplicate(self.group_a_item_1_action_index)
            first_handle = self.group_a_item_1_handle
            # Drag newly added discussion component to top.
            drag(container, first_handle + 3, first_handle, 40)
            # Drag duplicated component to top.
            drag(container, first_handle + 2, first_handle, 40)
        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering)
@attr('shard_1')
class AddComponentTest(NestedVerticalTest):
    """
    Tests of adding a component to the container page.
    """
    def add_and_verify(self, menu_index, expected_ordering):
        """
        Add a discussion via the add-component menu at `menu_index`, then
        verify the page ordering against `expected_ordering`.
        """
        # Plain one-argument lambda: the original `lambda (container):` used
        # Python-2-only tuple-parameter syntax (removed by PEP 3113).
        self.do_action_and_verify(
            lambda container: add_discussion(container, menu_index),
            expected_ordering
        )
    def test_add_component_in_group(self):
        """Add a discussion at the end of Group B."""
        group_b_menu = 2
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2, self.discussion_label]},
                             {self.group_empty: []}]
        self.add_and_verify(group_b_menu, expected_ordering)
    def test_add_component_in_empty_group(self):
        """Add a discussion to the empty group."""
        group_empty_menu = 1
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: [self.discussion_label]}]
        self.add_and_verify(group_empty_menu, expected_ordering)
    def test_add_component_in_container(self):
        """Add a discussion at the top level of the container."""
        container_menu = 3
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b, self.discussion_label]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.add_and_verify(container_menu, expected_ordering)
@attr('shard_1')
class DuplicateComponentTest(NestedVerticalTest):
    """
    Tests of duplicating a component on the container page.
    """
    def duplicate_and_verify(self, source_index, expected_ordering):
        """
        Duplicate the component at action index `source_index`, then verify
        the page ordering against `expected_ordering`.
        """
        # Plain one-argument lambda: the original `lambda (container):` used
        # Python-2-only tuple-parameter syntax (removed by PEP 3113).
        self.do_action_and_verify(
            lambda container: container.duplicate(source_index),
            expected_ordering
        )
    def test_duplicate_first_in_group(self):
        """Duplicate the first item of Group A; the copy appears just after it."""
        duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, duplicate_label, self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_1_action_index, expected_ordering)
    def test_duplicate_second_in_group(self):
        """Duplicate the second item of Group A; the copy appears at the end."""
        duplicate_label = self.duplicate_label.format(self.group_a_item_2)
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_1, self.group_a_item_2, duplicate_label]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        self.duplicate_and_verify(self.group_a_item_2_action_index, expected_ordering)
    def test_duplicate_the_duplicate(self):
        """Duplicating a duplicate nests the label ("Duplicate of 'Duplicate of ...'")."""
        first_duplicate_label = self.duplicate_label.format(self.group_a_item_1)
        second_duplicate_label = self.duplicate_label.format(first_duplicate_label)
        expected_ordering = [
            {self.container_title: [self.group_a, self.group_empty, self.group_b]},
            {self.group_a: [self.group_a_item_1, first_duplicate_label, second_duplicate_label, self.group_a_item_2]},
            {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
            {self.group_empty: []}
        ]
        def duplicate_twice(container):
            container.duplicate(self.group_a_item_1_action_index)
            container.duplicate(self.group_a_item_1_action_index + 1)
        self.do_action_and_verify(duplicate_twice, expected_ordering)
@attr('shard_1')
class DeleteComponentTest(NestedVerticalTest):
    """
    Tests of deleting a component from the container page.
    """
    def delete_and_verify(self, source_index, expected_ordering):
        """
        Delete the component at delete-icon index `source_index`, then verify
        the page ordering against `expected_ordering`.
        """
        # Plain one-argument lambda: the original `lambda (container):` used
        # Python-2-only tuple-parameter syntax (removed by PEP 3113).
        self.do_action_and_verify(
            lambda container: container.delete(source_index),
            expected_ordering
        )
    def test_delete_first_in_group(self):
        """Delete the first item of Group A."""
        expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},
                             {self.group_a: [self.group_a_item_2]},
                             {self.group_b: [self.group_b_item_1, self.group_b_item_2]},
                             {self.group_empty: []}]
        # Group A itself has a delete icon now, so item_1 is index 1 instead of 0.
        group_a_item_1_delete_index = 1
        self.delete_and_verify(group_a_item_1_delete_index, expected_ordering)
@attr('shard_1')
class EditContainerTest(NestedVerticalTest):
    """
    Tests of editing a container.
    """
    def modify_display_name_and_verify(self, component):
        """
        Change the component's display name through the editor dialog and
        check that the new name is reflected on the page.
        """
        new_name = 'modified'
        # The name must actually differ beforehand, or the test proves nothing.
        self.assertNotEqual(component.name, new_name)
        component.edit()
        editor = ComponentEditorView(self.browser, component.locator)
        editor.set_field_value_and_save('Display Name', new_name)
        self.assertEqual(component.name, new_name)
    def test_edit_container_on_unit_page(self):
        """
        Test the "edit" button on a container appearing on the unit page.
        """
        unit_page = self.go_to_unit_page()
        self.modify_display_name_and_verify(unit_page.xblocks[1])
    def test_edit_container_on_container_page(self):
        """
        Test the "edit" button on a container appearing on the container page.
        """
        nested_container = self.go_to_nested_container_page()
        self.modify_display_name_and_verify(nested_container)
@attr('shard_1')
class UnitPublishingTest(ContainerBase):
"""
Tests of the publishing control and related widgets on the Unit page.
"""
PUBLISHED_STATUS = "Publishing Status\nPublished (not yet released)"
PUBLISHED_LIVE_STATUS = "Publishing Status\nPublished and Live"
DRAFT_STATUS = "Publishing Status\nDraft (Unpublished changes)"
LOCKED_STATUS = "Publishing Status\nVisible to Staff Only"
RELEASE_TITLE_RELEASED = "RELEASED:"
RELEASE_TITLE_RELEASE = "RELEASE:"
LAST_PUBLISHED = 'Last published'
LAST_SAVED = 'Draft saved on'
def populate_course_fixture(self, course_fixture):
"""
Sets up a course structure with a unit and a single HTML child.
"""
self.html_content = '<p><strong>Body of HTML Unit.</strong></p>'
self.courseware = CoursewarePage(self.browser, self.course_id)
past_start_date = datetime.datetime(1974, 6, 22)
self.past_start_date_text = "Jun 22, 1974 at 00:00 UTC"
future_start_date = datetime.datetime(2100, 9, 13)
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Test html', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unlocked Section',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('problem', '<problem></problem>', data=self.html_content)
)
)
),
XBlockFixtureDesc('chapter', 'Section With Locked Unit').add_children(
XBlockFixtureDesc(
'sequential',
'Subsection With Locked Unit',
metadata={'start': past_start_date.isoformat()}
).add_children(
XBlockFixtureDesc(
'vertical',
'Locked Unit',
metadata={'visible_to_staff_only': True}
).add_children(
XBlockFixtureDesc('discussion', '', data=self.html_content)
)
)
),
XBlockFixtureDesc(
'chapter',
'Unreleased Section',
metadata={'start': future_start_date.isoformat()}
).add_children(
XBlockFixtureDesc('sequential', 'Unreleased Subsection').add_children(
XBlockFixtureDesc('vertical', 'Unreleased Unit')
)
)
)
def test_publishing(self):
"""
Scenario: The publish title changes based on whether or not draft content exists
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the title in the Publish information box is "Published and Live"
And the Publish button is disabled
And the last published text contains "Last published"
And the last saved text contains "Last published"
And when I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the last saved text contains "Draft saved on"
And the Publish button is enabled
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And the last published text contains "Last published"
And the last saved text contains "Last published"
"""
unit = self.go_to_unit_page()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
# Start date set in course fixture to 1970.
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, 'Jan 01, 1970 at 00:00 UTC\nwith Section "Test Section"'
)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
# Should not be able to click on Publish action -- but I don't know how to test that it is not clickable.
# TODO: continue discussion with Muhammad and Jay about this.
# Add a component to the page so it will have unpublished changes.
add_discussion(unit)
self._verify_publish_title(unit, self.DRAFT_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_SAVED)
unit.publish_action.click()
unit.wait_for_ajax()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self._verify_last_published_and_saved(unit, self.LAST_PUBLISHED, self.LAST_PUBLISHED)
def test_discard_changes(self):
"""
Scenario: The publish title changes after "Discard Changes" is clicked
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the Discard Changes button is disabled
And I add a component to the unit
Then the title in the Publish information box is "Draft (Unpublished changes)"
And the Discard Changes button is enabled
And when I click the Discard Changes button
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
add_discussion(unit)
self._verify_publish_title(unit, self.DRAFT_STATUS)
unit.discard_changes()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
def test_view_live_no_changes(self):
"""
Scenario: "View Live" shows published content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
Then the View Live button is enabled
And when I click on the View Live button
Then I see the published content in LMS
"""
unit = self.go_to_unit_page()
self._view_published_version(unit)
self._verify_components_visible(['html'])
def test_view_live_changes(self):
"""
Scenario: "View Live" does not show draft content in LMS
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click on the View Live button
Then I see the published content in LMS
And I do not see the unpublished component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
self._view_published_version(unit)
self._verify_components_visible(['html'])
self.assertEqual(self.html_content, self.courseware.xblock_component_html_content(0))
def test_view_live_after_publish(self):
"""
Scenario: "View Live" shows newly published content
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And when I add a component to the unit
And when I click the Publish button
And when I click on the View Live button
Then I see the newly published component
"""
unit = self.go_to_unit_page()
add_discussion(unit)
unit.publish_action.click()
self._view_published_version(unit)
self._verify_components_visible(['html', 'discussion'])
def test_initially_unlocked_visible_to_students(self):
"""
Scenario: An unlocked unit with release date in the past is visible to students
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
Then the unit has a warning that it is visible to students
And it is marked as "RELEASED" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASED, self.past_start_date_text + '\n' + 'with Section "Unlocked Section"'
)
self._view_published_version(unit)
self._verify_student_view_visible(['problem'])
def test_locked_visible_to_staff_only(self):
"""
Scenario: After locking a unit with release date in the past, it is only visible to staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I select "Hide from students"
Then the unit does not have a warning that it is visible to students
And the unit does not display inherited staff lock
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Unlocked Section", "Unlocked Subsection", "Unlocked Unit")
checked = unit.toggle_staff_lock()
self.assertTrue(checked)
self.assertFalse(unit.currently_visible_to_students)
self.assertFalse(unit.shows_inherited_staff_lock())
self._verify_publish_title(unit, self.LOCKED_STATUS)
self._view_published_version(unit)
# Will initially be in staff view, locked component should be visible.
self._verify_components_visible(['problem'])
# Switch to student view and verify not visible
self._verify_student_view_locked()
def test_initially_locked_not_visible_to_students(self):
"""
Scenario: A locked unit with release date in the past is not visible to students
Given I have a published locked unit with release date in the past
When I go to the unit page in Studio
Then the unit does not have a warning that it is visible to students
And it is marked as "RELEASE" with release date in the past visible
And when I click on the View Live Button
And when I view the course as a student
Then I do not see any content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
self._verify_publish_title(unit, self.LOCKED_STATUS)
self.assertFalse(unit.currently_visible_to_students)
self._verify_release_date_info(
unit, self.RELEASE_TITLE_RELEASE,
self.past_start_date_text + '\n' + 'with Subsection "Subsection With Locked Unit"'
)
self._view_published_version(unit)
self._verify_student_view_locked()
def test_unlocked_visible_to_all(self):
"""
Scenario: After unlocking a unit with release date in the past, it is visible to both students and staff
Given I have a published unlocked unit with release date in the past
When I go to the unit page in Studio
And when I deselect "Hide from students"
Then the unit does have a warning that it is visible to students
And when I click on the View Live Button
Then I see the content in the unit when logged in as staff
And when I view the course as a student
Then I see the content in the unit
"""
unit = self.go_to_unit_page("Section With Locked Unit", "Subsection With Locked Unit", "Locked Unit")
checked = unit.toggle_staff_lock()
self.assertFalse(checked)
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self.assertTrue(unit.currently_visible_to_students)
self._view_published_version(unit)
# Will initially be in staff view, components always visible.
self._verify_components_visible(['discussion'])
# Switch to student view and verify visible.
self._verify_student_view_visible(['discussion'])
def test_explicit_lock_overrides_implicit_subsection_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a subsection
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
subsection = self.outline.section_at(0).subsection_at(0)
unit = subsection.unit_at(0)
subsection.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_explicit_lock_overrides_implicit_section_lock_information(self):
"""
Scenario: A unit's explicit staff lock hides its inherited subsection staff lock information
Given I have a course with sections, subsections, and units
And I have enabled explicit staff lock on a section
When I visit the unit page
Then the unit page shows its inherited staff lock
And I enable explicit staff locking
Then the unit page does not show its inherited staff lock
And when I disable explicit staff locking
Then the unit page now shows its inherited staff lock
"""
self.outline.visit()
self.outline.expand_all_subsections()
section = self.outline.section_at(0)
unit = section.subsection_at(0).unit_at(0)
section.set_staff_lock(True)
unit_page = unit.go_to()
self._verify_explicit_lock_overrides_implicit_lock_information(unit_page)
def test_published_unit_with_draft_child(self):
"""
Scenario: A published unit with a draft child can be published
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of the only component
Then the content changes
And the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see the changed content in LMS
"""
modified_content = 'modified content'
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlComponentEditorView(self.browser, component.locator).set_content_and_save(modified_content)
self.assertEqual(component.student_content, modified_content)
self._verify_publish_title(unit, self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertTrue(modified_content in self.courseware.xblock_component_html_content(0))
def test_cancel_does_not_create_draft(self):
"""
Scenario: Editing a component and then canceling does not create a draft version (TNL-399)
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And edit the content of an HTML component and then press cancel
Then the content does not change
And the title in the Publish information box is "Published and Live"
And when I reload the page
Then the title in the Publish information box is "Published and Live"
"""
unit = self.go_to_unit_page()
component = unit.xblocks[1]
component.edit()
HtmlComponentEditorView(self.browser, component.locator).set_content_and_cancel("modified content")
self.assertEqual(component.student_content, "Body of HTML Unit.")
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self.browser.refresh()
unit.wait_for_page()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
def test_delete_child_in_published_unit(self):
"""
Scenario: A published unit can be published again after deleting a child
Given I have a published unit with no unpublished changes
When I go to the unit page in Studio
And delete the only component
Then the title in the Publish information box is "Draft (Unpublished changes)"
And when I click the Publish button
Then the title in the Publish information box is "Published and Live"
And when I click the View Live button
Then I see an empty unit in LMS
"""
unit = self.go_to_unit_page()
unit.delete(0)
self._verify_publish_title(unit, self.DRAFT_STATUS)
unit.publish_action.click()
unit.wait_for_ajax()
self._verify_publish_title(unit, self.PUBLISHED_LIVE_STATUS)
self._view_published_version(unit)
self.assertEqual(0, self.courseware.num_xblock_components)
def test_published_not_live(self):
    """
    Scenario: The publish title displays correctly for units that are not live
    Given I have a published unit with no unpublished changes that releases in the future
    When I go to the unit page in Studio
    Then the title in the Publish information box is "Published (not yet released)"
    And when I add a component to the unit
    Then the title in the Publish information box is "Draft (Unpublished changes)"
    And when I click the Publish button
    Then the title in the Publish information box is "Published (not yet released)"
    """
    unit_page = self.go_to_unit_page('Unreleased Section', 'Unreleased Subsection', 'Unreleased Unit')
    self._verify_publish_title(unit_page, self.PUBLISHED_STATUS)
    # Adding a component creates unpublished changes on the unit.
    add_discussion(unit_page)
    self._verify_publish_title(unit_page, self.DRAFT_STATUS)
    unit_page.publish_action.click()
    unit_page.wait_for_ajax()
    # Publishing restores the "not yet released" title, not "Live".
    self._verify_publish_title(unit_page, self.PUBLISHED_STATUS)
def _view_published_version(self, unit):
    """
    Open the published version of ``unit`` in LMS and wait for it to load.
    """
    unit.view_published_version()
    # Viewing the live version opens a second browser window.
    open_windows = len(self.browser.window_handles)
    self.assertEqual(open_windows, 2)
    self.courseware.wait_for_page()
def _verify_and_return_staff_page(self):
    """
    Check that the browser is on the LMS staff page and return the page object.
    """
    staff_page = StaffPage(self.browser, self.course_id)
    on_page_promise = EmptyPromise(staff_page.is_browser_on_page, 'Browser is on staff page in LMS')
    on_page_promise.fulfill()
    return staff_page
def _verify_student_view_locked(self):
    """
    Check that no components are rendered when viewing the unit as a student.
    """
    staff_page = self._verify_and_return_staff_page()
    # Switch from staff to student view before counting components.
    staff_page.toggle_staff_view()
    self.assertEqual(0, self.courseware.num_xblock_components)
def _verify_student_view_visible(self, expected_components):
    """
    Check that exactly the expected components render when viewing as a student.
    """
    staff_page = self._verify_and_return_staff_page()
    # Switch from staff to student view before inspecting the components.
    staff_page.toggle_staff_view()
    self._verify_components_visible(expected_components)
def _verify_components_visible(self, expected_components):
    """
    Check that each expected component type is visible, in order, with no extras.
    """
    expected_count = len(expected_components)
    self.assertEqual(expected_count, self.courseware.num_xblock_components)
    for position, expected_type in enumerate(expected_components):
        self.assertEqual(expected_type, self.courseware.xblock_component_type(position))
def _verify_release_date_info(self, unit, expected_title, expected_date):
    """
    Check the release title and release date shown in the publishing sidebar.
    """
    actual_title = unit.release_title
    actual_date = unit.release_date
    self.assertEqual(expected_title, actual_title)
    self.assertEqual(expected_date, actual_date)
def _verify_publish_title(self, unit, expected_title):
    """
    Wait until the publish title in the sidebar matches ``expected_title``.
    """
    def title_matches():
        # Promise expects (is_satisfied, value) tuples from its check.
        current_title = unit.publish_title
        return (current_title == expected_title, current_title)
    # NOTE: the failure message captures the title as seen at call time,
    # before any polling happens (same as the original behavior).
    failure_message = "Publish title incorrect. Found '" + unit.publish_title + "'"
    Promise(title_matches, failure_message).fulfill()
def _verify_last_published_and_saved(self, unit, expected_published_prefix, expected_saved_prefix):
    """
    Verify that the last-published and last-saved messages contain the
    given substrings, respectively.

    Uses ``assertIn`` rather than ``assertTrue(x in y)`` so that a failure
    reports both the expected substring and the actual message text,
    instead of an opaque "False is not true".
    """
    self.assertIn(expected_published_prefix, unit.last_published_text)
    self.assertIn(expected_saved_prefix, unit.last_saved_text)
def _verify_explicit_lock_overrides_implicit_lock_information(self, unit_page):
    """
    Check that explicitly staff-locking a unit with an inherited staff lock
    hides the inherited-lock message, and that unlocking restores it.
    """
    inherited_lock_shown = unit_page.shows_inherited_staff_lock
    self.assertTrue(inherited_lock_shown())
    # Explicit lock on: the inherited-lock info should disappear.
    unit_page.toggle_staff_lock(inherits_staff_lock=True)
    self.assertFalse(inherited_lock_shown())
    # Explicit lock off again: the inherited-lock info should return.
    unit_page.toggle_staff_lock(inherits_staff_lock=True)
    self.assertTrue(inherited_lock_shown())
# TODO: need to work with Jay/Christine to get testing of "Preview" working.
# def test_preview(self):
# unit = self.go_to_unit_page()
# add_discussion(unit)
# unit.preview()
# self.assertEqual(2, self.courseware.num_xblock_components)
# self.assertEqual('html', self.courseware.xblock_component_type(0))
# self.assertEqual('discussion', self.courseware.xblock_component_type(1))
|
UQ-UQx/edx-platform_lti
|
common/test/acceptance/tests/studio/test_studio_container.py
|
Python
|
agpl-3.0
| 35,463
|
[
"VisIt"
] |
195983e10d230d5018bf524140823d8a224ef304d41d6938375e8043f6b3a140
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.