text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
# encoding: utf-8
"""
Uniweb validator project
checksites.py
Copyright (c) 2009 Brian Shumate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os, sys
import httplib2
import urllib2
import re
from BeautifulSoup import BeautifulSoup
failed = 0
passed = 0
sitelist = "http://www.utexas.edu/world/univ/state/"
total = 0
w3chtml = "http://validator.w3.org/check?uri="
page = urllib2.urlopen(sitelist)
soup = BeautifulSoup(page)
for anchor in soup.findAll('a', href=re.compile(r'\.edu')):
h = httplib2.Http(".cache")
resp, content = h.request(w3chtml + anchor['href'], "GET")
validator = BeautifulSoup(content)
if validator.find('h3', 'invalid'):
print anchor['href'] + ' FAIL'
failed += 1
total += 1
else:
print anchor['href'] + ' PASS'
passed += 1
total += 1
print 'passed: ' + passed
print 'failed: ' + failed
print 'total: ' + total | brianshumate/uniweb | share/checksites.py | Python | bsd-2-clause | 1,913 | [
"Brian"
] | ea46d362aa0f0901fd9ac5e83849b29d2c79a281ae934cebd8c36c2ea39737e3 |
#!/usr/bin/env python
import numpy as np
import dendrogenous as dg
import ete3
from dendrogenous.settings import Settings as SettingsClass
import dendrogenous.utils
import os
import io
import re
import pymysql
import subprocess
import glob
import sqlite3
import logging
import numpy
from Bio import Phylo
from Bio import SeqIO
from Bio.Blast import NCBIXML
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
class Dendrogenous():
    """
    Tree generation class with methods for all the steps required to generate
    phylogenies.

    Relies on a pre-existing directory structure (settings.dir_paths).

    Typical use is to instantiate with a seed SeqRecord and Settings and call
    build_named_phylogeny(); every stage lazily builds the outputs of the
    stages it depends on via _dependency().
    """

    def __init__(self, seq_record, settings):
        """
        Validate inputs and precompute the per-stage output file paths.

        seq_record - Bio.SeqRecord seed sequence
        settings   - dendrogenous.settings.Settings instance
        Raises ValueError when either argument is of the wrong type.
        """
        if not isinstance(settings, SettingsClass):
            raise ValueError("Supplied settings is not Settings class")
        if not isinstance(seq_record, SeqRecord):
            raise ValueError("Supplied sequence is not SeqRecord class")

        self.settings = settings
        self.seq_name = seq_record.id

        self.logger = logging.getLogger(self.settings.logger_name)

        # Seed sequence as a FASTA string: reused as blastp stdin and appended
        # to the hit file so the seed appears in the final phylogeny.
        self.seed = ">{0}\n{1}\n".format(self.seq_name,
                                         seq_record.seq)

        # One output file per pipeline stage.
        self.seq_hits = os.path.join(self.settings.dir_paths['blast_hits'],
                                     self.seq_name) + '.fas'  # file containing blast hits
        self.aligned_seqs = os.path.join(self.settings.dir_paths['alignment'], self.seq_name + ".afa")
        self.masked_seqs = os.path.join(self.settings.dir_paths['mask'], self.seq_name + ".mask")
        self.phylogeny = os.path.join(self.settings.dir_paths['tree'], self.seq_name + ".tre")
        self.named_phylogeny = os.path.join(self.settings.dir_paths['named'], self.seq_name + ".named_tre")

        self.logger.info("{}: Class Initialised".format(self.seq_name))

    def _dependency(self, dependency_file, dependency_method):
        """
        Ensure dependency_file exists, calling dependency_method to create it
        when missing.
        """
        if not os.path.exists(dependency_file):
            dependency_method()

    def _check_output(self, expected_file):
        """
        Check that expected_file exists and is non-empty, raising a PipeError
        otherwise.  Empty files are moved aside into the ERROR directory so
        they do not poison later reruns.
        """
        if not os.path.exists(expected_file):
            raise dg.utils.PipeError("Expected file does not exist: {}".format(expected_file))
        if not os.path.getsize(expected_file) > 0:
            # Bug fix: this variable was previously assigned as
            # `maformed_file` and then referenced as `malformed_file`,
            # raising NameError instead of the intended PipeError.
            malformed_file = os.path.join(self.settings.dir_paths['ERROR'], os.path.basename(expected_file))
            os.rename(expected_file, malformed_file)
            raise dg.utils.PipeError("Expected file is empty: {}".format(expected_file))

    def _mask_check(self):
        """
        Return the length of the mask (the first sequence in the mask file).

        Designed for testing whether automated masking needs to be rerun
        with different settings.
        """
        # 'r' instead of 'rU': the 'U' mode flag was deprecated and removed
        # in Python 3.11; universal newlines are the text-mode default.
        with open(self.masked_seqs, 'r') as mask_fh:
            sample_seq = next(SeqIO.parse(mask_fh, "fasta"))
        return len(sample_seq.seq)

    def _get_species_name(self, leaf, db_cursor):
        """
        Convert a leaf name (a protein_ID) to "<species> [<protein_ID>]"
        using the open database cursor.  The seed leaf and unknown IDs get
        explicit placeholder labels.
        """
        if leaf == self.seq_name:
            return "SEED SEQUENCE [{}]".format(leaf)

        # Parameterised query rather than string formatting: protein IDs come
        # from external blast output, so avoid SQL injection.
        db_cursor.execute("SELECT species FROM cider WHERE protein_ID=%s",
                          (leaf,))
        returned_taxa_name = db_cursor.fetchone()
        if returned_taxa_name is None:
            self.logger.warning(
                "{0}: NameError | Protein ID ({1}) is missing species information".format(
                    self.seq_name, leaf))
            return 'UNKNOWN TAXA [{}]'.format(leaf)
        return "{0} [{1}]".format(returned_taxa_name[0], leaf)

    def _blast(self, genome):
        """
        Blast the seed sequence against one genome database using BLASTP,
        returning the XML (-outfmt 5) output as a string.
        """
        blast_settings = self.settings.blast_settings
        blastp_path = self.settings.binary_paths['blastp']
        blast_cmd = "{0} -db {1} " \
                    " -evalue {2} -max_target_seqs {3}" \
                    " -outfmt 5".format(blastp_path,
                                        genome,
                                        blast_settings['evalue'],
                                        blast_settings['num_seqs'])
        return dg.utils.execute_cmd(blast_cmd, input_str=self.seed, output_str=True)

    def _parse_blast(self, blast_output):
        """
        Parse blastp XML output and fetch the hit sequences from the MySQL
        database, returning a list of SeqRecords.
        """
        # dbconfig is a mandatory part of settings.json - used to connect to
        # the MySQL server.
        con = pymysql.connect(**self.settings.dbconfig)
        cur = con.cursor()

        # NCBIXML.parse yields one blast record per query in the XML output.
        blast_hits = NCBIXML.parse(io.StringIO(blast_output))

        hit_id_set = set()
        for blast_record in blast_hits:
            for alignment in blast_record.alignments:
                # One entry per hit regardless of how many HSPs it has.
                hit_id_set.add(alignment.hit_def)

        hit_records = []
        for hit_id in hit_id_set:
            # Parameterised query - hit IDs originate from external data.
            cur.execute("SELECT sequence FROM cider WHERE protein_ID=%s",
                        (hit_id,))
            sequence = cur.fetchone()
            if sequence is None:
                self.logger.warning("Blast hit protein_ID not in db: {}".format(hit_id))
                continue
            hit_records.append(SeqRecord(Seq(sequence[0], IUPAC.protein),
                                         id=hit_id, description=''))
        con.close()
        return hit_records

    def get_seqs(self):
        """
        Get similar sequences to the seed by blasting every configured genome
        and parsing the output.  Raises GetSeqFail when fewer than min_seqs
        hits are found (the hit file is moved to the blast_fail directory).
        """
        num_hits = 0
        # NOTE(review): append mode means rerunning after a partial failure
        # will duplicate hits in the file - confirm this is intended.
        with open(self.seq_hits, 'a') as out_fh:
            for genome in self.settings.genomes:
                blast_output = self._blast(genome)
                blast_hits = self._parse_blast(blast_output)
                num_hits += len(blast_hits)
                SeqIO.write(blast_hits, out_fh, 'fasta')
            # Add the seed itself to the hits so it appears in phylogenies.
            out_fh.write(self.seed)

        self._check_output(self.seq_hits)
        if num_hits < self.settings.minimums['min_seqs']:
            seq_fail_file = os.path.join(self.settings.dir_paths['blast_fail'], self.seq_name + ".insufficient_hits")
            os.rename(self.seq_hits, seq_fail_file)
            self._check_output(seq_fail_file)
            raise dg.utils.GetSeqFail()
        self.logger.info("{}: BLAST Seqs Created".format(self.seq_name))

    def align(self):
        """Align the blast hits using kalign."""
        self._dependency(self.seq_hits, self.get_seqs)

        kalign_path = self.settings.binary_paths['kalign']
        align_cmd = "{0} -i {1} -o {2}".format(kalign_path,
                                               self.seq_hits,
                                               self.aligned_seqs)
        dg.utils.execute_cmd(align_cmd)
        self._check_output(self.aligned_seqs)
        self.logger.info("{}: Alignment Created".format(self.seq_name))

    def mask(self):
        """
        Mask the alignment using trimal.

        Reruns trimal with the automated setting if the -nogaps mask is
        shorter than the configured minimum; if still too short the mask is
        moved to the fail directory and MaskFail is raised.
        """
        self._dependency(self.aligned_seqs, self.align)

        trimal_path = self.settings.binary_paths['trimal']
        mask_cmd = "{0} -in {1} -out {2} -nogaps".format(trimal_path,
                                                         self.aligned_seqs,
                                                         self.masked_seqs)
        dg.utils.execute_cmd(mask_cmd, debug=True)

        # If too few sites remain, retry with trimal's automated heuristic.
        if self._mask_check() < self.settings.minimums['min_sites']:
            remask_cmd = "{0} -in {1}" \
                         " -out {2} -automated1".format(trimal_path,
                                                        self.aligned_seqs,
                                                        self.masked_seqs)
            dg.utils.execute_cmd(remask_cmd)

            # Still too short: move to the fail pile and abort this sequence.
            if self._mask_check() < self.settings.minimums['min_sites']:
                mask_fail_file = os.path.join(self.settings.dir_paths['mask_fail'],
                                              self.seq_name + ".mask_too_short")
                os.rename(self.masked_seqs, mask_fail_file)
                self._check_output(mask_fail_file)
                raise dg.utils.MaskFail()

        self._check_output(self.masked_seqs)
        self.logger.info("{}: Mask Created".format(self.seq_name))

    def estimate_phylogeny(self):
        """Generate a phylogeny from the masked seqs using FastTree2."""
        self._dependency(self.masked_seqs, self.mask)

        fasttree_path = self.settings.binary_paths['FastTree']
        phylogeny_cmd = "{0} -bionj -slow"\
                        " -quiet -out {1} {2}".format(fasttree_path,
                                                      self.phylogeny,
                                                      self.masked_seqs)
        dg.utils.execute_cmd(phylogeny_cmd)
        self._check_output(self.phylogeny)
        self.logger.info("{}: Phylogeny Created".format(self.seq_name))

    def name_phylogeny(self):
        """
        Rename the leaves of the phylogeny from protein IDs to
        "<species> [<protein_ID>]" labels, writing the named tree file.
        """
        self._dependency(self.phylogeny, self.estimate_phylogeny)

        parsed_tree = ete3.Tree(self.phylogeny)

        # Build a leaf-name -> species-label mapping using the database.
        con = pymysql.connect(**self.settings.dbconfig)
        cur = con.cursor()
        tree_rename_dict = {re.escape(leaf.name): self._get_species_name(leaf.name, cur)
                            for leaf in parsed_tree.get_leaves()}
        con.close()

        # Rename via regex search/replace on the raw newick text; this could
        # perhaps be done more elegantly on the parsed ETE tree object.
        with open(self.phylogeny, 'r') as phy_fh:
            tree_text = phy_fh.read()

        patterns = re.compile("|".join(tree_rename_dict.keys()))
        renamed_tree = patterns.sub(
            lambda match: tree_rename_dict[re.escape(match.group(0))],
            tree_text)

        with open(self.named_phylogeny, 'w') as named_phy_fh:
            named_phy_fh.write(renamed_tree)

        self._check_output(self.named_phylogeny)
        self.logger.info("{}: Phylogeny Named".format(self.seq_name))

    def build_named_phylogeny(self):
        """
        Runner method for the class: build the named phylogeny end-to-end,
        logging (rather than propagating) the expected failure modes.
        """
        try:
            self.name_phylogeny()
        except dg.utils.GetSeqFail:
            self.logger.warning("{}: SeqFail | too few blastp hits for alignment".format(self.seq_name))
        except dg.utils.MaskFail:
            self.logger.warning("{}: MaskFail | too few sites hits after mask".format(self.seq_name))
        except dg.utils.PipeError as E:
            self.logger.error("!!Error in phylogeny generation for {0}: {1}".format(self.seq_name, E.msg))
        return
class BuildTraining():
    """
    Build the full training matrix X and label vector y from directories of
    labelled phylogenies.
    """

    def __init__(self, named_label_definitions, label_locations):
        """
        named_label_definitions - mapping of class label -> taxa names
        characterising that class, e.g.
        {'endosymbiont': ['chlorella', 'archaeplastida', ...]}
        label_locations - mapping of class label -> folder of that class's
        trees, e.g. {'endosymbiont': 'endosymbiont/trees'}
        """
        self.taxaparse = ete3.ncbi_taxonomy.NCBITaxa()
        self.named_label_definitions = named_label_definitions
        self.label_locations = label_locations

    def check_class_loss(self):
        """
        Make sure all class labels are included in the various class dicts.
        """
        # Bug fix: `raise NotImplemented` raised a TypeError (NotImplemented
        # is a constant, not an exception class); NotImplementedError is the
        # correct exception for an unimplemented method.
        raise NotImplementedError

    def translate_categories(self, categories):
        """
        Translate category taxa names to sets of NCBI taxids
        (mutates and returns the supplied dict).
        """
        for key in categories:
            # get_name_translator maps name -> [taxid, ...]; keep the first
            # taxid returned for each name.
            taxids = self.taxaparse.get_name_translator(categories[key]).values()
            categories[key] = set(x[0] for x in taxids)
        return categories

    def encode_labels(self, named_label_definitions):
        """Numerically encode class labels in sorted-name order."""
        return {name: index
                for index, name in enumerate(sorted(named_label_definitions))}

    def build_training(self):
        """
        Build the full X and y matrices, returning
        (X, y, label_encoding).
        """
        encoded_labels = self.encode_labels(self.named_label_definitions)
        taxid_label_definitions = self.translate_categories(self.named_label_definitions)
        X = []
        y = []
        for label, loc in self.label_locations.items():
            parser = LabelParser(label, loc)
            X_part, y_part = parser.build_subX_y(taxid_label_definitions,
                                                 encoded_labels)
            X = X + X_part
            y = y + y_part
        return np.vstack(X), np.vstack(y), encoded_labels
class BuildTest():
    """
    Assemble the unlabelled test matrix X from a directory of phylogenies.
    """

    def __init__(self, named_label_definitions, test_dir):
        """
        named_label_definitions - mapping of class label -> taxa names
        characterising that class, e.g.
        {'endosymbiont': ['chlorella', 'archaeplastida', ...]}
        test_dir - folder containing the unlabelled test trees
        """
        self.taxaparse = ete3.ncbi_taxonomy.NCBITaxa()
        self.named_label_definitions = named_label_definitions
        self.test_dir = test_dir

    def translate_categories(self, categories):
        """
        Replace each category's taxa names with a set of NCBI taxids
        (mutates and returns the supplied dict).
        """
        for label in categories:
            translated = self.taxaparse.get_name_translator(categories[label]).values()
            # keep the first taxid returned for each name
            categories[label] = set(ids[0] for ids in translated)
        return categories

    def encode_labels(self, named_label_definitions):
        """Assign each class label an integer index in sorted-name order."""
        return {name: index
                for index, name in enumerate(sorted(named_label_definitions))}

    def build_test(self):
        """Parse every test tree and return the stacked feature matrix X."""
        encoded_labels = self.encode_labels(self.named_label_definitions)
        taxid_label_definitions = self.translate_categories(self.named_label_definitions)
        parser = LabelParser('test', self.test_dir)
        return parser.build_X_test(taxid_label_definitions, encoded_labels)
class LabelParser():
    """
    Parse a folder of named phylogenies that all share a single class label.
    """

    def __init__(self, label, location):
        """Record the label and collect every '*tre' file under location."""
        self.loc = location
        self.label = label
        self.trees = glob.glob(os.path.join(self.loc, '*tre'))

    def parse_folder(self, label_definitions, label_encoding):
        """Return one feature vector per phylogeny found in the folder."""
        return [TreeParser(tree_file).get_tree_vector(label_definitions,
                                                      label_encoding)
                for tree_file in self.trees]

    def build_subX_y(self, label_definitions, label_encoding):
        """Return (feature vectors, encoded labels) for this folder."""
        vectors = self.parse_folder(label_definitions, label_encoding)
        y = [label_encoding[self.label]] * len(vectors)
        return vectors, y

    def build_X_test(self, label_definitions, label_encoding):
        """Return the stacked feature matrix for unlabelled test trees."""
        return np.vstack(self.parse_folder(label_definitions, label_encoding))
class TreeParser():
    """
    Parse a single named newick phylogeny into a per-class feature vector
    describing the taxa nearest to the seed sequence.
    """

    def __init__(self, tree, seed_node_indicator='SEED'):
        """
        TreeParser is mainly a class to generate a data vector for a given
        named phylogeny.

        tree - the tree filename itself
        seed_node_indicator - first word of the leaf name that marks the
        seed node (defaults to 'SEED')
        """
        self.tree = ete3.Tree(tree)
        self.taxaparse = ete3.ncbi_taxonomy.NCBITaxa()
        self.seed_node = self._find_seed(seed_node_indicator)

    def _find_seed(self, seed_node_indicator):
        """
        Return the leaf whose name starts with the seed indicator word,
        raising ValueError when no such leaf exists.
        """
        for leaf in self.tree:
            if leaf.name.split()[0] == seed_node_indicator:
                return leaf
        raise ValueError("Seed node not found")

    def get_n_nearest_nodes(self, n_neighbours):
        """
        Return up to n_neighbours (node, distance-to-seed) tuples for the
        leaves closest to the seed, sorted by distance.
        """
        curr_node = self.seed_node
        # Bug fix: while walking up the tree, get_leaves() of each ancestor
        # also returns every leaf already collected at lower levels, so the
        # old code appended duplicates which then crowded out genuine
        # neighbours after sorting/truncation.  Track already-seen leaves so
        # each neighbour is counted exactly once.
        seen = {self.seed_node}
        neighbours = []
        while len(neighbours) < n_neighbours and curr_node.up:
            curr_node = curr_node.up
            for node in curr_node.get_leaves():
                if node not in seen:
                    seen.add(node)
                    neighbours.append((node, self.seed_node.get_distance(node)))
        neighbours.sort(key=lambda pair: pair[1])
        # the last level climbed may have contributed more than needed
        return neighbours[:n_neighbours]

    def get_lineages(self, nearest_nodes):
        """
        Return [(lineage, distance), ...] for the supplied
        (node, distance-to-seed) tuples.
        """
        return [(self.get_taxonomy(node), distance)
                for node, distance in nearest_nodes]

    def get_taxonomy(self, node_label):
        """
        Return the NCBI taxonomic lineage for a leaf node.

        Assumes the first two words of the leaf name form the binomial
        species name.  Doesn't use fuzzy matching as it is very slow and
        names should be correct.
        """
        species_name = ' '.join(node_label.name.split()[:2])
        taxids = self.taxaparse.get_name_translator([species_name])
        lineage = self.taxaparse.get_lineage(taxids[species_name][0])
        return lineage

    def category_lineage_lookup(self, lineage, label_definitions):
        """
        Map a (lineage, distance) tuple to (class label, distance); falls
        back to "unknown" when no rank in the lineage matches any class
        definition.
        """
        ranks, distance = lineage
        for rank in ranks:
            for label, taxid_set in label_definitions.items():
                if rank in taxid_set:
                    return label, distance
        return "unknown", distance

    def get_tree_vector(self, label_definitions, label_encodings,
                        n_neighbours=10):
        """
        Generate the tree vector divided by category.

        label_definitions - an ordered dict of class label names and the
        corresponding taxid sets, e.g. {'bacteria': {taxids}, ...};
        must include "unknown"
        label_encodings - dict of label name -> index into the vector,
        e.g. {'bacteria': 1, 'unknown': 4}; must include "unknown"
        n_neighbours - number of nearest neighbours to look at (default 10)

        Each neighbour contributes 1/(distance + 1e-10) to its class's slot.
        """
        if len(label_definitions) != len(label_encodings):
            # fixed message: the old adjacent literals read "samekeys"
            raise ValueError("label defs and encoding must contain the same "
                             "keys")
        nodes = self.get_n_nearest_nodes(n_neighbours)
        taxa_lineages = self.get_lineages(nodes)
        tree_vector = [0.0] * len(label_definitions)
        for lineage in taxa_lineages:
            label, distance = self.category_lineage_lookup(lineage,
                                                           label_definitions)
            # reciprocal of distance plus a small fudge factor to prevent
            # infinite division for zero-length branches
            weight = 1 / (distance + 1e-10)
            tree_vector[label_encodings[label]] += weight
        return tree_vector
| fmaguire/dendrogenous | dendrogenous/core.py | Python | bsd-3-clause | 21,903 | [
"BLAST",
"Biopython"
] | e6e17a1a455b9a693360eea7ffd1c3677a14f84b9900350620ba2b7a609d5161 |
from __future__ import absolute_import
import ast
import itertools
from types import FunctionType
from myhdl._util import _flatten
from myhdl._enum import EnumType
from myhdl._Signal import SignalType
class Data():
    # Bare attribute container shared between _getCellVars and the visitor.
    pass
def _getCellVars(symdict, arg):
    """Return the names from symdict that the generators in arg reference."""
    data = Data()
    data.symdict = symdict
    visitor = _GetCellVars(data)
    for gen in _flatten(arg):
        visitor.visit(gen.ast)
    return list(data.objset)
class _GetCellVars(ast.NodeVisitor):
def __init__(self, data):
self.data = data
self.data.objset = set()
def visit_Name(self, node):
if node.id in self.data.symdict:
self.data.objset.add(node.id)
self.generic_visit(node)
| gw0/myhdl | myhdl/_getcellvars.py | Python | lgpl-2.1 | 710 | [
"VisIt"
] | 2c95d86b537b4f331e362a08ebda2b6a8ffc772c1427c5cc4e7e580ce3df0078 |
"""
Implementation of RSA Public Key Encryption in Python
Kunal Gosar - Dec 2015 Visit me at: http://kunalgosar.me
"""
import random
class encrypter:
    """Holds an RSA key pair and encrypts/decrypts text messages with it."""

    def __init__(self, lower=10**50, upper=10**100, e=65537):
        self.pub_key, self.prv_key = self.__keygen(lower, upper, e)

    def __keygen(self, lower, upper, e):
        """Generate an RSA (public, private) key pair for exponent e."""
        while True:
            p = prime_gen(lower, upper)
            q = prime_gen(lower, upper)
            totient = (p - 1) * (q - 1)
            # e must be coprime with the totient; retry with new primes
            if totient % e != 0:
                break
        n = p * q
        d = mul_inv(e, totient)
        return (e, n), (d, n)

    def encrypt(self, message):
        """Run encrypt function on a message to return an encrypted array"""
        e, n = self.pub_key
        return [pow(code, e, n) for code in str2num(message)]

    def decrypt(self, cypher):
        """Run decrypt function on the encrypted array to decrypt the message"""
        d, n = self.prv_key
        return num2str([pow(block, d, n) for block in cypher])
def mul_inv(a, m):
    """
    Return the multiplicative inverse of a modulo m, in [0, m).

    Assumes gcd(a, m) == 1 (as holds for RSA exponents).
    """
    def extended_euclid(a, b):
        """Return (g, x, y) with a*x + b*y == g == gcd(a, b)."""
        if not a:
            return b, 0, 1
        g, y, x = extended_euclid(b % a, a)
        # Fixed: previously returned `a` instead of the gcd `g`
        # (harmless here since only index [1] is used, but wrong).
        return g, x - (b // a) * y, y

    # Python's % with a positive modulus is already non-negative, so the
    # old `if d < 0` adjustment was dead code.
    return extended_euclid(a, m)[1] % m
def primality_test(n):
    """
    Probabilistic Miller-Rabin primality check (5 random rounds).

    Returns True when n is (very probably) prime, False otherwise.
    Small inputs are handled explicitly: the original asserted n > 2 and
    crashed with ValueError for n == 3 (randrange(2, 2) is empty).
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    # write n - 1 as s * 2**t with s odd
    s, t = n - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):
        q = pow(random.randrange(2, n - 1), s, n)
        if q != 1:
            i = 0
            while q != n - 1:
                if i == t - 1:
                    return False
                i, q = i + 1, pow(q, 2, n)
    return True
def extended_euclid(a, b):
    """
    Extended Euclidean algorithm.

    Return (g, x, y) such that a*x + b*y == g == gcd(a, b).
    Bug fix: the recursive case previously returned `a` as the first
    element instead of the computed gcd, which is wrong for any
    recursion depth greater than one.
    """
    if not a:
        return b, 0, 1
    g, y, x = extended_euclid(b % a, a)
    return g, x - (b // a) * y, y
def prime_gen(l, u):
    """
    Return a probable prime found by scanning upward from a random start
    in [l, u] (so the result can exceed u if no prime is hit first).
    """
    candidate = random.randint(l, u)
    while not primality_test(candidate):
        candidate += 1
    return candidate
def str2num(str):
    """Map each character of the string to its integer code point."""
    return list(map(ord, str))
def num2str(num):
    """Rebuild a string from a list of integer code points."""
    return "".join(map(chr, num))
| kunalgosar/RSA-Encryption | RSA.py | Python | mit | 2,606 | [
"VisIt"
] | 72cecc85007c9c68534930d0bf4578b414caf45a9a0611727f97894610fe8e4b |
from abc import ABCMeta
from abc import abstractmethod
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
try:
from six import text_type
except ImportError:
from galaxy.util import unicodify as text_type
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
class LwrInteface(object):
    """
    Abstract base class describing how the synchronous client talks to a
    (potentially remote) LWR server.  The obvious implementation is HTTP
    based, but LWR objects wrapped in routes can also be communicated with
    directly when in memory.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def execute(self, command, args={}, data=None, input_path=None, output_path=None):
        """
        Run `command` against the configured LWR job manager.

        `args` supplies the method parameters; `data` or `input_path`
        essentially describe POST bodies.  When the command results in a
        file, its destination should be given as `output_path`.
        """
class HttpLwrInterface(LwrInteface):
    """
    LWR interface implementation that talks to a remote LWR server over
    HTTP via the supplied transport.
    """

    def __init__(self, destination_params, transport):
        """
        destination_params must contain `url` and may contain
        `private_token`; transport performs the actual HTTP requests.
        """
        self.transport = transport
        remote_host = destination_params.get("url")
        assert remote_host is not None, "Failed to determine url for LWR client."
        # Normalize to a full base URL ending in a slash.
        if not remote_host.endswith("/"):
            remote_host = "%s/" % remote_host
        if not remote_host.startswith("http"):
            remote_host = "http://%s" % remote_host
        self.remote_host = remote_host
        self.private_key = destination_params.get("private_token", None)

    def execute(self, command, args={}, data=None, input_path=None, output_path=None):
        """
        Execute `command` remotely; see LwrInteface.execute for the
        parameter semantics.
        """
        url = self.__build_url(command, args)
        response = self.transport.execute(url, data=data, input_path=input_path, output_path=output_path)
        return response

    def __build_url(self, command, args):
        # Bug fix: work on a copy.  The old code assigned
        # args["private_key"] in place, mutating the caller's dict and -
        # worse - the shared `args={}` default of execute(), so the private
        # key leaked into every later default-argument call.
        args = dict(args)
        if self.private_key:
            args["private_key"] = self.private_key
        arg_bytes = dict([(k, text_type(args[k]).encode('utf-8')) for k in args])
        data = urlencode(arg_bytes)
        return self.remote_host + command + "?" + data
class LocalLwrInterface(LwrInteface):
    """
    LWR interface implementation that calls in-memory LWR route objects
    directly instead of going over HTTP.
    """

    def __init__(self, destination_params, job_manager=None, file_cache=None, object_store=None):
        # destination_params is accepted for interface parity with
        # HttpLwrInterface but is not used by this implementation.
        self.job_manager = job_manager
        self.file_cache = file_cache
        self.object_store = object_store

    def __app_args(self):
        # Arguments that would be specified from LwrApp if running
        # in web server.
        return {
            'manager': self.job_manager,
            'file_cache': self.file_cache,
            'object_store': self.object_store,
            'ip': None
        }

    def execute(self, command, args={}, data=None, input_path=None, output_path=None):
        # Dispatch `command` to the controller of the same name in
        # lwr.web.routes and invoke its action function directly.
        # If data set, should be unicode (on Python 2) or str (on Python 3).
        from lwr.web import routes
        from lwr.web.framework import build_func_args
        controller = getattr(routes, command)
        action = controller.func
        body_args = dict(body=self.__build_body(data, input_path))
        args = build_func_args(action, args.copy(), self.__app_args(), body_args)
        result = action(**args)
        if controller.response_type != 'file':
            return controller.body(result)
        else:
            # File responses are copied to output_path rather than returned.
            # TODO: Add to Galaxy.
            from galaxy.util import copy_to_path
            with open(result, 'rb') as result_file:
                copy_to_path(result_file, output_path)

    def __build_body(self, data, input_path):
        # Prefer in-memory data over a file path; None when neither given.
        if data is not None:
            return BytesIO(data.encode('utf-8'))
        elif input_path is not None:
            return open(input_path, 'rb')
        else:
            return None
| jmchilton/lwr | lwr/lwr_client/interface.py | Python | apache-2.0 | 3,817 | [
"Galaxy"
] | f8eb8abca3b67bfaa9556857601f2a1b2108c3feba8d11b0daea13e81022fcae |
#!/usr/bin/env python
# This example demonstrates how to use the vtkSphereWidget to control
# the position of a light.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Start by loading some data.
dem = vtk.vtkDEMReader()
dem.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
dem.Update()

# Vertical exaggeration factor applied when warping the terrain.
Scale = 2

# Colour map over the (scaled) elevation range.
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
lo = Scale*dem.GetElevationBounds()[0]
hi = Scale*dem.GetElevationBounds()[1]

# Downsample the DEM 4x in x/y (with averaging) to keep rendering fast.
shrink = vtk.vtkImageShrink3D()
shrink.SetShrinkFactors(4, 4, 1)
shrink.SetInputConnection(dem.GetOutputPort())
shrink.AveragingOn()

# Convert the image data to polygonal geometry.
geom = vtk.vtkImageDataGeometryFilter()
geom.SetInputConnection(shrink.GetOutputPort())
geom.ReleaseDataFlagOn()

# Warp the flat geometry along z by the elevation scalars.
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(geom.GetOutputPort())
warp.SetNormal(0, 0, 1)
warp.UseNormalOn()
warp.SetScaleFactor(Scale)
warp.ReleaseDataFlagOn()

# Regenerate elevation scalars over the warped surface for colouring.
elevation = vtk.vtkElevationFilter()
elevation.SetInputConnection(warp.GetOutputPort())
elevation.SetLowPoint(0, 0, lo)
elevation.SetHighPoint(0, 0, hi)
elevation.SetScalarRange(lo, hi)
elevation.ReleaseDataFlagOn()

# Surface normals for shading (no splitting so the mesh stays connected).
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(elevation.GetOutputPort())
normals.SetFeatureAngle(60)
normals.ConsistencyOff()
normals.SplittingOff()
normals.ReleaseDataFlagOn()

# Map the coloured surface; LOD actor keeps interaction responsive.
demMapper = vtk.vtkPolyDataMapper()
demMapper.SetInputConnection(normals.GetOutputPort())
demMapper.SetScalarRange(lo, hi)
demMapper.SetLookupTable(lut)
demMapper.ImmediateModeRenderingOn()
demActor = vtk.vtkLODActor()
demActor.SetMapper(demMapper)

# Create the RenderWindow, Renderer and both Actors
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.LightFollowCameraOff()
iren.SetInteractorStyle(None)

# Associate the line widget with the interactor
sphereWidget = vtk.vtkSphereWidget()
sphereWidget.SetInteractor(iren)
sphereWidget.SetProp3D(demActor)
sphereWidget.SetPlaceFactor(4)
sphereWidget.PlaceWidget()
sphereWidget.TranslationOff()
sphereWidget.ScaleOff()
sphereWidget.HandleVisibilityOn()
# Uncomment the next line if you want to see the widget active when
# the script starts
#sphereWidget.EnabledOn()
# Actually probe the data
def MoveLight(obj, event):
    """Sphere-widget observer callback: move the scene light to the
    widget's current handle position."""
    global light
    handle_position = obj.GetHandlePosition()
    light.SetPosition(handle_position)
sphereWidget.AddObserver("InteractionEvent", MoveLight)

# Add the actors to the renderer, set the background and size
ren.AddActor(demActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
# NOTE(review): this second SetBackground overrides the white background
# set two lines above - the effective background is dark blue.
ren.SetBackground(0.1, 0.2, 0.4)

# Aim the camera at the terrain and frame the scene.
cam1 = ren.GetActiveCamera()
cam1.SetViewUp(0, 0, 1)
cam1.SetFocalPoint(dem.GetOutput().GetCenter())
cam1.SetPosition(1, 0, 0)
ren.ResetCamera()
cam1.Elevation(25)
cam1.Azimuth(125)
cam1.Zoom(1.25)

# The light whose position the sphere widget controls via MoveLight.
light = vtk.vtkLight()
light.SetFocalPoint(dem.GetOutput().GetCenter())
ren.AddLight(light)

# Start the interactive render loop.
iren.Initialize()
renWin.Render()
iren.Start()
| hlzz/dotfiles | graphics/VTK-7.0.0/Examples/GUI/Python/SphereWidget.py | Python | bsd-3-clause | 3,083 | [
"VTK"
] | 79da91b894acaba379f9334c35474e9b042d507b50cd0f5e4f41b1c24942204c |
# User creation spoke
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Martin Sivak <msivak@redhat.com>
#
import re
from pyanaconda.i18n import _, CN_
from pyanaconda.users import cryptPassword, validatePassword, guess_username
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.categories.user_settings import UserSettingsCategory
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.gui.utils import enlightbox
from pyanaconda.ui.helpers import GUISpokeInputCheckHandler, GUIInputCheckHandler, InputCheck
from pykickstart.constants import FIRSTBOOT_RECONFIG
from pyanaconda.constants import ANACONDA_ENVIRON, FIRSTBOOT_ENVIRON,\
PASSWORD_EMPTY_ERROR, PASSWORD_CONFIRM_ERROR_GUI, PASSWORD_STRENGTH_DESC,\
PASSWORD_WEAK, PASSWORD_WEAK_WITH_ERROR, PASSWORD_WEAK_CONFIRM,\
PASSWORD_WEAK_CONFIRM_WITH_ERROR
from pyanaconda.regexes import GECOS_VALID, USERNAME_VALID, GROUPNAME_VALID, GROUPLIST_FANCY_PARSE
__all__ = ["UserSpoke", "AdvancedUserDialog"]
class AdvancedUserDialog(GUIObject, GUIInputCheckHandler):
    """Modal dialog for advanced user properties.

    Lets the user edit the home directory, UID, GID and the list of
    supplementary groups for the account being created.  Edits are
    written back to the shared kickstart ``UserData`` object (and the
    group dictionary shared with :class:`UserSpoke`) only when the
    dialog is confirmed with OK.
    """

    # Objects instantiated from the glade file, and the glade file itself.
    builderObjects = ["advancedUserDialog", "uid", "gid"]
    mainWidgetName = "advancedUserDialog"
    uiFile = "spokes/advanced_user.glade"

    def set_status(self, inputcheck):
        """React to an input-check result.

        Set or clear the groups error label based on the check status and
        make the Save button insensitive if the check fails.
        """
        if inputcheck.check_status == InputCheck.CHECK_OK:
            self._groupsError.set_text('')
            self._saveButton.set_sensitive(True)
        else:
            self._groupsError.set_text(inputcheck.check_status)
            self._saveButton.set_sensitive(False)

    def _validateGroups(self, inputcheck):
        """Validate the comma-separated group list entry.

        Returns ``InputCheck.CHECK_OK`` when every entry parses to a valid
        group name, otherwise a translated error message naming the first
        invalid group.
        """
        groups_list = self.get_input(inputcheck.input_obj).split(",")

        # Check each group name in the list
        # NOTE(review): GROUPLIST_FANCY_PARSE.match() is assumed to match any
        # split fragment (returning a 'name' group); a non-matching fragment
        # would raise AttributeError here -- confirm the regex accepts all input.
        for group in groups_list:
            group_name = GROUPLIST_FANCY_PARSE.match(group).group('name')
            if not GROUPNAME_VALID.match(group_name):
                return _("Invalid group name: %s") % group_name

        return InputCheck.CHECK_OK

    def __init__(self, user, groupDict, data):
        """Store references to the shared state.

        :param user: kickstart ``UserData`` object being edited in place
        :param groupDict: mapping of group name to ``GroupData``; shared
                          with the owning spoke so group edits propagate
        :param data: the kickstart data handler, passed to GUIObject
        """
        GUIObject.__init__(self, data)
        GUIInputCheckHandler.__init__(self)
        self._user = user
        self._groupDict = groupDict

    def _grabObjects(self):
        # Cache all widgets looked up from the GtkBuilder UI definition.
        self._cHome = self.builder.get_object("c_home")
        self._cUid = self.builder.get_object("c_uid")
        self._cGid = self.builder.get_object("c_gid")
        self._tHome = self.builder.get_object("t_home")
        self._lHome = self.builder.get_object("l_home")
        self._tGroups = self.builder.get_object("t_groups")
        self._spinUid = self.builder.get_object("spin_uid")
        self._spinGid = self.builder.get_object("spin_gid")
        self._uid = self.builder.get_object("uid")
        self._gid = self.builder.get_object("gid")
        self._groupsError = self.builder.get_object("groups_error")
        self._saveButton = self.builder.get_object("save_button")

    def initialize(self):
        """One-time setup: grab widgets and register the group check."""
        GUIObject.initialize(self)
        self._grabObjects()

        # Validate the group input box
        self.add_check(self._tGroups, self._validateGroups)

    def _apply_checkboxes(self, _editable = None, data = None):
        """Update the state of this screen according to the
           checkbox states on the screen. It is called from
           the toggled Gtk event.
        """
        c_home = self._cHome.get_active()
        c_uid = self._cUid.get_active()
        c_gid = self._cGid.get_active()

        # Entry/spin widgets are only editable while their checkbox is set.
        self._tHome.set_sensitive(c_home)
        self._lHome.set_sensitive(c_home)
        self._spinUid.set_sensitive(c_uid)
        self._spinGid.set_sensitive(c_gid)

    def _parse_groups(self):
        """Parse the groups entry into a list of ``GroupData`` objects.

        Accepts entries of the form "name" or "name (gid)" as understood
        by GROUPLIST_FANCY_PARSE; empty fragments are skipped.
        """
        group_strings = self._tGroups.get_text().split(",")
        group_objects = []
        for group in group_strings:
            # Skip empty strings
            if not group:
                continue
            (group_name, group_id) = GROUPLIST_FANCY_PARSE.match(group).groups()
            if group_id:
                group_id = int(group_id)
            group_objects.append(self.data.GroupData(name=group_name, gid=group_id))
        return group_objects

    def refresh(self):
        """Populate the dialog widgets from the current user data."""
        if self._user.homedir:
            self._tHome.set_text(self._user.homedir)
        elif self._user.name:
            # Default the home directory from the username when unset.
            homedir = "/home/" + self._user.name
            self._tHome.set_text(homedir)
            self._user.homedir = homedir

        self._cHome.set_active(bool(self._user.homedir))
        self._cUid.set_active(bool(self._user.uid))
        self._cGid.set_active(bool(self._user.gid))
        self._apply_checkboxes()

        self._spinUid.update()
        self._spinGid.update()

        # Render each group as "name (gid)", "name" or "(gid)" depending on
        # which pieces of information are available.
        groups = []
        for group_name in self._user.groups:
            group = self._groupDict[group_name]
            if group.name and group.gid is not None:
                groups.append("%s (%d)" % (group.name, group.gid))
            elif group.name:
                groups.append(group.name)
            elif group.gid is not None:
                groups.append("(%d)" % (group.gid,))
        self._tGroups.set_text(", ".join(groups))

    def run(self):
        """Show the dialog and, on OK, commit edits to the user object.

        Returns the Gtk dialog response code (1 means OK was clicked).
        """
        self.window.show()
        rc = self.window.run()
        self.window.hide()
        #OK clicked
        if rc == 1:
            # Unchecked boxes reset the corresponding property to None so
            # the backend picks a default.
            if self._cHome.get_active():
                self._user.homedir = self._tHome.get_text()
            else:
                self._user.homedir = None

            if self._cUid.get_active():
                self._user.uid = int(self._uid.get_value())
            else:
                self._user.uid = None

            if self._cGid.get_active():
                self._user.gid = int(self._gid.get_value())
            else:
                self._user.gid = None

            # Replace the shared group dictionary and the user's group list
            # wholesale with the parsed entry contents.
            groups = self._parse_groups()
            self._user.groups = []
            self._groupDict.clear()
            for group in groups:
                self._groupDict[group.name] = group
                self._user.groups.append(group.name)
        #Cancel clicked, window destroyed...
        else:
            pass
        return rc
class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler):
    """Spoke for creating the first (administrator) user account.

    Runs both in anaconda and in firstboot (see :meth:`should_run`).
    Password, username and full-name entries are validated through the
    InputCheck machinery; a weak password can be waived by clicking Done
    twice.
    """

    # Objects instantiated from the glade file, and the glade file itself.
    builderObjects = ["userCreationWindow"]
    mainWidgetName = "userCreationWindow"
    uiFile = "spokes/user.glade"

    category = UserSettingsCategory
    icon = "avatar-default-symbolic"
    title = CN_("GUI|Spoke", "_USER CREATION")

    @classmethod
    def should_run(cls, environment, data):
        """Decide whether this spoke is shown in the given environment."""
        # the user spoke should run always in the anaconda and in firstboot only
        # when doing reconfig or if no user has been created in the installation
        if environment == ANACONDA_ENVIRON:
            return True
        elif environment == FIRSTBOOT_ENVIRON and data is None:
            # cannot decide, stay in the game and let another call with data
            # available (will come) decide
            return True
        elif environment == FIRSTBOOT_ENVIRON and data and \
                (data.firstboot.firstboot == FIRSTBOOT_RECONFIG or \
                     len(data.user.userList) == 0):
            return True
        else:
            return False

    def __init__(self, *args):
        NormalSpoke.__init__(self, *args)
        GUISpokeInputCheckHandler.__init__(self)
        # Previous "weak password" state; currently only initialized here.
        self._oldweak = None

    def initialize(self):
        """One-time setup: grab widgets, seed state, register checks."""
        NormalSpoke.initialize(self)

        # Edit the first existing user, or start a fresh UserData object.
        if self.data.user.userList:
            self._user = self.data.user.userList[0]
        else:
            self._user = self.data.UserData()

        # "wheel" membership is what makes the account an administrator.
        self._wheel = self.data.GroupData(name = "wheel")
        self._groupDict = {"wheel": self._wheel}

        # placeholders for the text boxes
        self.fullname = self.builder.get_object("t_fullname")
        self.username = self.builder.get_object("t_username")
        self.pw = self.builder.get_object("t_password")
        self.confirm = self.builder.get_object("t_verifypassword")
        self.admin = self.builder.get_object("c_admin")
        self.usepassword = self.builder.get_object("c_usepassword")
        self.b_advanced = self.builder.get_object("b_advanced")

        # Counter for the click Done twice check
        self._waivePasswordClicks = 0

        # Per-entry flag: True while the username may still be guessed
        # from the full name (i.e. the user has not typed one manually).
        self.guesser = {
            self.username: True
        }

        # Updated during the password changed event and used by the password
        # field validity checker
        self._pwq_error = None
        self._pwq_valid = True

        self.pw_bar = self.builder.get_object("password_bar")
        self.pw_label = self.builder.get_object("password_label")

        # Configure levels for the password bar
        self.pw_bar.add_offset_value("low", 2)
        self.pw_bar.add_offset_value("medium", 3)
        self.pw_bar.add_offset_value("high", 4)

        # indicate when the password was set by kickstart
        self._user.password_kickstarted = self.data.user.seen
        if self._user.password_kickstarted:
            self.usepassword.set_active(self._user.password != "")
            if not self._user.isCrypted:
                self.pw.set_text(self._user.password)
                self.confirm.set_text(self._user.password)
            else:
                # Crypted password cannot be shown; hint at its origin instead.
                self.usepassword.set_active(True)
                self.pw.set_placeholder_text(_("The password was set by kickstart."))
                self.confirm.set_placeholder_text(_("The password was set by kickstart."))

        # Password checks, in order of importance:
        # - if a password is required, is one specified?
        # - if a password is specified and there is data in the confirm box, do they match?
        # - if a password is specified and the confirm box is empty or match, how strong is it?
        # - if a password is required, is there any data in the confirm box?
        self.add_check(self.pw, self._checkPasswordEmpty)

        # The password confirmation needs to be checked whenever either of the password
        # fields change. Separate checks are created on each field so that edits on
        # either will trigger a check and so that the last edited field will get the focus
        # when Done is clicked. Whichever check is run needs to run the other check in
        # order to reset the status. The check_data field is used as a flag to prevent
        # infinite recursion.
        self._confirm_check = self.add_check(self.confirm, self._checkPasswordConfirm)
        self._password_check = self.add_check(self.pw, self._checkPasswordConfirm)

        # Keep a reference to this check, since it has to be manually run for the
        # click Done twice check.
        self._pwStrengthCheck = self.add_check(self.pw, self._checkPasswordStrength)

        self.add_check(self.confirm, self._checkPasswordEmpty)

        # Allow empty usernames so the spoke can be exited without creating a user
        self.add_re_check(self.username, re.compile(USERNAME_VALID.pattern + r'|^$'),
                          _("Invalid username"))

        self.add_re_check(self.fullname, GECOS_VALID, _("Full name cannot contain colon characters"))

        self._advanced = AdvancedUserDialog(self._user, self._groupDict,
                                            self.data)
        self._advanced.initialize()

    def refresh(self):
        """Repopulate widgets from stored state each time the spoke opens."""
        # Enable the input checks in case they were disabled on the last exit
        for check in self.checks:
            check.enabled = True

        self.username.set_text(self._user.name)
        self.fullname.set_text(self._user.gecos)
        self.admin.set_active(self._wheel.name in self._user.groups)

        # Re-run the password checks against the restored state.
        self.pw.emit("changed")
        self.confirm.emit("changed")

        # Focus the first field that still needs attention.
        if self.username.get_text() and self.usepassword.get_active() and \
           self._user.password == "":
            self.pw.grab_focus()
        elif self.fullname.get_text():
            self.username.grab_focus()
        else:
            self.fullname.grab_focus()

        self.b_advanced.set_sensitive(bool(self._user.name))

    @property
    def status(self):
        """One-line summary shown on the hub."""
        if len(self.data.user.userList) == 0:
            return _("No user will be created")
        elif self._wheel.name in self.data.user.userList[0].groups:
            return _("Administrator %s will be created") % self.data.user.userList[0].name
        else:
            return _("User %s will be created") % self.data.user.userList[0].name

    @property
    def mandatory(self):
        # mandatory only if root account is disabled
        return (not self.data.rootpw.password) or self.data.rootpw.lock

    def apply(self):
        """Write the widget state back into the kickstart data."""
        # set the password only if the user enters anything to the text entry
        # this should preserve the kickstart based password
        if self.usepassword.get_active():
            if self.pw.get_text():
                self._user.password_kickstarted = False
                self._user.password = cryptPassword(self.pw.get_text())
                self._user.isCrypted = True
                self.pw.set_placeholder_text("")
                self.confirm.set_placeholder_text("")

        # reset the password when the user unselects it
        else:
            self.pw.set_placeholder_text("")
            self.confirm.set_placeholder_text("")
            self._user.password = ""
            self._user.isCrypted = False
            self._user.password_kickstarted = False

        self._user.name = self.username.get_text()
        self._user.gecos = self.fullname.get_text()

        # Remove any groups that were created in a previous visit to this spoke
        self.data.group.groupList = [g for g in self.data.group.groupList \
                                     if not hasattr(g, 'anaconda_group')]

        # the user will be created only if the username is set
        if self._user.name:
            if self.admin.get_active() and \
               self._wheel.name not in self._user.groups:
                self._user.groups.append(self._wheel.name)
            elif not self.admin.get_active() and \
                 self._wheel.name in self._user.groups:
                self._user.groups.remove(self._wheel.name)

            # "wheel" is a system group, so only the extra groups are added
            # to the kickstart group list.
            anaconda_groups = [self._groupDict[g] for g in self._user.groups
                               if g != self._wheel.name]

            self.data.group.groupList += anaconda_groups

            # Flag the groups as being created in this spoke
            for g in anaconda_groups:
                g.anaconda_group = True

            if self._user not in self.data.user.userList:
                self.data.user.userList.append(self._user)
        elif self._user in self.data.user.userList:
            self.data.user.userList.remove(self._user)

    @property
    def completed(self):
        # The spoke is complete as soon as a user exists in kickstart data.
        return len(self.data.user.userList) > 0

    def _updatePwQuality(self):
        """This method updates the password indicators according
        to the password entered by the user.
        """
        pwtext = self.pw.get_text()
        username = self.username.get_text()

        # Reset the counter used for the "press Done twice" logic
        self._waivePasswordClicks = 0

        self._pwq_valid, strength, self._pwq_error = validatePassword(pwtext, username)

        # Map the raw strength score (0-100) onto the 0-4 level bar.
        if not pwtext:
            val = 0
        elif strength < 50:
            val = 1
        elif strength < 75:
            val = 2
        elif strength < 90:
            val = 3
        else:
            val = 4
        text = _(PASSWORD_STRENGTH_DESC[val])

        self.pw_bar.set_value(val)
        self.pw_label.set_text(text)

    def usepassword_toggled(self, togglebutton = None, data = None):
        """Called by Gtk callback when the "Use password" check
        button is toggled. It will make password entries in/sensitive."""
        self.pw.set_sensitive(self.usepassword.get_active())
        self.confirm.set_sensitive(self.usepassword.get_active())

        # Re-check the password
        self.pw.emit("changed")
        self.confirm.emit("changed")

    def password_changed(self, editable=None, data=None):
        """Update the password strength level bar"""
        self._updatePwQuality()

    def username_changed(self, editable = None, data = None):
        """Called by Gtk callback when the username or hostname
        entry changes. It disables the guess algorithm if the
        user added his own text there and reenables it when the
        user deletes the whole text."""
        if editable.get_text() == "":
            self.guesser[editable] = True
            self.b_advanced.set_sensitive(False)
        else:
            self.guesser[editable] = False
            self.b_advanced.set_sensitive(True)

            # Re-run the password checks against the new username
            self.pw.emit("changed")
            self.confirm.emit("changed")

    def full_name_changed(self, editable = None, data = None):
        """Called by Gtk callback when the full name field changes.
        It guesses the username and hostname, strips diacritics
        and makes those lowercase.
        """
        # after the text is updated in guesser, the guess has to be reenabled
        if self.guesser[self.username]:
            fullname = self.fullname.get_text()
            username = guess_username(fullname)
            self.username.set_text(username)
            self.guesser[self.username] = True

    def _checkPasswordEmpty(self, inputcheck):
        """Check whether a password has been specified at all.

           This check is used for both the password and the confirmation.
        """
        # If the password was set by kickstart, skip the strength check
        # NOTE(review): this condition is repeated inside the next test below,
        # so the early return is redundant -- confirm before simplifying.
        if self._user.password_kickstarted:
            return InputCheck.CHECK_OK

        # Skip the check if no password is required
        if (not self.usepassword.get_active()) or self._user.password_kickstarted:
            return InputCheck.CHECK_OK
        elif not self.get_input(inputcheck.input_obj):
            # Password was required but not entered; point at whichever field
            # is empty.
            if inputcheck.input_obj == self.pw:
                return _(PASSWORD_EMPTY_ERROR)
            else:
                return _(PASSWORD_CONFIRM_ERROR_GUI)
        else:
            return InputCheck.CHECK_OK

    def _checkPasswordConfirm(self, inputcheck):
        """If the user has entered confirmation data, check whether it matches the password."""
        # Skip the check if no password is required
        if (not self.usepassword.get_active()) or self._user.password_kickstarted:
            result = InputCheck.CHECK_OK
        elif self.confirm.get_text() and (self.pw.get_text() != self.confirm.get_text()):
            result = _(PASSWORD_CONFIRM_ERROR_GUI)
        else:
            result = InputCheck.CHECK_OK

        # If the check succeeded, reset the status of the other check object
        # Disable the current check to prevent a cycle
        inputcheck.enabled = False
        if result == InputCheck.CHECK_OK:
            if inputcheck == self._confirm_check:
                self._password_check.update_check_status()
            else:
                self._confirm_check.update_check_status()
        inputcheck.enabled = True

        return result

    def _checkPasswordStrength(self, inputcheck):
        """Update the error message based on password strength.

           The password strength has already been checked in _updatePwQuality, called
           previously in the signal chain. This method converts the data set from there
           into an error message.

           The password strength check can be waived by pressing "Done" twice. This
           is controlled through the self._waivePasswordClicks counter. The counter
           is set in on_back_clicked, which also re-runs this check manually.
        """
        # Skip the check if no password is required
        if (not self.usepassword.get_active()) or \
           ((not self.pw.get_text()) and (self._user.password_kickstarted)):
            return InputCheck.CHECK_OK

        # If the password failed the validity check, fail this check
        if (not self._pwq_valid) and (self._pwq_error):
            return self._pwq_error

        pwstrength = self.pw_bar.get_value()

        if pwstrength < 2:
            # If Done has been clicked twice, waive the check
            if self._waivePasswordClicks > 1:
                return InputCheck.CHECK_OK
            elif self._waivePasswordClicks == 1:
                # One click so far: warn that a second click will accept it.
                if self._pwq_error:
                    return _(PASSWORD_WEAK_CONFIRM_WITH_ERROR) % self._pwq_error
                else:
                    return _(PASSWORD_WEAK_CONFIRM)
            else:
                if self._pwq_error:
                    return _(PASSWORD_WEAK_WITH_ERROR) % self._pwq_error
                else:
                    return _(PASSWORD_WEAK)
        else:
            return InputCheck.CHECK_OK

    def on_advanced_clicked(self, _button, data=None):
        """Handler for the Advanced.. button. It starts the Advanced dialog
        for setting homedir, uid, gid and groups.
        """
        self._user.name = self.username.get_text()

        # Sync the admin checkbox into the group list before the dialog runs.
        if self.admin.get_active() and \
           self._wheel.name not in self._user.groups:
            self._user.groups.append(self._wheel.name)
        elif not self.admin.get_active() and \
             self._wheel.name in self._user.groups:
            self._user.groups.remove(self._wheel.name)

        self._advanced.refresh()
        with enlightbox(self.window, self._advanced.window):
            self._advanced.run()

        # The dialog may have edited the group list; reflect that back.
        self.admin.set_active(self._wheel.name in self._user.groups)

    def on_back_clicked(self, button):
        """Done handler: count waive clicks, then run the exit checks."""
        # Add a click and re-check the password strength
        self._waivePasswordClicks += 1
        self._pwStrengthCheck.update_check_status()

        # If there is no user set, skip the checks
        if not self.username.get_text():
            for check in self.checks:
                check.disable()

        if GUISpokeInputCheckHandler.on_back_clicked(self, button):
            NormalSpoke.on_back_clicked(self, button)
| mairin/anaconda | pyanaconda/ui/gui/spokes/user.py | Python | gpl-2.0 | 22,989 | [
"VisIt"
] | 12a7f689d15ab1157e486c2a1ba0fa0988491fc7408d78ab614eff3feb6e9def |
#!/usr/bin/env python
import argparse
import shutil
import icqsol_utils
# Command-line interface definition.
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input', help='Shape dataset selected from history')
parser.add_argument('--input_file_format_and_type', dest='input_file_format_and_type', help='Input file format and type')
parser.add_argument('--input_dataset_type', dest='input_dataset_type', help='Input dataset_type')
parser.add_argument('--input_texture', dest='input_texture', help='Image dataset selected from history')
parser.add_argument('--input_texture_file_format', dest='input_texture_file_format', help='Input texture file format')
parser.add_argument('--max_edge_length', dest='max_edge_length', type=float, default=float('inf'), help='Maximum edge length')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--output_vtk_type', dest='output_vtk_type', help='Output VTK type')
args = parser.parse_args()

# Work out how the input shape is stored, and where scratch files go.
in_format, in_file_type = icqsol_utils.get_format_and_type(args.input_file_format_and_type)
scratch_dir = icqsol_utils.get_temp_dir()

# A ShapeManager configured for the input format performs the load.
manager = icqsol_utils.get_shape_manager(in_format, args.input_dataset_type)
polydata = manager.loadAsVtkPolyData(args.input)

# Drape the selected texture image over the shape's surface.
polydata = manager.addTextureToVtkPolyData(
    polydata,
    texture_file=args.input_texture,
    max_edge_length=args.max_edge_length,
    texture_file_format=args.input_texture_file_format)

# The output format can only be 'vtk'; only the file type (e.g. ascii
# vs binary) varies.
out_format, out_file_type = icqsol_utils.get_format_and_type(args.output_vtk_type)
tmp_output = icqsol_utils.get_temporary_file_path(scratch_dir, out_format)

# Switch the manager's writer to vtk polydata, write to the temporary
# location, then move the result into place.
manager.setWriter(file_format=icqsol_utils.VTK, vtk_dataset_type=icqsol_utils.POLYDATA)
manager.saveVtkPolyData(vtk_poly_data=polydata, file_name=tmp_output, file_type=out_file_type)
shutil.move(tmp_output, args.output)
"VTK"
] | 3812a6a09caaf7754f72b0fea872d0d2fb163feb81d9a2846644111a981d54c2 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Custom Axes
###################
"""
#
#import seaborn as sns
#
#from exa.mpl import _plot_contour, _plot_surface
#from exatomic import Energy
#
#
#def _get_minimum(mindf):
# absmin = mindf[mindf[2] == mindf[2].min()]
# idxs = mindf[(mindf[0] > 0) & (mindf[1] > 0)].index.values
# id0, id1 = idxs[:2]
# cnt = 1
# try:
# while np.isclose(id0 + 1, id1):
# id0, id1 = idxs[cnt:cnt + 2]
# cnt += 1
# slc = slice(idxs[0], id0 + 1)
# amin = mindf.ix[idxs[0]:id0 + 1]
# except:
# if absmin.index[0] in idxs:
# slc = list(idxs) + [idxs[-1] + 1]
# amin = mindf.ix[idxs]
# else:
# slc = list(idxs) + list(absmin.index.values)
# return mindf.ix[slc]
#
#
#def plot_j2_surface(data, key='j2', method='wireframe', nxlabel=6,
# nylabel=6, nzlabel=6, minimum=False, figsize=(8,6),
# alpha=0.5, cmap=None, title=None):
# cmap = sns.mpl.pyplot.cm.get_cmap('coolwarm') if cmap is None else cmap
# figargs = {'figsize': figsize}
# axargs = {'alpha': alpha, 'cmap': cmap}
# fig = _plot_surface(data['alpha'], data['gamma'], data['j2'],
# nxlabel, nylabel, nzlabel, method, figargs, axargs)
# ax = fig.gca()
# if 'min' in data and minimum:
# mindf = _get_minimum(data['min'])
# ax.plot(mindf[0], mindf[1], mindf[2], color='k', zorder=2)
# ax.set_ylabel(r'$\gamma$')
# ax.set_xlabel(r'$\\\alpha$')
# ax.set_zlabel(r'J$^{2}$')
# if title is not None:
# ax.set_title(title)
# return fig
#
#
#def plot_j2_contour(data, vmin=None, vmax=None, key='j2', figsize=(8,6),
# nxlabel=6, nylabel=6, method='pcolor', cmap=None, title=None,
# minline=False, minpoint=False, legend=False, colorbar=False):
# vmin = data[key].min() if vmin is None else vmin
# vmax = data[key].max() if vmax is None else vmax
# cmap = sns.mpl.pyplot.cm.get_cmap('coolwarm') if cmap is None else cmap
# figargs = {'figsize': figsize}
# axargs = {'vmin': vmin, 'vmax': vmax, 'cmap': cmap,
# 'zorder': 1, 'rasterized': True}
# fig, cbar = _plot_contour(data['alpha'], data['gamma'], data[key],
# nxlabel, nylabel, method, colorbar, figargs, axargs)
# ax = fig.gca()
# if (minline or minpoint) and 'min' in data:
# mindf = _get_minimum(data['min'])
# if minline:
# ax.plot(mindf[0], mindf[1], label='Min.(J$^{2}$)', color='k', zorder=2)
# if minpoint:
# jmin = mindf[2].argmin()
# labl = '({:.4f},{:.4f})'.format(mindf[0][jmin], mindf[1][jmin])
# ax.scatter([mindf[0][jmin]], [mindf[1][jmin]], label=labl,
# marker='*', color='y', s=200, zorder=3)
# if legend:
# hdls, lbls = ax.get_legend_handles_labels()
# leg = ax.legend(hdls, lbls)
# leg.get_frame().set_alpha(0.5)
# ax.set_ylabel(r'$\gamma$')
# ax.set_xlabel(r'$\\\alpha$')
# if title is not None:
# ax.set_title(title)
# return fig
#
#def photoelectron_spectrum(*unis, filters=None, broaden=0.06, color=None,
# stepe=1, units='eV', fontsize=20, peaklabels=True,
# xlim=None, extra=None, figsize=(10,10)):
# """
# Plot what is essentially a density of states for any number of universes,
# attempting to associate symmetry labels in order of peak positions.
#
# Args
# unis (exatomic.container.Universe): any number of universes with orbitals
# filters (dict,list): dict or list of dicts for each universe
# accepted kwargs: 'shift', uni.orbital column names
# special kwargs: 'shift' shifts energies,
# ['energy', 'eV', units] must be in the form of [min, max]
# Note: values can be strings defining relationships like
# {'occupation': '> 0'}
# units (str): the units in which to display the spectrum
# broaden (float): how broad to convolute each orbital energy (FWHM gaussian)
# color (list): commonly sns.color_palette or ['r', 'g', 'b', ...]
# stepe (int,float): how far to separate symmetry labels on plot (modify for
# units other than 'eV')
# fontsize (int): font size of text on plot (symmetry labels are fontsize - 2)
# peaklabels (bool): if True and symmetry in uni.orbital, put labels on plots
# xlim (tuple): (xmin, xmax)
# extra (dict): Custom plot of additional data on the same figure object
# accepted kwargs: ['x', 'y', 'color', 'label']
# figsize (tuple): matplotlib.figure.Figure figuresize keyword arg
#
# Returns
# fig (matplotlib.figure.Figure): the plot
# """
# pass
## unis = [unis] if not isinstance(unis, list) else unis
## if window is None:
## window = []
## for i, uni in enumerate(unis):
## uni.orbital[units] = uni.orbital['energy'] * Energy['Ha', units]
## window.append([uni.orbital.get_orbital(orb=-15)[units],
## uni.orbital.get_orbital()[units]])
## else:
## if not isinstance(window, list): window = window * len(unis)
## if shift or not isinstance(shift, list):
##def photoelectron_spectrum(ax, unis, window=[-10, 0], broaden=0.6,
## shift=0, label='', color=None, stepe=1, units='eV',
## loc='upper left', fontsize=26, peaks=True,
## xlim=None, ylim=None):
## color = ['r', 'g', 'b', 'c', 'y', 'm', 'k'] if color is None else color
## arrowprops = {'arrowstyle': '->', 'connectionstyle': 'arc3'}
## arrowargs = {'xycoords': 'data', 'textcoords': 'data',
## 'arrowprops': arrowprops, 'fontsize': fontsize}
## unis = [unis] if not isinstance(unis, list) else unis
## xmin, xmax = [], []
## if (len(window) != len(unis) or len(unis) == 2): window = window * len(unis)
## if not isinstance(shift, list): shift = [shift] * len(unis)
## if not isinstance(label, list): label = [label] * len(unis)
## for i, uni in enumerate(unis):
## height = len(unis) - 1 - i
## lo, hi = window[i]
## pes = uni.orbital.convolve(ewin=[lo,hi], broaden=broaden, units=units)[::-1]
## pes[units] = -pes[units]
## pes['shifted'] = pes[units] + shift[i]
## heightened = pes['signal'] + height
## lab = uni.name if uni.name and not label[i] else label[i]
## ax.axhline(height, color='k', linewidth=1.2)
## ax.plot(pes['shifted'], heightened, label=lab, color=color[i % len(color)])
## o = uni.orbital
## o = o[(o[units] > lo) & (o[units] < hi) & (o['occupation'] > 0)].drop_duplicates(
## units).copy().drop_duplicates('vector').sort_values(
## by=units, ascending=False).reset_index()
## o[units] = -o[units]
## leno = len(o)
## switch = leno // 2
## nonzero = pes[pes['signal'] > 0.1]['shifted']
## small = nonzero.min()
## esmall = small - stepe * switch
## elarge = nonzero.max()
## xmin.append(esmall)
## dh = 1 / (switch + 3)
## hlo = height + dh
## hhi = height + (switch + switch % 2) * dh
## for t in range(-20, 20):
## ax.plot([t] * 2, [height, height - 0.05], color='k', linewidth=1)
## if peaks:
## for c, (sym, en) in enumerate(zip(o['symmetry'], o[units] + shift[i])):
## ax.plot([en] * 2, [height, height + 0.05], color='k', linewidth=1)
## astr = r'$' + sym[0].lower() + '_{' + sym[1:].lower() + '}$'
## e = esmall if c < switch else elarge
## h = hlo if c < switch else hhi
## ax.annotate(astr, xy=(en, height + 0.05), xytext=(e, h), **arrowargs)
## if c < switch:
## esmall += stepe
## hlo += dh
## else:
## elarge += stepe * 1.5
## hhi -= dh
## xmax.append(elarge)
## xax = 'E* (' + units + ')' if any((i for i in shift)) else 'E (' + units + ')'
## xlim = (min(xmin), max(xmax)) if xlim is None else xlim
## ylim = (0, len(unis)) if ylim is None else ylim
## ax.set_xlim(xlim)
## ax.set_ylim(ylim)
## ax.set_xlabel(xax)
## ax.legend(loc=loc)
## return ax
#def new_pes(*unis, filters=None, broaden=0.06, color=None, stepe=0.5, units='eV',
# fontsize=20, peaklabels=True, xlim=None, extra=None,
# figsize=(10,10), title=None):
# """
# Things
# """
# def plot_details(ax, dos, xmin, xmax, peaklabels):
# switch = len(o) // 2
# nonzero = dos[dos['signal'] > 0.1]['shifted']
# small = nonzero.min()
# esmall = small - stepe * switch
# elarge = nonzero.max()
# xmin.append(esmall - 0.5)
# xmax.append(elarge + 0.5)
# dh = 1 / (switch + 3)
# hlo = dh
# hhi = (switch + switch % 2) * dh
# for c, (sym, en) in enumerate(zip(o['symmetry'], o['shifted'])):
# ax.plot([en] * 2, [0, 0.05], color='k', linewidth=1)
# if peaklabels:
# if '$' in sym: astr = sym
# else: astr = r'$\textrm{' + sym[0].lower() + '}_{\\large \\textrm{' + sym[1:].lower() + '}}$'
# e = esmall if c < switch else elarge
# h = hlo if c < switch else hhi
# ax.text(e, h, astr, fontsize=fontsize - 4)
# if c < switch:
# esmall += stepe
# hlo += dh
# else:
# elarge += stepe * 1.5
# hhi -= dh
# xmax[-1] = elarge
# return ax, xmin, xmax
#
# def plot_extra(ax, extra):
# for i, stargs in enumerate(zip(extra['x'], extra['y'])):
# kwargs = {'color': extra['color']}
# if isinstance(extra['label'], list):
# kwargs['color'] = extra['color'][i]
# kwargs['label'] = extra['label'][i]
# else:
# if not i: kwargs['label'] = extra['label']
# ax.plot(*stargs, **kwargs)
# ax.legend(frameon=False)
# return ax
#
# nuni = len(unis)
# if filters is None:
# print("filters allows for customization of the plot")
# filters = [{'eV': [-10, 0]}] * nuni
# elif isinstance(filters, dict):
# filters = [filters] * nuni
# elif len(filters) == 1 and isinstance(filters, list):
# filters = filters * nuni
# elif len(filters) != nuni:
# raise Exception("Provide a list of filter dicts same as number of unis.")
# nax = nuni + 1 if extra is not None else nuni
# figargs = {'figsize': figsize}
# fig = _gen_figure(nxplot=nax, nyplot=1, joinx=True, figargs=figargs)
# axs = fig.get_axes()
# color = sns.color_palette('cubehelix', nuni) if color is None else color
# xmin, xmax = [], []
# hdls, lbls = [], []
# for i, (uni, ax, fil) in enumerate(zip(unis, axs, filters)):
# if 'energy' in fil: lo, hi = fil['energy']
# elif units in fil: lo, hi = fil[units]
# else: raise Exception('filters must include an energetic keyword')
# shift = fil['shift'] if 'shift' in fil else 0
# lframe = uni.orbital['group'].astype(int).max()
# dos = uni.orbital.convolve(ewin=[lo,hi], broaden=broaden,
# units=units, frame=lframe)
# dos['shifted'] = dos[units] + shift
# lab = uni.name if uni.name is not None \
# else fil['label'] if 'label' in fil else ''
# dos[dos['signal'] > 0.01].plot(ax=ax, x='shifted', y='signal',
# label=lab, color=color[i % len(color)])
# li = uni.orbital['group'].astype(int).max()
# o = uni.orbital[uni.orbital['group'] == li]
# o = o[(o[units] > lo) & (o[units] < hi) & (o['occupation'] > 0)]
# o = o.drop_duplicates(units).copy().drop_duplicates(
# units).sort_values(by=units).reset_index()
# o['shifted'] = o[units] + shift
# ax, xmin, xmax = plot_details(ax, dos, xmin, xmax, peaklabels)
# if extra:
# axs[-1] = plot_extra(axs[-1], extra)
# xlim = (min(xmin), max(xmax)) if xlim is None else xlim
# if title is not None:
# axs[0].set_title(title)
# for i in range(nax):
# if not (i == nax - 1):
# sns.despine(bottom=True, trim=True)
# axs[i].set_xticks([])
# axs[i].set_xlabel('')
# axs[i].legend(frameon=False)
# axs[i].set_xlim(xlim)
# axs[i].set_yticks([])
# axs[i].set_yticklabels([])
# shifted = any(('shift' in fil for fil in filters))
# xax = 'E* (' + units + ')' if shifted else 'E (' + units + ')'
# axs[-1].set_xlabel(xax)
# nx = 2 if abs(xlim[1] - xlim[0]) > 8 else 1
# axs[-1].set_xticks(np.arange(xlim[0], xlim[1] + 1, nx, dtype=int))
# return fig
#
## Example filter for the following mo_diagram function
## applied to orbital table
##
##mofilters[key] = [{'eV': [-7, 5],
## 'occupation': 2,
## 'symmetry': 'EU'}.copy() for i in range(5)]
##mofilters[key][0]['shift'] = 24.7
##mofilters[key][0]['eV'] = [-30, -10]
##mofilters[key][0]['symmetry'] = '$\pi_{u}$'
##mofilters[key][-1]['eV'] = [0, 10]
##mofilters[key][-1]['shift'] = -11.5
#
#def new_mo_diagram(*unis, filters=None, units='eV', width=0.0625,
# pad_degen=0.125, pad_occ=0.03125, scale_occ=1,
# fontsize=22, figsize=(10,8), labelpos='right',
# ylim=None):
# """
# Args
# unis(exatomic.container.Universe): uni or list of unis
# filters(dict): dict or list of dicts for each uni
# accepted kwargs: 'shift', uni.orbital column names
# special kwargs: 'shift' shifts energies,
# ['energy', 'eV', units] must be of the form [min, max]
# Note: values can be strings defining relationships like
# {'occupation': '> 0'}
# units (str): the units in which to display the MO diagram
# width (float): the width of the line of each energy level
# pad_degen (float): the spacing between degenerate energy levels
# pad_occ (float): the spacing between arrows of occupied levels
# scale_occ (float): scales the size of the occupied arrows
# fontsize (int): font size for text on the MO diagram
# figsize (tuple): matplotlib's figure figsize kwarg
# labelpos (str): ['right', 'bottom'] placement of symmetry labels
#
# Returns
# fig (matplotlib.figure.Figure): the plot
# """
# def filter_orbs(o, fil):
# shift = fil['shift'] if 'shift' in fil else 0
# for key, val in fil.items():
# if key == 'shift': continue
# if isinstance(val, str) and \
# any((i in ['<', '>'] for i in val)):
# o = eval('o[o["' + key + '"] ' + val + ']')
# continue
# val = [val] if not isinstance(val,
# (list,tuple)) else val
# if key in [units, 'energy']:
# if len(val) != 2:
# raise Exception('energy (units) '
# 'filter arguments must be [min, max]')
# o = o[(o[key] > val[0]) & (o[key] < val[1])].copy()
# elif key == 'index':
# o = o.ix[val].copy()
# else:
# o = o[o[key].isin(val)].copy()
# return o, shift
#
# def cull_data(o, shift):
# data = OrderedDict()
# # Deduplicate manually to count degeneracy
# for en, sym, occ in zip(o[units], o['symmetry'], o['occupation']):
# en += shift
# if '$' in sym: pass
# else: sym = '${}_{{{}}}$'.format(sym[0].lower(),
# sym[1:].lower())
# data.setdefault(en, {'degen': 0, 'sym': sym, 'occ': occ})
# data[en]['degen'] += 1
# return data
#
# def offset(degen, pad_degen=pad_degen):
# start = 0.5 - pad_degen * (degen - 1)
# return [start + i * 2 * pad_degen for i in range(degen)]
#
# def occoffset(occ, pad_occ=pad_occ):
# if not occ: return []
# if occ <= 1: return [0]
# if occ <= 2: return [-pad_occ, pad_occ]
#
# def plot_axis(ax, data):
# for nrg, vals in data.items():
# # Format the occupation//symmetry
# occ = np.round(vals['occ']).astype(int)
# # Iterate over degeneracy
# offs = offset(vals['degen'])
# for x in offs:
# ax.plot([x - lw, x + lw], [nrg, nrg],
# color='k', lw=1.2)
# # Iterate over occupation
# for s, ocof in enumerate(occoffset(occ)):
# # Down arrow if beta spin else up arrow
# pt = -2 * lw * scale_occ if s == 1 else 2 * lw * scale_occ
# st = nrg + lw * scale_occ if s == 1 else nrg - lw * scale_occ
# ax.arrow(ocof + x, st, 0, pt, **arrows)
# # Assign symmetry label
# sym = vals['sym']
# if labelpos == 'right':
# ax.text(x + 2 * lw, nrg - lw, sym, fontsize=fontsize - 2)
# elif labelpos == 'bottom':
# ax.text(0.5 - 2 * lw, nrg - 4 * lw, sym, fontsize=fontsize - 2)
# return ax
#
# if filters is None:
# print('filters allows for customization of the plot.')
# filters = {'eV': [-5,5]}
# nunis = len(unis)
# filters = [filters] * nunis if isinstance(filters, dict) else filters
# # Make our figure and axes
# figargs = {'figsize': figsize}
# fig = _gen_figure(nxplot=nunis, nyplot=1, joinx=True, sharex=True, figargs=figargs)
# axs = fig.get_axes()
# # Some initialization
# ymin = np.empty(nunis, dtype=np.float64)
# ymax = np.empty(nunis, dtype=np.float64)
# ysc = exatomic.Energy['eV', units]
# lw = width
# arrows = {'fc': "k", 'ec': "k",
# 'head_width': 0.01,
# 'head_length': 0.05 * ysc}
# for i, (ax, uni, fil) in enumerate(zip(axs, unis, filters)):
# if uni.name: ax.set_title(uni.name)
# o = uni.orbital
# o[units] = o['energy'] * exatomic.Energy['Ha', units]
# o, shift = filter_orbs(o, fil)
# print('Filtered {} eigenvalues from '
# '{}'.format(o.shape[0], uni.name))
# ymin[i] = o[units].min() + shift
# ymax[i] = o[units].max() + shift
# data = cull_data(o, shift)
# ax = plot_axis(ax, data)
# # Go back through axes to set limits
# for i, ax in enumerate(axs):
# ax.set_xlim((0,1))
# ax.xaxis.set_ticklabels([])
# ylims = (min(ymin[~np.isnan(ymin)]) - 1, max(ymax[~np.isnan(ymax)]) + 1) \
# if ylim is None else ylim
# ax.set_ylim(ylims)
# if not i:
# ax.set_ylabel('E ({})'.format(units), fontsize=fontsize)
# diff = ylims[1] - ylims[0]
# headlen = 0.05 * diff
# ax.arrow(0.05, ylims[0], 0, diff - headlen, fc="k", ec="k",
# head_width=0.05, head_length= headlen)
# sns.despine(left=True, bottom=True, right=True)
# return fig
#
## unis = [unis] if not isinstance(unis, list) else unis
## if window is None:
## window = []
## for i, uni in enumerate(unis):
## uni.orbital[units] = uni.orbital['energy'] * Energy['Ha', units]
## window.append([uni.orbital.get_orbital(orb=-15)[units],
## uni.orbital.get_orbital()[units]])
## else:
## if not isinstance(window, list): window = window * len(unis)
## if shift or not isinstance(shift, list):
##def photoelectron_spectrum(ax, unis, window=[-10, 0], broaden=0.6,
## shift=0, label='', color=None, stepe=1, units='eV',
## loc='upper left', fontsize=26, peaks=True,
## xlim=None, ylim=None):
## color = ['r', 'g', 'b', 'c', 'y', 'm', 'k'] if color is None else color
## arrowprops = {'arrowstyle': '->', 'connectionstyle': 'arc3'}
## arrowargs = {'xycoords': 'data', 'textcoords': 'data',
## 'arrowprops': arrowprops, 'fontsize': fontsize}
## unis = [unis] if not isinstance(unis, list) else unis
## xmin, xmax = [], []
## if (len(window) != len(unis) or len(unis) == 2): window = window * len(unis)
## if not isinstance(shift, list): shift = [shift] * len(unis)
## if not isinstance(label, list): label = [label] * len(unis)
## for i, uni in enumerate(unis):
## height = len(unis) - 1 - i
## lo, hi = window[i]
## pes = uni.orbital.convolve(ewin=[lo,hi], broaden=broaden, units=units)[::-1]
## pes[units] = -pes[units]
## pes['shifted'] = pes[units] + shift[i]
## heightened = pes['signal'] + height
## lab = uni.name if uni.name and not label[i] else label[i]
## ax.axhline(height, color='k', linewidth=1.2)
## ax.plot(pes['shifted'], heightened, label=lab, color=color[i % len(color)])
## o = uni.orbital
## o = o[(o[units] > lo) & (o[units] < hi) & (o['occupation'] > 0)].drop_duplicates(
## units).copy().drop_duplicates('vector').sort_values(
## by=units, ascending=False).reset_index()
## o[units] = -o[units]
## leno = len(o)
## switch = leno // 2
## nonzero = pes[pes['signal'] > 0.1]['shifted']
## small = nonzero.min()
## esmall = small - stepe * switch
## elarge = nonzero.max()
## xmin.append(esmall)
## dh = 1 / (switch + 3)
## hlo = height + dh
## hhi = height + (switch + switch % 2) * dh
## for t in range(-20, 20):
## ax.plot([t] * 2, [height, height - 0.05], color='k', linewidth=1)
## if peaks:
## for c, (sym, en) in enumerate(zip(o['symmetry'], o[units] + shift[i])):
## ax.plot([en] * 2, [height, height + 0.05], color='k', linewidth=1)
## astr = r'$' + sym[0].lower() + '_{' + sym[1:].lower() + '}$'
## e = esmall if c < switch else elarge
## h = hlo if c < switch else hhi
## ax.annotate(astr, xy=(en, height + 0.05), xytext=(e, h), **arrowargs)
## if c < switch:
## esmall += stepe
## hlo += dh
## else:
## elarge += stepe * 1.5
## hhi -= dh
## xmax.append(elarge)
## xax = 'E* (' + units + ')' if any((i for i in shift)) else 'E (' + units + ')'
## xlim = (min(xmin), max(xmax)) if xlim is None else xlim
## ylim = (0, len(unis)) if ylim is None else ylim
## ax.set_xlim(xlim)
## ax.set_ylim(ylim)
## ax.set_xlabel(xax)
## ax.legend(loc=loc)
## return ax
| exa-analytics/atomic | exatomic/mpl.py | Python | apache-2.0 | 23,126 | [
"Gaussian"
] | 0a9fd317200b7a5dbb7e6d4959c159980b3d6780c1b75200516f7922bb0b0e7e |
"""Test VTK_IGNORE_BTX setting to ensure that it is ON
"""
import sys
import vtk
from vtk.test import Testing
class TestIgnoreBTX(Testing.vtkTest):
    """Verify that the build was configured with VTK_IGNORE_BTX=ON."""

    def testIgnoreBTX(self):
        """Call a method that is BTX'd; if BTX markers were honored the
        method would not be wrapped and this call would fail."""
        string_array = vtk.vtkStringArray()
        info = vtk.vtkInformation()
        string_array.CopyInformation(info, 0)


if __name__ == "__main__":
    Testing.main([(TestIgnoreBTX, 'test')])
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/Common/Core/Testing/Python/TestIgnoreBTX.py | Python | gpl-3.0 | 477 | [
"VTK"
] | 4e96a8fb67e3fb3c29a72866dd8db2eacadc1e9c62d1065cb9f19a329c3edbfa |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classe:
SurnamePage - creates list of individuals with same surname
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.plug.report import utils
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (name_to_md5, _NAME_STYLE_FIRST,
_find_birth_date, _find_death_date,
FULLCLEAR, html_escape)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# create the page from SurnameListPage
#
#################################################
class SurnamePage(BasePage):
    """
    This will create a list of individuals with the same surname
    """
    def __init__(self, report, title, surname, ppl_handle_list):
        """
        @param: report          -- The instance of the main report class for
                                   this report
        @param: title           -- Is the title of the web page
        @param: surname         -- The surname to use
        @param: ppl_handle_list -- The list of people for whom we need to create
                                   a page.
        """
        BasePage.__init__(self, report, title)
        # module variables
        showbirth = report.options['showbirth']
        showdeath = report.options['showdeath']
        showpartner = report.options['showpartner']
        showparents = report.options['showparents']
        if surname == '':
            surname = self._("<absent>")
        output_file, sio = self.report.create_file(name_to_md5(surname), "srn")
        self.uplink = True
        (surnamepage, head,
         body) = self.write_header("%s - %s" % (self._("Surname"), surname))
        # ldatec tracks the most recent change time of any listed person;
        # it is passed to the footer / writer as the page modification date.
        ldatec = 0
        # begin SurnameDetail division
        with Html("div", class_="content", id="SurnameDetail") as surnamedetail:
            body += surnamedetail
            # section title
            surnamedetail += Html("h3", html_escape(surname), inline=True)
            # feature request 2356: avoid genitive form
            msg = self._("This page contains an index of all the individuals "
                         "in the database with the surname of %s. "
                         "Selecting the person\u2019s name "
                         "will take you to that person\u2019s "
                         "individual page.") % html_escape(surname)
            surnamedetail += Html("p", msg, id="description")
            # begin surname table and thead
            with Html("table", class_="infolist primobjlist surname") as table:
                surnamedetail += table
                thead = Html("thead")
                table += thead
                trow = Html("tr")
                thead += trow
                # Name Column
                trow += Html("th", self._("Given Name"), class_="ColumnName",
                             inline=True)
                if showbirth:
                    trow += Html("th", self._("Birth"), class_="ColumnDate",
                                 inline=True)
                if showdeath:
                    trow += Html("th", self._("Death"), class_="ColumnDate",
                                 inline=True)
                if showpartner:
                    trow += Html("th", self._("Partner"),
                                 class_="ColumnPartner",
                                 inline=True)
                if showparents:
                    trow += Html("th", self._("Parents"),
                                 class_="ColumnParents",
                                 inline=True)
                # begin table body
                tbody = Html("tbody")
                table += tbody
                for person_handle in sorted(ppl_handle_list,
                                            key=self.sort_on_name_and_grampsid):
                    person = self.r_db.get_person_from_handle(person_handle)
                    if person.get_change_time() > ldatec:
                        ldatec = person.get_change_time()
                    trow = Html("tr")
                    tbody += trow
                    # firstname column
                    link = self.new_person_link(person_handle, uplink=True,
                                                person=person,
                                                name_style=_NAME_STYLE_FIRST)
                    trow += Html("td", link, class_="ColumnName")
                    # birth column
                    if showbirth:
                        tcell = Html("td", class_="ColumnBirth", inline=True)
                        trow += tcell
                        birth_date = _find_birth_date(self.r_db, person)
                        if birth_date is not None:
                            if birth_date.fallback:
                                # Fallback dates (e.g. baptism) are emphasized.
                                tcell += Html('em',
                                              self.rlocale.get_date(birth_date),
                                              inline=True)
                            else:
                                tcell += self.rlocale.get_date(birth_date)
                        else:
                            tcell += "\u00a0"  # non-breaking space placeholder
                    # death column
                    if showdeath:
                        tcell = Html("td", class_="ColumnDeath", inline=True)
                        trow += tcell
                        death_date = _find_death_date(self.r_db, person)
                        if death_date is not None:
                            if death_date.fallback:
                                tcell += Html('em',
                                              self.rlocale.get_date(death_date),
                                              inline=True)
                            else:
                                tcell += self.rlocale.get_date(death_date)
                        else:
                            tcell += "\u00a0"  # non-breaking space placeholder
                    # partner column
                    if showpartner:
                        tcell = Html("td", class_="ColumnPartner")
                        trow += tcell
                        family_list = person.get_family_handle_list()
                        if family_list:
                            fam_count = 0
                            for family_handle in family_list:
                                fam_count += 1
                                family = self.r_db.get_family_from_handle(
                                    family_handle)
                                partner_handle = utils.find_spouse(
                                    person, family)
                                if partner_handle:
                                    link = self.new_person_link(partner_handle,
                                                                uplink=True)
                                    # Separate multiple partners with commas.
                                    if fam_count < len(family_list):
                                        if isinstance(link, Html):
                                            link.inside += ","
                                        else:
                                            link += ','
                                    tcell += link
                        else:
                            tcell += "\u00a0"  # non-breaking space placeholder
                    # parents column
                    if showparents:
                        parent_hdl_list = person.get_parent_family_handle_list()
                        if parent_hdl_list:
                            parent_hdl = parent_hdl_list[0]
                            fam = self.r_db.get_family_from_handle(parent_hdl)
                            f_id = fam.get_father_handle()
                            m_id = fam.get_mother_handle()
                            mother = father = None
                            if f_id:
                                father = self.r_db.get_person_from_handle(f_id)
                                if father:
                                    father_name = self.get_name(father)
                            if m_id:
                                mother = self.r_db.get_person_from_handle(m_id)
                                if mother:
                                    mother_name = self.get_name(mother)
                            if mother and father:
                                tcell = Html("span", father_name,
                                             class_="father fatherNmother")
                                tcell += Html("span", mother_name,
                                              class_="mother")
                            elif mother:
                                tcell = Html("span", mother_name,
                                             class_="mother", inline=True)
                            elif father:
                                tcell = Html("span", father_name,
                                             class_="father", inline=True)
                            else:
                                # Family record without resolvable parents.
                                tcell = "\u00a0"
                            # BUGFIX: 'samerow' was previously assigned only on
                            # some branches, so rows with a mother (or both
                            # parents) hit an unbound or stale variable below.
                            samerow = False
                        else:
                            tcell = "\u00a0"  # pylint: disable=R0204
                            samerow = True
                        trow += Html("td", tcell,
                                     class_="ColumnParents", inline=samerow)
        # add clearline for proper styling
        # add footer section
        footer = self.write_footer(ldatec)
        body += (FULLCLEAR, footer)
        # send page out for processing
        # and close the file
        self.xhtml_writer(surnamepage, output_file, sio, ldatec)
| jralls/gramps | gramps/plugins/webreport/surname.py | Python | gpl-2.0 | 11,593 | [
"Brian"
] | c5c5debe19664a4ea3c0dfa13d602e780692745b949fb7008b30f282af211fc3 |
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
import httplib2
import gflags
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run_flow
from datetime import datetime
from datetime import timedelta
import pytz
import dateutil.parser
import sys
import os
import json
from flask import Flask
from flask import render_template
app = Flask(__name__)
import calendar_config
FLAGS = gflags.FLAGS
# had to install:
# sudo apt-get update
# sudo apt-get install python-pip
# sudo pip install --upgrade google-api-python-client python-gflags python-dateutil Flask pytz
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications
# The client_id and client_secret can be found in Google Developers Console
# OAuth2 flow configured from the project-local calendar_config module;
# used only when no valid cached credentials are found below.
FLOW = OAuth2WebServerFlow(
    client_id=calendar_config.CLIENT_ID,
    client_secret=calendar_config.CLIENT_SECRET,
    scope=calendar_config.SCOPE,
    user_agent=calendar_config.USER_AGENT)
# To disable the local server feature, uncomment the following line:
# FLAGS.auth_local_webserver = False
# If the Credentials don't exist or are invalid, run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
    credentials = run_flow(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google Developers Console
# to get a developerKey for your own application.
service = build(serviceName='calendar', version='v3', http=http,
                developerKey=calendar_config.DEVELOPER_KEY)
# Time zones used by the ad-hoc offset arithmetic in the view functions.
# NOTE(review): both zones are referenced inconsistently below -- confirm
# which one the deployment actually lives in.
la = pytz.timezone("America/Los_Angeles")
de = pytz.timezone("Europe/Berlin")
def create_time_string(dt):
    """Format a timedelta as a human-readable string.

    Examples: '5 minutes', '1 hour and 30 minutes', '26 hours and 0 minutes'.
    Returns None for a falsy input (None or a zero-length delta).

    Uses total_seconds() so spans of a day or more are reported correctly;
    the previous dt.seconds silently dropped the .days component.
    """
    if not dt:
        return None
    total_seconds = int(dt.total_seconds())
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    h = 'hour' if hours == 1 else 'hours'
    m = 'minute' if minutes == 1 else 'minutes'
    if hours == 0:
        return '%s %s' % (minutes, m)
    return '%s %s and %s %s' % (hours, h, minutes, m)
# This method has a very sub-optimal approach to time zones.
@app.route('/calendars')
def calendars():
    """Overview page: which rooms are busy now, busy soon, or free.

    Queries the Google Calendar freebusy API over an 8-hour window for
    the calendars listed in calendar_config.CALENDAR_IDS, and caches the
    name -> calendar-id mapping in calendars.json for get_events().
    """
    calendars = {}
    items = []
    free_rooms = []
    events = []
    upcoming = []
    # NOTE(review): the +/- 8 and +1/-7 hour offsets below hand-compensate
    # time-zone differences with naive arithmetic -- verify against the
    # deployment time zone before changing anything here.
    now = de.localize(datetime.now()) + timedelta(hours=8)
    print(now)
    start_time = now - timedelta(hours=8)
    end_time = start_time + timedelta(hours=8)
    calendar_list = service.calendarList().list().execute()
    for calendar_list_entry in calendar_list['items']:
        if calendar_list_entry['id'] in calendar_config.CALENDAR_IDS:
            calendars[calendar_list_entry['id']] = calendar_list_entry['summary']
            items.append({'id': calendar_list_entry['id']})
            free_rooms.append(calendar_list_entry['id'])
    # store this to a local file
    with open('calendars.json', mode='w') as calendar_file:
        json.dump({value: key for key, value in calendars.items()}, calendar_file)
    free_busy = service.freebusy().query(body={"timeMin": start_time.isoformat(),
                                               "timeMax": end_time.isoformat(),
                                               "items":items}).execute()
    for calendar in free_busy['calendars']:
        data = free_busy['calendars'][calendar]
        if data['busy']:
            # Only the first busy interval of the window is considered.
            busy = data['busy'][0]
            start = dateutil.parser.parse(busy['start']) + timedelta(hours=1)
            end = dateutil.parser.parse(busy['end']) + timedelta(hours=1)
            diff = start - (now - timedelta(hours=7))
            print(diff)
            event = {'room': calendars[calendar],
                     'start': start.strftime("%l:%M%p"),
                     'end': end.strftime("%l:%M%p")}
            if diff < timedelta(minutes=5):
                # Starts within 5 minutes (or already running): busy.
                events.append(event)
                free_rooms.remove(calendar)
            elif diff < timedelta(minutes=35):
                # Starts within 35 minutes: upcoming.
                upcoming.append(event)
                free_rooms.remove(calendar)
    return render_template('calendars.html',
                           events=events,
                           upcoming=upcoming,
                           now=start_time.strftime("%A %e %B %Y, %l:%M%p"),
                           free_rooms=[calendars[f] for f in free_rooms])
def get_events(room_name):
    """Return today's schedule and free/busy status for one room.

    Returns a dict with keys: 'room' (calendar summary), 'status'
    ('FREE' | 'BUSY' | 'SOON'), 'now' (formatted timestamp), 'events'
    (remaining meetings today), and 'next_start_str' / 'next_end_str'
    (human-readable countdowns, or None).
    """
    items = []
    now = datetime.utcnow()
    # NOTE(review): despite the name, la_offset is the Berlin ('de')
    # offset; naive datetimes are shifted by hand throughout -- confirm
    # the intended time zone before touching this.
    la_offset = de.utcoffset(datetime.utcnow())
    print(la_offset)
    now = now + la_offset
    start_time = datetime(year=now.year, month=now.month, day=now.day, tzinfo=la)
    end_time = start_time + timedelta(days=1)
    print "Running at", now.strftime("%A %e %B %Y, %l:%M%p")
    print "Room name", room_name
    if not os.path.isfile('calendars.json'):
        # this is duplicated from the calendars() method
        calendars = {}
        calendar_list = service.calendarList().list().execute()
        for calendar_list_entry in calendar_list['items']:
            if calendar_list_entry['id'] in calendar_config.CALENDAR_IDS:
                calendars[calendar_list_entry['id']] = calendar_list_entry['summary']
        # store this to a local file
        with open('calendars.json', mode='w') as calendar_file:
            json.dump({value: key for key, value in calendars.items()}, calendar_file)
    with open('calendars.json', 'r') as f:
        calendars = json.load(f)
    room_id = calendars[room_name]
    events = service.events().list(
        calendarId=room_id,
        orderBy='startTime',
        singleEvents=True,
        timeMin=start_time.isoformat(),
        timeMax=end_time.isoformat()
    ).execute()
    next_start = None
    next_end = None
    status = "FREE"
    for event in events['items']:
        # +9h: shift event times into the display's local frame (see NOTE above).
        start = dateutil.parser.parse(event['start']['dateTime']).replace(tzinfo=None) + timedelta(hours=9)
        end = dateutil.parser.parse(event['end']['dateTime']).replace(tzinfo=None) +timedelta(hours=9)
        # print(events)
        # print(start)
        # print(end)
        if now <= end:
            if 'summary' in event.keys():
                items.append({'name': event['summary'],
                              'creator': event['creator']['email'],
                              'start': start.strftime("%l:%M%p"),
                              'end': end.strftime("%l:%M%p"),
                              })
            else:
                # Private events come back without a summary field.
                items.append({'name': 'blocked',
                              'creator': event['creator'] ['email'],
                              'start': start.strftime("%l:%M%p"),
                              'end': end.strftime("%l:%M%p"),
                              })
        if start < now and end > now:
            status = "BUSY"
            next_end = end - now
        if start > now and not next_start:
            # First event that has not started yet.
            next_start = start - now
    next_start_str = create_time_string(next_start)
    next_end_str = create_time_string(next_end)
    if status == "FREE" and next_start and next_start < timedelta(minutes=15):
        status = "SOON"
    return {'room': events['summary'],
            'status': status,
            'now': now.strftime("%A %e %B %Y, %l:%M%p"),
            'events': items,
            'next_start_str': next_start_str,
            'next_end_str': next_end_str}
@app.route('/index/<room_name>')
def index(room_name=None):
    """Render the schedule page for a single room."""
    info = get_events(room_name)
    return render_template('index.html',
                           status=info['status'],
                           events=info['events'],
                           next_start_str=info['next_start_str'],
                           next_end_str=info['next_end_str'],
                           now=info['now'],
                           room=room_name)
@app.route('/<room_id>')
def main(room_id):
    # Thin wrapper page; the template's client side loads /index/<room_id>.
    return render_template('main.html', room=room_id)
if __name__ == '__main__':
    # Bind to 0.0.0.0 so kiosk displays elsewhere on the LAN can connect.
    app.run(host="0.0.0.0", debug=True)
| neotea/google-calendar-display | server.py | Python | apache-2.0 | 8,164 | [
"VisIt"
] | af163b40e9cb5eb20f33b19730d73b56f5541aaab9eb256f5e639c0cf15e3dfb |
# -*- coding: utf-8
from yade import ymport, utils,pack,export
import gts,os
from yade import geom
#import matplotlib
from yade import plot
#from pylab import *
#import os.path, locale
#### set False when running in batch mode
#defaultTable = True
defaultTable = False
####-------------------------------------
####-------------------------------------
utils.readParamsFromTable(
E = 202240000000,
noTableOk = True
)
from yade.params.table import *
#print 'E=',0
O.tags['description']='triaxial_E_'+str(E)
#################################
##### FUNCTIONS ####
#################################
def hMax(n):
    """Return the maximum coordinate along axis ``n`` over all bodies,
    extended by the radius of the body that attains it (i.e. the outer
    surface of the packing along that axis)."""
    top_id = 0
    top = -1000000.0
    for body in O.bodies:
        coord = body.state.pos[n]
        if coord > top:
            top = coord
            top_id = body.id
    return top + O.bodies[top_id].shape.radius
def hMin(n):
    """Return the minimum coordinate along axis ``n`` over all bodies,
    lowered by the radius of the body that attains it."""
    low_id = 0
    low = 100000.0
    for body in O.bodies:
        coord = body.state.pos[n]
        if coord < low:
            low = coord
            low_id = body.id
    return low - O.bodies[low_id].shape.radius
# Smallest and largest sphere radius present in the scene.
def MinMax():
    """Return ``[rmin, rmax]`` over all spherical bodies; non-sphere
    bodies (walls, grid nodes, facets) are ignored."""
    largest = 0
    smallest = 10
    for body in O.bodies:
        if type(body.shape) == Sphere:
            rad = body.shape.radius
            if rad > largest:
                largest = rad
            if rad < smallest:
                smallest = rad
    return [smallest, largest]
def sup():
    """Delete every sphere whose z-coordinate exceeds 0.098 m.

    The ids are collected first and erased afterwards: erasing bodies
    while iterating ``O.bodies`` mutates the container mid-iteration
    and can skip bodies or invalidate the iterator.
    """
    doomed = [b.id for b in O.bodies
              if type(b.shape) == Sphere and b.state.pos[2] > 0.098]
    for bid in doomed:
        O.bodies.erase(bid)
def scalar(u, v):
    """Return the dot product of the first three components of u and v."""
    x0, y0, z0 = u[0], u[1], u[2]
    x1, y1, z1 = v[0], v[1], v[2]
    return x0 * x1 + y0 * y1 + z0 * z1
def cross(u, v):
    """Return the cross product u x v as a Vector3."""
    cx = u[1] * v[2] - u[2] * v[1]
    cy = u[2] * v[0] - u[0] * v[2]
    cz = u[0] * v[1] - u[1] * v[0]
    return Vector3(cx, cy, cz)
def limitfinder():
    """Tag membrane grid nodes near the cylinder ends as boundary nodes.

    Nodes with z >= L - 2*radius go into ``top_boundary``, nodes with
    z < 0.1*radius into ``bottom_boundary``.  Both groups are colored
    red and have their z translation blocked so the membrane ends stay
    level with the loading plates.
    """
    for body in O.bodies:
        if body.state.pos[2] >= L - 2 * radius:
            if isinstance(body.shape, GridNode):
                top_boundary.append(body.id)
                body.shape.color = (1, 0, 0)
                body.state.blockedDOFs = 'z'
        if body.state.pos[2] < 0.1 * radius:
            if isinstance(body.shape, GridNode):
                bottom_boundary.append(body.id)
                body.state.blockedDOFs = 'z'
                body.shape.color = (1, 0, 0)
##############################
##### SCRIPT ####
##############################
# Create the output directories; ignore "already exists" but do not
# swallow unrelated failures (the previous bare 'except:' also hid
# SystemExit and KeyboardInterrupt).
for _outdir in ('data', 'paraview'):
    try:
        os.mkdir(_outdir)
    except OSError:
        pass
isBatch = runningInBatch()
####################
### ENGINES ###
####################
O.engines=[
ForceResetter(),
InsertionSortCollider([
Bo1_Sphere_Aabb(),
Bo1_Wall_Aabb(),
Bo1_PFacet_Aabb(),
Bo1_Facet_Aabb(),
]),
InteractionLoop([
Ig2_GridNode_GridNode_GridNodeGeom6D(),
Ig2_GridConnection_GridConnection_GridCoGridCoGeom(),
Ig2_Sphere_PFacet_ScGridCoGeom(),
Ig2_Sphere_Sphere_ScGeom(),
Ig2_Facet_Sphere_ScGeom(),
Ig2_Wall_Sphere_ScGeom()
],
[Ip2_CohFrictMat_CohFrictMat_CohFrictPhys(setCohesionNow=True,setCohesionOnNewContacts=True),
Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom6D_CohFrictPhys_CohesionMoment(),
Law2_ScGeom_FrictPhys_CundallStrack(),
Law2_ScGridCoGeom_FrictPhys_CundallStrack(),
Law2_GridCoGridCoGeom_FrictPhys_CundallStrack()
]
),
]
######################
### PROPERTIES ###
######################
rm=0.33
radius=0.0025*rm
sigma=-3e6
#### Parameters of a rectangular grid ###
L=0.205 #length [m]
l=0.101/2. #half width (radius) [m]
nbL=36#number of nodes for the length [#] doit etre paire
nbl=44 #number of nodes for the perimeter [#] ABSOLUMENT MULTIPLE de 4 !!!
#nbL=1 #number of nodes for the length [#] doit etre paire
#nbl=4 #number of nodes for the perimeter [#] ABSOLUMENT MULTIPLE de 4 !!!
r=radius
color=[155./255.,155./255.,100./255.]
oriBody = Quaternion(Vector3(0,0,1),(pi/2))
nodesIds=[]
nodesIds1=[]
cylIds=[]
pfIds=[]
top_boundary=[]
bottom_boundary=[]
####################
### MATERIAL ###
####################
poisson=0.28
#E=2*7.9e10*(1+poisson)
density=7.8e10
Et=0
frictionAngle=0.096
frictionAngleW=0.228
O.materials.append(CohFrictMat(young=E*0.1,poisson=poisson,density=density,frictionAngle=frictionAngle,normalCohesion=1e19,shearCohesion=1e19,momentRotationLaw=False,alphaKr=0,label='NodeMat'))
O.materials.append(FrictMat(young=E*0.1,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Pmat'))
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngle,label='Smat'))
##############################
### SAMPLE GENERATION ###
##############################
kw={'color':[1,1,1],'wire':False,'dynamic':True,'material':2}
pile=ymport.text('spheres.txt',**kw)
pile2=O.bodies.append(pile)
#sup()
print hMin(2), hMax(2)
zmin=hMin(2)
zmax=hMax(2)
#L=hMax(2)
#################################
#### MEMBRANE GENERATION ###
#################################
mesh=2
#Create all nodes first :
for i in range(0,nbL+1):
for j in range(0,nbl):
z=i*L/float(nbL)
y=l*sin(2*pi*j/float(nbl))
x=l*cos(2*pi*j/float(nbl))
nodesIds.append( O.bodies.append(gridNode([x,y,z],r,wire=False,fixed=False,material='NodeMat',color=color)) )
##Create connection between the nodes
for i in range(0,nbL+1):
for j in range(0,nbl-1):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
for i in range(0,nbL,1):
for j in range(0,nbl):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],r,color=color,mask=5,material='Pmat',Et=Et) )
for i in range(-1,nbL):
j=nbl
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='Pmat',Et=Et) )
for i in range(0,nbL):
for j in range(0,nbl-1):
if (j%2==0):
O.bodies.append( gridConnection(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
else:
O.bodies.append( gridConnection(nodesIds[(i+1)*nbl+j],nodesIds[i*nbl+j+1],r,color=color,mask=5,material='Pmat',Et=Et) )
for i in range(0,nbL):
j=nbl
#O.bodies[nodesIds[(i-1)*nbl+j]].shape.color=Vector3(155./255.,155./255.,1.)
#O.bodies[nodesIds[(i)*nbl+j-1]].shape.color=Vector3(1,0,0)
O.bodies.append( gridConnection(nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],r,color=color,mask=5,material='Pmat',Et=Et) )
###Create PFacets
##wire=True
for i in range(0,nbL):
for j in range(0,nbl-1):
if (j%2==0):
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='Pmat')))
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j+1],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='Pmat')))
else:
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j],nodesIds[(i+1)*nbl+j],nodesIds[(i)*nbl+j+1],color=color,mask=5,material='Pmat')))
pfIds.append(O.bodies.append(pfacet(nodesIds[i*nbl+j+1],nodesIds[(i+1)*nbl+j],nodesIds[(i+1)*nbl+j+1],color=color,mask=5,material='Pmat')))
for i in range(0,nbL,1):
j=nbl
pfIds.append(O.bodies.append(pfacet( nodesIds[i*nbl+j],nodesIds[(i-1)*nbl+j],nodesIds[(i+1)*nbl+j-1],color=color,material='Pmat' )))
pfIds.append(O.bodies.append(pfacet( nodesIds[(i)*nbl+j-1],nodesIds[(i+1)*nbl+j-1],nodesIds[(i-1)*nbl+j],color=color,material='Pmat' )))
limitfinder()
#########################
##### WALL GENERATION ##
#########################
O.materials.append(FrictMat(young=E,poisson=poisson,density=density,frictionAngle=frictionAngleW,label='Wmat'))
topPlate=utils.wall(position=hMax(2)+radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
O.bodies.append(topPlate)
bottomPlate=utils.wall(position=-hMin(2)-radius,sense=0, axis=2,color=Vector3(1,0,0),material='Wmat')
O.bodies.append(bottomPlate)
###################
#### APPLY LOAD ##
###################
#### APPLY CONFINING PRESSURE
def Apply_confiningpressure():
    """Apply the confining pressure ``sigma`` to every membrane PFacet.

    For each facet, the total force (facet area * sigma, directed along
    the facet normal) is split between the three nodes using the
    barycentric weights of the facet centroid; the forces are re-added
    every step (permanent=False).
    """
    for fid in pfIds:
        shape = O.bodies[fid].shape
        n1, n2, n3 = shape.node1, shape.node2, shape.node3
        e0 = n3.state.pos - n1.state.pos
        e1 = n2.state.pos - n1.state.pos
        e2 = n2.state.pos - n3.state.pos
        centroid = (n1.state.pos + n2.state.pos + n3.state.pos) / 3
        # Barycentric coordinates of the centroid w.r.t. the triangle.
        v2 = centroid - n1.state.pos
        dot00 = scalar(e0, e0)
        dot01 = scalar(e0, e1)
        dot02 = scalar(e0, v2)
        dot11 = scalar(e1, e1)
        dot12 = scalar(e1, v2)
        inv_denom = 1 / (dot00 * dot11 - dot01 * dot01)
        w1 = (dot11 * dot02 - dot01 * dot12) * inv_denom
        w2 = (dot00 * dot12 - dot01 * dot02) * inv_denom
        w3 = 1 - w1 - w2
        # Facet area via Heron's formula on the three edge lengths.
        a = sqrt(scalar(e0, e0))
        b = sqrt(scalar(e1, e1))
        c = sqrt(scalar(e2, e2))
        s = 0.5 * (a + b + c)
        area = sqrt(s * (s - a) * (s - b) * (s - c))
        force = area * sigma
        normal = cross(e0, e1)
        normal = normal / normal.norm()
        O.forces.addF(n1.id, force * w1 * normal, permanent=False)
        O.forces.addF(n2.id, force * w2 * normal, permanent=False)
        O.forces.addF(n3.id, force * w3 * normal, permanent=False)
#Apply_confiningpressure()
sigma3=0
def check_confiningpressure():
    """Accumulate |F|/area over all membrane facets into the global
    ``sigma3`` and return it.

    NOTE(review): the node forces summed here include all interaction
    contributions, so this is an estimate of the pressure actually
    transmitted through the membrane -- confirm before using it
    quantitatively.
    """
    global sigma3
    sigma3 = 0
    for fid in pfIds:
        shape = O.bodies[fid].shape
        n1, n2, n3 = shape.node1, shape.node2, shape.node3
        e0 = n3.state.pos - n1.state.pos
        e1 = n2.state.pos - n1.state.pos
        e2 = n2.state.pos - n3.state.pos
        # Heron's formula for the facet area.
        a = sqrt(scalar(e0, e0))
        b = sqrt(scalar(e1, e1))
        c = sqrt(scalar(e2, e2))
        s = 0.5 * (a + b + c)
        area = sqrt(s * (s - a) * (s - b) * (s - c))
        ftot = (O.forces.f(n1.id) + O.forces.f(n2.id) + O.forces.f(n3.id)).norm()
        sigma3 = sigma3 + ftot / area
    return sigma3
# Top-plate height (z); refreshed by dataCollector() and used as the
# stop criterion for the run.
pos=topPlate.state.pos[2]
def dataCollector():
	"""PyRunner callback: record plate forces/stresses each iteration.

	Note the stop test uses the global ``pos`` recorded on the *previous*
	call: once it has dropped below 0.16 the simulation is stopped, the data
	are written out and yade exits.
	"""
	global pos
	if(pos<0.16):
		O.wait()
		saveData()
		O.exitNoBacktrace()
	S=pi*l**2	# sample cross-section area (l presumably the radius — confirm)
	Fnt=O.forces.f(topPlate.id)[2]	# axial force on the top plate
	Fnb=O.forces.f(bottomPlate.id)[2]	# axial force on the bottom plate
	sigma1=Fnt/S	# axial stress
	sigma3=check_confiningpressure()	# lateral (confining) stress
	pos=topPlate.state.pos[2]
	plot.addData(t=O.time,pos=pos,Fnt=Fnt,Fnb=Fnb,sigma1=sigma1,sigma3=sigma3,unbF=unbalancedForce())
def saveData():
	"""Dump every recorded plot series to data/<description>.dat."""
	plot.saveDataTxt('data/'+O.tags['description']+'.dat',vars=('t','pos','Fnt','Fnb','sigma1','sigma3','unbF'))
# Live plot layout: sigma1 on the left axis, sigma3 on the right.
plot.plots={'t':('sigma1',None,'sigma3')}
#### MOVE TOP AND BOTTOM WALL
# Axial loading velocity of the top plate.
v=1.7e-03
#v=1
def moveWall(v):
	"""Start axial loading: drive the top plate downwards at speed v."""
	topPlate.state.vel=(0,0,-v)
	#bottomPlate.state.vel=(0,0,v)
# Gravity is switched off for this test.
#g=-9.81
g=0
#moveWall(v)
#limitfinder()
###########################
##### ENGINE DEFINITION ##
###########################
# Timestep: half the p-wave critical step for stability.
O.dt=0.5*PWaveTimeStep()
# Extend the pre-existing engine list: apply the membrane pressure every
# step, integrate motion, collect data every step and dump VTK output.
O.engines=O.engines+[
	PyRunner(iterPeriod=1,dead=False,command='Apply_confiningpressure()'),
	NewtonIntegrator(damping=0.7,gravity=(0,0,g),label='Newton'),
	PyRunner(initRun=True,iterPeriod=1,command='dataCollector()'),
	VTKRecorder(iterPeriod=500,initRun=True,fileName='paraview/'+O.tags['description']+'_',recorders=['spheres','velocity']),
]
if not isBatch:
	# VISUALIZATION
	# Interactive run: open the yade controller and live plot, then run
	# a fixed number of iterations.
	from yade import qt
	qt.Controller()
	#qtv = qt.View()
	#qtr = qt.Renderer()
	plot.plot(noShow=False, subPlots=True)
	O.run(5000)
	#moveWall(v)
else:
	# Batch run: one blocking step to initialize, then start the axial
	# loading and wait for the simulation to finish.
	O.run(1,True)
	moveWall(v)
	O.wait()
saveData() | anna-effeindzourou/trunk | examples/anna_scripts/triax/triaxial_E.py | Python | gpl-2.0 | 11,076 | [
"ParaView"
] | 6c5facc5b80f1117e9efeba8fcf0acf9fa48a5fb5a42612193526b05798c7198 |
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" Tests for pkgdb messages """
import unittest
from fedmsg.tests.test_meta import Base
from .common import add_doc
class TestPkgdbACLUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes these messages when an ACL changes on a package.
    """
    # Values the fedmsg meta machinery is expected to derive from ``msg``.
    expected_title = "pkgdb.acl.update"
    expected_subti = ("ralph changed ralph's 'watchbugzilla' permission on "
                      "rpms/python-sh (EL-6) to 'Awaiting Review'")
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/python-sh/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['python-sh'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['python-sh/acls/EL-6/watchbugzilla/ralph'])
    # Example bus payload fed to the processors by the Base test harness.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357576703.125622,
        "topic": "org.fedoraproject.stg.pkgdb.acl.update",
        "msg": {
            "status": "Awaiting Review",
            "username": "ralph",
            "package_listing": {
                "point_of_contact": "grover",
                "package": {
                    "upstreamurl": None,
                    "name": "python-sh",
                    "description": None,
                    "reviewurl": None,
                    "summary": "Python module to simplify calling "
                               "shell commands"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora EPEL",
                    "publishurltemplate": None,
                    "version": "6",
                    "disttag": ".el6",
                    "branchname": "EL-6"
                },
                "specfile": None
            },
            "agent": "ralph",
            "acl": "watchbugzilla"
        }
    }
class TestPkgdbPackageNew(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when a new package is added to the DB.  This
    typically happens near the end of the Package Review Process as a
    result of a `SCM Admin Request
    <http://fedoraproject.org/wiki/Package_SCM_admin_requests>`_.
    """
    expected_title = "pkgdb.package.new"
    expected_subti = "ralph added a new package 'rpms/php-zmq' (devel)"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/create'])
    # Example bus payload fed to the processors by the Base test harness.
    msg = {
        "username": "apache",
        "i": 3,
        "timestamp": 1357580533.5999,
        "topic": "org.fedoraproject.stg.pkgdb.package.new",
        "msg": {
            "package_listing": {
                "point_of_contact": "lmacken",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora",
                    "publishurltemplate": None,
                    "version": "19",
                    "disttag": ".f19",
                    "branchname": "devel"
                },
                "specfile": None
            },
            "agent": "ralph"
        }
    }
class TestPkgdbOwnerUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when a package gets an new owner.  (It is
    also published when a package is orphaned; the 'owner' field will have
    the string 'orphan' as its value.)
    """
    expected_title = "pkgdb.owner.update"
    expected_subti = "ralph changed owner of rpms/php-zmq (EL-6) to 'orphan'"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/owner/EL-6'])
    # Example bus payload fed to the processors by the Base test harness.
    msg = {
        "username": "apache",
        "i": 3,
        "timestamp": 1357580533.5999,
        "topic": "org.fedoraproject.stg.pkgdb.owner.update",
        "msg": {
            "package_listing": {
                "point_of_contact": "orphan",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora EPEL",
                    "publishurltemplate": None,
                    "version": "6",
                    "disttag": ".el6",
                    "branchname": "EL-6"
                },
                "specfile": None
            },
            "agent": "ralph"
        }
    }
class TestLegacyPkgdbACLRequestToggle(Base):
    """ The old Fedora Package DB1 published this message when an ACL request
    was toggled on a package.
    """
    expected_title = "pkgdb.acl.request.toggle"
    expected_subti = "ralph has requested 'commit' on rpms/php-zmq (EL-6)"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/acls/EL-6/commit/ralph'])
    # Example legacy (pkgdb1-era) bus payload; note 'owner' instead of
    # 'point_of_contact' in the package listing.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357581512.006664,
        "topic": "org.fedoraproject.stg.pkgdb.acl.request.toggle",
        "msg": {
            "acl_action": "requested",
            "package_listing": {
                "owner": "orphan",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora EPEL",
                    "publishurltemplate": None,
                    "version": "6",
                    "disttag": ".el6",
                    "branchname": "EL-6"
                },
                "specfile": None
            },
            "acl_status": "Awaiting Review",
            "agent": "ralph",
            "acl": "commit"
        }
    }
class TestLegacyPkgdbPackageUpdate(Base):
    """ Test old-school (pkgdb1-era) ``pkgdb.package.update`` messages,
    whose body carried only the bare package name and the acting agent.
    """
    expected_title = "pkgdb.package.update"
    expected_subti = "ralph made some updates to rpms/php-zmq"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/update'])
    # Example legacy bus payload fed to the processors.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357581512.006664,
        "topic": "org.fedoraproject.stg.pkgdb.package.update",
        "msg": {
            "acl_action": "requested",
            "package": "php-zmq",
            "agent": "ralph",
        },
    }
class TestPkgdbPackageUpdateStatus(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when the status of a package is updated.
    """
    expected_title = "pkgdb.package.update.status"
    expected_subti = "ralph unretired rpms/guake in F-18"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/guake/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['guake/update'])
    # Example payload: Retired -> Approved transition ("unretired").
    msg = {
        "username": "apache",
        "i": 144,
        "timestamp": 1379605523.496933,
        "msg_id": "2013-c131fb95-0a2e-4426-95c3-09766e017d29",
        "topic": "org.fedoraproject.dev.pkgdb.package.update.status",
        "msg": {
            "status": "Approved",
            "package_listing": {
                "package": {
                    "status": "Approved",
                    "upstream_url": "http://guake.org",
                    "name": "guake",
                    "creation_date": 1379619917.0,
                    "summary": "Top down terminal for GNOME",
                    "review_url": "https://bugzilla.redhat.com/450189"
                },
                "collection": {
                    "pendingurltemplate": None,
                    "publishurltemplate": None,
                    "branchname": "F-18",
                    "name": "Fedora",
                    "version": "18"
                },
                "point_of_contact": "pingou"
            },
            "prev_status": "Retired",
            "agent": "ralph",
            "package_name": "guake"
        }
    }
class TestPkgdbPackageUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when metadata for a package is updated.
    """
    expected_title = "pkgdb.package.update"
    expected_subti = "pkgdb_updater updated: summary, description of rpms/guake"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/guake/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "1ff483b03adb34142ac55a5efecfa71b0149d57566f86d969905005b0ab98def"
        "?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['pkgdb_updater'])
    expected_objects = set(['guake/update'])
    # Example payload listing the modified fields in msg['fields'].
    msg = {
        "username": "apache",
        "i": 144,
        "timestamp": 1379605523.496933,
        "msg_id": "2013-c131fb95-0a2e-4426-95c3-09766e017d29",
        "topic": "org.fedoraproject.dev.pkgdb.package.update",
        "msg": {
            "package": {
                "status": "Approved",
                "upstream_url": "http://guake.org",
                "name": "guake",
                "creation_date": 1379619917.0,
                "summary": "Top down terminal for GNOME",
                "review_url": "https://bugzilla.redhat.com/450189"
            },
            "agent": "pkgdb_updater",
            "fields": ["summary", "description"],
        }
    }
class LegacyTestPkgdbBranchClone(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages on this topic when a new branch is cloned for a
    package.
    """
    expected_title = "pkgdb.branch.clone"
    expected_subti = "ralph branched rpms/php-zmq f18 from devel"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/branch'])
    # Example legacy bus payload fed to the processors.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357581512.006664,
        "topic": "org.fedoraproject.stg.pkgdb.branch.clone",
        "msg": {
            "package": "php-zmq",
            "branch": "f18",
            "master": "devel",
            "agent": "ralph",
        },
    }
class TestLegacyPkgdbCritpathUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages on this topic when the critical path status of a
    package changes (when it is either added, or removed from the critical
    path).  For example:
    """
    expected_title = "pkgdb.critpath.update"
    expected_subti = "ralph altered the critpath status for some packages"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    # No package names can be extracted: the payload only carries listing ids.
    expected_packages = set([])
    expected_usernames = set(['ralph'])
    expected_objects = set([])
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357581512.006664,
        "topic": "org.fedoraproject.stg.pkgdb.critpath.update",
        "msg": {
            "package_listing_ids": [],
            "agent": "ralph",
            "critpath": True,
        },
    }
class TestPkgdbPackageUpdateStatus2(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes this message when the status of a package is updated.
    Here's an example of a package being retired:
    """
    expected_title = "pkgdb.package.update.status"
    expected_subti = "till retired rpms/libvmime07 in master"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/libvmime07/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "272bbf32f26ca494a78673f873bb62e8f3deb9f9b53213ceac3c2a144de4784a"
        "?s=64&d=retro")
    expected_packages = set(['libvmime07'])
    expected_usernames = set(['till'])
    expected_objects = set(['libvmime07/update'])
    # Example payload: Orphaned -> Retired transition, captured from prod.
    msg = {
        "source_name": "datanommer",
        "i": 7,
        "timestamp": 1412710605.0,
        "msg_id": "2014-78aa26ee-d2e5-4446-b4a4-73948704d73e",
        "topic": "org.fedoraproject.prod.pkgdb.package.update.status",
        "source_version": "0.6.4",
        "msg": {
            "status": "Retired",
            "package_listing": {
                "status": "Retired",
                "point_of_contact": "orphan",
                "package": {
                    "status": "Approved",
                    "upstream_url": "http://www.zarafa.com/wiki/index.php/Libvmime_patches",
                    "description": "VMime is a powerful C++ class ...",
                    "creation_date": 1400070978.0,
                    "acls": [],
                    "summary": "A powerful C++ class ...",
                    "review_url": None,
                    "name": "libvmime07"
                },
                "collection": {
                    "status": "Under Development",
                    "dist_tag": ".fc22",
                    "koji_name": "rawhide",
                    "name": "Fedora",
                    "version": "devel",
                    "branchname": "master"
                },
                "acls": [
                    {
                        "fas_name": "robert",
                        "status": "Approved",
                        "acl": "watchcommits"
                    },
                    {
                        "fas_name": "robert",
                        "status": "Approved",
                        "acl": "watchbugzilla"
                    },
                    {
                        "fas_name": "robert",
                        "status": "Obsolete",
                        "acl": "commit"
                    },
                    {
                        "fas_name": "robert",
                        "status": "Obsolete",
                        "acl": "approveacls"
                    }
                ],
                "critpath": False,
                "status_change": 1412710603.0
            },
            "prev_status": "Orphaned",
            "package_name": "libvmime07",
            "agent": "till"
        }
    }
class TestLegacyPkgdbPackageRetire(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages on this topic when a package is retired.  For example:
    """
    expected_title = "pkgdb.package.retire"
    expected_subti = "ralph retired rpms/php-zmq (EL-6)!"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/retire'])
    # Example legacy bus payload; note the 'retirement' field.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357583297.886945,
        "topic": "org.fedoraproject.stg.pkgdb.package.retire",
        "msg": {
            "package_listing": {
                "owner": "orphan",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora EPEL",
                    "publishurltemplate": None,
                    "version": "6",
                    "disttag": ".el6",
                    "branchname": "EL-6"
                },
                "specfile": None
            },
            "retirement": "retired",
            "agent": "ralph"
        }
    }
class LegacyTestPkgdbUserRemove(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    when a user is removed from a package ACL.
    """
    expected_title = "pkgdb.acl.user.remove"
    expected_subti = "ralph removed ralph from rpms/php-zmq (EL-6, F18)"
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/php-zmq/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set(['php-zmq'])
    expected_usernames = set(['ralph'])
    expected_objects = set(['php-zmq/remove/ralph'])
    # Example payload: one listing per branch the user was removed from.
    msg = {
        "username": "apache",
        "i": 2,
        "timestamp": 1357583297.886945,
        "topic": "org.fedoraproject.stg.pkgdb.acl.user.remove",
        "msg": {
            "package_listings": [{
                "owner": "orphan",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora EPEL",
                    "publishurltemplate": None,
                    "version": "6",
                    "disttag": ".el6",
                    "branchname": "EL-6"
                },
                "specfile": None
            }, {
                "owner": "orphan",
                "package": {
                    "upstreamurl": None,
                    "name": "php-zmq",
                    "description": None,
                    "reviewurl": None,
                    "summary": "PHP 0MQ/zmq/zeromq extension"
                },
                "qacontact": None,
                "collection": {
                    "pendingurltemplate": None,
                    "name": "Fedora",
                    "publishurltemplate": None,
                    "version": "18",
                    "disttag": ".f18",
                    "branchname": "F18"
                },
                "specfile": None
            }],
            "collections": [
                # This actually has stuff in it in prod.
            ],
            "username": "ralph",
            "agent": "ralph",
        }
    }
class TestPkgdbBranchStart(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when branching starts.
    """
    expected_title = "pkgdb.branch.start"
    expected_subti = "ralph started a branch of F-19 from devel"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    # A mass-branching event concerns no single package.
    expected_packages = set()
    expected_usernames = set(['ralph'])
    expected_objects = set()
    msg = {
        u'username': u'threebean',
        u'i': 1,
        u'timestamp': 1379606342.105066,
        u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
        u'topic':
        u'org.fedoraproject.dev.pkgdb.branch.start',
        u'msg': {
            u'collection_from': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'devel',
                u'name': u'Fedora',
                u'version': u'devel'
            },
            u'collection_to': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'F-19',
                u'name': u'Fedora',
                u'version': u'19'
            },
            u'agent': u'ralph',
        },
    }
class TestLegacyPkgdbBranchStart(Base):
    """ This just tests a funny case where 'agent' is a list.. """
    expected_title = "pkgdb.branch.start"
    expected_subti = "ralph started a branch of F-19 from devel"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set()
    expected_usernames = set(['ralph'])
    expected_objects = set()
    # Identical to TestPkgdbBranchStart except 'agent' is a one-element list.
    msg = {
        u'username': u'threebean',
        u'i': 1,
        u'timestamp': 1379606342.105066,
        u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
        u'topic':
        u'org.fedoraproject.dev.pkgdb.branch.start',
        u'msg': {
            u'collection_from': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'devel',
                u'name': u'Fedora',
                u'version': u'devel'
            },
            u'collection_to': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'F-19',
                u'name': u'Fedora',
                u'version': u'19'
            },
            u'agent': [u'ralph'],
        },
    }
class TestPkgdbBranchComplete(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when branching completes.
    """
    expected_title = "pkgdb.branch.complete"
    expected_subti = "ralph's branch of F-19 from devel completed"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set()
    expected_usernames = set(['ralph'])
    expected_objects = set()
    # Example bus payload fed to the processors by the Base test harness.
    msg = {
        u'username': u'threebean',
        u'i': 1,
        u'timestamp': 1379606342.105066,
        u'msg_id': u'2013-0eaf6d98-6259-4e1c-a113-e2c9284a6082',
        u'topic':
        u'org.fedoraproject.dev.pkgdb.branch.complete',
        u'msg': {
            u'collection_from': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'devel',
                u'name': u'Fedora',
                u'version': u'devel'
            },
            u'collection_to': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'F-19',
                u'name': u'Fedora',
                u'version': u'19'
            },
            u'agent': u'ralph',
        },
    }
class TestPkgdbCollectionNew(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin creates a new collection.
    """
    expected_title = "pkgdb.collection.new"
    expected_subti = "ralph created a new collection for Fedora 19"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set()
    expected_usernames = set(['ralph'])
    expected_objects = set()
    # Example bus payload fed to the processors by the Base test harness.
    msg = {
        u'username': u'threebean',
        u'i': 3,
        u'timestamp': 1379607327.474346,
        u'msg_id': u'2013-68fd388e-60ca-4cf6-888d-b51161798496',
        u'topic': u'org.fedoraproject.dev.pkgdb.collection.new',
        u'msg': {
            u'collection': {
                u'pendingurltemplate': None,
                u'publishurltemplate': None,
                u'branchname': u'F-19',
                u'name': u'Fedora',
                u'version': u'19',
            },
            u'agent': u'ralph',
        }
    }
class TestPkgdbCollectionUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin updates a collection.
    """
    expected_title = "pkgdb.collection.update"
    expected_subti = ("ralph updated the following fields of the Fedora 18 "
                      "collection: name, version")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "9c9f7784935381befc302fe3c814f9136e7a33953d0318761669b8643f4df55c?s=64&d=retro")
    expected_packages = set()
    expected_usernames = set(['ralph'])
    expected_objects = set()
    # Example payload listing the modified collection fields in msg['fields'].
    msg = {
        u'username': u'threebean',
        u'i': 27,
        u'timestamp': 1379607692.198447,
        u'msg_id': u'2013-478a321f-ddfc-4d4c-adeb-c777619da15a',
        u'topic': u'org.fedoraproject.dev.pkgdb.collection.update',
        u'msg': {
            u'fields': [
                u'name',
                u'version',
            ],
            u'collection': {
                u'pendingurltemplate': u'http://.....',
                u'publishurltemplate': u'http://.....',
                u'branchname': u'f18_b',
                u'name': u'Fedora',
                u'version': u'18'
            },
            u'agent': u'ralph',
        }
    }
class TestPkgdbDeletePackage(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **deletes a package** all
    together.
    """
    expected_title = "pkgdb.package.delete"
    expected_subti = ("ausil deleted the 'rpms/pipelight' package from the pkgdb")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
        "?s=64&d=retro")
    expected_packages = set(['pipelight'])
    expected_usernames = set(['ausil'])
    expected_objects = set(['pipelight/package/delete'])
    # Example prod payload: the deleted package carries one listing per
    # branch (master, f19, f20, epel7, f21), each with its full ACL set.
    msg = {
        "i": 46,
        "msg_id": "2014-9372bf63-8e32-4257-82ec-38fb5226763a",
        "source_name": "datanommer",
        "source_version": "0.6.4",
        "timestamp": 1408377920.0,
        "topic": "org.fedoraproject.prod.pkgdb.package.delete",
        "msg": {
            "agent": "ausil",
            "package": {
                "acls": [
                    {
                        "acls": [
                            {
                                "acl": "commit",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "awjb",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "master",
                            "dist_tag": ".fc22",
                            "koji_name": "rawhide",
                            "name": "Fedora",
                            "status": "Under Development",
                            "version": "devel"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1404850009.0,
                            "description": "",
                            "name": "pipelight",
                            "review_url": "https://bugzilla.redhat.com/"
                            "1117403",
                            "status": "Approved",
                            "summary": "NPAPI Wrapper Plugin for using "
                            "Windows plugins in Linux browsers",
                            "upstream_url": "http://pipelight.net/"
                        },
                        "point_of_contact": "besser82",
                        "status": "Approved",
                        "status_change": 1404850010.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "commit",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "awjb",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "f19",
                            "dist_tag": ".fc19",
                            "koji_name": "f19",
                            "name": "Fedora",
                            "status": "Active",
                            "version": "19"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1404850009.0,
                            "description": "",
                            "name": "pipelight",
                            "review_url": "https://bugzilla.redhat.com/"
                            "1117403",
                            "status": "Approved",
                            "summary": "NPAPI Wrapper Plugin for using "
                            "Windows plugins in Linux browsers",
                            "upstream_url": "http://pipelight.net/"
                        },
                        "point_of_contact": "besser82",
                        "status": "Approved",
                        "status_change": 1404850009.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "commit",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "awjb",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "f20",
                            "dist_tag": ".fc20",
                            "koji_name": "f20",
                            "name": "Fedora",
                            "status": "Active",
                            "version": "20"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1404850009.0,
                            "description": "",
                            "name": "pipelight",
                            "review_url": "https://bugzilla.redhat.com/"
                            "1117403",
                            "status": "Approved",
                            "summary": "NPAPI Wrapper Plugin for using "
                            "Windows plugins in Linux browsers",
                            "upstream_url": "http://pipelight.net/"
                        },
                        "point_of_contact": "besser82",
                        "status": "Approved",
                        "status_change": 1404850010.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "commit",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "awjb",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "epel7",
                            "dist_tag": ".el7",
                            "koji_name": "epel7",
                            "name": "Fedora EPEL",
                            "status": "Under Development",
                            "version": "7"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1404850009.0,
                            "description": "",
                            "name": "pipelight",
                            "review_url": "https://bugzilla.redhat.com/"
                            "1117403",
                            "status": "Approved",
                            "summary": "NPAPI Wrapper Plugin for using "
                            "Windows plugins in Linux browsers",
                            "upstream_url": "http://pipelight.net/"
                        },
                        "point_of_contact": "besser82",
                        "status": "Approved",
                        "status_change": 1404850009.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "watchcommits",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "besser82",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "awjb",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "awjb",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "f21",
                            "dist_tag": ".fc21",
                            "koji_name": "f21",
                            "name": "Fedora",
                            "status": "Under Development",
                            "version": "21"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1404850009.0,
                            "description": "",
                            "name": "pipelight",
                            "review_url": "https://bugzilla.redhat.com/"
                            "1117403",
                            "status": "Approved",
                            "summary": "NPAPI Wrapper Plugin for using "
                            "Windows plugins in Linux browsers",
                            "upstream_url": "http://pipelight.net/"
                        },
                        "point_of_contact": "besser82",
                        "status": "Approved",
                        "status_change": 1404997736.0
                    }
                ],
                "creation_date": 1404850009.0,
                "description": "",
                "name": "pipelight",
                "review_url": "https://bugzilla.redhat.com/1117403",
                "status": "Approved",
                "summary": "NPAPI Wrapper Plugin for using "
                "Windows plugins in Linux browsers",
                "upstream_url": "http://pipelight.net/"
            }
        },
    }
class TestPkgdbDeleteBranch(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **deletes a branch** of a
    particular package.
    """
    expected_title = "pkgdb.package.branch.delete"
    expected_subti = "ausil deleted the f21 branch of the 'rpms/pipelight' package"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
        "?s=64&d=retro")
    expected_packages = set(['pipelight'])
    expected_usernames = set(['ausil'])
    expected_objects = set(['pipelight/f21/delete'])
    # Example prod payload: the deleted listing for the f21 branch.
    msg = {
        "i": 45,
        "msg_id": "2014-fba4c0ac-f5ba-446f-bf70-94200e2d286f",
        "source_name": "datanommer",
        "source_version": "0.6.4",
        "timestamp": 1408377920.0,
        "topic": "org.fedoraproject.prod.pkgdb.package.branch.delete",
        "msg": {
            "agent": "ausil",
            "package_listing": {
                "acls": [
                    {
                        "acl": "watchcommits",
                        "fas_name": "besser82",
                        "status": "Approved"
                    },
                    {
                        "acl": "watchbugzilla",
                        "fas_name": "besser82",
                        "status": "Approved"
                    },
                    {
                        "acl": "approveacls",
                        "fas_name": "besser82",
                        "status": "Approved"
                    },
                    {
                        "acl": "commit",
                        "fas_name": "besser82",
                        "status": "Approved"
                    },
                    {
                        "acl": "watchcommits",
                        "fas_name": "awjb",
                        "status": "Approved"
                    },
                    {
                        "acl": "watchbugzilla",
                        "fas_name": "awjb",
                        "status": "Approved"
                    },
                    {
                        "acl": "approveacls",
                        "fas_name": "awjb",
                        "status": "Approved"
                    },
                    {
                        "acl": "commit",
                        "fas_name": "awjb",
                        "status": "Approved"
                    }
                ],
                "collection": {
                    "branchname": "f21",
                    "dist_tag": ".fc21",
                    "koji_name": "f21",
                    "name": "Fedora",
                    "status": "Under Development",
                    "version": "21"
                },
                "critpath": False,
                "package": {
                    "acls": [],
                    "creation_date": 1404850009.0,
                    "description": "",
                    "name": "pipelight",
                    "review_url": "https://bugzilla.redhat.com/1117403",
                    "status": "Approved",
                    "summary": "NPAPI Wrapper Plugin for using Windows "
                    "plugins in Linux browsers",
                    "upstream_url": "http://pipelight.net/"
                },
                "point_of_contact": "besser82",
                "status": "Approved",
                "status_change": 1404997736.0
            }
        },
    }
class TestPkgdbDeleteAcl(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **deletes an ACL** (here,
    a user's ``watchcommits`` right) on a branch of a particular package.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.acl.delete"
    expected_subti = ("ausil deleted awjb's watchcommits "
                      "rights from rpms/pipelight (f20)")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "a89b57d99dcf12d40ec2b9fb05910b90293b13b0b87415208bedc897bc18a354"
        "?s=64&d=retro")
    expected_packages = set(['pipelight'])
    expected_usernames = set(['ausil', 'awjb'])
    expected_objects = set(['pipelight/acls/f20/watchcommits/awjb'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 23,
        "msg_id": "2014-f46f0993-ea29-4fe1-af44-807b863a12de",
        "source_name": "datanommer",
        "source_version": "0.6.4",
        "timestamp": 1408377918.0,
        "topic": "org.fedoraproject.prod.pkgdb.acl.delete",
        "msg": {
            "acl": {
                "acl": "watchcommits",
                "fas_name": "awjb",
                "packagelist": {
                    "collection": {
                        "branchname": "f20",
                        "dist_tag": ".fc20",
                        "koji_name": "f20",
                        "name": "Fedora",
                        "status": "Active",
                        "version": "20"
                    },
                    "critpath": False,
                    "package": {
                        "acls": [],
                        "creation_date": 1404850009.0,
                        "description": "",
                        "name": "pipelight",
                        "review_url": "https://bugzilla.redhat.com/1117403",
                        "status": "Approved",
                        "summary": "NPAPI Wrapper Plugin for using Windows "
                                   "plugins in Linux browsers",
                        "upstream_url": "http://pipelight.net/"
                    },
                    "point_of_contact": "besser82",
                    "status": "Approved",
                    "status_change": 1404850010.0
                },
                "status": "Approved"
            },
            "agent": "ausil"
        },
    }
class TestPkgdbBranchRequest(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a user **requests a new branch** for
    a particular package.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.branch.request"
    expected_subti = ("pingou requested branch epel7 for package rpms/R-BiocGenerics")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['R-BiocGenerics'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['R-BiocGenerics/branch/request/epel7/pingou'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 1,
        "timestamp": 1408440084,
        "msg_id": "2014-250329a1-1ccf-4fc4-ad0c-e24365f89c0f",
        "topic": "org.fedoraproject.dev.pkgdb.package.branch.request",
        "msg": {
            "collection_to": {
                "status": "Under Development",
                "dist_tag": ".el7",
                "koji_name": "epel7",
                "name": "Fedora EPEL",
                "version": "7",
                "branchname": "epel7"
            },
            "package": {
                "status": "Approved",
                "upstream_url": None,
                "description": None,
                "summary": "Generic functions for Bioconductor",
                "acls": [],
                "creation_date": 1400063778.0,
                "review_url": None,
                "name": "R-BiocGenerics"
            },
            "agent": "pingou",
        },
    }
class TestPkgdbPackageRequest(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a user **requests a new package**
    to be added into Package DB.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.new.request"
    expected_subti = ("pingou requested package rpms/guake on branch master")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/guake/"
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['new/package/request/guake/master/pingou'])
    # Captured fedmsg payload used as the test input.  Note "package" is
    # None here because the package does not exist yet at request time.
    msg = {
        "i": 3,
        "timestamp": 1408440927,
        "msg_id": "2014-40c33929-8fa1-4cfb-9559-231af6d809aa",
        "topic": "org.fedoraproject.dev.pkgdb.package.new.request",
        "msg": {
            "info": {
                "pkg_summary": "A drop-down terminal for GNOME",
                "pkg_collection": "master",
                "pkg_review_url": "https://bugzilla.redhat.com/123",
                "pkg_upstream_url": "http://guake.org",
                "pkg_poc": "pingou",
                "pkg_status": "Approved",
                "pkg_name": "guake",
                "pkg_description": "",
                "pkg_critpath": False
            },
            "agent": "pingou",
            "collection": {
                "status": "Under Development",
                "dist_tag": ".fc22",
                "koji_name": "rawhide",
                "name": "Fedora",
                "version": "devel",
                "branchname": "master"
            },
            "package": None
        },
    }
class TestPkgdbAdminActionUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **updates the status of an
    Admin Action**.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.admin.action.status.update"
    expected_subti = ("pingou changed pingou's package request for rpms/guake "
                      "in master from Awaiting Review to Approved")
    expected_link = "https://admin.fedoraproject.org/pkgdb/package/rpms/guake/"
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['action/18/status/guake/master/pingou'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 6,
        "timestamp": 1408441710,
        "msg_id": "2014-3a9cba3d-a1d0-4187-9fa0-995d54bf826d",
        "topic": "org.fedoraproject.dev.pkgdb.admin.action.status.update",
        "msg": {
            "action": {
                "info": {
                    'pkg_summary': u'A drop-down terminal for GNOME',
                    'pkg_status': u'Approved',
                    'pkg_collection': u'master',
                    'pkg_name': u'guake',
                    'pkg_review_url': u'https://bugzilla.redhat.com/123',
                    'pkg_description': u'',
                    'pkg_upstream_url': u'http://guake.org',
                    'pkg_poc': u'pingou',
                    'pkg_critpath': False
                },
                "status": "Approved",
                "package": None,
                "date_updated": 1408441710.0,
                "collection": {
                    "status": "Under Development",
                    "dist_tag": ".fc22",
                    "koji_name": "rawhide",
                    "name": "Fedora",
                    "version": "devel",
                    "branchname": "master"
                },
                "user": "pingou",
                "action": "request.package",
                "date_created": 1408433727.0,
                "from_collection": None,
                "id": 18
            },
            "old_status": "Awaiting Review",
            "new_status": "Approved",
            "agent": "pingou"
        },
    }
class TestPkgdbAdminActionUpdate_Denied(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a request for a new branch/package is
    **denied/blocked**.  Unlike the approved case, the payload carries a
    "message" field explaining the denial, which shows up in the subtitle.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.admin.action.status.update"
    expected_subti = ("pingou changed pingou's branch request for rpms/R-Biobase "
                      "in epel7 from Awaiting Review to Denied "
                      "with message: "
                      "This package should not be branched for EPEL7")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['R-Biobase'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['action/2/status/R-Biobase/epel7/pingou'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 1,
        "timestamp": 1421830060,
        "msg_id": "2015-1acdeda2-e571-4071-a893-cc2b7ba46b02",
        "topic": "org.fedoraproject.dev.pkgdb.admin.action.status.update",
        "msg": {
            "action": {
                "info": {},
                "status": "Denied",
                "package": {
                    "status": "Approved",
                    "upstream_url": "http://bioconductor.org/packages/release/bioc/html/Biobase.html",
                    "monitor": False,
                    "description": "Base functions for Bioconductor (bioconductor.org). Biobase provides\nfunctions that are needed by many other Bioconductor packages or which\nreplace R functions.",
                    "summary": "Base functions for Bioconductor",
                    "acls": [],
                    "creation_date": 1400063778.0,
                    "review_url": None,
                    "name": "R-Biobase"
                },
                "date_updated": 1421830060.0,
                "collection": {
                    "status": "Under Development",
                    "dist_tag": ".el7",
                    "koji_name": "epel7",
                    "name": "Fedora EPEL",
                    "version": "7",
                    "branchname": "epel7"
                },
                "user": "pingou",
                "action": "request.branch",
                "date_created": 1421227282.0,
                "message": "This package should not be branched for EPEL7",
                "id": 2
            },
            "old_status": "Awaiting Review",
            "new_status": "Denied",
            "agent": "pingou"
        }
    }
class TestPkgdbCritpathUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when an admin **updates the critpath flag
    on a package**.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.critpath.update"
    expected_subti = ("pingou set the critpath flag on the "
                      "rpms/openbox package (f21)")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['openbox'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['openbox/critpath'])
    # Captured fedmsg payload used as the test input.  The package carries
    # one listing per collection branch (FC-5, FC-4, f21); only the f21
    # listing has critpath set to True.
    msg = {
        "msg_id": "2014-dbb1c4d3-2ffa-4212-9daa-1479bf11e8a4",
        "source_name": "datanommer",
        "source_version": "0.6.4",
        "timestamp": 1408557412.0,
        "topic": "org.fedoraproject.prod.pkgdb.package.critpath.update",
        "i": 35,
        "msg": {
            "agent": "pingou",
            "branches": [
                "f21"
            ],
            "critpath": True,
            "package": {
                "acls": [
                    {
                        "acls": [
                            {
                                "acl": "watchcommits",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "FC-5",
                            "dist_tag": ".fc5",
                            "koji_name": None,
                            "name": "Fedora",
                            "status": "EOL",
                            "version": "5"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1400070978.0,
                            "name": "openbox",
                            "review_url": None,
                            "status": "Approved",
                            "summary": "A highly configurable and "
                                       "standards-compliant X11 window manager",
                            "upstream_url": None
                        },
                        "point_of_contact": "mlichvar",
                        "status": "Approved",
                        "status_change": 1400071632.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "watchcommits",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            }
                        ],
                        "collection": {
                            "branchname": "FC-4",
                            "dist_tag": ".fc4",
                            "koji_name": None,
                            "name": "Fedora",
                            "status": "EOL",
                            "version": "4"
                        },
                        "critpath": False,
                        "package": {
                            "acls": [],
                            "creation_date": 1400070978.0,
                            "name": "openbox",
                            "review_url": None,
                            "status": "Approved",
                            "summary": "A highly configurable and "
                                       "standards-compliant X11 window manager",
                            "upstream_url": None
                        },
                        "point_of_contact": "mlichvar",
                        "status": "Approved",
                        "status_change": 1400071632.0
                    },
                    {
                        "acls": [
                            {
                                "acl": "watchcommits",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "approveacls",
                                "fas_name": "mlichvar",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "cwickert",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "cwickert",
                                "status": "Approved"
                            },
                            {
                                "acl": "commit",
                                "fas_name": "cwickert",
                                "status": "Approved"
                            },
                            {
                                "acl": "watchcommits",
                                "fas_name": "athmane",
                                "status": "Obsolete"
                            },
                            {
                                "acl": "watchbugzilla",
                                "fas_name": "athmane",
                                "status": "Obsolete"
                            }
                        ],
                        "collection": {
                            "branchname": "f21",
                            "dist_tag": ".fc21",
                            "koji_name": "f21",
                            "name": "Fedora",
                            "status": "Under Development",
                            "version": "21"
                        },
                        "critpath": True,
                        "package": {
                            "acls": [],
                            "creation_date": 1400070978.0,
                            "name": "openbox",
                            "review_url": None,
                            "status": "Approved",
                            "summary": "A highly configurable and "
                                       "standards-compliant X11 window manager",
                            "upstream_url": None
                        },
                        "point_of_contact": "mlichvar",
                        "status": "Approved",
                        "status_change": 1408557402.0
                    }
                ],
                "creation_date": 1400070978.0,
                "description": "Openbox is a window manager designed ...",
                "name": "openbox",
                "review_url": None,
                "status": "Approved",
                "summary": "A highly configurable and "
                           "standards-compliant X11 window manager",
                "upstream_url": None
            }
        },
    }

    def setUp(self):
        super(TestPkgdbCritpathUpdate, self).setUp()
        # NOTE(review): this configures the 'docker' namespace, yet
        # expected_subti above says 'rpms/openbox'.  Looks copy-pasted from
        # the custom-namespace test below — confirm against the Base harness
        # whether the msg content (no namespace field) overrides this config.
        self.config['namespace'] = 'docker'
class TestPkgdbPackageBranchNewCustomNamespace(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a **new branch** is created for a
    package.  This variant exercises a non-default ("docker") namespace,
    carried in the package payload and mirrored by setUp() below.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.branch.new"
    expected_subti = ("pingou created the branch 'epel7' for the package "
                      "'docker/R-BSgenome'")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['R-BSgenome'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['R-BSgenome/epel7/new'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 1,
        "timestamp": 1408957258,
        "msg_id": "2014-645038a7-1f95-4a81-aa68-489c0ae55803",
        "topic": "org.fedoraproject.dev.pkgdb.package.branch.new",
        "msg": {
            "package_listing": {
                "status": "Approved",
                "package": {
                    "status": "Approved",
                    "upstream_url": None,
                    "description": None,
                    "summary": "Infrastructure shared by all the "
                               "Biostrings-based genome",
                    "acls": [],
                    "creation_date": 1400063778.0,
                    "review_url": None,
                    "name": "R-BSgenome",
                    "namespace": "docker",
                },
                "point_of_contact": "pingou",
                "collection": {
                    "status": "Under Development",
                    "dist_tag": ".el7",
                    "koji_name": "epel7",
                    "name": "Fedora EPEL",
                    "version": "7",
                    "branchname": "epel7"
                },
                "critpath": False,
                "status_change": 1408950057.0
            },
            "agent": "pingou",
            "package": {
                "status": "Approved",
                "upstream_url": None,
                "description": None,
                "summary": "Infrastructure shared by all the "
                           "Biostrings-based genome",
                "acls": [],
                "creation_date": 1400063778.0,
                "review_url": None,
                "name": "R-BSgenome",
                "namespace": "docker",
            }
        }
    }

    def setUp(self):
        super(TestPkgdbPackageBranchNewCustomNamespace, self).setUp()
        # Configure the non-default namespace this test exercises.
        self.config['namespace'] = 'docker'
class TestPkgdbPackageBranchNew(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when a **new branch** is created for a
    package (default "rpms" namespace).
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.branch.new"
    expected_subti = ("pingou created the branch 'epel7' for the package "
                      "'rpms/R-BSgenome'")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['R-BSgenome'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['R-BSgenome/epel7/new'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "i": 1,
        "timestamp": 1408957258,
        "msg_id": "2014-645038a7-1f95-4a81-aa68-489c0ae55803",
        "topic": "org.fedoraproject.dev.pkgdb.package.branch.new",
        "msg": {
            "package_listing": {
                "status": "Approved",
                "package": {
                    "status": "Approved",
                    "upstream_url": None,
                    "description": None,
                    "summary": "Infrastructure shared by all the "
                               "Biostrings-based genome",
                    "acls": [],
                    "creation_date": 1400063778.0,
                    "review_url": None,
                    "name": "R-BSgenome",
                    "namespace": "rpms",
                },
                "point_of_contact": "pingou",
                "collection": {
                    "status": "Under Development",
                    "dist_tag": ".el7",
                    "koji_name": "epel7",
                    "name": "Fedora EPEL",
                    "version": "7",
                    "branchname": "epel7"
                },
                "critpath": False,
                "status_change": 1408950057.0
            },
            "agent": "pingou",
            "package": {
                "status": "Approved",
                "upstream_url": None,
                "description": None,
                "summary": "Infrastructure shared by all the "
                           "Biostrings-based genome",
                "acls": [],
                "creation_date": 1400063778.0,
                "review_url": None,
                "name": "R-BSgenome",
                "namespace": "rpms",
            }
        }
    }
class TestPkgdbPackageMonitorUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when someone changes the
    `monitoring <https://fedoraproject.org/wiki/Upstream_release_monitoring>`_
    status of a package.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.monitor.update"
    expected_subti = ("pingou set the monitor flag of rpms/guake to False")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['guake/monitor/false'])
    # Captured fedmsg payload used as the test input.
    msg = {
        "username": "pingou",
        "i": 3,
        "timestamp": 1412957736,
        "msg_id": "2014-905aaa3c-483d-4923-95f7-56a8da38da62",
        "topic": "org.fedoraproject.dev.pkgdb.package.monitor.update",
        "msg": {
            "status": False,
            "agent": "pingou",
            "package": {
                "status": "Approved",
                "upstream_url": "http://www.guake.org/",
                "description": "Guake is a drop-down terminal for Gnome Desktop Environment,\nso you just need to press a key to invoke him,\nand press again to hide.",
                "summary": "Drop-down terminal for GNOME",
                "acls": [],
                "creation_date": 1397204290.0,
                "review_url": None,
                "name": "guake"
            }
        }
    }
class TestPkgdbPackageUnretireRequest(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when someone asks that a package is
    **unretired**.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.unretire.request"
    expected_subti = ("moceap asks that rpms/netbeans-platform8 be unretired on "
                      "master")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "360e1873c56312ea5866123f5ffaf4e07d419570b03af7f475c0d20c7501db06"
        "?s=64&d=retro")
    expected_packages = set(['netbeans-platform8'])
    expected_usernames = set(['moceap'])
    expected_objects = set(['netbeans-platform8/unretire/master'])
    # Captured fedmsg payload used as the test input.  The whitespace-less
    # summary/description strings below are verbatim from the recorded
    # message; do not "fix" them or the fixture no longer matches reality.
    msg = {
        'i': 1,
        'timestamp': 1427823120,
        'msg_id': '2015-bb28a398-e638-4509-9fa0-57d41c2ae0a4',
        'topic': 'org.fedoraproject.prod.pkgdb.package.unretire.request',
        'msg': {
            'collection': {
                'status': 'UnderDevelopment',
                'dist_tag': '.fc23',
                'koji_name': 'rawhide',
                'name': 'Fedora',
                'version': 'devel',
                'branchname': 'master'
            },
            'agent': 'moceap',
            'package': {
                'status': 'Approved',
                'upstream_url': None,
                'monitor': False,
                'summary': 'NetBeansPlatform8',
                'name': 'netbeans-platform8',
                'acls': [
                    {
                        'status': 'Retired',
                        'point_of_contact': 'orphan',
                        'package': {
                            'status': 'Approved',
                            'upstream_url': None,
                            'monitor': False,
                            'summary': 'NetBeansPlatform8',
                            'name': 'netbeans-platform8',
                            'acls': [],
                            'creation_date': 1400070978.0,
                            'review_url': None,
                            'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
                        },
                        'collection': {
                            'status': 'UnderDevelopment',
                            'dist_tag': '.fc23',
                            'koji_name': 'rawhide',
                            'name': 'Fedora',
                            'version': 'devel',
                            'branchname': 'master'
                        },
                        'critpath': False,
                        'status_change': 1400071169.0
                    },
                    {
                        'status': 'Approved',
                        'point_of_contact': 'victorv',
                        'package': {
                            'status': 'Approved',
                            'upstream_url': None,
                            'monitor': False,
                            'summary': 'NetBeansPlatform8',
                            'name': 'netbeans-platform8',
                            'acls': [],
                            'creation_date': 1400070978.0,
                            'review_url': None,
                            'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
                        },
                        'collection': {
                            'status': 'EOL',
                            'dist_tag': '.fc10',
                            'koji_name': 'dist-f10',
                            'name': 'Fedora',
                            'version': '10',
                            'branchname': 'f10'
                        },
                        'acls': [
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'watchcommits'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'watchbugzilla'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'commit'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'approveacls'
                            }
                        ],
                        'critpath': False,
                        'status_change': 1400071253.0
                    },
                    {
                        'status': 'Approved',
                        'point_of_contact': 'victorv',
                        'package': {
                            'status': 'Approved',
                            'upstream_url': None,
                            'monitor': False,
                            'summary': 'NetBeansPlatform8',
                            'name': 'netbeans-platform8',
                            'acls': [
                            ],
                            'creation_date': 1400070978.0,
                            'review_url': None,
                            'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
                        },
                        'collection': {
                            'status': 'EOL',
                            'dist_tag': '.fc11',
                            'koji_name': 'dist-f11',
                            'name': 'Fedora',
                            'version': '11',
                            'branchname': 'f11'
                        },
                        'acls': [
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'watchcommits'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'watchbugzilla'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'commit'
                            },
                            {
                                'fas_name': 'victorv',
                                'status': 'Approved',
                                'acl': 'approveacls'
                            }
                        ],
                        'critpath': False,
                        'status_change': 1400071427.0
                    },
                    {
                        'status': 'Orphaned',
                        'point_of_contact': 'orphan',
                        'package': {
                            'status': 'Approved',
                            'upstream_url': None,
                            'monitor': False,
                            'summary': 'NetBeansPlatform8',
                            'name': 'netbeans-platform8',
                            'acls': [
                            ],
                            'creation_date': 1400070978.0,
                            'review_url': None,
                            'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
                        },
                        'collection': {
                            'status': 'EOL',
                            'dist_tag': '.fc12',
                            'koji_name': 'dist-f12',
                            'name': 'Fedora',
                            'version': '12',
                            'branchname': 'f12'
                        },
                        'critpath': False,
                        'status_change': 1400071659.0
                    }
                ],
                'creation_date': 1400070978.0,
                'review_url': None,
                'description': 'NetBeansPlatformisaframeworkfordevelopmentof\nRichClientSwingApplications.Itcontainspowerful\nmodulesystemandasetofmodulesprovidingvarious\nfunctionalitiesneededforsimplificationof\ndevelopmentofmodulardesktopapplications.'
            }
        }
    }
class TestPkgdbPackageKoscheiUpdate(Base):
    """ The Fedora `Package DB <https://admin.fedoraproject.org/pkgdb>`_
    publishes messages like these when someone changes the
    `koschei <https://apps.fedoraproject.org/koschei>`_ status of a package.
    """
    # Metadata the message processor is expected to derive from ``msg``.
    expected_title = "pkgdb.package.koschei.update"
    expected_subti = ("pingou set the koschei monitoring flag of rpms/guake to True")
    expected_icon = ("https://apps.fedoraproject.org/packages/images/icons/"
                     "package_128x128.png")
    expected_secondary_icon = (
        "https://seccdn.libravatar.org/avatar/"
        "01fe73d687f4db328da1183f2a1b5b22962ca9d9c50f0728aafeac974856311c"
        "?s=64&d=retro")
    expected_packages = set(['guake'])
    expected_usernames = set(['pingou'])
    expected_objects = set(['guake/koschei/true'])
    # Captured fedmsg payload used as the test input.  Odd-looking strings
    # (e.g. the space in 'http: //www.guake.org/') are verbatim from the
    # recorded message and must stay as-is.
    msg = {
        'username': u'pierrey',
        'i': 3,
        'timestamp': 1435313134,
        'msg_id': u'2015-7d0ecbd6-6892-4b34-98ff-b212d1fef74e',
        'topic': u'org.fedoraproject.dev.pkgdb.package.koschei.update',
        'msg': {
            'status': True,
            'agent': u'pingou',
            'package': {
                'status': u'Approved',
                'upstream_url': u'http: //www.guake.org/',
                'koschei_monitor': True,
                'monitor': False,
                'summary': u'Drop-downterminalforGNOME',
                'name': u'guake',
                'acls': [
                ],
                'creation_date': 1400063778.0,
                'review_url': None,
                'description': 'Guake is a drop-down terminal for Gnome'
            }
        }
    }
# Attach the class docstrings above as published documentation for each
# test case (helper imported at the top of this module).
add_doc(locals())

if __name__ == '__main__':
    unittest.main()
| fedora-infra/fedmsg_meta_fedora_infrastructure | fedmsg_meta_fedora_infrastructure/tests/pkgdb.py | Python | lgpl-2.1 | 88,401 | [
"Bioconductor"
] | e930f4e61293bcfc3b25391a91238ea0388524863b8edcae33667ad551ef2391 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the "test" module for the validate_bom script.
It is responsible for coordinating and running the integration tests.
The test catalog is read from --test_profiles (default all_tests.yaml).
There are two parts to the catalog: "aliases" and "tests".
The "tests" is a dictionary of tests. Each entry is keyed by the name of the
test. A test has the following structure:
test_name:
requires:
configuration:
<commandline option>: <value>
services: [<microservice name>]
quota:
<resource>: <uses>
api: <primary microservice>
args:
alias: [<alias name>]
<command line flag>: <value>
The test_name.requires specifies the requirements in order to run the test.
If a requirement is not satisfied, the test will be skipped.
The test_name.requires.configuration specifies expected options and values.
These are the same names as parameters to the validate_bom__main executable.
Typically this is used to guard a test for a particular configuration (e.g.
don't test against a platform if the platform was not enabled in the
deployment).
The test_name.requires.services is a list of services that the test requires
either directly or indirectly. This is used to ensure that the services are
ready before running the test. If the service is alive but not healthy then
the test will be failed automatically without even running it (provided it
wouldn't have been skipped).
The test_name.api is used to specify the primary microservice that the test
uses. This is used to determine which port to pass to the test since the remote
ports are forwarded to unused local ports known only to this test controller.
The test_name.args are the commandline arguments to pass to the test.
The names of the arguments are the test's argument names without the
prefixed '--'. If the value begins with a '$' then the remaining value
refers to the name of an option whose value should become the argument.
A special argument "aliases" is a list of aliases. These are names that
match the key of an entry in the "aliases" part of the file where all the
name/value pairs defined for the alias are bulk added as arguments.
The test_name.quota is used to rate-limit test execution where tests are
sensitive to resource costs. Arbitrary names can be limited using
--test_quota. The controller will use this as a semaphore to rate-limit
test execution for these resources. Unrestricted resources won't rate-limit.
If the cost is bigger than the total semaphore capacity then the test will
be given all the quota once all is available.
There is an overall rate-limiting semaphore on --test_concurrency for
how many tests can run at a time. This is enforced at the point of execution,
after all the setup and filtering has taken place.
"""
# pylint: disable=broad-except
from multiprocessing.pool import ThreadPool
import atexit
import collections
import logging
import math
import os
import re
import subprocess
import socket
import threading
import time
import traceback
import yaml
try:
from urllib2 import urlopen, HTTPError, URLError
except ImportError:
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from buildtool import (
add_parser_argument,
determine_subprocess_outcome_labels,
check_subprocess,
check_subprocesses_to_logfile,
raise_and_log_error,
ConfigError,
ResponseError,
TimeoutError,
UnexpectedError)
from validate_bom__deploy import replace_ha_services
ForwardedPort = collections.namedtuple('ForwardedPort', ['child', 'port'])
def _unused_port():
"""Find a port that is not currently in use."""
# pylint: disable=unused-variable
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
addr, port = sock.getsockname()
sock.close()
return port
class QuotaTracker(object):
"""Manages quota for individual resources.
Note that this quota tracker is purely logical. It does not relate to the
real world. Others may be using the actual quota we have. This is only
regulating the test's use of the quota.
"""
MAX_QUOTA_METRIC_NAME = 'ResourceQuotaMax'
FREE_QUOTA_METRIC_NAME = 'ResourceQuotaAvailable'
INSUFFICIENT_QUOTA_METRIC_NAME = 'ResourceQuotaShortage'
def __init__(self, max_counts, metrics):
"""Constructor.
Args:
max_counts: [dict] The list of resources and quotas to manage.
"""
self.__counts = dict(max_counts)
self.__max_counts = dict(max_counts)
self.__condition_variable = threading.Condition()
self.__metrics = metrics
for name, value in max_counts.items():
labels = {'resource': name}
self.__metrics.set(self.MAX_QUOTA_METRIC_NAME, labels, value)
self.__metrics.set(self.FREE_QUOTA_METRIC_NAME, labels, value)
def acquire_all_safe(self, who, quota):
"""Acquire the desired quota, if any.
This is thread-safe and will block until it can be satisified.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired.
"""
got = None
with self.__condition_variable:
got = self.acquire_all_or_none_unsafe(who, quota)
while got is None:
logging.info('"%s" waiting on quota %s', who, quota)
self.__condition_variable.wait()
got = self.acquire_all_or_none_unsafe(who, quota)
return got
def acquire_all_or_none_safe(self, who, quota):
"""Acquire the desired quota, if any.
This is thread-safe, however will return None rather than block.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
with self.__condition_variable:
return self.acquire_all_or_none_unsafe(who, quota)
def acquire_all_or_none_unsafe(self, who, quota):
"""Acquire the desired quota, if any.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is asking, for logging purposes.
quota: [dict] The desired quota for each keyed resource, if any.
Returns:
The quota acquired if successful, or None if not.
"""
if not quota:
return {}
logging.info('"%s" attempting to acquire quota %s', who, quota)
acquired = {}
have_all = True
for key, value in quota.items():
got = self.__acquire_resource_or_none(key, value)
if not got:
have_all = False # Be lazy so we can record all the missing quota
else:
acquired[key] = got
if have_all:
return acquired
self.release_all_unsafe(who, acquired)
return None
def release_all_safe(self, who, quota):
"""Release all the resource quota.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
with self.__condition_variable:
self.release_all_unsafe(who, quota)
self.__condition_variable.notify_all()
def release_all_unsafe(self, who, quota):
"""Release all the resource quota.
This is not thread-safe so should be called while locked.
Args:
who: [string] Who is releasing, for logging purposes.
quota: [dict] The non-None result from an acquire_all* method.
"""
if not quota:
return
logging.debug('"%s" releasing quota %s', who, quota)
for key, value in quota.items():
self.__release_resource(key, value)
def __acquire_resource_or_none(self, name, count):
"""Attempt to acquire some amount of quota.
Args:
name: [string] The name of the resource we're acquiring.
count: [int] The amount of the resource
Returns:
The amount we were given. This is either all or none. If non-zero
but less than we asked for, then it gave us the max quota it has.
In order for this to be the case, it must have all the quota available.
Otherwise it will return 0.
"""
have = self.__counts.get(name)
if have is None:
return count
if have >= count:
self.__counts[name] = have - count
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, self.__counts[name])
return count
max_count = self.__max_counts[name]
if have == max_count:
logging.warning('Quota %s has a max of %d but %d is desired.'
' Acquiring all the quota as a best effort.',
name, max_count, count)
self.__counts[name] = 0
self.__metrics.set(
self.FREE_QUOTA_METRIC_NAME, {'resource': name}, 0)
return have
logging.warning('Quota %s has %d remaining, but %d are needed.'
' Rejecting the request for now.',
name, have, count)
self.__metrics.inc_counter(
self.INSUFFICIENT_QUOTA_METRIC_NAME, {'resource': name},
amount=count - have)
return 0
def __release_resource(self, name, count):
    """Restore *count* units of previously acquired quota for *name*."""
    current = self.__counts.get(name)
    if current is None:
        # Untracked resources have unlimited quota; nothing to give back.
        return
    updated = current + count
    self.__counts[name] = updated
    self.__metrics.set(
        self.FREE_QUOTA_METRIC_NAME, {'resource': name}, updated)
class ValidateBomTestController(object):
    """The test controller runs integration tests against a deployment."""

    @property
    def test_suite(self):
        """Returns the main test suite loaded from --test_suite."""
        return self.__test_suite

    @property
    def options(self):
        """The configuration options."""
        return self.__deployer.options

    @property
    def passed(self):
        """Returns the passed tests and reasons."""
        return self.__passed

    @property
    def failed(self):
        """Returns the failed tests and reasons."""
        return self.__failed

    @property
    def skipped(self):
        """Returns the skipped tests and reasons."""
        return self.__skipped

    @property
    def exit_code(self):
        """Determine final exit code for all tests: -1 on any failure, else 0."""
        return -1 if self.failed else 0
def __close_forwarded_ports(self):
    """Best-effort atexit hook killing every port-forwarding child process."""
    for entry in self.__forwarded_ports.values():
        child_process = entry[0]
        try:
            child_process.kill()
        except Exception as ex:
            # Shutdown path: log the failure and keep killing the rest.
            logging.error('Error terminating child: %s', ex)
def __collect_gce_quota(self, project, region,
                        project_percent=100.0, region_percent=100.0):
    """Derive internal quota limits from the GCE project's available headroom.

    Queries gcloud for project- and region-level quota metrics and turns
    each into an internal quota of (limit - usage) scaled by the given
    factor, floored at 1.

    NOTE(review): callers pass --test_gce_*_quota_factor values (default
    1.0) as these "percent" parameters, so despite the 100.0 defaults they
    act as multiplicative factors -- confirm intent.

    Args:
      project: [string] The GCP project to query.
      region: [string] The GCE region to query.
      project_percent: [float] Scale factor applied to project headroom.
      region_percent: [float] Scale factor applied to region headroom.

    Returns:
      (project_quota, region_quota) dicts keyed by 'gce_global_<metric>'
      and 'gce_region_<metric>' respectively.
    """
    project_info_json = check_subprocess('gcloud compute project-info describe'
                                         ' --format yaml'
                                         ' --project %s' % project)
    project_info = yaml.safe_load(project_info_json)
    project_quota = {'gce_global_%s' % info['metric']:
                     int(max(1, math.floor(
                         project_percent * (info['limit'] - info['usage']))))
                     for info in project_info['quotas']}

    region_info_json = check_subprocess('gcloud compute regions describe'
                                        ' --format yaml'
                                        ' %s' % region)
    region_info = yaml.safe_load(region_info_json)
    region_quota = {
        'gce_region_%s' % info['metric']: int(max(
            1, math.floor(region_percent * (info['limit'] - info['usage']))))
        for info in region_info['quotas']
    }
    return project_quota, region_quota
def __init__(self, deployer):
    """Construct a controller bound to *deployer*.

    Builds the internal quota specification (GCE headroom plus
    --test_default_quota and --test_quota overrides), loads the suite from
    --test_profiles, and prepares the locks, semaphore and port-forwarding
    bookkeeping used while running tests.

    Args:
      deployer: The deployment driver; provides .options and .metrics.
    """
    options = deployer.options
    quota_spec = {}
    if options.google_account_project:
        project_quota, region_quota = self.__collect_gce_quota(
            options.google_account_project, options.test_gce_quota_region,
            project_percent=options.test_gce_project_quota_factor,
            region_percent=options.test_gce_region_quota_factor)
        quota_spec.update(project_quota)
        quota_spec.update(region_quota)

    # Built-in quota defaults first, then per-run --test_quota on top.
    if options.test_default_quota:
        quota_spec.update({
            parts[0].strip(): int(parts[1])
            for parts in [entry.split('=')
                          for entry in options.test_default_quota.split(',')]
        })
    if options.test_quota:
        quota_spec.update(
            {parts[0].strip(): int(parts[1])
             for parts in [entry.split('=')
                           for entry in options.test_quota.split(',')]})

    self.__quota_tracker = QuotaTracker(quota_spec, deployer.metrics)
    self.__deployer = deployer
    self.__lock = threading.Lock()
    self.__passed = []  # Resulted in success
    self.__failed = []  # Resulted in failure
    self.__skipped = []  # Will not run at all
    with open(options.test_profiles, 'r') as fd:
        self.__test_suite = yaml.safe_load(fd)
    self.__extra_test_bindings = (
        self.__load_bindings(options.test_extra_profile_bindings)
        if options.test_extra_profile_bindings
        else {}
    )

    # NOTE(review): assumes the suite defines 'tests'; len(None) would raise
    # here if it is missing -- confirm the profiles file always has it.
    num_concurrent = len(self.__test_suite.get('tests')) or 1
    num_concurrent = int(min(num_concurrent,
                             options.test_concurrency or num_concurrent))
    self.__semaphore = threading.Semaphore(num_concurrent)

    # dictionary of service -> ForwardedPort
    self.__forwarded_ports = {}
    atexit.register(self.__close_forwarded_ports)

    # Map of service names to native ports.
    self.__service_port_map = {
        # These are critical to most tests.
        'clouddriver': 7002,
        'clouddriver-caching': 7002,
        'clouddriver-rw': 7002,
        'clouddriver-ro': 7002,
        'gate': 8084,
        'front50': 8080,

        # Some tests needed these too.
        'orca': 8083,
        'rosco': 8087,
        'igor': 8088,
        'echo': 8089,
        'echo-scheduler': 8089,
        'echo-worker': 8089
    }
def __replace_ha_api_service(self, service, options):
    """Map a logical service name to its HA variant when HA is enabled.

    Args:
      service: [string] The logical microservice name (e.g. 'clouddriver').
      options: The configuration options holding the ha_*_enabled flags.

    Returns:
      [string] The HA-specific service name to target, or the original
      name when no HA remapping applies.
    """
    transform_map = {}
    if options.ha_clouddriver_enabled:
        transform_map['clouddriver'] = 'clouddriver-rw'
    if options.ha_echo_enabled:
        # Fixed: this was ['echo-worker'] (a list). Callers use the returned
        # value as a dict key (__forwarded_ports) and as a set element, so
        # it must be a hashable string like the clouddriver mapping above.
        transform_map['echo'] = 'echo-worker'
    return transform_map.get(service, service)
def __load_bindings(self, path):
    """Load KEY=VALUE binding pairs from the file at *path*.

    Args:
      path: [string] Path to a file of newline-separated KEY=VALUE entries.

    Returns:
      [dict] The parsed bindings with surrounding whitespace stripped.
    """
    with open(path, 'r') as stream:
        content = stream.read()
    result = {}
    for line in content.split('\n'):
        # Fixed regex: the original '^([a-zA-Z][^=])+=(.*)' repeated the
        # capture group, so group(1) held only the final two characters of
        # the key. Capture the whole key up to the first '='.
        match = re.match(r'^([a-zA-Z][^=]*)=(.*)', line)
        if match:
            result[match.group(1).strip()] = match.group(2).strip()
    # Fixed: the original built `result` but never returned it, so callers
    # stored None in self.__extra_test_bindings.
    return result
def __forward_port_to_service(self, service_name):
    """Forward ports to the deployed service.

    This is private to ensure that it is called with the lock.
    The lock is needed to mitigate a race condition. See the
    inline comment around the Popen call.

    Returns:
      A ForwardedPort(child, local_port) for the spawned tunnel process.
    """
    local_port = _unused_port()
    remote_port = self.__service_port_map[service_name]
    command = self.__deployer.make_port_forward_command(
        service_name, local_port, remote_port)
    logging.info('Establishing connection to %s with port %d',
                 service_name, local_port)

    # There seems to be an intermittent race condition here.
    # Not sure if it is gcloud or python.
    # Locking the individual calls seems to work around it.
    #
    # We dont need to lock because this function is called from within
    # the lock already.
    logging.debug('RUNNING %s', ' '.join(command))

    # Background poller that touches the service health endpoint so the
    # tunnel does not idle out. Runs forever; started as a daemon thread.
    class KeepAlive(threading.Thread):
        def run(self):
            while True:
                try:
                    logging.info('KeepAlive %s polling', service_name)
                    got = urlopen('http://localhost:{port}/health'
                                  .format(port=local_port))
                    logging.info('KeepAlive %s -> %s', service_name, got.getcode())
                except Exception as ex:
                    logging.info('KeepAlive %s -> %s', service_name, ex)
                time.sleep(20)

    if self.options.deploy_spinnaker_type == 'distributed':
        # For now, distributed deployments are k8s
        # and K8s port forwarding with kubectl requires keep alive.
        hack = KeepAlive()
        hack.setDaemon(True)
        hack.start()

    logfile = os.path.join(
        self.options.output_dir,
        'port_forward_%s-%d.log' % (service_name, os.getpid()))
    stream = open(logfile, 'w')
    stream.write(str(command) + '\n\n')
    logging.debug('Logging "%s" port forwarding to %s', service_name, logfile)
    # NOTE(review): stderr is sent to the logfile while stdout is left
    # inherited (None); an earlier comment claimed stdout was the one being
    # redirected (to prevent buffer overflows in k8s) -- confirm which
    # stream was intended.
    child = subprocess.Popen(
        command,
        stderr=stream,
        stdout=None)
    return ForwardedPort(child, local_port)
def build_summary(self):
    """Return a summary of all the test results.

    NOTE(review): when testing is disabled this returns a (message, 0)
    tuple while the normal path returns a plain string -- confirm which
    shape callers expect.
    """
    def append_list_summary(summary, name, entries):
        """Write out all the names from the test results."""
        if not entries:
            return
        summary.append('{0}:'.format(name))
        for entry in entries:
            # entry is (test_name, reason-or-logfile); only the name is shown.
            summary.append(' * {0}'.format(entry[0]))

    options = self.options
    if not options.testing_enabled:
        return 'No test output: testing was disabled.', 0

    summary = ['\nSummary:']
    append_list_summary(summary, 'SKIPPED', self.skipped)
    append_list_summary(summary, 'PASSED', self.passed)
    append_list_summary(summary, 'FAILED', self.failed)

    num_skipped = len(self.skipped)
    num_passed = len(self.passed)
    num_failed = len(self.failed)
    summary.append('')
    if num_failed:
        summary.append(
            'FAILED {0} of {1}, skipped {2}'.format(
                num_failed, (num_failed + num_passed), num_skipped))
    else:
        summary.append('PASSED {0}, skipped {1}'.format(num_passed, num_skipped))

    return '\n'.join(summary)
def wait_on_service(self, service_name, port=None, timeout=None):
    """Wait for the given service to be available on the specified port.

    Establishes (or reuses) a port-forward to the service, then polls its
    /health endpoint until it answers, the timeout elapses, or the tunnel
    process dies.

    Args:
      service_name: [string] The service name we are waiting on.
      port: [int] The remote port the service is at.
      timeout: [int] How much time to wait before giving up.

    Returns:
      The ForwardedPort entry for this service.
    """
    try:
        with self.__lock:
            forwarding = self.__forwarded_ports.get(service_name)
            if forwarding is None:
                forwarding = self.__forward_port_to_service(service_name)
            self.__forwarded_ports[service_name] = forwarding
    except Exception:
        logging.exception('Exception while attempting to forward ports to "%s"',
                          service_name)
        raise

    timeout = timeout or self.options.test_service_startup_timeout
    end_time = time.time() + timeout
    logging.info('Waiting on "%s..."', service_name)
    if port is None:
        port = self.__service_port_map[service_name]

    # It seems we have a race condition in the poll
    # where it thinks the jobs have terminated.
    # I've only seen this happen once.
    time.sleep(1)

    threadid = hex(threading.current_thread().ident)
    logging.info('WaitOn polling %s from thread %s', service_name, threadid)
    while forwarding.child.poll() is None:
        try:
            # localhost is hardcoded here because we are port forwarding.
            # timeout=20 is to appease kubectl port forwarding, which will close
            # if left idle for 30s
            urlopen('http://localhost:{port}/health'
                    .format(port=forwarding.port),
                    timeout=20)
            logging.info('"%s" is ready on port %d | %s',
                         service_name, forwarding.port, threadid)
            return forwarding
        except HTTPError as error:
            # An HTTP error still proves the service is answering; let the
            # tests proceed even if the service reports unhealthy.
            logging.warning('%s got %s. Ignoring that for now.',
                            service_name, error)
            return forwarding
        except (URLError, Exception) as error:
            # NOTE(review): URLError is redundant here -- Exception already
            # subsumes it, so any error lands in this branch.
            if time.time() >= end_time:
                logging.error(
                    'Timing out waiting for %s | %s', service_name, threadid)
                raise_and_log_error(TimeoutError(service_name, cause=service_name))
            time.sleep(2.0)

    # The tunnel child process exited; the service is unreachable.
    logging.error('It appears %s is no longer available.'
                  ' Perhaps the tunnel closed.',
                  service_name)
    raise_and_log_error(
        ResponseError('It appears that {0} failed'.format(service_name),
                      server='tunnel'))
def run_tests(self):
    """The actual controller that coordinates and runs the tests.

    This attempts to process all the tests concurrently across
    separate threads, where each test will:

    (1) Determine whether or not the test is a candidate
        (passes the --test_include / --test_exclude criteria)

    (2) Evaluate the test's requirements.
        If the configuration requirements are not met then SKIP the test.

        (a) Attempt to tunnel each of the service tests, sharing existing
            tunnels used by other tests. The tunnels allocate unused local
            ports to avoid potential conflict within the local machine.

        (b) Wait for the service to be ready. Ideally this means it is
            healthy, however we'll allow unhealthy services to proceed
            as well and let those tests run and fail in case they are
            testing unhealthy service situations.

        (c) If there is an error or the service takes too long then
            outright FAIL the test.

    (3) Acquire the quota that the test requires.

        * If the quota is not currently available, then block the
          thread until it is. Since each test is in its own thread, this
          will not impact other tests.

        * Quota are only internal resources within the controller.
          This is used for purposes of rate limiting, etc. It does not
          coordinate with the underlying platforms.

        * Quota is governed with --test_quota. If a test requests
          a resource without a known quota, then the quota is assumed
          to be infinite.

    (4) Grab a semaphore used to rate limit running tests.
        This is controlled by --test_concurrency, which defaults to all.

    (5) Run the test.

    (6) Release the quota and semaphore to unblock other tests.

    (7) Record the outcome as PASS or FAIL

    If an exception is thrown along the way, the test will automatically
    be recorded as a FAILURE.

    Returns:
      (#passed, #failed, #skipped)
    """
    options = self.options
    if not options.testing_enabled:
        logging.info('--testing_enabled=false skips test phase entirely.')
        return 0, 0, 0

    all_test_profiles = self.test_suite['tests']
    logging.info(
        'Running tests (concurrency=%s).',
        options.test_concurrency or 'infinite')
    # One thread per test; the semaphore built in __init__ enforces the
    # actual concurrency limit.
    thread_pool = ThreadPool(len(all_test_profiles))
    thread_pool.map(self.__run_or_skip_test_profile_entry_wrapper,
                    all_test_profiles.items())
    thread_pool.terminate()
    logging.info('Finished running tests.')
    return len(self.__passed), len(self.__failed), len(self.__skipped)
def __run_or_skip_test_profile_entry_wrapper(self, args):
    """Outer wrapper for running tests.

    Catches every exception from the inner runner and records the test as
    failed rather than letting the thread-pool worker die.

    Args:
      args: [dict entry] The name and spec tuple from the mapped element.
    """
    test_name, spec = args[0], args[1]
    metric_labels = {'test_name': test_name, 'skipped': ''}
    try:
        self.__run_or_skip_test_profile_entry(test_name, spec, metric_labels)
    except Exception as error:
        logging.error('%s threw an exception:\n%s',
                      test_name, traceback.format_exc())
        with self.__lock:
            self.__failed.append(
                (test_name, 'Caught exception {0}'.format(error)))
def __record_skip_test(self, test_name, reason, skip_code, metric_labels):
    """Record a skipped test with its reason and emit the skip metric."""
    logging.warning(reason)
    self.__skipped.append((test_name, reason))
    # Copy the labels so the caller's dict is not mutated.
    labels = dict(metric_labels, skipped=skip_code, success='Skipped')
    self.__deployer.metrics.observe_timer(
        'RunTestScript' + '_Outcome', labels, 0.0)
def __run_or_skip_test_profile_entry(self, test_name, spec, metric_labels):
    """Runs a test from within the thread-pool map() function.

    Applies the --test_include / --test_exclude filters, recording a skip
    when the test does not qualify, otherwise delegates to the helper.

    Args:
      test_name: [string] The name of the test.
      spec: [dict] The test profile specification.
    """
    options = self.options
    include_pattern = options.test_include
    exclude_pattern = options.test_exclude

    if not re.search(include_pattern, test_name):
        reason = ('Skipped test "{name}" because it does not match explicit'
                  ' --test_include criteria "{criteria}".'
                  .format(name=test_name, criteria=include_pattern))
        self.__record_skip_test(test_name, reason,
                                'NotExplicitInclude', metric_labels)
        return

    if exclude_pattern and re.search(exclude_pattern, test_name):
        reason = ('Skipped test "{name}" because it matches explicit'
                  ' --test_exclude criteria "{criteria}".'
                  .format(name=test_name, criteria=exclude_pattern))
        self.__record_skip_test(test_name, reason,
                                'ExplicitExclude', metric_labels)
        return

    # This can raise an exception
    self.run_test_profile_helper(test_name, spec, metric_labels)
def validate_test_requirements(self, test_name, spec, metric_labels):
    """Determine whether or not the test requirements are satisfied.

    If not, record the reason a skip or failure.
    This may throw exceptions, which are immediate failure.

    Args:
      test_name: [string] The name of the test.
      spec: [dict] The profile specification containing requirements.
        This argument will be pruned as values are consumed from it
        ('requires' and 'api' are popped here).

    Returns:
      True if requirements are satisfied, False if not.
    """
    if not 'api' in spec:
        raise_and_log_error(
            UnexpectedError('Test "{name}" is missing an "api" spec.'.format(
                name=test_name)))
    requires = spec.pop('requires', {})
    configuration = requires.pop('configuration', {})
    our_config = vars(self.options)
    for key, value in configuration.items():
        if key not in our_config:
            message = ('Unknown configuration key "{0}" for test "{1}"'
                       .format(key, test_name))
            raise_and_log_error(ConfigError(message))
        if value != our_config[key]:
            reason = ('Skipped test {name} because {key}={want} != {have}'
                      .format(name=test_name, key=key,
                              want=value, have=our_config[key]))
            with self.__lock:
                self.__record_skip_test(test_name, reason,
                                        'IncompatableConfig', metric_labels)
            return False

    # Collect the services this test depends on, mapping each through its
    # HA variant when HA deployments are enabled.
    services = set(replace_ha_services(
        requires.pop('services', []), self.options))
    services.add(self.__replace_ha_api_service(
        spec.pop('api'), self.options))

    # Anything left over in 'requires' or the spec itself is unrecognized.
    if requires:
        raise_and_log_error(
            ConfigError('Unexpected fields in {name}.requires: {remaining}'
                        .format(name=test_name, remaining=requires)))
    if spec:
        raise_and_log_error(
            ConfigError('Unexpected fields in {name} specification: {remaining}'
                        .format(name=test_name, remaining=spec)))

    def wait_on_services(services):
        # Wait on all required services concurrently, one thread each.
        thread_pool = ThreadPool(len(services))
        thread_pool.map(self.wait_on_service, services)
        thread_pool.terminate()

    self.__deployer.metrics.track_and_time_call(
        'WaitingOnServiceAvailability',
        metric_labels, self.__deployer.metrics.default_determine_outcome_labels,
        wait_on_services, services)
    return True
def add_extra_arguments(self, test_name, args, commandline):
    """Add extra arguments to the commandline.

    Args:
      test_name: [string] Name of test specifying the options.
      args: [dict] Specification of additional arguments to pass.
        Each key is the name of the argument, the value is the value to pass.
        If the value is preceded with a '$' then it refers to the value of
        an option. If the value is None then just add the key without an arg.
      commandline: [list] The list of command line arguments to append to.
    """
    option_dict = vars(self.options)
    aliases_dict = self.test_suite.get('aliases', {})
    for key, value in args.items():
        if isinstance(value, (int, bool)):
            value = str(value)
        if key == 'alias':
            # 'alias' is a meta-key: its value names argument groups in the
            # suite's "aliases" section to expand recursively.
            for alias_name in value:
                if not alias_name in aliases_dict:
                    raise_and_log_error(ConfigError(
                        'Unknown alias "{name}" referenced in args for "{test}"'
                        .format(name=alias_name, test=test_name)))
                self.add_extra_arguments(
                    test_name, aliases_dict[alias_name], commandline)
            continue
        elif value is None:
            pass
        elif value.startswith('$'):
            # '$name' resolves from options, then the extra test bindings
            # file, then the environment, in that order. Falsy resolved
            # values become the literal '""'.
            # NOTE(review): assumes non-int/bool/None values are strings;
            # a list or float here would raise on startswith -- confirm.
            option_name = value[1:]
            if option_name in option_dict:
                value = option_dict[option_name] or '""'
            elif option_name in self.__extra_test_bindings:
                value = self.__extra_test_bindings[option_name] or '""'
            elif option_name in os.environ:
                value = os.environ[option_name]
            else:
                raise_and_log_error(ConfigError(
                    'Unknown option "{name}" referenced in args for "{test}"'
                    .format(name=option_name, test=test_name)))
        if value is None:
            # Flag-style argument with no value.
            commandline.append('--' + key)
        else:
            commandline.extend(['--' + key, value])
def make_test_command_or_none(self, test_name, spec, metric_labels):
    """Returns the command to run the test, or None to skip.

    Args:
      test_name: The test to run.
      spec: The test specification profile.
        This argument will be pruned as values are consumed from it.

    Returns:
      The command line argument list, or None to skip.

      This may throw an exception if the spec is invalid.
      This does not consider quota, which is checked later.
    """
    options = self.options
    microservice_api = self.__replace_ha_api_service(spec.get('api'), options)
    test_rel_path = spec.pop('path', None) or os.path.join(
        'citest', 'tests', '{0}.py'.format(test_name))
    args = spec.pop('args', {})

    # This validates the spec and waits on the required services.
    if not self.validate_test_requirements(test_name, spec, metric_labels):
        return None

    testing_root_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..', 'testing'))
    test_path = os.path.join(testing_root_dir, test_rel_path)

    citest_log_dir = os.path.join(options.log_dir, 'citest_logs')
    if not os.path.exists(citest_log_dir):
        try:
            os.makedirs(citest_log_dir)
        except OSError:
            # Fixed: was a bare `except`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only makedirs failures matter;
            # re-check for the race where another thread created the dir.
            if not os.path.exists(citest_log_dir):
                raise

    command = [
        'python', test_path,
        '--log_dir', citest_log_dir,
        '--log_filebase', test_name,
        '--native_host', 'localhost',
        '--native_port', str(self.__forwarded_ports[microservice_api].port)
    ]
    if options.test_stack:
        command.extend(['--test_stack', str(options.test_stack)])

    self.add_extra_arguments(test_name, args, command)
    return command
def __execute_test_command(self, test_name, command, metric_labels):
    """Run the test subprocess, logging its console output, under metrics.

    Args:
      test_name: [string] The test being executed.
      command: [list] The command line to run.
      metric_labels: [dict] Labels for the RunTestScript metric.

    Returns:
      (retcode, logfile) where retcode is 0 on success, -1 on failure.
    """
    metrics = self.__deployer.metrics
    logging.debug('Running %s', ' '.join(command))

    def run_and_log_test_script(command):
        """Run one test subprocess, capturing its console output to a file."""
        logfile = os.path.join(self.options.output_dir, 'citest_logs',
                               '%s-%s.console.log' % (test_name, os.getpid()))
        logging.info('Logging test "%s" to %s...', test_name, logfile)
        try:
            check_subprocesses_to_logfile('running test', logfile, [command])
            retcode = 0
            logging.info('Test %s PASSED -- see %s', test_name, logfile)
        except Exception:
            # Fixed: was a bare `except`. Catching Exception still marks any
            # error as a FAILED test, but no longer swallows
            # KeyboardInterrupt/SystemExit.
            retcode = -1
            logging.info('Test %s FAILED -- see %s', test_name, logfile)
        return retcode, logfile

    return metrics.track_and_time_call(
        'RunTestScript',
        metric_labels, determine_subprocess_outcome_labels,
        run_and_log_test_script, ' '.join(command))
def run_test_profile_helper(self, test_name, spec, metric_labels):
    """Helper function for running an individual test.

    The caller wraps this to trap and handle exceptions.

    Acquires quota, then the concurrency semaphore, runs the test command,
    and records the outcome into the passed/failed lists.

    Args:
      test_name: The test being run.
      spec: The test specification profile.
        This argument will be pruned as values are consumed from it.
    """
    quota = spec.pop('quota', {})
    command = self.make_test_command_or_none(test_name, spec, metric_labels)
    if command is None:
        # The test was skipped; the skip was already recorded.
        return

    logging.info('Acquiring quota for test "%s"...', test_name)
    quota_tracker = self.__quota_tracker
    metrics = self.__deployer.metrics
    acquired_quota = metrics.track_and_time_call(
        'ResourceQuotaWait',
        metric_labels, metrics.default_determine_outcome_labels,
        quota_tracker.acquire_all_safe, test_name, quota)
    if acquired_quota:
        logging.info('"%s" acquired quota %s', test_name, acquired_quota)

    execute_time = None
    start_time = time.time()
    try:
        logging.info('Scheduling "%s"...', test_name)

        # This will block. Note that we already acquired quota, thus
        # we are blocking holding onto that quota. However since we are
        # blocked awaiting a thread, nobody else can execute either,
        # so it doesnt matter that we might be starving them of quota.
        self.__semaphore.acquire(True)
        execute_time = time.time()
        wait_time = int(execute_time - start_time + 0.5)
        if wait_time > 1:
            logging.info('"%s" had a semaphore contention for %d secs.',
                         test_name, wait_time)
        logging.info('Executing "%s"...', test_name)
        retcode, logfile_path = self.__execute_test_command(
            test_name, command, metric_labels)
    finally:
        # Always release the semaphore and quota, even on failure, so
        # other blocked tests can proceed.
        logging.info('Finished executing "%s"...', test_name)
        self.__semaphore.release()
        if acquired_quota:
            quota_tracker.release_all_safe(test_name, acquired_quota)

    end_time = time.time()
    delta_time = int(end_time - execute_time + 0.5)

    with self.__lock:
        if not retcode:
            logging.info('%s PASSED after %d secs', test_name, delta_time)
            self.__passed.append((test_name, logfile_path))
        else:
            logging.info('FAILED %s after %d secs', test_name, delta_time)
            self.__failed.append((test_name, logfile_path))
def init_argument_parser(parser, defaults):
    """Add testing related command-line parameters.

    Args:
      parser: The argparse parser to register the arguments with.
      defaults: [dict] Default value overrides keyed by option name.
    """
    add_parser_argument(
        parser, 'test_profiles',
        defaults, os.path.join(os.path.dirname(__file__), 'all_tests.yaml'),
        help='The path to the set of test profiles.')

    add_parser_argument(
        parser, 'test_extra_profile_bindings', defaults, None,
        help='Path to a file with additional bindings that the --test_profiles'
             ' file may reference.')

    add_parser_argument(
        parser, 'test_concurrency', defaults, None, type=int,
        help='Limits how many tests to run at a time. Default is unbounded')

    add_parser_argument(
        parser, 'test_service_startup_timeout', defaults, 300, type=int,
        help='Number of seconds to permit services to startup before giving up.')

    add_parser_argument(
        parser, 'test_gce_project_quota_factor', defaults, 1.0, type=float,
        help='Default percentage of available project quota to make available'
             ' for tests.')

    add_parser_argument(
        parser, 'test_gce_region_quota_factor', defaults, 1.0, type=float,
        help='Default percentage of available region quota to make available'
             ' for tests.')

    add_parser_argument(
        parser, 'test_gce_quota_region', defaults, 'us-central1',
        help='GCE Compute Region to gather region quota limits from.')

    add_parser_argument(
        parser, 'test_default_quota',
        defaults, '',
        help='Default quota parameters for values used in the --test_profiles.'
             ' This does not include GCE quota values, which are determined'
             ' at runtime. These value can be further overriden by --test_quota.'
             ' These are meant as built-in defaults, where --test_quota as'
             ' per-execution overriden.')

    add_parser_argument(
        parser, 'test_quota', defaults, '',
        help='Comma-delimited name=value list of quota overrides.')

    add_parser_argument(
        parser, 'testing_enabled', defaults, True, type=bool,
        help='If false then dont run the testing phase.')

    # Deprecated alias that writes into the same testing_enabled destination.
    add_parser_argument(
        parser, 'test_disable', defaults, False, action='store_true',
        dest='testing_enabled',
        help='DEPRECATED: Use --testing_enabled=false.')

    add_parser_argument(
        parser, 'test_include', defaults, '.*',
        help='Regular expression of tests to run or None for all.')

    add_parser_argument(
        parser, 'test_exclude', defaults, None,
        help='Regular expression of otherwise runnable tests to skip.')

    add_parser_argument(
        parser, 'test_stack', defaults, None,
        help='The --test_stack to pass through to tests indicating which'
             ' Spinnaker application "stack" to use. This is typically'
             ' to help trace the source of resources created within the'
             ' tests.')

    add_parser_argument(
        parser, 'test_jenkins_job_name', defaults, 'TriggerBake',
        help='The Jenkins job name to use in tests.')
def validate_options(options):
    """Validate testing related command-line parameters."""
    profiles_path = options.test_profiles
    if os.path.exists(profiles_path):
        return
    raise_and_log_error(
        ConfigError('--test_profiles "{0}" does not exist.'.format(
            profiles_path)))
| skim1420/spinnaker | dev/validate_bom__test.py | Python | apache-2.0 | 38,012 | [
"ORCA"
] | 919c4fc8dd3c74043d1945bcef55c4d5a5cebcf11e0ea32fa1eb6efd540200d2 |
# -*- coding: utf-8 -*-
#
# one_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
One neuron example
------------------
This script simulates a neuron driven by a constant external current
and records its membrane potential.
See Also
~~~~~~~~
:doc:`twoneurons`
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Additionally, we set the verbosity to suppress info
# messages and reset the kernel.
# Resetting the kernel allows you to execute the script several
# times in a Python shell without interferences from previous NEST
# simulations. Thus, without resetting the kernel the network status
# including connections between nodes, status of neurons, devices and
# intrinsic time clocks, is kept and influences the next simulations.
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the nodes (neurons and devices) are created using ``Create``.
# We store the returned handles in variables for later reference.
# The ``Create`` function also allow you to create multiple nodes
# e.g. ``nest.Create('iaf_psc_alpha',5)``
# Also default parameters of the model can be configured using ``Create``
# by including a list of parameter dictionaries
# e.g. `nest.Create("iaf_psc_alpha", params=[{'I_e':376.0}])`.
# In this example we will configure these parameters in an additional
# step, which is explained in the third section.
neuron = nest.Create("iaf_psc_alpha")
voltmeter = nest.Create("voltmeter")
###############################################################################
# Third, we set the external current of the neuron.
neuron.I_e = 376.0
###############################################################################
# Fourth, the neuron is connected to the voltmeter. The command
# ``Connect`` has different variants. Plain ``Connect`` just takes the
# handles of pre- and postsynaptic nodes and uses the default values
# for weight and delay. Note that the connection direction for the voltmeter is
# reversed compared to the spike recorder, because it observes the
# neuron instead of receiving events from it. Thus, ``Connect``
# reflects the direction of signal flow in the simulation kernel
# rather than the physical process of inserting an electrode into the
# neuron. The latter semantics is presently not available in NEST.
nest.Connect(voltmeter, neuron)
###############################################################################
# Now we simulate the network using ``Simulate``, which takes the
# desired simulation time in milliseconds.
nest.Simulate(1000.0)
###############################################################################
# Finally, we plot the neuron's membrane potential as a function of
# time and display the plot using pyplot.
nest.voltage_trace.from_device(voltmeter)
plt.show()
| lekshmideepu/nest-simulator | pynest/examples/one_neuron.py | Python | gpl-2.0 | 3,680 | [
"NEURON"
] | fe00445a78708a4afe0c5ef8131300a11fef9d1038b0318243a6642a5f41527d |
import os
import glob
import sys
import copy
import itertools
import logging
import numpy as np
from .utils import stack_files
from astropy.table import Column
from fermipy.utils import get_parameter_limits
def fit_region(gta, modelname, src_name, loge_bounds=None, **kwargs):
    """Run the baseline region fit and extension analyses for src_name.

    Optimizes the ROI and fits the target source as a point source, writing
    the fitted ROI, TS/residual maps and SEDs. Then repeats the extension
    fit for RadialGaussian and RadialDisk spatial models, including +/- PSF
    systematic variants, restoring the baseline ROI between the two.
    All outputs are prefixed with *modelname*.

    Args:
      gta: The fermipy GTAnalysis instance to operate on.
      modelname: [string] Prefix for output files and ROI snapshots.
      src_name: [string] Name of the target source in the ROI.
      loge_bounds: Optional (logemin, logemax) energy range restriction.
      **kwargs: 'skip_opt' lists source names to skip during optimization.
    """
    skip_opt = kwargs.get('skip_opt', [])

    gta.logger.info('Starting Region Fit %s' % (modelname))

    lnl0 = -gta.like()
    gta.logger.info('%s Model Likelihood: %f' % (modelname, lnl0))
    gta.print_params()

    if loge_bounds is not None:
        gta.set_energy_range(loge_bounds[0], loge_bounds[1])

    # Spatial/spectral templates for the maps below.
    # NOTE(review): model0 is defined but never used in this function.
    model0 = {'SpatialModel': 'PointSource', 'Index': 1.5}
    model_pl20 = {'SpatialModel': 'PointSource', 'Index': 2.0}
    model_pl27 = {'SpatialModel': 'PointSource', 'Index': 2.7}
    model3 = {'SpatialModel': 'Gaussian', 'Index': 2.0, 'SpatialWidth': 0.1}
    # 0.8246... presumably converts the Gaussian width into an equivalent
    # disk radius -- confirm against the fermipy extension conventions.
    model4 = {'SpatialModel': 'RadialDisk', 'Index': 2.0,
              'SpatialWidth': 0.1 * 0.8246211251235321}

    gta.optimize(skip=skip_opt, shape_ts_threshold=9.0)

    diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
    skydir = gta.roi[src_name].skydir

    # Free only the neighborhood of the target source, then fit.
    gta.free_sources(False)
    gta.free_sources(skydir=skydir, distance=1.5, pars='norm')
    gta.free_sources(skydir=skydir, distance=1.0, pars='shape',
                     exclude=diff_sources)
    gta.free_source(src_name)
    gta.fit()
    gta.update_source(src_name, reoptimize=True)
    gta.write_roi(modelname + '_roi', make_plots=True)
    gta.print_roi()
    gta.print_params()

    lnl1 = -gta.like()
    gta.logger.info('%s Model Likelihood: %f' % (modelname, lnl1))
    gta.logger.info('%s Model Likelihood Delta: %f' % (modelname, lnl1 - lnl0))

    # TS Maps
    # NOTE(review): these returned map objects are never used afterwards.
    maps_model_pl20 = gta.tsmap(modelname, model=model_pl20,
                                loge_bounds=loge_bounds, make_plots=True)
    gta.tsmap(modelname, model=model_pl27,
              loge_bounds=loge_bounds, make_plots=True)
    maps_model_pl20_nosource = gta.tsmap('%s_nosource' % modelname,
                                         model=model_pl20, exclude=[src_name],
                                         loge_bounds=loge_bounds, make_plots=True)
    maps_model_pl27_nosource = gta.tsmap('%s_nosource' % modelname,
                                         model=model_pl27, exclude=[src_name],
                                         loge_bounds=loge_bounds, make_plots=True)

    #maps_model4_nosource = gta.tsmap('%s_nosource'%modelname,
    #                                 model=model4, exclude=[src_name],
    #                                 loge_bounds=loge_bounds, make_plots=True)

    gta.residmap(modelname, model=model3,
                 loge_bounds=loge_bounds, make_plots=True)

    # SED Analysis
    gta.sed(src_name, outfile=modelname + '_sed_fixed',
            prefix=modelname + '_fixed',
            make_plots=True)

    gta.sed(src_name, outfile=modelname + '_sed',
            prefix=modelname,
            free_radius=1.0, make_plots=True)

    gta.sed(src_name, outfile=modelname + '_sed_bin4',
            prefix=modelname + '_bin4', loge_bins=gta.log_energies[::2],
            free_radius=1.0, make_plots=True)

    # PSF systematic envelope, applied as energy-dependent scale functions.
    psf_syst_scale = np.array([0.05, 0.05, 0.2])
    psf_fnlo = ([3.0, 4.0, 5.5], list(-1.0 * psf_syst_scale))
    psf_fnhi = ([3.0, 4.0, 5.5], list(1.0 * psf_syst_scale))

    # -----------------------------------------------------------------
    # Gaussian Analysis
    # -----------------------------------------------------------------
    kw = dict(spatial_model='RadialGaussian',
              free_radius=1.0, make_tsmap=False)

    gta.extension(src_name, outfile=modelname + '_ext_gauss_ext',
                  prefix=modelname + '_gauss',
                  fit_position=True, free_background=True,
                  make_plots=True, update=True, **kw)

    gta.extension(src_name, outfile=modelname + '_ext_gauss_ext_psflo',
                  prefix=modelname + '_gauss_psflo',
                  psf_scale_fn=psf_fnlo, **kw)

    gta.extension(src_name, outfile=modelname + '_ext_gauss_ext_psfhi',
                  prefix=modelname + '_gauss_psfhi',
                  psf_scale_fn=psf_fnhi, **kw)

    gta.free_source(src_name)
    gta.fit()
    gta.update_source(src_name, reoptimize=True)
    gta.print_roi()
    gta.print_params()

    gta.sed(src_name, outfile=modelname + '_ext_gauss_sed',
            prefix=modelname + '_gauss',
            free_radius=1.0, make_plots=True)

    gta.sed(src_name, outfile=modelname + '_ext_gauss_sed_bin4',
            prefix=modelname + '_gauss_bin4', loge_bins=gta.log_energies[::2],
            free_radius=1.0, make_plots=True)

    gta.write_roi(modelname + '_ext_gauss_roi')

    gta.tsmap(modelname + '_ext_gauss', model=model_pl20,
              loge_bounds=loge_bounds, make_plots=True)
    gta.tsmap(modelname + '_ext_gauss', model=model_pl27,
              loge_bounds=loge_bounds, make_plots=True)

    # -----------------------------------------------------------------
    # Disk Analysis
    # -----------------------------------------------------------------
    # Restore the point-source baseline before the disk extension fit.
    gta.load_roi(modelname + '_roi')
    gta.reload_source(src_name)

    kw = dict(spatial_model='RadialDisk',
              free_radius=1.0, make_tsmap=False)

    gta.extension(src_name, outfile=modelname + '_ext_disk_ext',
                  prefix=modelname + '_disk',
                  fit_position=True, free_background=True,
                  make_plots=True, update=True, **kw)

    gta.extension(src_name, outfile=modelname + '_ext_disk_ext_psflo',
                  prefix=modelname + '_disk_psflo',
                  psf_scale_fn=psf_fnlo, **kw)

    gta.extension(src_name, outfile=modelname + '_ext_disk_ext_psfhi',
                  prefix=modelname + '_disk_psfhi',
                  psf_scale_fn=psf_fnhi, **kw)

    gta.free_source(src_name)
    gta.fit()
    gta.update_source(src_name, reoptimize=True)
    gta.print_roi()
    gta.print_params()

    gta.sed(src_name, outfile=modelname + '_ext_disk_sed',
            prefix=modelname + '_disk',
            free_radius=1.0, make_plots=True)

    gta.sed(src_name, outfile=modelname + '_ext_disk_sed_bin4',
            prefix=modelname + '_disk_bin4', loge_bins=gta.log_energies[::2],
            free_radius=1.0, make_plots=True)

    gta.write_roi(modelname + '_ext_disk_roi')

    # Leave the analysis state at the point-source baseline.
    gta.load_roi(modelname + '_roi')
    gta.reload_source(src_name)

    gta.logger.info('Finished Region Fit %s' % (modelname))
def fit_halo_sed(gta, modelname, src_name, halo_width,
                 halo_index, spatial_model='RadialGaussian',
                 loge_bounds=None):
    """Compute a halo SED for each candidate halo width.

    For every width in ``halo_width`` a power-law test source centered on
    ``src_name`` is added to the ROI, fit once with a fixed Index of 2.0,
    and an SED is extracted and written to disk.

    Parameters
    ----------
    gta : fermipy GTAnalysis instance (project type).
    modelname : str
        Prefix used for the base XML and all output files.
    src_name : str
        Name of the central source the halo is placed on.
    halo_width : iterable of float
        Spatial widths (deg) to scan over.
    halo_index : iterable
        Accepted for interface symmetry with fit_halo_scan but unused here.
    spatial_model : str
        Halo spatial model name (default 'RadialGaussian').
    loge_bounds : tuple or None
        Optional (logEmin, logEmax) restriction applied before fitting.
    """
    gta.logger.info(f'Starting Halo SED Fit {modelname}')

    halo_name = 'halo_' + spatial_model
    # Template spectral/spatial definition for the test halo source.
    # Index uses scale -1 so the stored parameter value is positive.
    halo_spec = {
        'SpectrumType': 'PowerLaw',
        'Index': {'value': 2.0, 'scale': -1.0, 'min': 1.0, 'max': 4.5},
        'Scale': 1000,
        'Prefactor': {'value': 1E-5, 'scale': 1e-13, 'min': 1E-5, 'max': 1E4},
        'SpatialModel': spatial_model,
        'SpatialWidth': 1.0,
    }
    # Center the halo on the primary source.
    halo_spec['ra'] = gta.roi[src_name]['ra']
    halo_spec['dec'] = gta.roi[src_name]['dec']

    gta.load_roi(modelname)
    if loge_bounds is not None:
        gta.set_energy_range(loge_bounds[0], loge_bounds[1])

    diffuse = [src.name for src in gta.roi.sources if src.diffuse]

    # Fix everything, then free normalizations of nearby point sources.
    gta.free_sources(False)
    gta.free_sources(distance=1.0, pars='norm', exclude=diffuse)
    gta.write_xml(modelname + '_base')

    for k, width in enumerate(halo_width):
        halo_spec['SpatialWidth'] = width
        # Restart from the common baseline model for each width.
        gta.load_xml(modelname + '_base')
        gta.add_source(halo_name, halo_spec, free=True)

        # Do one fit with index free
        gta.set_parameter(halo_name, 'Index', -2.0,
                          update_source=False)
        gta.fit()

        # SED w/ Index = 2.0
        gta.sed(halo_name, prefix=f'{modelname}_{k:02d}',
                fix_background=False, cov_scale=5.0)

        gta.write_roi(f'{modelname}_halo_gauss_sed_{k:02d}',
                      make_plots=False)

    gta.logger.info(f'Finished Halo SED Fit {modelname}')
def fit_halo_scan(gta, modelname, src_name, halo_width,
                  halo_index, spatial_model='RadialGaussian',
                  loge_bounds=None, optimizer='NEWTON'):
    """Scan a grid of halo width and spectral index hypotheses.

    For every width in ``halo_width`` a test halo source is added to the ROI
    centered on ``src_name``; it is fit once with a free index, SEDs are
    extracted with two covariance scales (cov_scale = 5 and 10), and then the
    fit is repeated for each fixed index in ``halo_index``.  Per-hypothesis
    source data are accumulated and written out as .npy and .fits tables.

    Parameters
    ----------
    gta : fermipy GTAnalysis instance (project type).
    modelname : str
        Prefix for the base XML and all output files.
    src_name : str
        Name of the central source the halo is placed on.
    halo_width : array-like of float
        Spatial widths (deg) scanned in the outer loop.
    halo_index : array-like of float
        Spectral indices scanned in the inner loop.
    spatial_model : str
        Halo spatial model (default 'RadialGaussian').
    loge_bounds : tuple or None
        Currently unused; the energy-range restriction is commented out below.
    optimizer : str
        Optimizer passed to the fit/update calls (default 'NEWTON').
    """
    gta.logger.info('Starting Halo Scan %s'%(modelname))

    halo_source_name = 'halo_' + spatial_model
    # Power-law test source template; Index is stored with scale -1 so the
    # parameter value itself is positive.
    halo_source_dict = {
        'SpectrumType' : 'PowerLaw',
        'Index' : { 'value' : 2.0, 'scale' : -1.0, 'min' : 0.5, 'max' : 4.5 },
        'Scale' : 1000,
        'Prefactor' : { 'value' : 1E-5, 'scale' : 1e-13,
                        'min' : 1E-5, 'max' : 1E4 },
        'SpatialModel' : spatial_model,
        'SpatialWidth' : 1.0
        }

    outprefix = '%s_%s'%(modelname,halo_source_name)

    # Center the halo on the primary source.
    halo_source_dict['ra'] = gta.roi[src_name]['ra']
    halo_source_dict['dec'] = gta.roi[src_name]['dec']

    #gta.load_roi(modelname)
    #if loge_bounds is not None:
    #    gta.set_energy_range(loge_bounds[0],loge_bounds[1])

    skydir = gta.roi[src_name].skydir
    diff_sources = [s.name for s in gta.roi.sources if s.diffuse]

    # Fix everything, then free normalizations of nearby non-diffuse sources.
    gta.free_sources(False)
    gta.free_sources(skydir=skydir,distance=1.0,pars='norm',
                     exclude=diff_sources)
    gta.write_xml(modelname + '_base')

    # Tables/lists accumulating one row per (width, index) hypothesis and
    # one row per width for the free-index fits.
    halo_tab = gta.roi.create_table([])
    halo_tab_idx_free = gta.roi.create_table([])
    halo_data = []
    halo_data_idx_free = []

    for i, w in enumerate(halo_width):

        gta.logger.info('Fitting Halo Width %.3f',w)

        halo_source_dict['SpatialWidth'] = w
        # Restart from the common baseline model for each width.
        gta.load_xml(modelname + '_base')
        gta.add_source(halo_source_name, halo_source_dict, free=True)

        # Free Index
        gta.free_norm(halo_source_name)
        gta.fit(optimizer=optimizer)

        # SEDs with two different covariance scales.
        gta.sed(halo_source_name, prefix='%s_cov05_%02i'%(modelname,i),
                outfile='%s_cov05_%02i_sed'%(outprefix,i),
                free_radius=1.0, cov_scale=5.0,
                optimizer={'optimizer' : 'MINUIT'},
                make_plots=False)

        gta.sed(halo_source_name, prefix='%s_cov10_%02i'%(modelname,i),
                outfile='%s_cov10_%02i_sed'%(outprefix,i),
                free_radius=1.0, cov_scale=10.0,
                optimizer={'optimizer' : 'MINUIT'},
                make_plots=False)

        # Fit once with the index free, then fix it again for the scan.
        gta.free_parameter(halo_source_name,'Index')
        gta.fit(optimizer=optimizer)
        gta.free_parameter(halo_source_name,'Index',False)
        gta.update_source(halo_source_name,reoptimize=True,
                          optimizer={'optimizer' : optimizer})

        halo_data_idx_free += [copy.deepcopy(gta.roi[halo_source_name].data)]
        gta.roi[halo_source_name].add_to_table(halo_tab_idx_free)
        gta.write_roi('%s_%02i'%(outprefix,i),make_plots=False)
        gta.print_params(loglevel=logging.DEBUG)

        # Scan over fixed index
        for j, idx in enumerate(halo_index):

            gta.logger.info('Fitting Halo Index %.3f',idx)

            # NOTE(review): model_idx is computed but never used below.
            model_idx = i*len(halo_index) + j
            # Reset the norm and pin the index (negative because of the
            # scale = -1 convention above) before refitting.
            gta.set_norm(halo_source_name, 0.1, update_source=False)
            gta.set_parameter(halo_source_name, 'Index', -1.0*idx,
                              update_source=False)

            gta.fit(update=False, optimizer=optimizer)

            gta.print_params(loglevel=logging.DEBUG)

            gta.update_source(halo_source_name,reoptimize=True,
                              optimizer={'optimizer' : optimizer})

            # Derive flux/eflux errors from the likelihood profile scans.
            ul_flux = get_parameter_limits(gta.roi[halo_source_name]['flux_scan'],
                                           gta.roi[halo_source_name]['loglike_scan'])
            ul_eflux = get_parameter_limits(gta.roi[halo_source_name]['eflux_scan'],
                                            gta.roi[halo_source_name]['loglike_scan'])

            gta.roi[halo_source_name]['flux_err'] = ul_flux['err']
            gta.roi[halo_source_name]['eflux_err'] = ul_eflux['err']

            gta.logger.info('%s Halo Width: %6.3f Index: %6.2f TS: %6.2f Flux: %8.4g',
                            modelname,w,idx,
                            gta.roi[halo_source_name]['ts'],
                            gta.roi[halo_source_name]['flux'])

            #gta.write_roi('%s_%02i_%02i'%(outprefix,i,j),make_plots=False)
            halo_data += [copy.deepcopy(gta.roi[halo_source_name].data)]
            gta.roi[halo_source_name].add_to_table(halo_tab)

        # Remove the test source before moving to the next width.
        gta.delete_source(halo_source_name,save_template=False)

    np.save(os.path.join(gta.workdir,'%s_data.npy'%outprefix),halo_data)
    np.save(os.path.join(gta.workdir,'%s_data_idx_free.npy'%outprefix),
            halo_data_idx_free)

    # Attach the scan grid: the table has one row per (width, index) pair in
    # row-major ('ij') order, matching the nested loops above.
    tab_halo_width, tab_halo_index = np.meshgrid(halo_width,halo_index,indexing='ij')
    halo_tab['halo_width'] = np.ravel(tab_halo_width)
    halo_tab['halo_index'] = np.ravel(tab_halo_index)
    halo_tab_idx_free['halo_width'] = halo_width

    # Stack the per-width SED FITS files into one file per covariance scale.
    stack_files(sorted(glob.glob(os.path.join(gta.workdir,'%s*cov05*fits'%outprefix))),
                os.path.join(gta.workdir,'%s_cov05_sed.fits'%outprefix),
                new_cols=[Column(name='halo_width',data=halo_width, unit='deg')])

    stack_files(sorted(glob.glob(os.path.join(gta.workdir,'%s*cov10*fits'%outprefix))),
                os.path.join(gta.workdir,'%s_cov10_sed.fits'%outprefix),
                new_cols=[Column(name='halo_width',data=halo_width, unit='deg')])

    halo_tab.write(os.path.join(gta.workdir,'%s_data.fits'%outprefix),
                   overwrite=True)
    halo_tab_idx_free.write(os.path.join(gta.workdir,'%s_data_idx_free.fits'%outprefix),
                            overwrite=True)

    gta.logger.info('Finished Halo Scan %s'%(modelname))
def fit_halo(gta, modelname, src_name,
             spatial_model='RadialGaussian',
             loge_bounds=None, optimizer='NEWTON'):
    """Fit a single halo component centered on ``src_name``.

    Adds a power-law test halo to the ROI, alternates extension and spectral
    fits to converge on a best-fit width and index, saves the resulting
    source data, and finally removes the test source from the model.

    Parameters
    ----------
    gta : fermipy GTAnalysis instance (project type).
    modelname : str
        Prefix used for the output ROI/.npy files.
    src_name : str
        Name of the central source the halo is placed on.
    spatial_model : str
        Halo spatial model (default 'RadialGaussian').
    loge_bounds : tuple or None
        Accepted but currently unused (the energy-range restriction was
        commented out in the original implementation).
    optimizer : str
        Optimizer passed to the extension/update calls (default 'NEWTON').
    """
    gta.logger.info(f'Starting Halo Fit {modelname}')

    halo_name = 'halo_' + spatial_model
    outprefix = f'{modelname}_{halo_name}'

    # Power-law test source; Index uses scale -1 so the stored parameter
    # value is positive.
    halo_spec = {
        'SpectrumType': 'PowerLaw',
        'Index': {'value': 2.0, 'scale': -1.0, 'min': 1.0, 'max': 4.5},
        'Scale': 1000,
        'Prefactor': {'value': 1E-5, 'scale': 1e-13,
                      'min': 1E-5, 'max': 1E4},
        'SpatialModel': spatial_model,
        'SpatialWidth': 1.0,
    }
    # Center the halo on the primary source.
    halo_spec['ra'] = gta.roi[src_name]['ra']
    halo_spec['dec'] = gta.roi[src_name]['dec']

    diffuse = [src.name for src in gta.roi.sources if src.diffuse]

    # Fix everything, then free normalizations of nearby non-diffuse sources.
    gta.free_sources(False)
    gta.free_sources(distance=1.0, pars='norm',
                     exclude=diffuse)

    # Find best-fit halo model
    halo_spec['SpatialWidth'] = 0.1
    gta.add_source(halo_name, halo_spec)
    gta.free_norm(halo_name)
    gta.extension(halo_name, update=True,
                  optimizer=dict(optimizer=optimizer),
                  free_radius=1.0)

    # Fit spectrum
    gta.free_parameter(halo_name, 'Index')
    gta.fit()

    # Re-fit extension
    gta.extension(halo_name, update=True,
                  optimizer=dict(optimizer=optimizer),
                  free_radius=1.0)

    # Re-fit Spectrum
    gta.fit()

    gta.update_source(halo_name, reoptimize=True,
                      optimizer=dict(optimizer=optimizer))
    gta.print_params()

    gta.write_roi(outprefix, make_plots=False)
    np.save(os.path.join(gta.workdir, f'{outprefix}_data.npy'),
            copy.deepcopy(gta.roi[halo_name].data))
    gta.delete_source(halo_name, save_template=False)
    gta.logger.info(f'Finished Halo Fit {modelname}')
| woodmd/haloanalysis | extpipe/fit_funcs.py | Python | bsd-3-clause | 16,120 | [
"Gaussian"
] | 2a6a32fd8b639e2e587cd456534526bf3f5b9fae3b8bb56e271ced20eb7ebc7e |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import copy
from monty.serialization import loadfn # , dumpfn
from pymatgen.command_line.critic2_caller import Critic2Output
from pymatgen.core.structure import Molecule, Structure, FunctionalGroups, Site
from pymatgen.analysis.graphs import *
from pymatgen.analysis.local_env import (
MinimumDistanceNN,
MinimumOKeeffeNN,
OpenBabelNN,
CutOffDictNN,
)
from pymatgen.util.testing import PymatgenTest
try:
import openbabel as ob
except ImportError:
ob = None
try:
import networkx as nx
import networkx.algorithms.isomorphism as iso
except ImportError:
nx = None
__author__ = "Matthew Horton, Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "August 2017"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
class StructureGraphTest(PymatgenTest):
    """Tests for StructureGraph: construction, edge editing, supercell
    multiplication, drawing, (de)serialization and connectivity analysis.

    Bug fix relative to the previous revision: ``test_draw`` listed
    ``"MOS2_single.pdf"`` in its cleanup tuple while the file written is
    ``"MoS2_single.pdf"``; on case-sensitive filesystems ``os.remove``
    raised FileNotFoundError and the generated file was left behind.
    """

    def setUp(self):
        """Build the reference graphs shared across the tests."""
        self.maxDiff = None

        # trivial example, simple square lattice for testing
        structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])
        self.square_sg = StructureGraph.with_empty_graph(
            structure, edge_weight_name="", edge_weight_units=""
        )
        self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
        self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
        self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
        self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
        # TODO: decorating still fails because the structure graph gives a CN of 8 for this square lattice
        # self.square_sg.decorate_structure_with_ce_info()

        # body-centered square lattice for testing
        structure = Structure(
            Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
        )
        self.bc_square_sg = StructureGraph.with_empty_graph(
            structure, edge_weight_name="", edge_weight_units=""
        )
        self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
        self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
        self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
        self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
        self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
        self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
        self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, -1, 0))
        self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))

        # body-centered square lattice for testing
        # directions reversed, should be equivalent to as bc_square
        structure = Structure(
            Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
        )
        self.bc_square_sg_r = StructureGraph.with_empty_graph(
            structure, edge_weight_name="", edge_weight_units=""
        )
        self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
        self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
        self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
        self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
        self.bc_square_sg_r.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
        self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, 0, 0), to_jimage=(0, 0, 0))
        self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, -1, 0), to_jimage=(0, 0, 0))
        self.bc_square_sg_r.add_edge(1, 0, from_jimage=(0, -1, 0), to_jimage=(0, 0, 0))

        # MoS2 example, structure graph obtained from critic2
        # (not ground state, from mp-1023924, single layer)
        stdout_file = os.path.join(
            os.path.dirname(__file__),
            "..",
            "..",
            "..",
            "test_files/critic2/MoS2_critic2_stdout.txt",
        )
        with open(stdout_file, "r") as f:
            reference_stdout = f.read()
        self.structure = Structure.from_file(
            os.path.join(
                os.path.dirname(__file__),
                "..",
                "..",
                "..",
                "test_files/critic2/MoS2.cif",
            )
        )
        c2o = Critic2Output(self.structure, reference_stdout)
        self.mos2_sg = c2o.structure_graph(
            edge_weight="bond_length", edge_weight_units="Å", include_critical_points=False
        )

        latt = Lattice.cubic(4.17)
        species = ["Ni", "O"]
        coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
        self.NiO = Structure.from_spacegroup(
            225, latt, species, coords
        ).get_primitive_structure()

        # BCC example.
        self.bcc = Structure(
            Lattice.cubic(5.0), ["He", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
        )

        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_properties(self):
        """Basic accessors: name, weights, coordination, connected sites."""
        self.assertEqual(self.mos2_sg.name, "bonds")
        self.assertEqual(self.mos2_sg.edge_weight_name, "bond_length")
        self.assertEqual(self.mos2_sg.edge_weight_unit, "Å")
        self.assertEqual(self.mos2_sg.get_coordination_of_site(0), 6)
        self.assertEqual(len(self.mos2_sg.get_connected_sites(0)), 6)
        self.assertTrue(
            isinstance(self.mos2_sg.get_connected_sites(0)[0].site, PeriodicSite)
        )
        self.assertEqual(str(self.mos2_sg.get_connected_sites(0)[0].site.specie), "S")
        self.assertAlmostEqual(
            self.mos2_sg.get_connected_sites(0, jimage=(0, 0, 100))[0].site.frac_coords[
                2
            ],
            100.303027,
        )

        # these two graphs should be equivalent
        for n in range(len(self.bc_square_sg)):
            self.assertEqual(
                self.bc_square_sg.get_coordination_of_site(n),
                self.bc_square_sg_r.get_coordination_of_site(n),
            )

        # test we're not getting duplicate connected sites
        # thanks to Jack D. Sundberg for reporting this bug

        # known example where this bug occurred due to edge weights not being
        # bit-for-bit identical in otherwise identical edges
        nacl_lattice = Lattice(
            [
                [3.48543625, 0.0, 2.01231756],
                [1.16181208, 3.28610081, 2.01231756],
                [0.0, 0.0, 4.02463512],
            ]
        )
        nacl = Structure(nacl_lattice, ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])

        nacl_graph = StructureGraph.with_local_env_strategy(
            nacl, CutOffDictNN({("Cl", "Cl"): 5.0})
        )

        self.assertEqual(len(nacl_graph.get_connected_sites(1)), 12)
        self.assertEqual(len(nacl_graph.graph.get_edge_data(1, 1)), 12)

    @unittest.skipIf(not nx, "NetworkX not present. Skipping...")
    def test_set_node_attributes(self):
        """Node attributes mirror the underlying Structure sites."""
        self.square_sg.set_node_attributes()

        specie = nx.get_node_attributes(self.square_sg.graph, "specie")
        coords = nx.get_node_attributes(self.square_sg.graph, "coords")
        properties = nx.get_node_attributes(self.square_sg.graph, "properties")

        for i in range(len(self.square_sg.structure)):
            self.assertEqual(str(specie[i]), str(self.square_sg.structure[i].specie))
            self.assertEqual(coords[i][0], self.square_sg.structure[i].coords[0])
            self.assertEqual(coords[i][1], self.square_sg.structure[i].coords[1])
            self.assertEqual(coords[i][2], self.square_sg.structure[i].coords[2])
            self.assertEqual(properties[i], self.square_sg.structure[i].properties)

    def test_edge_editing(self):
        """alter_edge updates weight/properties; break_edge removes an edge."""
        square = copy.deepcopy(self.square_sg)

        square.alter_edge(
            0,
            0,
            to_jimage=(1, 0, 0),
            new_weight=0.0,
            new_edge_properties={"foo": "bar"},
        )
        new_edge = square.graph.get_edge_data(0, 0)[0]
        self.assertEqual(new_edge["weight"], 0.0)
        self.assertEqual(new_edge["foo"], "bar")

        square.break_edge(0, 0, to_jimage=(1, 0, 0))

        self.assertEqual(len(square.graph.get_edge_data(0, 0)), 3)

    def test_insert_remove(self):
        """insert_node/remove_nodes stay in sync with Structure methods."""
        struct_copy = copy.deepcopy(self.square_sg.structure)
        square_copy = copy.deepcopy(self.square_sg)

        # Ensure that insert_node appropriately wraps Structure.insert()
        struct_copy.insert(1, "O", [0.5, 0.5, 0.5])
        square_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
        self.assertEqual(struct_copy, square_copy.structure)

        # Test that removal is also equivalent between Structure and StructureGraph.structure
        struct_copy.remove_sites([1])
        square_copy.remove_nodes([1])
        self.assertEqual(struct_copy, square_copy.structure)

        square_copy.insert_node(
            1,
            "O",
            [0.5, 0.5, 0.5],
            edges=[{"from_index": 1, "to_index": 0, "to_jimage": (0, 0, 0)}],
        )
        self.assertEqual(square_copy.get_coordination_of_site(1), 1)

        # Test that StructureGraph.graph is correctly updated
        square_copy.insert_node(1, "H", [0.5, 0.5, 0.75], edges=[{"from_index": 1,
                                                                  "to_index": 2,
                                                                  "to_jimage": (0, 0, 0)}])
        square_copy.remove_nodes([1])

        self.assertEqual(square_copy.graph.number_of_nodes(), 2)
        self.assertEqual(square_copy.graph.number_of_edges(), 5)

    def test_substitute(self):
        """substitute_group with a Molecule, a string, and a graph_dict."""
        structure = Structure.from_file(
            os.path.join(
                os.path.dirname(__file__), "..", "..", "..", "test_files", "Li2O.cif"
            )
        )
        molecule = FunctionalGroups["methyl"]

        structure_copy = copy.deepcopy(structure)
        structure_copy_graph = copy.deepcopy(structure)

        sg = StructureGraph.with_local_env_strategy(structure, MinimumDistanceNN())
        sg_copy = copy.deepcopy(sg)

        # Ensure that strings and molecules lead to equivalent substitutions
        sg.substitute_group(1, molecule, MinimumDistanceNN)
        sg_copy.substitute_group(1, "methyl", MinimumDistanceNN)
        self.assertEqual(sg, sg_copy)

        # Ensure that the underlying structure has been modified as expected
        structure_copy.substitute(1, "methyl")
        self.assertEqual(structure_copy, sg.structure)

        # Test inclusion of graph dictionary
        graph_dict = {
            (0, 1): {"weight": 0.5},
            (0, 2): {"weight": 0.5},
            (0, 3): {"weight": 0.5},
        }

        sg_with_graph = StructureGraph.with_local_env_strategy(
            structure_copy_graph, MinimumDistanceNN()
        )
        sg_with_graph.substitute_group(
            1, "methyl", MinimumDistanceNN, graph_dict=graph_dict
        )
        edge = sg_with_graph.graph.get_edge_data(11, 13)[0]
        self.assertEqual(edge["weight"], 0.5)

    def test_auto_image_detection(self):
        """add_edge without jimages should find all periodic images."""
        sg = StructureGraph.with_empty_graph(self.structure)
        sg.add_edge(0, 0)

        # NOTE(review): ref_edges is defined but never compared against the
        # graph — the test currently only checks the edge count.
        ref_edges = [
            (0, 0, {"to_jimage": (-1, -1, 0)}),
            (0, 0, {"to_jimage": (-1, 0, 0)}),
            (0, 0, {"to_jimage": (0, -1, 0)}),
            (0, 0, {"to_jimage": (0, 1, 0)}),
            (0, 0, {"to_jimage": (1, 0, 0)}),
        ]

        self.assertEqual(len(list(sg.graph.edges(data=True))), 6)

    def test_str(self):
        """String representation of StructureGraph.

        NOTE(review): assumes assertStrContentEqual compares content while
        ignoring whitespace layout — confirm against PymatgenTest.
        """
        square_sg_str_ref = """Structure Graph
Structure:
Full Formula (H1)
Reduced Formula: H2
abc : 5.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (1)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (1, 0, 0)
0 0 (-1, 0, 0)
0 0 (0, 1, 0)
0 0 (0, -1, 0)
"""

        mos2_sg_str_ref = """Structure Graph
Structure:
Full Formula (Mo1 S2)
Reduced Formula: MoS2
abc : 3.190316 3.190315 17.439502
angles: 90.000000 90.000000 120.000006
Sites (3)
# SP a b c
--- ---- -------- -------- --------
0 Mo 0.333333 0.666667 0.213295
1 S 0.666667 0.333333 0.303027
2 S 0.666667 0.333333 0.123562
Graph: bonds
from to to_image bond_length (A)
---- ---- ------------ ------------------
0 1 (-1, 0, 0) 2.417e+00
0 1 (0, 0, 0) 2.417e+00
0 1 (0, 1, 0) 2.417e+00
0 2 (0, 1, 0) 2.417e+00
0 2 (-1, 0, 0) 2.417e+00
0 2 (0, 0, 0) 2.417e+00
"""

        # don't care about testing Py 2.7 unicode support,
        # change Å to A
        self.mos2_sg.graph.graph["edge_weight_units"] = "A"
        self.assertStrContentEqual(str(self.square_sg), square_sg_str_ref)
        self.assertStrContentEqual(str(self.mos2_sg), mos2_sg_str_ref)

    def test_mul(self):
        """Supercell multiplication preserves bonding topology."""
        square_sg_mul = self.square_sg * (2, 1, 1)

        square_sg_mul_ref_str = """Structure Graph
Structure:
Full Formula (H2)
Reduced Formula: H2
abc : 10.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (2)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
1 H 0.5 0 -0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (0, 1, 0)
0 0 (0, -1, 0)
0 1 (0, 0, 0)
0 1 (-1, 0, 0)
1 1 (0, 1, 0)
1 1 (0, -1, 0)
"""
        square_sg_mul_actual_str = str(square_sg_mul)

        # only testing bonds portion,
        # the c frac_coord of the second H can vary from
        # 0 to -0 depending on machine precision
        square_sg_mul_ref_str = "\n".join(square_sg_mul_ref_str.splitlines()[11:])
        square_sg_mul_actual_str = "\n".join(square_sg_mul_actual_str.splitlines()[11:])
        self.assertStrContentEqual(square_sg_mul_actual_str, square_sg_mul_ref_str)

        # test sequential multiplication
        sq_sg_1 = self.square_sg * (2, 2, 1)
        sq_sg_1 = sq_sg_1 * (2, 2, 1)
        sq_sg_2 = self.square_sg * (4, 4, 1)
        self.assertEqual(
            sq_sg_1.graph.number_of_edges(), sq_sg_2.graph.number_of_edges()
        )
        # TODO: the below test still gives 8 != 4
        # self.assertEqual(self.square_sg.get_coordination_of_site(0), 4)

        mos2_sg_mul = self.mos2_sg * (3, 3, 1)
        for idx in mos2_sg_mul.structure.indices_from_symbol("Mo"):
            self.assertEqual(mos2_sg_mul.get_coordination_of_site(idx), 6)

        mos2_sg_premul = StructureGraph.with_local_env_strategy(
            self.structure * (3, 3, 1), MinimumDistanceNN()
        )
        self.assertTrue(mos2_sg_mul == mos2_sg_premul)

        # test 3D Structure

        nio_sg = StructureGraph.with_local_env_strategy(self.NiO, MinimumDistanceNN())
        nio_sg = nio_sg * 3

        for n in range(len(nio_sg)):
            self.assertEqual(nio_sg.get_coordination_of_site(n), 6)

    @unittest.skipIf(
        not (which("neato") and which("fdp")), "graphviz executables not present"
    )
    def test_draw(self):
        """Smoke-test draw_graph_to_file and clean up the generated PDFs."""

        # draw MoS2 graph
        self.mos2_sg.draw_graph_to_file(
            "MoS2_single.pdf", image_labels=True, hide_image_edges=False
        )
        mos2_sg = self.mos2_sg * (9, 9, 1)
        mos2_sg.draw_graph_to_file("MoS2.pdf", algo="neato")

        # draw MoS2 graph that's been successively multiplied
        mos2_sg_2 = self.mos2_sg * (3, 3, 1)
        mos2_sg_2 = mos2_sg_2 * (3, 3, 1)
        mos2_sg_2.draw_graph_to_file(
            "MoS2_twice_mul.pdf", algo="neato", hide_image_edges=True
        )

        # draw MoS2 graph that's generated from a pre-multiplied Structure
        mos2_sg_premul = StructureGraph.with_local_env_strategy(
            self.structure * (3, 3, 1), MinimumDistanceNN()
        )
        mos2_sg_premul.draw_graph_to_file(
            "MoS2_premul.pdf", algo="neato", hide_image_edges=True
        )

        # draw graph for a square lattice
        self.square_sg.draw_graph_to_file("square_single.pdf", hide_image_edges=False)
        square_sg = self.square_sg * (5, 5, 1)
        square_sg.draw_graph_to_file(
            "square.pdf", algo="neato", image_labels=True, node_labels=False
        )

        # draw graph for a body-centered square lattice
        self.bc_square_sg.draw_graph_to_file(
            "bc_square_single.pdf", hide_image_edges=False
        )
        bc_square_sg = self.bc_square_sg * (9, 9, 1)
        bc_square_sg.draw_graph_to_file(
            "bc_square.pdf", algo="neato", image_labels=False
        )

        # draw graph for a body-centered square lattice defined in an alternative way
        self.bc_square_sg_r.draw_graph_to_file(
            "bc_square_r_single.pdf", hide_image_edges=False
        )
        bc_square_sg_r = self.bc_square_sg_r * (9, 9, 1)
        bc_square_sg_r.draw_graph_to_file(
            "bc_square_r.pdf", algo="neato", image_labels=False
        )

        # delete generated test files
        test_files = (
            "bc_square_r_single.pdf",
            "bc_square_r.pdf",
            "bc_square_single.pdf",
            "bc_square.pdf",
            "MoS2_premul.pdf",
            # BUG FIX: was "MOS2_single.pdf", which does not match the
            # "MoS2_single.pdf" written above and would raise
            # FileNotFoundError on case-sensitive filesystems.
            "MoS2_single.pdf",
            "MoS2_twice_mul.pdf",
            "MoS2.pdf",
            "square_single.pdf",
            "square.pdf",
        )
        for test_file in test_files:
            os.remove(test_file)

    def test_to_from_dict(self):
        """as_dict/from_dict round-trip is lossless."""
        d = self.mos2_sg.as_dict()
        sg = StructureGraph.from_dict(d)
        d2 = sg.as_dict()
        self.assertDictEqual(d, d2)

    def test_from_local_env_and_equality_and_diff(self):
        """Graphs built from different strategies compare equal, diff is 0."""
        nn = MinimumDistanceNN()
        sg = StructureGraph.with_local_env_strategy(self.structure, nn)

        self.assertEqual(sg.graph.number_of_edges(), 6)

        nn2 = MinimumOKeeffeNN()
        sg2 = StructureGraph.with_local_env_strategy(self.structure, nn2)

        self.assertTrue(sg == sg2)
        self.assertTrue(sg == self.mos2_sg)

        # TODO: find better test case where graphs are different
        diff = sg.diff(sg2)
        self.assertEqual(diff["dist"], 0)

        self.assertEqual(self.square_sg.get_coordination_of_site(0), 4)

    def test_from_edges(self):
        """with_edges reconstructs the reference square-lattice graph."""
        edges = {
            (0, 0, (0, 0, 0), (1, 0, 0)): None,
            (0, 0, (0, 0, 0), (-1, 0, 0)): None,
            (0, 0, (0, 0, 0), (0, 1, 0)): None,
            (0, 0, (0, 0, 0), (0, -1, 0)): None,
        }

        structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])

        sg = StructureGraph.with_edges(structure, edges)

        self.assertEqual(sg, self.square_sg)

    def test_extract_molecules(self):
        """get_subgraphs_as_molecules finds isolated molecular units."""
        structure_file = os.path.join(
            os.path.dirname(__file__),
            "..",
            "..",
            "..",
            "test_files/H6PbCI3N_mp-977013_symmetrized.cif",
        )

        s = Structure.from_file(structure_file)

        nn = MinimumDistanceNN()
        sg = StructureGraph.with_local_env_strategy(s, nn)

        molecules = sg.get_subgraphs_as_molecules()
        self.assertEqual(molecules[0].composition.formula, "H3 C1")
        self.assertEqual(len(molecules), 1)

        molecules = self.mos2_sg.get_subgraphs_as_molecules()
        self.assertEqual(len(molecules), 0)

    def test_types_and_weights_of_connections(self):
        types = self.mos2_sg.types_and_weights_of_connections

        self.assertEqual(len(types["Mo-S"]), 6)
        self.assertAlmostEqual(types["Mo-S"][0], 2.416931678417331)

    def test_weight_statistics(self):
        weight_statistics = self.mos2_sg.weight_statistics

        self.assertEqual(len(weight_statistics["all_weights"]), 6)
        self.assertAlmostEqual(weight_statistics["min"], 2.4169314100201875)
        self.assertAlmostEqual(weight_statistics["variance"], 0)

    def test_types_of_coordination_environments(self):
        types = self.mos2_sg.types_of_coordination_environments()
        self.assertListEqual(types, ["Mo-S(6)", "S-Mo(3)"])

        types_anonymous = self.mos2_sg.types_of_coordination_environments(
            anonymous=True
        )
        self.assertListEqual(types_anonymous, ["A-B(3)", "A-B(6)"])
class MoleculeGraphTest(unittest.TestCase):
def setUp(self):
cyclohexene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/cyclohexene.xyz",
)
)
self.cyclohexene = MoleculeGraph.with_empty_graph(
cyclohexene, edge_weight_name="strength", edge_weight_units=""
)
self.cyclohexene.add_edge(0, 1, weight=1.0)
self.cyclohexene.add_edge(1, 2, weight=1.0)
self.cyclohexene.add_edge(2, 3, weight=2.0)
self.cyclohexene.add_edge(3, 4, weight=1.0)
self.cyclohexene.add_edge(4, 5, weight=1.0)
self.cyclohexene.add_edge(5, 0, weight=1.0)
self.cyclohexene.add_edge(0, 6, weight=1.0)
self.cyclohexene.add_edge(0, 7, weight=1.0)
self.cyclohexene.add_edge(1, 8, weight=1.0)
self.cyclohexene.add_edge(1, 9, weight=1.0)
self.cyclohexene.add_edge(2, 10, weight=1.0)
self.cyclohexene.add_edge(3, 11, weight=1.0)
self.cyclohexene.add_edge(4, 12, weight=1.0)
self.cyclohexene.add_edge(4, 13, weight=1.0)
self.cyclohexene.add_edge(5, 14, weight=1.0)
self.cyclohexene.add_edge(5, 15, weight=1.0)
butadiene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/butadiene.xyz",
)
)
self.butadiene = MoleculeGraph.with_empty_graph(
butadiene, edge_weight_name="strength", edge_weight_units=""
)
self.butadiene.add_edge(0, 1, weight=2.0)
self.butadiene.add_edge(1, 2, weight=1.0)
self.butadiene.add_edge(2, 3, weight=2.0)
self.butadiene.add_edge(0, 4, weight=1.0)
self.butadiene.add_edge(0, 5, weight=1.0)
self.butadiene.add_edge(1, 6, weight=1.0)
self.butadiene.add_edge(2, 7, weight=1.0)
self.butadiene.add_edge(3, 8, weight=1.0)
self.butadiene.add_edge(3, 9, weight=1.0)
ethylene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/ethylene.xyz",
)
)
self.ethylene = MoleculeGraph.with_empty_graph(
ethylene, edge_weight_name="strength", edge_weight_units=""
)
self.ethylene.add_edge(0, 1, weight=2.0)
self.ethylene.add_edge(0, 2, weight=1.0)
self.ethylene.add_edge(0, 3, weight=1.0)
self.ethylene.add_edge(1, 4, weight=1.0)
self.ethylene.add_edge(1, 5, weight=1.0)
self.pc = Molecule.from_file(
os.path.join(module_dir, "..", "..", "..", "test_files", "graphs", "PC.xyz")
)
self.pc_edges = [
[5, 10],
[5, 12],
[5, 11],
[5, 3],
[3, 7],
[3, 4],
[3, 0],
[4, 8],
[4, 9],
[4, 1],
[6, 1],
[6, 0],
[6, 2],
]
self.pc_frag1 = Molecule.from_file(
os.path.join(
module_dir, "..", "..", "..", "test_files", "graphs", "PC_frag1.xyz"
)
)
self.pc_frag1_edges = [[0, 2], [4, 2], [2, 1], [1, 3]]
self.tfsi = Molecule.from_file(
os.path.join(
module_dir, "..", "..", "..", "test_files", "graphs", "TFSI.xyz"
)
)
self.tfsi_edges = (
[14, 1],
[1, 4],
[1, 5],
[1, 7],
[7, 11],
[7, 12],
[7, 13],
[14, 0],
[0, 2],
[0, 3],
[0, 6],
[6, 8],
[6, 9],
[6, 10],
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
del self.ethylene
del self.butadiene
del self.cyclohexene
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_construction(self):
edges_frag = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_frag1_edges}
mol_graph = MoleculeGraph.with_edges(self.pc_frag1, edges_frag)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_frag1_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_frag1_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph.nodes:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
edges_pc = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_edges}
mol_graph = MoleculeGraph.with_edges(self.pc, edges_pc)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
mol_graph_edges = MoleculeGraph.with_edges(self.pc, edges=edges_pc)
mol_graph_strat = MoleculeGraph.with_local_env_strategy(
self.pc, OpenBabelNN(), reorder=False, extend_structure=False
)
self.assertTrue(mol_graph_edges.isomorphic_to(mol_graph_strat))
def test_properties(self):
self.assertEqual(self.cyclohexene.name, "bonds")
self.assertEqual(self.cyclohexene.edge_weight_name, "strength")
self.assertEqual(self.cyclohexene.edge_weight_unit, "")
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
self.assertEqual(self.cyclohexene.get_coordination_of_site(2), 3)
self.assertEqual(self.cyclohexene.get_coordination_of_site(15), 1)
self.assertEqual(len(self.cyclohexene.get_connected_sites(0)), 4)
self.assertTrue(
isinstance(self.cyclohexene.get_connected_sites(0)[0].site, Site)
)
self.assertEqual(
str(self.cyclohexene.get_connected_sites(0)[0].site.specie), "H"
)
@unittest.skipIf(not nx, "NetworkX not present. Skipping...")
def test_set_node_attributes(self):
self.ethylene.set_node_attributes()
specie = nx.get_node_attributes(self.ethylene.graph, "specie")
coords = nx.get_node_attributes(self.ethylene.graph, "coords")
properties = nx.get_node_attributes(self.ethylene.graph, "properties")
for i in range(len(self.ethylene.molecule)):
self.assertEqual(str(specie[i]), str(self.ethylene.molecule[i].specie))
self.assertEqual(coords[i][0], self.ethylene.molecule[i].coords[0])
self.assertEqual(coords[i][1], self.ethylene.molecule[i].coords[1])
self.assertEqual(coords[i][2], self.ethylene.molecule[i].coords[2])
self.assertEqual(properties[i], self.ethylene.molecule[i].properties)
def test_coordination(self):
molecule = Molecule(["C", "C"], [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
mg = MoleculeGraph.with_empty_graph(molecule)
self.assertEqual(mg.get_coordination_of_site(0), 0)
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
def test_edge_editing(self):
self.cyclohexene.alter_edge(
0, 1, new_weight=0.0, new_edge_properties={"foo": "bar"}
)
new_edge = self.cyclohexene.graph.get_edge_data(0, 1)[0]
self.assertEqual(new_edge["weight"], 0.0)
self.assertEqual(new_edge["foo"], "bar")
self.cyclohexene.break_edge(0, 1)
self.assertTrue(self.cyclohexene.graph.get_edge_data(0, 1) is None)
# Replace the now-broken edge
self.cyclohexene.add_edge(0, 1, weight=1.0)
def test_insert_remove(self):
mol_copy = copy.deepcopy(self.ethylene.molecule)
eth_copy = copy.deepcopy(self.ethylene)
# Ensure that insert_node appropriately wraps Molecule.insert()
mol_copy.insert(1, "O", [0.5, 0.5, 0.5])
eth_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol_copy, eth_copy.molecule)
# Test that removal is also equivalent between Molecule and MoleculeGraph.molecule
mol_copy.remove_sites([1])
eth_copy.remove_nodes([1])
self.assertEqual(mol_copy, eth_copy.molecule)
eth_copy.insert_node(
1,
"O",
[0.5, 0.5, 0.5],
edges=[{"from_index": 1, "to_index": 2}, {"from_index": 1, "to_index": 3}],
)
self.assertEqual(eth_copy.get_coordination_of_site(1), 2)
# Test that MoleculeGraph.graph is correctly updated
eth_copy.remove_nodes([1, 2])
self.assertEqual(eth_copy.graph.number_of_nodes(), 5)
self.assertEqual(eth_copy.graph.number_of_edges(), 2)
    @unittest.skipIf(not nx, "NetworkX not present. Skipping...")
    def test_split(self):
        """split_molecule_subgraphs yields the expected fragments, performs
        naive charge redistribution, and keeps node->atom mappings intact."""
        bonds = [(0, 1), (4, 5)]
        alterations = {
            (2, 3): {"weight": 1.0},
            (0, 5): {"weight": 2.0},
            (1, 2): {"weight": 2.0},
            (3, 4): {"weight": 2.0},
        }
        # Perform retro-Diels-Alder reaction - turn product into reactants
        reactants = self.cyclohexene.split_molecule_subgraphs(
            bonds, allow_reverse=True, alterations=alterations
        )
        self.assertTrue(isinstance(reactants, list))
        reactants = sorted(reactants, key=len)
        # After alterations, reactants should be ethylene and butadiene
        self.assertEqual(reactants[0], self.ethylene)
        self.assertEqual(reactants[1], self.butadiene)
        # Breaking a single ring bond does not disconnect the molecule,
        # so splitting on it must raise.
        with self.assertRaises(MolGraphSplitError):
            self.cyclohexene.split_molecule_subgraphs([(0, 1)])
        # Test naive charge redistribution:
        # the heavier fragment (O) keeps the molecular charge.
        hydroxide = Molecule(["O", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]], charge=-1)
        oh_mg = MoleculeGraph.with_empty_graph(hydroxide)
        oh_mg.add_edge(0, 1)
        new_mgs = oh_mg.split_molecule_subgraphs([(0, 1)])
        for mg in new_mgs:
            if str(mg.molecule[0].specie) == "O":
                self.assertEqual(mg.molecule.charge, -1)
            else:
                self.assertEqual(mg.molecule.charge, 0)
        # Trying to test to ensure that remapping of nodes to atoms works
        diff_species = Molecule(
            ["C", "I", "Cl", "Br", "F"],
            [
                [0.8314, -0.2682, -0.9102],
                [1.3076, 1.3425, -2.2038],
                [-0.8429, -0.7410, -1.1554],
                [1.9841, -1.7636, -1.2953],
                [1.0098, 0.1231, 0.3916],
            ],
        )
        diff_spec_mg = MoleculeGraph.with_empty_graph(diff_species)
        diff_spec_mg.add_edge(0, 1)
        diff_spec_mg.add_edge(0, 2)
        diff_spec_mg.add_edge(0, 3)
        diff_spec_mg.add_edge(0, 4)
        for i in range(1, 5):
            bond = (0, i)
            split_mgs = diff_spec_mg.split_molecule_subgraphs([bond])
            for split_mg in split_mgs:
                # The "specie" node attribute must agree with the Molecule's atoms.
                species = nx.get_node_attributes(split_mg.graph, "specie")
                for j in range(len(split_mg.graph.nodes)):
                    atom = split_mg.molecule[j]
                    self.assertEqual(species[j], str(atom.specie))
    @unittest.skipIf(not nx, "NetworkX not present. Skipping...")
    def test_build_unique_fragments(self):
        """build_unique_fragments returns non-isomorphic, connected fragments
        whose graphs map consistently onto their Molecules."""
        edges = {(e[0], e[1]): None for e in self.pc_edges}
        mol_graph = MoleculeGraph.with_edges(self.pc, edges)
        unique_fragment_dict = mol_graph.build_unique_fragments()
        # Flatten the {size: [fragments]} dict into a single list.
        unique_fragments = []
        for key in unique_fragment_dict:
            for fragment in unique_fragment_dict[key]:
                unique_fragments.append(fragment)
        self.assertEqual(len(unique_fragments), 295)
        nm = iso.categorical_node_match("specie", "ERROR")
        for ii in range(295):
            # Test that each fragment is unique
            for jj in range(ii + 1, 295):
                self.assertFalse(
                    nx.is_isomorphic(
                        unique_fragments[ii].graph,
                        unique_fragments[jj].graph,
                        node_match=nm,
                    )
                )
            # Test that each fragment correctly maps between Molecule and graph
            self.assertEqual(
                len(unique_fragments[ii].molecule),
                len(unique_fragments[ii].graph.nodes),
            )
            species = nx.get_node_attributes(unique_fragments[ii].graph, "specie")
            coords = nx.get_node_attributes(unique_fragments[ii].graph, "coords")
            mol = unique_fragments[ii].molecule
            for ss, site in enumerate(mol):
                self.assertEqual(str(species[ss]), str(site.specie))
                self.assertEqual(coords[ss][0], site.coords[0])
                self.assertEqual(coords[ss][1], site.coords[1])
                self.assertEqual(coords[ss][2], site.coords[2])
            # Test that each fragment is connected
            self.assertTrue(nx.is_connected(unique_fragments[ii].graph.to_undirected()))
def test_find_rings(self):
rings = self.cyclohexene.find_rings(including=[0])
self.assertEqual(
sorted(rings[0]), [(0, 5), (1, 0), (2, 1), (3, 2), (4, 3), (5, 4)]
)
no_rings = self.butadiene.find_rings()
self.assertEqual(no_rings, [])
def test_isomorphic(self):
ethylene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/ethylene.xyz",
)
)
# switch carbons
ethylene[0], ethylene[1] = ethylene[1], ethylene[0]
eth_copy = MoleculeGraph.with_edges(
ethylene,
{
(0, 1): {"weight": 2},
(1, 2): {"weight": 1},
(1, 3): {"weight": 1},
(0, 4): {"weight": 1},
(0, 5): {"weight": 1},
},
)
# If they are equal, they must also be isomorphic
eth_copy = copy.deepcopy(self.ethylene)
self.assertTrue(self.ethylene.isomorphic_to(eth_copy))
self.assertFalse(self.butadiene.isomorphic_to(self.ethylene))
    def test_substitute(self):
        """substitute_group accepts a Molecule, a named functional group, or a
        MoleculeGraph, and all three produce equivalent results."""
        molecule = FunctionalGroups["methyl"]
        molgraph = MoleculeGraph.with_edges(
            molecule,
            {(0, 1): {"weight": 1}, (0, 2): {"weight": 1}, (0, 3): {"weight": 1}},
        )
        eth_mol = copy.deepcopy(self.ethylene)
        eth_str = copy.deepcopy(self.ethylene)
        # Ensure that strings and molecules lead to equivalent substitutions
        eth_mol.substitute_group(5, molecule, MinimumDistanceNN)
        eth_str.substitute_group(5, "methyl", MinimumDistanceNN)
        self.assertEqual(eth_mol, eth_str)
        graph_dict = {
            (0, 1): {"weight": 1.0},
            (0, 2): {"weight": 1.0},
            (0, 3): {"weight": 1.0},
        }
        eth_mg = copy.deepcopy(self.ethylene)
        eth_graph = copy.deepcopy(self.ethylene)
        # Check that MoleculeGraph input is handled properly,
        # and that an explicit graph_dict overrides bond perception.
        eth_graph.substitute_group(
            5, molecule, MinimumDistanceNN, graph_dict=graph_dict
        )
        eth_mg.substitute_group(5, molgraph, MinimumDistanceNN)
        self.assertEqual(eth_graph.graph.get_edge_data(5, 6)[0]["weight"], 1.0)
        self.assertEqual(eth_mg, eth_graph)
    def test_replace(self):
        """replace_group behaves like substitute_group on a terminal atom and
        can swap one functional group for another."""
        eth_copy_sub = copy.deepcopy(self.ethylene)
        eth_copy_repl = copy.deepcopy(self.ethylene)
        # First, perform a substitution as above
        eth_copy_sub.substitute_group(5, "methyl", MinimumDistanceNN)
        eth_copy_repl.replace_group(5, "methyl", MinimumDistanceNN)
        # Test that replacement on a terminal atom is equivalent to substitution
        self.assertEqual(eth_copy_repl.molecule, eth_copy_sub.molecule)
        self.assertEqual(eth_copy_repl, eth_copy_sub)
        # Methyl carbon should have coordination 4
        self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 4)
        # Now swap one functional group for another
        eth_copy_repl.replace_group(5, "amine", MinimumDistanceNN)
        self.assertEqual(
            ["C", "C", "H", "H", "H", "N", "H", "H"],
            [str(s) for s in eth_copy_repl.molecule.species],
        )
        self.assertEqual(len(eth_copy_repl.graph.nodes), 8)
        # Amine nitrogen should have coordination 3
        self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 3)
def test_as_from_dict(self):
d = self.cyclohexene.as_dict()
mg = MoleculeGraph.from_dict(d)
d2 = mg.as_dict()
self.assertEqual(str(d), str(d2))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| fraricci/pymatgen | pymatgen/analysis/tests/test_graphs.py | Python | mit | 38,226 | [
"pymatgen"
] | e254be444077bca9ee0cae2d2d7a75af0497f84a87a6b78c165d05b4bb7cef13 |
#### PATTERN | EN | INFLECT ########################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
####################################################################################################
# Regular expressions-based rules for English word inflection:
# - pluralization and singularization of nouns and adjectives,
# - conjugation of verbs,
# - comparative and superlative of adjectives.
# Accuracy (measured on CELEX English morphology word forms):
# 95% for pluralize()
# 96% for singularize()
# 95% for Verbs.find_lemma() (for regular verbs)
# 96% for Verbs.find_lexeme() (for regular verbs)
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
import os
import sys
import re
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
from pattern.text import Verbs as _Verbs
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
sys.path.pop(0)
# Penn Treebank-style part-of-speech tags used throughout this module.
VERB, NOUN, ADJECTIVE, ADVERB = "VB", "NN", "JJ", "RB"
VOWELS = "aeiouy"
# Matches any vowel, case-insensitively; used to test for vowel presence.
re_vowel = re.compile(r"a|e|i|o|u|y", re.I)
is_vowel = lambda ch: ch in VOWELS
#### ARTICLE #######################################################################################
# Based on the Ruby Linguistics module by Michael Granger:
# http://www.deveiate.org/projects/Linguistics/wiki/English
# Ordered (compiled-regex, article) rules; indefinite_article() returns the
# article of the first pattern that matches, so more specific rules come
# first and the final empty pattern acts as a catch-all default ("a").
RE_ARTICLE = list(map(lambda x: (re.compile(x[0]), x[1]), (
    (r"euler|hour(?!i)|heir|honest|hono", "an"), # exceptions: an hour, an honor
    # Abbreviations:
    # strings of capitals starting with a vowel-sound consonant followed by another consonant,
    # which are not likely to be real words.
    (r"(?!FJO|[HLMNS]Y.|RY[EO]|SQU|(F[LR]?|[HL]|MN?|N|RH?|S[CHKLMNPTVW]?|X(YL)?)[AEIOU])[FHLMNRSX][A-Z]", "an"),
    (r"^[aefhilmnorsx][.-]", "an"), # hyphenated: an f-16, an e-mail
    (r"^[a-z][.-]", "a"),           # hyphenated: a b-52
    (r"^[^aeiouy]", "a"),           # consonants: a bear
    (r"^e[uw]", "a"),               # -eu like "you": a european
    (r"^onc?e", "a"),               # -o like "wa" : a one-liner
    (r"uni([^nmd]|mo)", "a"),       # -u like "you": a university
    (r"^u[bcfhjkqrst][aeiou]", "a"), # -u like "you": a uterus
    (r"^[aeiou]", "an"),            # vowels: an owl
    (r"y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt)", "an"), # y like "i": an yclept, a year
    (r"", "a")                      # guess "a"
)))
def definite_article(word):
    """ Returns the definite article for a given word; English has only "the".
    """
    return "the"
def indefinite_article(word):
    """ Returns the indefinite article ("a" or "an") for a given word.
        For example: indefinite_article("university") => "a" university.
    """
    # Only the first token matters ("hour hand" => "an").
    head = word.split(" ")[0]
    for pattern, choice in RE_ARTICLE:
        if pattern.search(head) is not None:
            return choice
DEFINITE, INDEFINITE = \
    "definite", "indefinite"

def article(word, function=INDEFINITE):
    """ Returns the indefinite (a or an) or definite (the) article for the given word.
    """
    if function == DEFINITE:
        return definite_article(word)
    return indefinite_article(word)

_article = article
def referenced(word, article=INDEFINITE):
    """ Returns a string with the appropriate article followed by the word.
    """
    determiner = _article(word, article)
    return determiner + " " + word
#print referenced("hour")
#print referenced("FBI")
#print referenced("bear")
#print referenced("one-liner")
#print referenced("european")
#print referenced("university")
#print referenced("uterus")
#print referenced("owl")
#print referenced("yclept")
#print referenced("year")
#### PLURALIZE #####################################################################################
# Based on "An Algorithmic Approach to English Pluralization" by Damian Conway:
# http://www.csse.monash.edu.au/~damian/papers/HTML/Plurals.html
# Prepositions are used in forms like "mother-in-law" and "man at arms".
# Prepositions that mark a compound noun whose head (first) word inflects,
# e.g., "mother-in-law" => "mothers-in-law", "man at arms" => "men at arms".
plural_prepositions = set((
    "about", "before", "during", "of", "till",
    "above", "behind", "except", "off", "to",
    "across", "below", "for", "on", "under",
    "after", "beneath", "from", "onto", "until",
    "among", "beside", "in", "out", "unto",
    "around", "besides", "into", "over", "upon",
    "at", "between", "near", "since", "with",
    "athwart", "betwixt",
    "beyond",
    "but",
    "by"))
# Inflection rules that are either:
# - general,
# - apply to a certain category of words,
# - apply to a certain category of words only in classical mode,
# - apply only in classical mode.
# Each rule is a (suffix, inflection, category, classic)-tuple.
# Rule groups are tried in order; within a group, the first matching suffix
# wins. Each rule is a (suffix, inflection, category, classic)-tuple, where
# "category" restricts the rule to the word lists in plural_categories below
# and "classic" restricts it to classical mode.
plural_rules = [
    # 0) Indefinite articles and demonstratives.
    ((r"^a$|^an$", "some", None, False),
     (r"^this$", "these", None, False),
     (r"^that$", "those", None, False),
     (r"^any$", "all", None, False)
    ), # 1) Possessive adjectives.
    ((r"^my$", "our", None, False),
     (r"^your$", "your", None, False),
     (r"^thy$", "your", None, False),
     (r"^her$|^his$", "their", None, False),
     (r"^its$", "their", None, False),
     (r"^their$", "their", None, False)
    ), # 2) Possessive pronouns.
    ((r"^mine$", "ours", None, False),
     (r"^yours$", "yours", None, False),
     (r"^thine$", "yours", None, False),
     (r"^her$|^his$", "theirs", None, False),
     (r"^its$", "theirs", None, False),
     (r"^their$", "theirs", None, False)
    ), # 3) Personal pronouns.
    ((r"^I$", "we", None, False),
     (r"^me$", "us", None, False),
     (r"^myself$", "ourselves", None, False),
     (r"^you$", "you", None, False),
     (r"^thou$|^thee$", "ye", None, False),
     (r"^yourself$", "yourself", None, False),
     (r"^thyself$", "yourself", None, False),
     (r"^she$|^he$", "they", None, False),
     (r"^it$|^they$", "they", None, False),
     (r"^her$|^him$", "them", None, False),
     (r"^it$|^them$", "them", None, False),
     (r"^herself$", "themselves", None, False),
     (r"^himself$", "themselves", None, False),
     (r"^itself$", "themselves", None, False),
     (r"^themself$", "themselves", None, False),
     (r"^oneself$", "oneselves", None, False)
    ), # 4) Words that do not inflect.
    ((r"$", "", "uninflected", False),
     (r"$", "", "uncountable", False),
     (r"s$", "s", "s-singular", False),
     (r"fish$", "fish", None, False),
     (r"([- ])bass$", "\\1bass", None, False),
     (r"ois$", "ois", None, False),
     (r"sheep$", "sheep", None, False),
     (r"deer$", "deer", None, False),
     (r"pox$", "pox", None, False),
     (r"([A-Z].*)ese$", "\\1ese", None, False),
     (r"itis$", "itis", None, False),
     (r"(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$", "\\1ose", None, False)
    ), # 5) Irregular plural forms (e.g., mongoose, oxen).
    ((r"atlas$", "atlantes", None, True ),
     (r"atlas$", "atlases", None, False),
     (r"beef$", "beeves", None, True ),
     (r"brother$", "brethren", None, True ),
     (r"child$", "children", None, False),
     (r"corpus$", "corpora", None, True ),
     (r"corpus$", "corpuses", None, False),
     (r"^cow$", "kine", None, True ),
     (r"ephemeris$", "ephemerides", None, False),
     (r"ganglion$", "ganglia", None, True ),
     (r"genie$", "genii", None, True ),
     (r"genus$", "genera", None, False),
     (r"graffito$", "graffiti", None, False),
     (r"loaf$", "loaves", None, False),
     (r"money$", "monies", None, True ),
     (r"mongoose$", "mongooses", None, False),
     (r"mythos$", "mythoi", None, False),
     (r"octopus$", "octopodes", None, True ),
     (r"opus$", "opera", None, True ),
     (r"opus$", "opuses", None, False),
     (r"^ox$", "oxen", None, False),
     (r"penis$", "penes", None, True ),
     (r"penis$", "penises", None, False),
     (r"soliloquy$", "soliloquies", None, False),
     (r"testis$", "testes", None, False),
     (r"trilby$", "trilbys", None, False),
     (r"turf$", "turves", None, True ),
     (r"numen$", "numena", None, False),
     (r"occiput$", "occipita", None, True )
    ), # 6) Irregular inflections for common suffixes (e.g., synopses, mice, men).
    ((r"man$", "men", None, False),
     (r"person$", "people", None, False),
     (r"([lm])ouse$", "\\1ice", None, False),
     (r"tooth$", "teeth", None, False),
     (r"goose$", "geese", None, False),
     (r"foot$", "feet", None, False),
     (r"zoon$", "zoa", None, False),
     (r"([csx])is$", "\\1es", None, False)
    ), # 7) Fully assimilated classical inflections
       # (e.g., vertebrae, codices).
    ((r"ex$", "ices", "ex-ices", False),
     (r"ex$", "ices", "ex-ices*", True ), # * = classical mode
     (r"um$", "a", "um-a", False),
     (r"um$", "a", "um-a*", True ),
     (r"on$", "a", "on-a", False),
     (r"a$", "ae", "a-ae", False),
     (r"a$", "ae", "a-ae*", True )
    ), # 8) Classical variants of modern inflections
       # (e.g., stigmata, soprani).
    ((r"trix$", "trices", None, True),
     (r"eau$", "eaux", None, True),
     (r"ieu$", "ieu", None, True),
     (r"([iay])nx$", "\\1nges", None, True),
     (r"en$", "ina", "en-ina*", True),
     (r"a$", "ata", "a-ata*", True),
     (r"is$", "ides", "is-ides*", True),
     (r"us$", "i", "us-i*", True),
     (r"us$", "us ", "us-us*", True),
     (r"o$", "i", "o-i*", True),
     (r"$", "i", "-i*", True),
     (r"$", "im", "-im*", True)
    ), # 9) -ch, -sh and -ss take -es in the plural
       # (e.g., churches, classes).
    ((r"([cs])h$", "\\1hes", None, False),
     (r"ss$", "sses", None, False),
     (r"x$", "xes", None, False)
    ), # 10) -f or -fe sometimes take -ves in the plural
       # (e.g, lives, wolves).
    ((r"([aeo]l)f$", "\\1ves", None, False),
     (r"([^d]ea)f$", "\\1ves", None, False),
     (r"arf$", "arves", None, False),
     (r"([nlw]i)fe$", "\\1ves", None, False),
    ), # 11) -y takes -ys if preceded by a vowel, -ies otherwise
       # (e.g., storeys, Marys, stories).
    ((r"([aeiou])y$", "\\1ys", None, False),
     (r"([A-Z].*)y$", "\\1ys", None, False),
     (r"y$", "ies", None, False)
    ), # 12) -o sometimes takes -os, -oes otherwise.
       # -o is preceded by a vowel takes -os
       # (e.g., lassos, potatoes, bamboos).
    ((r"o$", "os", "o-os", False),
     (r"([aeiou])o$", "\\1os", None, False),
     (r"o$", "oes", None, False)
    ), # 13) Military stuff
       # (e.g., Major Generals).
    ((r"l$", "ls", "general-generals", False),
    ), # 14) Assume that the plural takes -s
       # (cats, programmes, ...).
    ((r"$", "s", None, False),)
]

# For performance, compile the regular expressions once:
plural_rules = [[(re.compile(r[0]), r[1], r[2], r[3]) for r in grp] for grp in plural_rules]
# Suffix categories.
# Word lists backing the category-specific rules in plural_rules above;
# a category name ending in "*" is only consulted in classical mode.
plural_categories = {
    "uninflected": [
        "bison", "debris", "headquarters", "news", "swine",
        "bream", "diabetes", "herpes", "pincers", "trout",
        "breeches", "djinn", "high-jinks", "pliers", "tuna",
        "britches", "eland", "homework", "proceedings", "whiting",
        "carp", "elk", "innings", "rabies", "wildebeest",
        "chassis", "flounder", "jackanapes", "salmon",
        "clippers", "gallows", "mackerel", "scissors",
        "cod", "graffiti", "measles", "series",
        "contretemps", "mews", "shears",
        "corps", "mumps", "species"
    ],
    "uncountable": [
        "advice", "fruit", "ketchup", "meat", "sand",
        "bread", "furniture", "knowledge", "mustard", "software",
        "butter", "garbage", "love", "news", "understanding",
        "cheese", "gravel", "luggage", "progress", "water",
        "electricity", "happiness", "mathematics", "research",
        "equipment", "information", "mayonnaise", "rice"
    ],
    "s-singular": [
        "acropolis", "caddis", "dais", "glottis", "pathos",
        "aegis", "cannabis", "digitalis", "ibis", "pelvis",
        "alias", "canvas", "epidermis", "lens", "polis",
        "asbestos", "chaos", "ethos", "mantis", "rhinoceros",
        "bathos", "cosmos", "gas", "marquis", "sassafras",
        "bias", "glottis", "metropolis", "trellis"
    ],
    "ex-ices": [
        "codex", "murex", "silex"
    ],
    "ex-ices*": [
        "apex", "index", "pontifex", "vertex",
        "cortex", "latex", "simplex", "vortex"
    ],
    "um-a": [
        "agendum", "candelabrum", "desideratum", "extremum", "stratum",
        "bacterium", "datum", "erratum", "ovum"
    ],
    "um-a*": [
        "aquarium", "emporium", "maximum", "optimum", "stadium",
        "compendium", "enconium", "medium", "phylum", "trapezium",
        "consortium", "gymnasium", "memorandum", "quantum", "ultimatum",
        "cranium", "honorarium", "millenium", "rostrum", "vacuum",
        "curriculum", "interregnum", "minimum", "spectrum", "velum",
        "dictum", "lustrum", "momentum", "speculum"
    ],
    "on-a": [
        "aphelion", "hyperbaton", "perihelion",
        "asyndeton", "noumenon", "phenomenon",
        "criterion", "organon", "prolegomenon"
    ],
    "a-ae": [
        "alga", "alumna", "vertebra"
    ],
    "a-ae*": [
        "abscissa", "aurora", "hyperbola", "nebula",
        "amoeba", "formula", "lacuna", "nova",
        "antenna", "hydra", "medusa", "parabola"
    ],
    "en-ina*": [
        "foramen", "lumen", "stamen"
    ],
    "a-ata*": [
        "anathema", "dogma", "gumma", "miasma", "stigma",
        "bema", "drama", "lemma", "schema", "stoma",
        "carcinoma", "edema", "lymphoma", "oedema", "trauma",
        "charisma", "enema", "magma", "sarcoma",
        "diploma", "enigma", "melisma", "soma",
    ],
    "is-ides*": [
        "clitoris", "iris"
    ],
    "us-i*": [
        "focus", "nimbus", "succubus",
        "fungus", "nucleolus", "torus",
        "genius", "radius", "umbilicus",
        "incubus", "stylus", "uterus"
    ],
    "us-us*": [
        "apparatus", "hiatus", "plexus", "status",
        "cantus", "impetus", "prospectus",
        "coitus", "nexus", "sinus",
    ],
    "o-i*": [
        "alto", "canto", "crescendo", "soprano",
        "basso", "contralto", "solo", "tempo"
    ],
    "-i*": [
        "afreet", "afrit", "efreet"
    ],
    "-im*": [
        "cherub", "goy", "seraph"
    ],
    "o-os": [
        "albino", "dynamo", "guano", "lumbago", "photo",
        "archipelago", "embryo", "inferno", "magneto", "pro",
        "armadillo", "fiasco", "jumbo", "manifesto", "quarto",
        "commando", "generalissimo", "medico", "rhino",
        "ditto", "ghetto", "lingo", "octavo", "stylo"
    ],
    "general-generals": [
        "Adjutant", "Brigadier", "Lieutenant", "Major", "Quartermaster",
        "adjutant", "brigadier", "lieutenant", "major", "quartermaster"
    ]
}
def pluralize(word, pos=NOUN, custom={}, classical=True):
    """ Returns the plural of a given word, e.g., child => children.
        Handles nouns and adjectives, using classical inflection by default
        (i.e., where "matrix" pluralizes to "matrices" and not "matrixes").
        The custom dictionary is for user-defined replacements.
    """
    if word in custom:
        return custom[word]
    # Recurse genitives.
    # Remove the apostrophe and any trailing -s,
    # form the plural of the resultant noun, and then append an apostrophe (dog's => dogs').
    if word.endswith(("'", "'s")):
        # BUG FIX: str.rstrip("'s") strips any run of "'" and "s" characters
        # (e.g., "class's" => "cla"); slice the genitive suffix off instead.
        w = word[:-2] if word.endswith("'s") else word[:-1]
        w = pluralize(w, pos, custom, classical)
        if w.endswith("s"):
            return w + "'"
        else:
            return w + "'s"
    # Recurse compound words
    # (e.g., Postmasters General, mothers-in-law, Roman deities).
    w = word.replace("-", " ").split(" ")
    if len(w) > 1:
        # BUG FIX: parenthesized the "or" — previously "and" bound tighter,
        # so any "<x> general" pluralized its first word even when <x> was in
        # the "general-generals" category (e.g., "major general").
        if (w[1] == "general" or w[1] == "General") and \
                w[0] not in plural_categories["general-generals"]:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        elif w[1] in plural_prepositions:
            return word.replace(w[0], pluralize(w[0], pos, custom, classical))
        else:
            return word.replace(w[-1], pluralize(w[-1], pos, custom, classical))
    # Only a very small number of adjectives inflect.
    n = range(len(plural_rules))
    if pos.startswith(ADJECTIVE):
        n = [0, 1]
    # Apply pluralization rules.
    for i in n:
        for suffix, inflection, category, classic in plural_rules[i]:
            # A general rule, or a classic rule in classical mode.
            if category is None:
                if not classic or (classic and classical):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
            # A rule pertaining to a specific category of words.
            if category is not None:
                if word in plural_categories[category] and (not classic or (classic and classical)):
                    if suffix.search(word) is not None:
                        return suffix.sub(inflection, word)
    # No rule matched (cannot happen for nouns: the final rule always applies).
    return word
#print pluralize("part-of-speech")
#print pluralize("child")
#print pluralize("dog's")
#print pluralize("wolf")
#print pluralize("bear")
#print pluralize("kitchen knife")
#print pluralize("octopus", classical=True)
#print pluralize("matrix", classical=True)
#print pluralize("matrix", classical=False)
#print pluralize("my", pos=ADJECTIVE)
#### SINGULARIZE ###################################################################################
# Adapted from Bermi Ferrer's Inflector for Python:
# http://www.bermi.org/inflector/
# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
# Suffix-replacement rules for singularization; singularize() applies the
# first pattern that matches the word.
singular_rules = [
    (r'(?i)(.)ae$', '\\1a'),
    (r'(?i)(.)itis$', '\\1itis'),
    (r'(?i)(.)eaux$', '\\1eau'),
    (r'(?i)(quiz)zes$', '\\1'),
    (r'(?i)(matr)ices$', '\\1ix'),
    (r'(?i)(ap|vert|ind)ices$', '\\1ex'),
    (r'(?i)^(ox)en', '\\1'),
    (r'(?i)(alias|status)es$', '\\1'),
    # BUG FIX: "[octop|vir]" was a character class matching single letters
    # (so e.g. "safari" => "safarus"); use an alternation group instead.
    (r'(?i)(octop|vir)i$', '\\1us'),
    (r'(?i)(cris|ax|test)es$', '\\1is'),
    (r'(?i)(shoe)s$', '\\1'),
    (r'(?i)(o)es$', '\\1'),
    (r'(?i)(bus)es$', '\\1'),
    # BUG FIX: "[m|l]" also matched "|"; "[ml]" is the intended class.
    (r'(?i)([ml])ice$', '\\1ouse'),
    (r'(?i)(x|ch|ss|sh)es$', '\\1'),
    (r'(?i)(m)ovies$', '\\1ovie'),
    (r'(?i)(.)ombies$', '\\1ombie'),
    (r'(?i)(s)eries$', '\\1eries'),
    (r'(?i)([^aeiouy]|qu)ies$', '\\1y'),
    # -f, -fe sometimes take -ves in the plural
    # (e.g., lives, wolves).
    (r"([aeo]l)ves$", "\\1f"),
    (r"([^d]ea)ves$", "\\1f"),
    (r"arves$", "arf"),
    (r"erves$", "erve"),
    (r"([nlw]i)ves$", "\\1fe"),
    (r'(?i)([lr])ves$', '\\1f'),
    (r"([aeo])ves$", "\\1ve"),
    (r'(?i)(sive)s$', '\\1'),
    (r'(?i)(tive)s$', '\\1'),
    (r'(?i)(hive)s$', '\\1'),
    (r'(?i)([^f])ves$', '\\1fe'),
    # -ses suffixes.
    (r'(?i)(^analy)ses$', '\\1sis'),
    (r'(?i)((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$', '\\1\\2sis'),
    (r'(?i)(.)opses$', '\\1opsis'),
    (r'(?i)(.)yses$', '\\1ysis'),
    (r'(?i)(h|d|r|o|n|b|cl|p)oses$', '\\1ose'),
    (r'(?i)(fruct|gluc|galact|lact|ket|malt|rib|sacchar|cellul)ose$', '\\1ose'),
    (r'(?i)(.)oses$', '\\1osis'),
    # -a
    (r'(?i)([ti])a$', '\\1um'),
    (r'(?i)(n)ews$', '\\1ews'),
    (r'(?i)s$', ''),
]

# For performance, compile the regular expressions only once:
singular_rules = [(re.compile(r[0]), r[1]) for r in singular_rules]
# Words whose singular equals their plural.
singular_uninflected = set((
    "bison", "debris", "headquarters", "pincers", "trout",
    "bream", "diabetes", "herpes", "pliers", "tuna",
    "breeches", "djinn", "high-jinks", "proceedings", "whiting",
    "britches", "eland", "homework", "rabies", "wildebeest",
    "carp", "elk", "innings", "salmon",
    "chassis", "flounder", "jackanapes", "scissors",
    "christmas", "gallows", "mackerel", "series",
    "clippers", "georgia", "measles", "shears",
    "cod", "graffiti", "mews", "species",
    "contretemps", "mumps", "swine",
    "corps", "news", "swiss",
))
# Mass nouns with no plural form.
singular_uncountable = set((
    "advice", "equipment", "happiness", "luggage", "news", "software",
    "bread", "fruit", "information", "mathematics", "progress", "understanding",
    "butter", "furniture", "ketchup", "mayonnaise", "research", "water",
    "cheese", "garbage", "knowledge", "meat", "rice",
    "electricity", "gravel", "love", "mustard", "sand",
))
# Words ending in -ie whose plural simply takes -s (cookie => cookies),
# so the -ies => -y rule must not apply; "^" anchors whole-word entries.
singular_ie = set((
    "alergie", "cutie", "hoagie", "newbie", "softie", "veggie",
    "auntie", "doggie", "hottie", "nightie", "sortie", "weenie",
    "beanie", "eyrie", "indie", "oldie", "stoolie", "yuppie",
    "birdie", "freebie", "junkie", "^pie", "sweetie", "zombie",
    "bogie", "goonie", "laddie", "pixie", "techie",
    "bombie", "groupie", "laramie", "quickie", "^tie",
    "collie", "hankie", "lingerie", "reverie", "toughie",
    "cookie", "hippie", "meanie", "rookie", "valkyrie",
))
# Irregular plural => singular mappings, matched as word suffixes.
singular_irregular = {
    "atlantes": "atlas",
    "atlases": "atlas",
    "axes": "axe",
    "beeves": "beef",
    "brethren": "brother",
    "children": "child",
    "corpora": "corpus",
    "corpuses": "corpus",
    "ephemerides": "ephemeris",
    "feet": "foot",
    "ganglia": "ganglion",
    "geese": "goose",
    "genera": "genus",
    "genii": "genie",
    "graffiti": "graffito",
    "helves": "helve",
    "kine": "cow",
    "leaves": "leaf",
    "loaves": "loaf",
    "men": "man",
    "mongooses": "mongoose",
    "monies": "money",
    "moves": "move",
    "mythoi": "mythos",
    "numena": "numen",
    "occipita": "occiput",
    "octopodes": "octopus",
    "opera": "opus",
    "opuses": "opus",
    "our": "my",  # possessive adjective, mirroring pluralize()
    "oxen": "ox",
    "penes": "penis",
    "penises": "penis",
    "people": "person",
    "sexes": "sex",
    "soliloquies": "soliloquy",
    "teeth": "tooth",
    "testes": "testis",
    "trilbys": "trilby",
    "turves": "turf",
    "zoa": "zoon",
}
def singularize(word, pos=NOUN, custom={}):
    """ Returns the singular of a given word, e.g., children => child.
        The custom dictionary is for user-defined replacements.
    """
    if word in custom:
        return custom[word]
    # Recurse compound words (e.g. mothers-in-law).
    if "-" in word:
        w = word.split("-")
        if len(w) > 1 and w[1] in plural_prepositions:
            return singularize(w[0], pos, custom) + "-" + "-".join(w[1:])
    # dogs' => dog's
    if word.endswith("'"):
        return singularize(word[:-1]) + "'s"
    w = word.lower()
    # BUG FIX: the two loops below used the reversed test x.endswith(w),
    # which only matched when the word was itself a (suffix of a) list
    # entry; test the word against each entry instead, consistent with
    # the singular_ie loop.
    for x in singular_uninflected:
        if w.endswith(x):
            return word
    for x in singular_uncountable:
        if w.endswith(x):
            return word
    for x in singular_ie:
        if w.endswith(x + "s"):
            return w
    for x in singular_irregular:
        if w.endswith(x):
            return re.sub('(?i)' + x + '$', singular_irregular[x], word)
    # Fall back to the suffix-replacement rules; unmatched groups in the
    # replacement template are stripped before substituting.
    for suffix, inflection in singular_rules:
        m = suffix.search(word)
        g = m and m.groups() or []
        if m:
            for k in range(len(g)):
                if g[k] is None:
                    inflection = inflection.replace('\\' + str(k + 1), '')
            return suffix.sub(inflection, word)
    return word
#### VERB CONJUGATION ##############################################################################
class Verbs(_Verbs):
    """English verb conjugation, backed by the en-verbs.txt lexicon with
    rule-based fallbacks for unknown (assumed regular) verbs.
    """

    def __init__(self):
        # "format" maps columns in en-verbs.txt to tense slots; "default"
        # falls back to another slot when a form is missing from the lexicon.
        _Verbs.__init__(self, os.path.join(MODULE, "en-verbs.txt"),
            language = "en",
              format = [0, 1, 2, 3, 7, 8, 17, 18, 19, 23, 25, 24, 16, 9, 10, 11, 15, 33, 26, 27, 28, 32],
             default = {
                 1: 0, 2: 0, 3: 0, 7: 0,          # present singular => infinitive ("I walk")
                 4: 7, 5: 7, 6: 7,                # present plural
                17: 25, 18: 25, 19: 25, 23: 25,   # past singular
                20: 23, 21: 23, 22: 23,           # past plural
                 9: 16, 10: 16, 11: 16, 15: 16,   # present singular negated
                12: 15, 13: 15, 14: 15,           # present plural negated
                26: 33, 27: 33, 28: 33,           # past singular negated
                29: 32, 30: 32, 31: 32, 32: 33    # past plural negated
            })

    def find_lemma(self, verb):
        """ Returns the base form of the given inflected verb, using a rule-based approach.
            This is problematic if a verb ending in -e is given in the past tense or gerund.
        """
        v = verb.lower()
        b = False  # True once an -ing/-ed suffix has been stripped.
        # Contracted auxiliaries.
        if v in ("'m", "'re", "'s", "n't"):
            return "be"
        if v in ("'d", "'ll"):
            return "will"
        # BUG FIX: ("'ve") is just the string "'ve", so "v in" performed a
        # substring test (also matching "e", "v", "ve", "'"); use a tuple.
        if v in ("'ve",):
            return "have"
        if v.endswith("s"):
            if v.endswith("ies") and len(v) > 3 and v[-4] not in VOWELS:
                return v[:-3] + "y" # complies => comply
            if v.endswith(("sses", "shes", "ches", "xes")):
                return v[:-2]       # kisses => kiss
            return v[:-1]
        if v.endswith("ied") and re_vowel.search(v[:-3]) is not None:
            return v[:-3] + "y"     # envied => envy
        if v.endswith("ing") and re_vowel.search(v[:-3]) is not None:
            v = v[:-3]; b = True;   # chopping => chopp
        if v.endswith("ed") and re_vowel.search(v[:-2]) is not None:
            v = v[:-2]; b = True;   # danced => danc
        if b:
            # Doubled consonant after short vowel: chopp => chop.
            if len(v) > 3 and v[-1] == v[-2] and v[-3] in VOWELS and v[-4] not in VOWELS and not v.endswith("ss"):
                return v[:-1]
            if v.endswith(("ick", "ack")):
                return v[:-1]       # panick => panic
            # Guess common cases where the base form ends in -e:
            if v.endswith(("v", "z", "c", "i")):
                return v + "e"      # danc => dance
            if v.endswith("g") and v.endswith(("dg", "lg", "ng", "rg")):
                return v + "e"      # indulg => indulge
            if v.endswith(("b", "d", "g", "k", "l", "m", "r", "s", "t")) \
              and len(v) > 2 and v[-2] in VOWELS and not v[-3] in VOWELS \
              and not v.endswith("er"):
                return v + "e"      # generat => generate
            if v.endswith("n") and v.endswith(("an", "in")) and not v.endswith(("ain", "oin", "oan")):
                return v + "e"      # imagin => imagine
            if v.endswith("l") and len(v) > 1 and v[-2] not in VOWELS:
                return v + "e"      # squabbl => squabble
            if v.endswith("f") and len(v) > 2 and v[-2] in VOWELS and v[-3] not in VOWELS:
                return v + "e"      # chaf => chafed
            if v.endswith("e"):
                return v + "e"      # decre => decree
            if v.endswith(("th", "ang", "un", "cr", "vr", "rs", "ps", "tr")):
                return v + "e"
        return v

    def find_lexeme(self, verb):
        """ For a regular verb (base form), returns the forms using a rule-based approach.
        """
        v = verb.lower()
        if len(v) > 1 and v.endswith("e") and v[-2] not in VOWELS:
            # Verbs ending in a consonant followed by "e": dance, save, devote, evolve.
            return [v, v, v, v + "s", v, v[:-1] + "ing"] + [v + "d"] * 6
        if len(v) > 1 and v.endswith("y") and v[-2] not in VOWELS:
            # Verbs ending in a consonant followed by "y": comply, copy, magnify.
            return [v, v, v, v[:-1] + "ies", v, v + "ing"] + [v[:-1] + "ied"] * 6
        if v.endswith(("ss", "sh", "ch", "x")):
            # Verbs ending in sibilants: kiss, bless, box, polish, preach.
            return [v, v, v, v + "es", v, v + "ing"] + [v + "ed"] * 6
        if v.endswith("ic"):
            # Verbs ending in -ic: panic, mimic.
            return [v, v, v, v + "es", v, v + "king"] + [v + "ked"] * 6
        if len(v) > 1 and v[-1] not in VOWELS and v[-2] not in VOWELS:
            # Verbs ending in a consonant cluster: delight, clamp.
            return [v, v, v, v + "s", v, v + "ing"] + [v + "ed"] * 6
        if (len(v) > 1 and v.endswith(("y", "w")) and v[-2] in VOWELS) \
          or (len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] in VOWELS) \
          or (len(v) > 3 and v[-1] not in VOWELS and v[-3] in VOWELS and v[-4] in VOWELS):
            # Verbs ending in a long vowel or diphthong followed by a consonant: paint, devour, play.
            return [v, v, v, v + "s", v, v + "ing"] + [v + "ed"] * 6
        if len(v) > 2 and v[-1] not in VOWELS and v[-2] in VOWELS and v[-3] not in VOWELS:
            # Verbs ending in a short vowel followed by a consonant (doubled): chat, chop, compel.
            return [v, v, v, v + "s", v, v + v[-1] + "ing"] + [v + v[-1] + "ed"] * 6
        return [v, v, v, v + "s", v, v + "ing"] + [v + "ed"] * 6
# Default Verbs instance plus module-level aliases mirroring pattern.text's API.
verbs = Verbs()
conjugate, lemma, lexeme, tenses = \
    verbs.conjugate, verbs.lemma, verbs.lexeme, verbs.tenses
#print conjugate("imaginarify", "part", parse=True)
#print conjugate("imaginarify", "part", parse=False)
#### COMPARATIVE & SUPERLATIVE #####################################################################
# Characters counted as vowels when estimating syllables ("y" included).
VOWELS = "aeiouy"
# Adjectives with irregular gradation, mapped to (comparative, superlative).
grade_irregular = {
    "bad": ( "worse", "worst"),
    "far": ("further", "farthest"),
    "good": ( "better", "best"),
    "hind": ( "hinder", "hindmost"),
    "ill": ( "worse", "worst"),
    "less": ( "lesser", "least"),
    "little": ( "less", "least"),
    "many": ( "more", "most"),
    "much": ( "more", "most"),
    "well": ( "better", "best")
}
# Adjectives that never inflect; they are graded with "more" / "most".
grade_uninflected = ["giant", "glib", "hurt", "known", "madly"]
# Regular gradation suffixes.
COMPARATIVE = "er"
SUPERLATIVE = "est"
def _count_syllables(word):
    """ Returns the estimated number of syllables in the word by counting vowel-groups.
    """
    # Drop a trailing "e" (usually silent), unless that would leave nothing.
    stem = word[:-1] if word.endswith("e") and word[:-1] else word
    count = 0
    previous_was_vowel = False
    for character in stem:
        is_vowel = character in VOWELS
        if is_vowel and not previous_was_vowel:
            # A new vowel-group starts here: count one syllable.
            count += 1
        previous_was_vowel = is_vowel
    return count
def grade(adjective, suffix=COMPARATIVE):
    """ Returns the comparative or superlative form of the given adjective.
    """
    syllables = _count_syllables(adjective)
    periphrastic = "more" if suffix == COMPARATIVE else "most"
    if adjective in grade_irregular:
        # Irregular forms are looked up directly: index 0 = comparative, 1 = superlative.
        return grade_irregular[adjective][suffix != COMPARATIVE]
    if adjective in grade_uninflected:
        # Some adjectives don't inflect at all; grade periphrastically.
        return "%s %s" % (periphrastic, adjective)
    if syllables <= 2 and adjective.endswith("e"):
        # Ending in "e": avoid doubling it (larger, wiser).
        suffix = suffix.lstrip("e")
    elif syllables == 1 and len(adjective) >= 3 \
    and adjective[-1] not in VOWELS and adjective[-2] in VOWELS and adjective[-3] not in VOWELS:
        # One syllable, consonant-vowel-consonant: double the final consonant
        # (bigger, thinner) -- except after "w" (lower, newer).
        if not adjective.endswith(("w")):
            suffix = adjective[-1] + suffix
    elif syllables == 1:
        # One syllable with other endings: briefer.
        pass
    elif syllables == 2 and adjective.endswith("y"):
        # Two syllables ending in "y": funnier, hairier.
        adjective = adjective[:-1] + "i"
    elif syllables == 2 and adjective[-2:] in ("er", "le", "ow"):
        # Two syllables with -er/-le/-ow: gentler, narrower.
        pass
    else:
        # Three or more syllables: more generous, more important.
        return "%s %s" % (periphrastic, adjective)
    return adjective + suffix
def comparative(adjective):
    """ Returns the comparative form of the given adjective ("nice" => "nicer"). """
    return grade(adjective, COMPARATIVE)
def superlative(adjective):
    """ Returns the superlative form of the given adjective ("nice" => "nicest"). """
    return grade(adjective, SUPERLATIVE)
#### ATTRIBUTIVE & PREDICATIVE #####################################################################
def attributive(adjective):
    """ Returns the attributive form of the given adjective.
        English adjectives do not inflect attributively, so this is a no-op.
    """
    return adjective
def predicative(adjective):
    """ Returns the predicative form of the given adjective.
        English adjectives do not inflect predicatively, so this is a no-op.
    """
    return adjective
| clips/pattern | pattern/text/en/inflect.py | Python | bsd-3-clause | 36,230 | [
"Elk",
"Octopus"
] | 5c140129923617229220803d95a812db3734e99315fb9fc7d39136918ef2bb29 |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
import re
from subprocess import call, check_call
import sys
import py
import pytest
@pytest.fixture(scope='session')
def session_tmpdir(request):
    """Session-scoped temporary directory for building test artifacts."""
    handler = getattr(request.config, '_tmpdirhandler', None)
    if not handler:  # pragma: NO COVER
        # No pytest tmpdir plugin available: fall back to a numbered dir.
        return py.path.local.make_numbered_dir('humpty-')
    # Create session tmpdir within pytest's session tmpdir
    return handler.mktemp('session', numbered=False)
@pytest.fixture(scope='session')
def packages(session_tmpdir, request):
    # Session-wide PackageManager so wheels/eggs/venvs are reused across tests.
    return PackageManager(session_tmpdir, request)
class PackageManager(object):
    """Builds and caches wheels, eggs and virtualenvs for the test session.

    All artifacts live under ``tmpdir``; caches are keyed so each artifact
    is built at most once per session.
    """
    def __init__(self, tmpdir, request):
        self.tmpdir = tmpdir
        self.wheelhouse = tmpdir.join('wheelhouse')
        self.distdir = tmpdir.join('dist')
        self.wheels = {}        # (dist_name, python_tag) -> wheel path
        self.eggs = {}          # dist_name -> egg path
        self.venvs = {}         # frozenset(dist names) -> Virtualenv
        self.saved_modes = []   # (path, original mode) pairs for cleanup
        # restore modes so that pytest can delete the tmpdir
        request.addfinalizer(self._restore_modes)
    def _restore_modes(self):
        # Undo the read-only chmods applied in get_venv (LIFO order).
        while self.saved_modes:
            path, mode = self.saved_modes.pop()
            path.chmod(mode)
    def get_wheel(self, dist_name, python_tag=None):
        """Return a wheel for dist_name, building it at most once per tag."""
        key = dist_name, python_tag
        wheel = self.wheels.get(key)
        # BUG FIX: this previously read "if wheel is None or True", which
        # defeated the cache and rebuilt the wheel on every call.
        if wheel is None:
            tmpdir = py.path.local.make_numbered_dir('tmp', self.tmpdir)
            wheel = build_wheel(dist_name, self.wheelhouse, tmpdir,
                                python_tag=python_tag)
            self.wheels[key] = wheel
        return wheel
    def get_egg(self, dist_name):
        """Return an egg for dist_name, converting its wheel on first use."""
        egg = self.eggs.get(dist_name)
        if egg is None:
            wheel = self.get_wheel(dist_name)
            egg = build_egg(wheel, self.distdir)
            self.eggs[dist_name] = egg
        return egg
    def require_eggs(self, *dists):
        """Ensure eggs exist for all the named distributions."""
        for dist in dists:
            self.get_egg(dist)
    def get_venv(self, *dists, **kwargs):
        """Return a virtualenv with the given dists easy_installed.

        unzip=True installs eggs unzipped.  NOTE(review): the cache key is
        only the set of dists, so a zipped and an unzipped request for the
        same dists share one venv -- confirm this is intended.
        """
        dists = frozenset(dists)
        unzip = kwargs.get('unzip', False)
        venv = self.venvs.get(dists)
        if venv is None:
            name = '-'.join(sorted(re.sub(r'\W', '_', dist) for dist in dists))
            if unzip:
                name += '-unzipped'
            vdir = self.tmpdir.join("venv_%s" % name)
            venv = Virtualenv(vdir, self.distdir, install=dists, unzip=unzip)
            self.venvs[dists] = venv
            # Make installed eggs read-only (restored in _restore_modes)
            for p in vdir.visit(fil="*.egg"):
                if p.isdir():
                    self.saved_modes.append((p, p.stat().mode))
                    p.chmod(0o500)
        return venv
def build_wheel(dist_name, wheelhouse, tmpdir, python_tag=None):
    """Build the test distribution ``dist_name`` into a wheel under ``wheelhouse``."""
    srcdir = py.path.local(__file__).dirpath(dist_name)
    # Keep all build artifacts in a private tmpdir so that simultaneous
    # tox runs do not overwrite each other's builds.
    ourtmp = tmpdir.ensure_dir(dist_name)
    setup_py = [sys.executable, 'setup.py',
                'egg_info', '--egg-base', str(ourtmp),
                'build',
                '--build-base', str(ourtmp.join('build')),
                '--build-temp', str(ourtmp.join('btmp'))]
    bdist_wheel = ['bdist_wheel', '--dist-dir', str(ourtmp)]
    if python_tag is not None:
        bdist_wheel += ['--python-tag', python_tag]
    print("==== Building wheel for %s ====" % dist_name)
    with srcdir.as_cwd():
        check_call(setup_py + bdist_wheel)
    # Exactly one fresh wheel is expected in the private dist dir.
    new_wheels = ourtmp.listdir(lambda f: f.isfile()
                                and f.fnmatch("%s-*.whl" % dist_name))
    assert len(new_wheels) == 1, "can't find newly created wheel"
    built = new_wheels[0]
    wheel = wheelhouse.ensure(dir=True).join(built.basename)
    built.move(wheel)
    return wheel
def build_egg(wheel, egg_dir):
    # Convert a wheel into an egg using humpty (the package under test).
    from humpty import EggWriter
    print("==== Building egg from %s ====" % wheel)
    egg = EggWriter(str(wheel)).build_egg(str(egg_dir))
    return py.path.local(egg)
class Virtualenv(object):
    """A throwaway virtualenv into which eggs can be easy_installed."""
    def __init__(self, path, find_links=None, install=None, unzip=False):
        self.path = py.path.local(path)
        # Subprocesses run with only the venv's bin dir on PATH, so the
        # venv's own python/easy_install are the ones found.
        self.environ = {'PATH': str(self.path.join('bin'))}
        print("==== Creating virtualenv at %s ====" % path)
        check_call([sys.executable, '-m', 'virtualenv',
                    '--no-site', str(path)])
        if install:
            # --index-url points at a dead URL so nothing is fetched from PyPI.
            cmd = ['easy_install', '--index-url', 'file:///dev/null']
            if find_links:
                cmd += ['--find-links', str(find_links)]
            if unzip:
                cmd += ['--always-unzip']
            cmd.extend(install)
            self.check_call(cmd)
    def call(self, cmd, **kwargs):
        """Run cmd inside the venv; returns its exit status."""
        kwargs['env'] = self.environ
        return call(cmd, **kwargs)
    def check_call(self, cmd, **kwargs):
        """Run cmd inside the venv; raises on non-zero exit."""
        kwargs['env'] = self.environ
        check_call(cmd, **kwargs)
    def run(self, prog, **kwargs):
        """Run a python one-liner inside the venv; returns the exit status."""
        return self.call(['python', '-c', prog], **kwargs)
    def check_run(self, prog, **kwargs):
        """Run a python one-liner inside the venv; raises on failure."""
        return self.check_call(['python', '-c', prog], **kwargs)
| dairiki/humpty | tests/conftest.py | Python | bsd-3-clause | 5,206 | [
"VisIt"
] | d3dc1bd64bdaf324e2a3ece7399753c09cb39f6e166bf0ffef04c20e7c0b9241 |
# pysam versioning information
# Version of the pysam Python package itself.
__version__ = "0.10.0"
# Versions of the bundled native tool/library sources.
__samtools_version__ = "1.3.1"
__bcftools_version__ = "1.3.1"
__htslib_version__ = "1.3.2"
| bioinformed/pysam | pysam/version.py | Python | mit | 149 | [
"pysam"
] | 1fc0a718fa836df664e1305d812a6b5b6e575de5615bc65ce35a62d4fb4a0601 |
class VehicleInfo(object):
    """Static catalog of simulated vehicle types and their frame variants.

    Maps vehicle name -> {"default_frame": ..., "frames": {...}} and offers
    helpers to resolve build targets and default parameter files for a frame.
    """
    def __init__(self):
        """
        make_target: option passed to make to create binaries. Usually sitl, and "-debug" may be appended if -D is passed to sim_vehicle.py
        default_params_filename: filename of default parameters file. Taken to be relative to autotest dir.
        extra_mavlink_cmds: extra parameters that will be passed to mavproxy
        """
        self.options = {
            "ArduCopter": {
                "default_frame": "quad",
                "frames": {
                    # COPTER
                    "+": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "quad": {
                        "model": "+",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "X": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                        # this param set FRAME doesn't actually work because mavproxy
                        # won't set a parameter unless it knows of it, and the
                        # param fetch happens asynchronously
                        "extra_mavlink_cmds": "param fetch frame; param set FRAME 1;",
                    },
                    "bfx": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-bfx.parm" ],
                    },
                    "djix": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-djix.parm" ],
                    },
                    "cwx": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-cwx.parm" ],
                    },
                    "hexa": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-hexa.parm" ],
                    },
                    "octa-quad": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-octaquad.parm" ],
                    },
                    "octa": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-octa.parm" ],
                    },
                    "tri": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-tri.parm" ],
                    },
                    "y6": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-y6.parm" ],
                    },
                    "dodeca-hexa": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/copter-dodecahexa.parm" ],
                    },
                    # SIM
                    "IrisRos": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "gazebo-iris": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter.parm",
                                                    "default_params/gazebo-iris.parm"],
                    },
                    "airsim-copter": {
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    # HELICOPTER
                    "heli": {
                        "make_target": "sitl-heli",
                        "waf_target": "bin/arducopter-heli",
                        "default_params_filename": "default_params/copter-heli.parm",
                    },
                    "heli-dual": {
                        "make_target": "sitl-heli-dual",
                        "waf_target": "bin/arducopter-heli",
                        "default_params_filename": ["default_params/copter-heli.parm",
                                                    "default_params/copter-heli-dual.parm"],
                    },
                    "heli-compound": {
                        "make_target": "sitl-heli-compound",
                        "waf_target": "bin/arducopter-heli",
                    },
                    "singlecopter": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter-single.parm",
                    },
                    "coaxcopter": {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": ["default_params/copter-single.parm",
                                                    "default_params/copter-coax.parm"],
                    },
                    "scrimmage-copter" : {
                        "make_target": "sitl",
                        "waf_target": "bin/arducopter",
                        "default_params_filename": "default_params/copter.parm",
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                },
            },
            "ArduPlane": {
                "default_frame": "plane",
                "frames": {
                    # PLANE
                    "tilt-quad": {
                        "make_target": "sitl-tilt-quad",
                        "waf_target": "bin/arduplane-tilt-quad",
                        "default_params_filename": "default_params/tiltquad.parm",
                    },
                    "quadplane-tilttri": {
                        "make_target": "sitl",
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tilttri.parm",
                    },
                    "quadplane-tilttrivec": {
                        "make_target": "sitl",
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tilttrivec.parm",
                    },
                    "quadplane-tilthvec": {
                        "make_target": "sitl",
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/quadplane-tilthvec.parm"],
                    },
                    "quadplane-tri": {
                        "make_target": "sitl",
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-tri.parm",
                    },
                    "quadplane-cl84" : {
                        "make_target" : "sitl",
                        "waf_target" : "bin/arduplane",
                        "default_params_filename": "default_params/quadplane-cl84.parm",
                    },
                    "quadplane": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/quadplane.parm",
                    },
                    "firefly": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/firefly.parm",
                    },
                    "plane-elevon": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-elevons.parm"],
                    },
                    "plane-vtail": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-vtail.parm"],
                    },
                    "plane-tailsitter": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane-tailsitter.parm",
                    },
                    "plane-jet": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-jet.parm"],
                    },
                    "plane": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane.parm",
                    },
                    "plane-dspoilers": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": ["default_params/plane.parm", "default_params/plane-dspoilers.parm"]
                    },
                    "gazebo-zephyr": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/gazebo-zephyr.parm",
                    },
                    "last_letter": {
                        "waf_target": "bin/arduplane",
                    },
                    "CRRCSim": {
                        "waf_target": "bin/arduplane",
                    },
                    "jsbsim": {
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane-jsbsim.parm",
                    },
                    "scrimmage-plane" : {
                        "make_target": "sitl",
                        "waf_target": "bin/arduplane",
                        "default_params_filename": "default_params/plane.parm",
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                },
            },
            "APMrover2": {
                "default_frame": "rover",
                "frames": {
                    # ROVER
                    "rover": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": "default_params/rover.parm",
                    },
                    "rover-skid": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm"],
                    },
                    "balancebot": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm",
                                                    "default_params/balancebot.parm"],
                    },
                    "sailboat": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/sailboat.parm"],
                    },
                    "sailboat-motor": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/sailboat-motor.parm"],
                    },
                    "gazebo-rover": {
                        "waf_target": "bin/ardurover",
                        "default_params_filename": ["default_params/rover.parm",
                                                    "default_params/rover-skid.parm"],
                    },
                    "calibration": {
                        "extra_mavlink_cmds": "module load sitl_calibration;",
                    },
                },
            },
            "ArduSub": {
                "default_frame": "vectored",
                "frames": {
                    "vectored": {
                        "waf_target": "bin/ardusub",
                        "default_params_filename": "default_params/sub.parm",
                    },
                    "gazebo-bluerov2": {
                        "waf_target": "bin/ardusub",
                        "default_params_filename": "default_params/sub.parm",
                    },
                },
            },
            "AntennaTracker": {
                "default_frame": "tracker",
                "frames": {
                    "tracker": {
                        "waf_target": "bin/antennatracker",
                    },
                },
            },
        }
    def default_frame(self, vehicle):
        """Returns the default frame name for the given vehicle."""
        return self.options[vehicle]["default_frame"]
    def default_waf_target(self, vehicle):
        """Returns a waf target based on vehicle type, which is often determined by which directory the user is in"""
        default_frame = self.default_frame(vehicle)
        return self.options[vehicle]["frames"][default_frame]["waf_target"]
    def options_for_frame(self, frame, vehicle, opts):
        """Return information about how to sitl for frame e.g. build-type==sitl

        Falls back to prefix matching (e.g. "octa-foo" -> "octa"), then a
        "-heli" suffix match, then an empty config with defaults filled in.
        """
        ret = None
        frames = self.options[vehicle]["frames"]
        if frame in frames:
            ret = self.options[vehicle]["frames"][frame]
        else:
            # Fall back to prefix matching against known frame families.
            for p in ["octa", "tri", "y6", "firefly", "heli", "gazebo", "last_letter", "jsbsim", "quadplane", "plane-elevon", "plane-vtail", "plane", "airsim"]:
                if frame.startswith(p):
                    ret = self.options[vehicle]["frames"][p]
                    break
        if ret is None:
            if frame.endswith("-heli"):
                ret = self.options[vehicle]["frames"]["heli"]
        if ret is None:
            print("WARNING: no config for frame (%s)" % frame)
            ret = {}
        # Fill in defaults for anything the frame entry did not specify.
        if "model" not in ret:
            ret["model"] = frame
        if "sitl-port" not in ret:
            ret["sitl-port"] = True
        if opts.model is not None:
            ret["model"] = opts.model
        if (ret["model"].find("xplane") != -1 or ret["model"].find("flightaxis") != -1):
            # NOTE(review): xplane/flightaxis models appear to bypass the SITL
            # port and use their own connection -- confirm before relying on it.
            ret["sitl-port"] = False
        if "make_target" not in ret:
            ret["make_target"] = "sitl"
        if "waf_target" not in ret:
            ret["waf_target"] = self.default_waf_target(vehicle)
        if opts.build_target is not None:
            ret["make_target"] = opts.build_target
            ret["waf_target"] = opts.build_target
        return ret
| avrem/ardupilot | Tools/autotest/pysim/vehicleinfo.py | Python | gpl-3.0 | 13,904 | [
"Firefly"
] | 550884c5f49682eaae1c010d669a59615a224a12a13ce1cfde2de14b7777bdc3 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.asset_v1.services.asset_service import AssetServiceAsyncClient
from google.cloud.asset_v1.services.asset_service import AssetServiceClient
from google.cloud.asset_v1.services.asset_service import pagers
from google.cloud.asset_v1.services.asset_service import transports
from google.cloud.asset_v1.types import asset_service
from google.cloud.asset_v1.types import assets
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client certificate source returning static (cert, key) bytes."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return "foo.googleapis.com" when the client's default endpoint is a
    localhost one, otherwise the client's own DEFAULT_ENDPOINT unchanged.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com to *.mtls.googleapis.com,
    is idempotent on already-mTLS endpoints, and passes through None and
    non-Google endpoints unchanged.
    """
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert AssetServiceClient._get_default_mtls_endpoint(None) is None
    assert (
        AssetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
        == api_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
        == sandbox_mtls_endpoint
    )
    assert (
        AssetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
        == sandbox_mtls_endpoint
    )
    assert AssetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_info(client_class):
    """from_service_account_info parses the info dict into credentials and
    builds a client pointed at the default endpoint.
    """
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.AssetServiceGrpcTransport, "grpc"),
        (transports.AssetServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_asset_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport toggles self-signed JWT usage on the credentials
    according to the always_use_jwt_access flag.
    """
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_file(client_class):
    """from_service_account_file and its _json alias both load credentials
    from the file and build a client on the default endpoint.
    """
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_client_get_transport_class():
    """get_transport_class returns a supported transport by default and the
    gRPC transport when asked for "grpc" explicitly.
    """
    transport = AssetServiceClient.get_transport_class()
    available_transports = [
        transports.AssetServiceGrpcTransport,
    ]
    assert transport in available_transports
    transport = AssetServiceClient.get_transport_class("grpc")
    assert transport == transports.AssetServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_client_options(
    client_class, transport_class, transport_name
):
    """client_options drive transport construction: explicit api_endpoint,
    GOOGLE_API_USE_MTLS_ENDPOINT never/always/invalid, invalid
    GOOGLE_API_USE_CLIENT_CERTIFICATE, and quota_project_id.
    """
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "true"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "false"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_asset_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the mTLS
    endpoint exactly when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    client certificate (explicit or ADC-provided) is available.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient])
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source resolves (endpoint, cert source) for
    every combination of GOOGLE_API_USE_CLIENT_CERTIFICATE,
    GOOGLE_API_USE_MTLS_ENDPOINT and ADC cert availability.
    """
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_asset_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes supplied via ``ClientOptions`` are forwarded to the transport ctor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A ``credentials_file`` set on ClientOptions is passed through to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_asset_service_client_client_options_from_dict():
    """Client options may be given as a plain dict; api_endpoint reaches the transport."""
    with mock.patch(
        "google.cloud.asset_v1.services.asset_service.transports.AssetServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = AssetServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from file are used when the gRPC channel is created."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [asset_service.ExportAssetsRequest, dict,])
def test_export_assets(request_type, transport: str = "grpc"):
    """export_assets issues one RPC and returns a long-running operation future."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.export_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ExportAssetsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
def test_export_assets_empty_call():
    """Calling export_assets with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_assets), "__call__") as call:
        client.export_assets()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ExportAssetsRequest()
@pytest.mark.asyncio
async def test_export_assets_async(
    transport: str = "grpc_asyncio", request_type=asset_service.ExportAssetsRequest
):
    """Async export_assets issues one RPC and returns an operation future."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.export_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ExportAssetsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_export_assets_async_from_dict():
    """Exercise the async export_assets path with a dict-typed request."""
    await test_export_assets_async(request_type=dict)
def test_export_assets_field_headers():
    """The ``parent`` field is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ExportAssetsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_assets), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.export_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_export_assets_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ExportAssetsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.export_assets), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.export_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [asset_service.ListAssetsRequest, dict,])
def test_list_assets(request_type, transport: str = "grpc"):
    """list_assets issues one RPC and wraps the response in a pager."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.ListAssetsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListAssetsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_assets_empty_call():
    """Calling list_assets with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        client.list_assets()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()
@pytest.mark.asyncio
async def test_list_assets_async(
    transport: str = "grpc_asyncio", request_type=asset_service.ListAssetsRequest
):
    """Async list_assets issues one RPC and wraps the response in an async pager."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListAssetsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListAssetsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_assets_async_from_dict():
    """Exercise the async list_assets path with a dict-typed request."""
    await test_list_assets_async(request_type=dict)
def test_list_assets_field_headers():
    """The ``parent`` field is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListAssetsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        call.return_value = asset_service.ListAssetsResponse()
        client.list_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_assets_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListAssetsRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListAssetsResponse()
        )
        await client.list_assets(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_assets_flattened():
    """list_assets accepts the ``parent`` flattened keyword argument."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the underlying gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as rpc:
        # Provide a canned response for the stub to return.
        rpc.return_value = asset_service.ListAssetsResponse()

        # Invoke the method using keyword (flattened) arguments only.
        client.list_assets(parent="parent_value",)

        # Exactly one RPC must have been issued, and the flattened value
        # must appear on the request object it carried.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
def test_list_assets_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a fully-formed request and a flattened field is
    # ambiguous, so the client must reject it.
    request = asset_service.ListAssetsRequest()
    with pytest.raises(ValueError):
        client.list_assets(request, parent="parent_value")
@pytest.mark.asyncio
async def test_list_assets_flattened_async():
    """Async list_assets accepts the ``parent`` flattened keyword argument.

    Fix: the original assigned ``call.return_value`` twice; the first plain
    ``ListAssetsResponse()`` assignment was dead code, immediately overwritten
    by the awaitable ``FakeUnaryUnaryCall`` wrapper.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListAssetsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_assets(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_assets_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_assets(
            asset_service.ListAssetsRequest(), parent="parent_value",
        )
def test_list_assets_pager(transport_name: str = "grpc"):
    """ListAssetsPager transparently iterates items across all pages.

    Fix: credentials must be an ``AnonymousCredentials`` *instance*; the
    original passed the class object itself, unlike every other test here.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )

        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_assets(request={})

        assert pager._metadata == metadata

        # Materializing the pager should yield all six assets across the
        # four fake pages above.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, assets.Asset) for i in results)
def test_list_assets_pages(transport_name: str = "grpc"):
    """The ``pages`` iterator exposes each raw page with its next_page_token.

    Fix: instantiate ``AnonymousCredentials`` instead of passing the class.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        pages = list(client.list_assets(request={}).pages)
        # The last page has no next_page_token (empty string).
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_assets_async_pager():
    """The async pager yields every asset across all pages.

    Fix: instantiate ``AnonymousCredentials`` instead of passing the class.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        async_pager = await client.list_assets(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)

        # Six assets total across the four fake pages above.
        assert len(responses) == 6
        assert all(isinstance(i, assets.Asset) for i in responses)
@pytest.mark.asyncio
async def test_list_assets_async_pages():
    """The async ``pages`` iterator exposes raw pages with their tokens.

    Fix: instantiate ``AnonymousCredentials`` instead of passing the class.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_assets(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token (empty string).
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [asset_service.BatchGetAssetsHistoryRequest, dict,]
)
def test_batch_get_assets_history(request_type, transport: str = "grpc"):
    """batch_get_assets_history issues one RPC and returns the raw response."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_get_assets_history), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.BatchGetAssetsHistoryResponse()
        response = client.batch_get_assets_history(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.BatchGetAssetsHistoryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.BatchGetAssetsHistoryResponse)
def test_batch_get_assets_history_empty_call():
    """Calling batch_get_assets_history with no args sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_get_assets_history), "__call__"
    ) as call:
        client.batch_get_assets_history()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.BatchGetAssetsHistoryRequest()
@pytest.mark.asyncio
async def test_batch_get_assets_history_async(
    transport: str = "grpc_asyncio",
    request_type=asset_service.BatchGetAssetsHistoryRequest,
):
    """Async batch_get_assets_history issues one RPC and returns the response."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_get_assets_history), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.BatchGetAssetsHistoryResponse()
        )
        response = await client.batch_get_assets_history(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.BatchGetAssetsHistoryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.BatchGetAssetsHistoryResponse)
@pytest.mark.asyncio
async def test_batch_get_assets_history_async_from_dict():
    """Exercise the async batch_get_assets_history path with a dict request."""
    await test_batch_get_assets_history_async(request_type=dict)
def test_batch_get_assets_history_field_headers():
    """The ``parent`` field is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.BatchGetAssetsHistoryRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_get_assets_history), "__call__"
    ) as call:
        call.return_value = asset_service.BatchGetAssetsHistoryResponse()
        client.batch_get_assets_history(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_batch_get_assets_history_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.BatchGetAssetsHistoryRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.batch_get_assets_history), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.BatchGetAssetsHistoryResponse()
        )
        await client.batch_get_assets_history(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [asset_service.CreateFeedRequest, dict,])
def test_create_feed(request_type, transport: str = "grpc"):
    """create_feed issues one RPC and returns the Feed with all fields intact."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
            relationship_types=["relationship_types_value"],
        )
        response = client.create_feed(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
def test_create_feed_empty_call():
    """Calling create_feed with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        client.create_feed()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()
@pytest.mark.asyncio
async def test_create_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.CreateFeedRequest
):
    """Async create_feed issues one RPC and returns the Feed unchanged."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
                relationship_types=["relationship_types_value"],
            )
        )
        response = await client.create_feed(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.CreateFeedRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
@pytest.mark.asyncio
async def test_create_feed_async_from_dict():
    """Exercise the async create_feed path with a dict-typed request."""
    await test_create_feed_async(request_type=dict)
def test_create_feed_field_headers():
    """The ``parent`` field is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.CreateFeedRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        call.return_value = asset_service.Feed()
        client.create_feed(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_feed_field_headers_async():
    """Async variant: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.CreateFeedRequest()

    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.create_feed(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_feed_flattened():
    """create_feed accepts the ``parent`` flattened keyword argument."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Patch the underlying gRPC stub so no network traffic occurs.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as rpc:
        # Provide a canned response for the stub to return.
        rpc.return_value = asset_service.Feed()

        # Invoke the method using keyword (flattened) arguments only.
        client.create_feed(parent="parent_value",)

        # Exactly one RPC must have been issued, and the flattened value
        # must appear on the request object it carried.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
def test_create_feed_flattened_error():
    """Mixing a request object with flattened fields raises ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Supplying both a fully-formed request and a flattened field is
    # ambiguous, so the client must reject it.
    request = asset_service.CreateFeedRequest()
    with pytest.raises(ValueError):
        client.create_feed(request, parent="parent_value")
@pytest.mark.asyncio
async def test_create_feed_flattened_async():
    """Async create_feed accepts the ``parent`` flattened keyword argument.

    Fix: the original assigned ``call.return_value`` twice; the first plain
    ``Feed()`` assignment was dead code, immediately overwritten by the
    awaitable ``FakeUnaryUnaryCall`` wrapper.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_feed), "__call__") as call:
        # Designate an appropriate (awaitable) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_feed(parent="parent_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_feed_flattened_error_async():
    """Async variant: mixing a request object with flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_feed(
            asset_service.CreateFeedRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.GetFeedRequest, dict,])
def test_get_feed(request_type, transport: str = "grpc"):
    """GetFeed: the request reaches the stub and the Feed response is unpacked."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
            relationship_types=["relationship_types_value"],
        )
        response = client.get_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.GetFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
def test_get_feed_empty_call():
    """GetFeed: calling with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        client.get_feed()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.GetFeedRequest()
@pytest.mark.asyncio
async def test_get_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.GetFeedRequest
):
    """Async GetFeed: the request reaches the stub and the Feed response is unpacked."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
                relationship_types=["relationship_types_value"],
            )
        )
        response = await client.get_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.GetFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
@pytest.mark.asyncio
async def test_get_feed_async_from_dict():
    """Async GetFeed also accepts a plain dict as the request."""
    await test_get_feed_async(request_type=dict)
def test_get_feed_field_headers():
    """GetFeed: ``name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.GetFeedRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        call.return_value = asset_service.Feed()
        client.get_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_feed_field_headers_async():
    """Async GetFeed: ``name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.GetFeedRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.get_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_feed_flattened():
    """GetFeed: the flattened ``name`` kwarg is copied into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_feed(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_feed_flattened_error():
    """GetFeed: passing both a request object and flattened fields raises."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_feed(
            asset_service.GetFeedRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_get_feed_flattened_async():
    """Async GetFeed: the flattened ``name`` kwarg is copied into the request."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_feed), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A redundant plain ``Feed()`` assignment that was immediately
        # overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_feed(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_feed_flattened_error_async():
    """Async GetFeed: request object plus flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_feed(
            asset_service.GetFeedRequest(), name="name_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.ListFeedsRequest, dict,])
def test_list_feeds(request_type, transport: str = "grpc"):
    """ListFeeds: the request reaches the stub and the response type is correct."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.ListFeedsResponse()
        response = client.list_feeds(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListFeedsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.ListFeedsResponse)
def test_list_feeds_empty_call():
    """ListFeeds: calling with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        client.list_feeds()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListFeedsRequest()
@pytest.mark.asyncio
async def test_list_feeds_async(
    transport: str = "grpc_asyncio", request_type=asset_service.ListFeedsRequest
):
    """Async ListFeeds: the request reaches the stub and the response type is correct."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        response = await client.list_feeds(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListFeedsRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.ListFeedsResponse)
@pytest.mark.asyncio
async def test_list_feeds_async_from_dict():
    """Async ListFeeds also accepts a plain dict as the request."""
    await test_list_feeds_async(request_type=dict)
def test_list_feeds_field_headers():
    """ListFeeds: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListFeedsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        call.return_value = asset_service.ListFeedsResponse()
        client.list_feeds(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_feeds_field_headers_async():
    """Async ListFeeds: ``parent`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListFeedsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        await client.list_feeds(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_feeds_flattened():
    """ListFeeds: the flattened ``parent`` kwarg is copied into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.ListFeedsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_feeds(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_feeds_flattened_error():
    """ListFeeds: passing both a request object and flattened fields raises."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list_feeds(
            asset_service.ListFeedsRequest(), parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_feeds_flattened_async():
    """Async ListFeeds: the flattened ``parent`` kwarg is copied into the request."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_feeds), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A redundant plain ``ListFeedsResponse()`` assignment that was
        # immediately overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListFeedsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_feeds(parent="parent_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_feeds_flattened_error_async():
    """Async ListFeeds: request object plus flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.list_feeds(
            asset_service.ListFeedsRequest(), parent="parent_value",
        )
@pytest.mark.parametrize("request_type", [asset_service.UpdateFeedRequest, dict,])
def test_update_feed(request_type, transport: str = "grpc"):
    """UpdateFeed: the request reaches the stub and the Feed response is unpacked."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed(
            name="name_value",
            asset_names=["asset_names_value"],
            asset_types=["asset_types_value"],
            content_type=asset_service.ContentType.RESOURCE,
            relationship_types=["relationship_types_value"],
        )
        response = client.update_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.UpdateFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
def test_update_feed_empty_call():
    """UpdateFeed: calling with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        client.update_feed()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.UpdateFeedRequest()
@pytest.mark.asyncio
async def test_update_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.UpdateFeedRequest
):
    """Async UpdateFeed: the request reaches the stub and the Feed response is unpacked."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.Feed(
                name="name_value",
                asset_names=["asset_names_value"],
                asset_types=["asset_types_value"],
                content_type=asset_service.ContentType.RESOURCE,
                relationship_types=["relationship_types_value"],
            )
        )
        response = await client.update_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.UpdateFeedRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.Feed)
    assert response.name == "name_value"
    assert response.asset_names == ["asset_names_value"]
    assert response.asset_types == ["asset_types_value"]
    assert response.content_type == asset_service.ContentType.RESOURCE
    assert response.relationship_types == ["relationship_types_value"]
@pytest.mark.asyncio
async def test_update_feed_async_from_dict():
    """Async UpdateFeed also accepts a plain dict as the request."""
    await test_update_feed_async(request_type=dict)
def test_update_feed_field_headers():
    """UpdateFeed: ``feed.name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.UpdateFeedRequest()
    request.feed.name = "feed.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        call.return_value = asset_service.Feed()
        client.update_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "feed.name=feed.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_feed_field_headers_async():
    """Async UpdateFeed: ``feed.name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.UpdateFeedRequest()
    request.feed.name = "feed.name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        await client.update_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "feed.name=feed.name/value",) in kw["metadata"]
def test_update_feed_flattened():
    """UpdateFeed: the flattened ``feed`` kwarg is copied into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.Feed()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_feed(feed=asset_service.Feed(name="name_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].feed
        mock_val = asset_service.Feed(name="name_value")
        assert arg == mock_val
def test_update_feed_flattened_error():
    """UpdateFeed: passing both a request object and flattened fields raises."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_feed(
            asset_service.UpdateFeedRequest(),
            feed=asset_service.Feed(name="name_value"),
        )
@pytest.mark.asyncio
async def test_update_feed_flattened_async():
    """Async UpdateFeed: the flattened ``feed`` kwarg is copied into the request."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_feed), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A redundant plain ``Feed()`` assignment that was immediately
        # overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(asset_service.Feed())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_feed(feed=asset_service.Feed(name="name_value"),)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].feed
        mock_val = asset_service.Feed(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_feed_flattened_error_async():
    """Async UpdateFeed: request object plus flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_feed(
            asset_service.UpdateFeedRequest(),
            feed=asset_service.Feed(name="name_value"),
        )
@pytest.mark.parametrize("request_type", [asset_service.DeleteFeedRequest, dict,])
def test_delete_feed(request_type, transport: str = "grpc"):
    """DeleteFeed: the request reaches the stub and the response is None."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.DeleteFeedRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_feed_empty_call():
    """DeleteFeed: calling with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        client.delete_feed()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.DeleteFeedRequest()
@pytest.mark.asyncio
async def test_delete_feed_async(
    transport: str = "grpc_asyncio", request_type=asset_service.DeleteFeedRequest
):
    """Async DeleteFeed: the request reaches the stub and the response is None."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.DeleteFeedRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_feed_async_from_dict():
    """Async DeleteFeed also accepts a plain dict as the request."""
    await test_delete_feed_async(request_type=dict)
def test_delete_feed_field_headers():
    """DeleteFeed: ``name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.DeleteFeedRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        call.return_value = None
        client.delete_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_feed_field_headers_async():
    """Async DeleteFeed: ``name`` is propagated as an x-goog-request-params header."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.DeleteFeedRequest()
    request.name = "name/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_feed(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_feed_flattened():
    """DeleteFeed: the flattened ``name`` kwarg is copied into the request."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_feed(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_feed_flattened_error():
    """DeleteFeed: passing both a request object and flattened fields raises."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_feed(
            asset_service.DeleteFeedRequest(), name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_feed_flattened_async():
    """Async DeleteFeed: the flattened ``name`` kwarg is copied into the request."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_feed), "__call__") as call:
        # Designate an appropriate awaitable return value for the call.
        # (A redundant plain ``None`` assignment that was immediately
        # overwritten here has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_feed(name="name_value",)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_feed_flattened_error_async():
    """Async DeleteFeed: request object plus flattened fields raises."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_feed(
            asset_service.DeleteFeedRequest(), name="name_value",
        )
@pytest.mark.parametrize(
"request_type", [asset_service.SearchAllResourcesRequest, dict,]
)
def test_search_all_resources(request_type, transport: str = "grpc"):
client = AssetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_all_resources), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = asset_service.SearchAllResourcesResponse(
next_page_token="next_page_token_value",
)
response = client.search_all_resources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == asset_service.SearchAllResourcesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchAllResourcesPager)
assert response.next_page_token == "next_page_token_value"
def test_search_all_resources_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AssetServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_all_resources), "__call__"
) as call:
client.search_all_resources()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == asset_service.SearchAllResourcesRequest()
@pytest.mark.asyncio
async def test_search_all_resources_async(
transport: str = "grpc_asyncio",
request_type=asset_service.SearchAllResourcesRequest,
):
client = AssetServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_all_resources), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
asset_service.SearchAllResourcesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_all_resources(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == asset_service.SearchAllResourcesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchAllResourcesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_all_resources_async_from_dict():
await test_search_all_resources_async(request_type=dict)
def test_search_all_resources_field_headers():
    """The `scope` request field must be echoed in x-goog-request-params."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.SearchAllResourcesRequest()
    request.scope = "scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as call:
        call.return_value = asset_service.SearchAllResourcesResponse()
        client.search_all_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "scope=scope/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_all_resources_field_headers_async():
    """Async variant: `scope` must be echoed in x-goog-request-params."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.SearchAllResourcesRequest()
    request.scope = "scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.SearchAllResourcesResponse()
        )
        await client.search_all_resources(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "scope=scope/value",) in kw["metadata"]
def test_search_all_resources_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport's stub method so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as rpc:
        rpc.return_value = asset_service.SearchAllResourcesResponse()
        # Invoke with a truthy value for every flattened field.
        client.search_all_resources(
            scope="scope_value", query="query_value", asset_types=["asset_types_value"],
        )
        # Exactly one RPC, and the request carries each flattened value.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0][1][0]
        assert sent_request.scope == "scope_value"
        assert sent_request.query == "query_value"
        assert sent_request.asset_types == ["asset_types_value"]
def test_search_all_resources_flattened_error():
    """Supplying a request object AND flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = asset_service.SearchAllResourcesRequest()
    with pytest.raises(ValueError):
        client.search_all_resources(
            request,
            scope="scope_value",
            query="query_value",
            asset_types=["asset_types_value"],
        )
@pytest.mark.asyncio
async def test_search_all_resources_flattened_async():
    """Async variant: flattened keyword args must populate the request proto."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        # (A plain response object was previously assigned first, but that
        # assignment was dead code — immediately overwritten here.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.SearchAllResourcesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_all_resources(
            scope="scope_value", query="query_value", asset_types=["asset_types_value"],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].scope
        mock_val = "scope_value"
        assert arg == mock_val
        arg = args[0].query
        mock_val = "query_value"
        assert arg == mock_val
        arg = args[0].asset_types
        mock_val = ["asset_types_value"]
        assert arg == mock_val
@pytest.mark.asyncio
async def test_search_all_resources_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = asset_service.SearchAllResourcesRequest()
    with pytest.raises(ValueError):
        await client.search_all_resources(
            request,
            scope="scope_value",
            query="query_value",
            asset_types=["asset_types_value"],
        )
def test_search_all_resources_pager(transport_name: str = "grpc"):
    """Iterating the pager yields every result across the mocked pages.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllResourcesResponse(
                results=[
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(), assets.ResourceSearchResult(),],
            ),
            RuntimeError,
        )
        # The pager must carry the routing header for the (empty) scope field.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("scope", ""),)),
        )
        pager = client.search_all_resources(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 results across the four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, assets.ResourceSearchResult) for i in results)
def test_search_all_resources_pages(transport_name: str = "grpc"):
    """`.pages` exposes one raw page per mocked response, with its token.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllResourcesResponse(
                results=[
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(), assets.ResourceSearchResult(),],
            ),
            RuntimeError,
        )
        pages = list(client.search_all_resources(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_all_resources_async_pager():
    """Async-iterating the pager yields every result across all pages.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllResourcesResponse(
                results=[
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(), assets.ResourceSearchResult(),],
            ),
            RuntimeError,
        )
        async_pager = await client.search_all_resources(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 results across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, assets.ResourceSearchResult) for i in responses)
@pytest.mark.asyncio
async def test_search_all_resources_async_pages():
    """Async `.pages` exposes one raw page per mocked response, with token.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_resources),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllResourcesResponse(
                results=[
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                    assets.ResourceSearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllResourcesResponse(
                results=[assets.ResourceSearchResult(), assets.ResourceSearchResult(),],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_all_resources(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type", [asset_service.SearchAllIamPoliciesRequest, dict,]
)
def test_search_all_iam_policies(request_type, transport: str = "grpc"):
    """search_all_iam_policies forwards the request and returns a pager."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.SearchAllIamPoliciesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.search_all_iam_policies(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.SearchAllIamPoliciesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.SearchAllIamPoliciesPager)
        assert response.next_page_token == "next_page_token_value"
def test_search_all_iam_policies_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default SearchAllIamPoliciesRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport's stub method so no real RPC is issued.
    stub = type(client.transport.search_all_iam_policies)
    with mock.patch.object(stub, "__call__") as rpc:
        client.search_all_iam_policies()
        rpc.assert_called()
        # The request forwarded to the stub is the empty default proto.
        sent_request = rpc.mock_calls[0][1][0]
        assert sent_request == asset_service.SearchAllIamPoliciesRequest()
@pytest.mark.asyncio
async def test_search_all_iam_policies_async(
    transport: str = "grpc_asyncio",
    request_type=asset_service.SearchAllIamPoliciesRequest,
):
    """search_all_iam_policies on the async client returns an async pager."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.SearchAllIamPoliciesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.search_all_iam_policies(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.SearchAllIamPoliciesRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.SearchAllIamPoliciesAsyncPager)
        assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_all_iam_policies_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_search_all_iam_policies_async("grpc_asyncio", dict)
def test_search_all_iam_policies_field_headers():
    """The `scope` request field must be echoed in x-goog-request-params."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.SearchAllIamPoliciesRequest()
    request.scope = "scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        call.return_value = asset_service.SearchAllIamPoliciesResponse()
        client.search_all_iam_policies(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "scope=scope/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_all_iam_policies_field_headers_async():
    """Async variant: `scope` must be echoed in x-goog-request-params."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.SearchAllIamPoliciesRequest()
    request.scope = "scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.SearchAllIamPoliciesResponse()
        )
        await client.search_all_iam_policies(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "scope=scope/value",) in kw["metadata"]
def test_search_all_iam_policies_flattened():
    """Flattened keyword arguments must be copied into the request proto."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Patch the transport's stub method so no real RPC is issued.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as rpc:
        rpc.return_value = asset_service.SearchAllIamPoliciesResponse()
        # Invoke with a truthy value for every flattened field.
        client.search_all_iam_policies(
            scope="scope_value", query="query_value",
        )
        # Exactly one RPC, and the request carries each flattened value.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0][1][0]
        assert sent_request.scope == "scope_value"
        assert sent_request.query == "query_value"
def test_search_all_iam_policies_flattened_error():
    """Supplying a request object AND flattened fields must raise ValueError."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = asset_service.SearchAllIamPoliciesRequest()
    with pytest.raises(ValueError):
        client.search_all_iam_policies(
            request,
            scope="scope_value",
            query="query_value",
        )
@pytest.mark.asyncio
async def test_search_all_iam_policies_flattened_async():
    """Async variant: flattened keyword args must populate the request proto."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        # Designate an appropriate awaitable return value for the call.
        # (A plain response object was previously assigned first, but that
        # assignment was dead code — immediately overwritten here.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.SearchAllIamPoliciesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.search_all_iam_policies(
            scope="scope_value", query="query_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].scope
        mock_val = "scope_value"
        assert arg == mock_val
        arg = args[0].query
        mock_val = "query_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_search_all_iam_policies_flattened_error_async():
    """Async variant: request object plus flattened fields must raise."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    request = asset_service.SearchAllIamPoliciesRequest()
    with pytest.raises(ValueError):
        await client.search_all_iam_policies(
            request,
            scope="scope_value",
            query="query_value",
        )
def test_search_all_iam_policies_pager(transport_name: str = "grpc"):
    """Iterating the pager yields every result across the mocked pages.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[assets.IamPolicySearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
            ),
            RuntimeError,
        )
        # The pager must carry the routing header for the (empty) scope field.
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("scope", ""),)),
        )
        pager = client.search_all_iam_policies(request={})
        assert pager._metadata == metadata
        # 3 + 0 + 1 + 2 results across the four pages.
        results = [i for i in pager]
        assert len(results) == 6
        assert all(isinstance(i, assets.IamPolicySearchResult) for i in results)
def test_search_all_iam_policies_pages(transport_name: str = "grpc"):
    """`.pages` exposes one raw page per mocked response, with its token.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies), "__call__"
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[assets.IamPolicySearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.search_all_iam_policies(request={}).pages)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_all_iam_policies_async_pager():
    """Async-iterating the pager yields every result across all pages.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[assets.IamPolicySearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.search_all_iam_policies(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 results across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, assets.IamPolicySearchResult) for i in responses)
@pytest.mark.asyncio
async def test_search_all_iam_policies_async_pages():
    """Async `.pages` exposes one raw page per mocked response, with token.

    Fix: instantiate ``AnonymousCredentials()`` — the class object itself
    was being passed as the ``credentials`` argument.
    """
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.search_all_iam_policies),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
                next_page_token="abc",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[], next_page_token="def",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[assets.IamPolicySearchResult(),], next_page_token="ghi",
            ),
            asset_service.SearchAllIamPoliciesResponse(
                results=[
                    assets.IamPolicySearchResult(),
                    assets.IamPolicySearchResult(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.search_all_iam_policies(request={})).pages:
            pages.append(page_)
        # The last page has no next_page_token, hence the trailing "".
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [asset_service.AnalyzeIamPolicyRequest, dict,])
def test_analyze_iam_policy(request_type, transport: str = "grpc"):
    """analyze_iam_policy forwards the request and returns the response proto."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.AnalyzeIamPolicyResponse(fully_explored=True,)
        response = client.analyze_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeIamPolicyRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, asset_service.AnalyzeIamPolicyResponse)
        assert response.fully_explored is True
def test_analyze_iam_policy_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default AnalyzeIamPolicyRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport's stub method so no real RPC is issued.
    stub = type(client.transport.analyze_iam_policy)
    with mock.patch.object(stub, "__call__") as rpc:
        client.analyze_iam_policy()
        rpc.assert_called()
        # The request forwarded to the stub is the empty default proto.
        sent_request = rpc.mock_calls[0][1][0]
        assert sent_request == asset_service.AnalyzeIamPolicyRequest()
@pytest.mark.asyncio
async def test_analyze_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=asset_service.AnalyzeIamPolicyRequest
):
    """analyze_iam_policy on the async client returns the response proto."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.AnalyzeIamPolicyResponse(fully_explored=True,)
        )
        response = await client.analyze_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeIamPolicyRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, asset_service.AnalyzeIamPolicyResponse)
        assert response.fully_explored is True
@pytest.mark.asyncio
async def test_analyze_iam_policy_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_analyze_iam_policy_async("grpc_asyncio", dict)
def test_analyze_iam_policy_field_headers():
    """`analysis_query.scope` must be echoed in x-goog-request-params."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeIamPolicyRequest()
    request.analysis_query.scope = "analysis_query.scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy), "__call__"
    ) as call:
        call.return_value = asset_service.AnalyzeIamPolicyResponse()
        client.analyze_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "analysis_query.scope=analysis_query.scope/value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_analyze_iam_policy_field_headers_async():
    """Async variant: nested scope must be echoed in x-goog-request-params."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeIamPolicyRequest()
    request.analysis_query.scope = "analysis_query.scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.AnalyzeIamPolicyResponse()
        )
        await client.analyze_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "analysis_query.scope=analysis_query.scope/value",
        ) in kw["metadata"]
@pytest.mark.parametrize(
    "request_type", [asset_service.AnalyzeIamPolicyLongrunningRequest, dict,]
)
def test_analyze_iam_policy_longrunning(request_type, transport: str = "grpc"):
    """analyze_iam_policy_longrunning returns a long-running-operation future."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy_longrunning), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")
        response = client.analyze_iam_policy_longrunning(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeIamPolicyLongrunningRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, future.Future)
def test_analyze_iam_policy_longrunning_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default AnalyzeIamPolicyLongrunningRequest."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Patch the transport's stub method so no real RPC is issued.
    stub = type(client.transport.analyze_iam_policy_longrunning)
    with mock.patch.object(stub, "__call__") as rpc:
        client.analyze_iam_policy_longrunning()
        rpc.assert_called()
        # The request forwarded to the stub is the empty default proto.
        sent_request = rpc.mock_calls[0][1][0]
        assert sent_request == asset_service.AnalyzeIamPolicyLongrunningRequest()
@pytest.mark.asyncio
async def test_analyze_iam_policy_longrunning_async(
    transport: str = "grpc_asyncio",
    request_type=asset_service.AnalyzeIamPolicyLongrunningRequest,
):
    """Async variant: the long-running call returns an operation future."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy_longrunning), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )
        response = await client.analyze_iam_policy_longrunning(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeIamPolicyLongrunningRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_analyze_iam_policy_longrunning_async_from_dict():
    """Exercise the async path with a plain dict as the request body."""
    await test_analyze_iam_policy_longrunning_async("grpc_asyncio", dict)
def test_analyze_iam_policy_longrunning_field_headers():
    """`analysis_query.scope` must be echoed in x-goog-request-params."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeIamPolicyLongrunningRequest()
    request.analysis_query.scope = "analysis_query.scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy_longrunning), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")
        client.analyze_iam_policy_longrunning(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "analysis_query.scope=analysis_query.scope/value",
        ) in kw["metadata"]
@pytest.mark.asyncio
async def test_analyze_iam_policy_longrunning_field_headers_async():
    """Async variant: nested scope must be echoed in x-goog-request-params."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeIamPolicyLongrunningRequest()
    request.analysis_query.scope = "analysis_query.scope/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.analyze_iam_policy_longrunning), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )
        await client.analyze_iam_policy_longrunning(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert (
            "x-goog-request-params",
            "analysis_query.scope=analysis_query.scope/value",
        ) in kw["metadata"]
@pytest.mark.parametrize("request_type", [asset_service.AnalyzeMoveRequest, dict,])
def test_analyze_move(request_type, transport: str = "grpc"):
    """analyze_move forwards the request and returns the response proto."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.analyze_move), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.AnalyzeMoveResponse()
        response = client.analyze_move(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeMoveRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, asset_service.AnalyzeMoveResponse)
def test_analyze_move_empty_call():
    """AnalyzeMove with no arguments still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.analyze_move), "__call__") as call:
        client.analyze_move()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # With no request given, an empty default request must be constructed.
        assert args[0] == asset_service.AnalyzeMoveRequest()
@pytest.mark.asyncio
async def test_analyze_move_async(
    transport: str = "grpc_asyncio", request_type=asset_service.AnalyzeMoveRequest
):
    """Async AnalyzeMove invokes the stub and returns the expected type."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.analyze_move), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.AnalyzeMoveResponse()
        )
        response = await client.analyze_move(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.AnalyzeMoveRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, asset_service.AnalyzeMoveResponse)
@pytest.mark.asyncio
async def test_analyze_move_async_from_dict():
    """Re-run the async AnalyzeMove test with a dict request payload."""
    await test_analyze_move_async(request_type=dict)
def test_analyze_move_field_headers():
    """Verify the routing header derived from ``resource`` is sent (sync)."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeMoveRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.analyze_move), "__call__") as call:
        call.return_value = asset_service.AnalyzeMoveResponse()
        client.analyze_move(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_analyze_move_field_headers_async():
    """Verify the routing header derived from ``resource`` is sent (async)."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.AnalyzeMoveRequest()
    request.resource = "resource/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.analyze_move), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.AnalyzeMoveResponse()
        )
        await client.analyze_move(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"]
def test_credentials_transport_error():
    """Constructor must reject mutually exclusive credential/transport combos."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AssetServiceClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A custom transport instance is adopted verbatim by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = AssetServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """Both sync and async gRPC transports expose a usable channel."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel

    transport = transports.AssetServiceGrpcAsyncIOTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """The sync client defaults to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.AssetServiceGrpcTransport,)
def test_asset_service_base_transport_error():
    """Base transport rejects simultaneous credentials and credentials_file."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_asset_service_base_transport():
    """Every abstract method on the base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.asset_v1.services.asset_service.transports.AssetServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "export_assets",
        "list_assets",
        "batch_get_assets_history",
        "create_feed",
        "get_feed",
        "list_feeds",
        "update_feed",
        "delete_feed",
        "search_all_resources",
        "search_all_iam_policies",
        "analyze_iam_policy",
        "analyze_iam_policy_longrunning",
        "analyze_move",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()

    # Additionally, the LRO client (a property) should
    # also raise NotImplementedError
    with pytest.raises(NotImplementedError):
        transport.operations_client
def test_asset_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the expected scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.asset_v1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AssetServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_asset_service_base_transport_with_adc():
    """Base transport falls back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.asset_v1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AssetServiceTransport()
        adc.assert_called_once()
def test_asset_service_auth_adc():
    """The client requests ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        AssetServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_asset_service_transport_auth_adc(transport_class):
    """Transports forward explicit scopes/quota project to the ADC lookup."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.AssetServiceGrpcTransport, grpc_helpers),
        (transports.AssetServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_asset_service_transport_create_channel(transport_class, grpc_helpers):
    """Channel creation receives the host, creds, scopes and gRPC options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])

        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup: explicit ssl_channel_credentials wins; otherwise a cert
    source callback is used to build the SSL credentials."""
    cred = ga_credentials.AnonymousCredentials()

    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )

    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_asset_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_host_with_port():
    """An endpoint with an explicit port is preserved as-is."""
    options = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com:8000")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=options,
    )
    assert client.transport._host == "cloudasset.googleapis.com:8000"
def test_asset_service_grpc_transport_channel():
    """A caller-supplied channel is used verbatim by the sync gRPC transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Compare against None with identity, not equality (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
def test_asset_service_grpc_asyncio_transport_channel():
    """A caller-supplied channel is used verbatim by the asyncio gRPC transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Compare against None with identity, not equality (PEP 8 / flake8 E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint/client_cert_source args still build an
    mTLS channel (and emit a DeprecationWarning)."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred

            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel

            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_adc(transport_class):
    """With api_mtls_endpoint but no cert source, SSL creds come from ADC."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_asset_service_grpc_lro_client():
    """The sync transport lazily builds and caches one OperationsClient."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsClient,)

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_asset_service_grpc_lro_async_client():
    """The async transport lazily builds and caches one OperationsAsyncClient."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client
def test_access_level_path():
    """access_level_path renders the accessLevels resource pattern."""
    rendered = AssetServiceClient.access_level_path("squid", "clam")
    assert rendered == "accessPolicies/squid/accessLevels/clam"
def test_parse_access_level_path():
    """Round-trip: parse_access_level_path inverts access_level_path."""
    fields = {"access_policy": "whelk", "access_level": "octopus"}
    rendered = AssetServiceClient.access_level_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_access_level_path(rendered) == fields
def test_access_policy_path():
    """access_policy_path renders the accessPolicies resource pattern."""
    rendered = AssetServiceClient.access_policy_path("oyster")
    assert rendered == "accessPolicies/oyster"
def test_parse_access_policy_path():
    """Round-trip: parse_access_policy_path inverts access_policy_path."""
    fields = {"access_policy": "nudibranch"}
    rendered = AssetServiceClient.access_policy_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_access_policy_path(rendered) == fields
def test_asset_path():
    """asset_path returns the wildcard asset resource pattern."""
    # The original built this via a no-op `"*".format()`; the plain literal
    # is equivalent and clearer.
    expected = "*"
    actual = AssetServiceClient.asset_path()
    assert expected == actual
def test_feed_path():
    """feed_path renders the projects/{project}/feeds/{feed} pattern."""
    rendered = AssetServiceClient.feed_path("cuttlefish", "mussel")
    assert rendered == "projects/cuttlefish/feeds/mussel"
def test_parse_feed_path():
    """Round-trip: parse_feed_path inverts feed_path."""
    fields = {"project": "winkle", "feed": "nautilus"}
    rendered = AssetServiceClient.feed_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_feed_path(rendered) == fields
def test_inventory_path():
    """inventory_path renders the instance inventory resource pattern."""
    rendered = AssetServiceClient.inventory_path("scallop", "abalone", "squid")
    assert rendered == "projects/scallop/locations/abalone/instances/squid/inventory"
def test_parse_inventory_path():
    """Round-trip: parse_inventory_path inverts inventory_path."""
    fields = {"project": "clam", "location": "whelk", "instance": "octopus"}
    rendered = AssetServiceClient.inventory_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_inventory_path(rendered) == fields
def test_service_perimeter_path():
    """service_perimeter_path renders the servicePerimeters pattern."""
    rendered = AssetServiceClient.service_perimeter_path("oyster", "nudibranch")
    assert rendered == "accessPolicies/oyster/servicePerimeters/nudibranch"
def test_parse_service_perimeter_path():
    """Round-trip: parse_service_perimeter_path inverts service_perimeter_path."""
    fields = {"access_policy": "cuttlefish", "service_perimeter": "mussel"}
    rendered = AssetServiceClient.service_perimeter_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_service_perimeter_path(rendered) == fields
def test_common_billing_account_path():
    """common_billing_account_path renders the billingAccounts pattern."""
    rendered = AssetServiceClient.common_billing_account_path("winkle")
    assert rendered == "billingAccounts/winkle"
def test_parse_common_billing_account_path():
    """Round-trip: parse_common_billing_account_path inverts the builder."""
    fields = {"billing_account": "nautilus"}
    rendered = AssetServiceClient.common_billing_account_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_common_billing_account_path(rendered) == fields
def test_common_folder_path():
    """common_folder_path renders the folders/{folder} pattern."""
    rendered = AssetServiceClient.common_folder_path("scallop")
    assert rendered == "folders/scallop"
def test_parse_common_folder_path():
    """Round-trip: parse_common_folder_path inverts common_folder_path."""
    fields = {"folder": "abalone"}
    rendered = AssetServiceClient.common_folder_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_common_folder_path(rendered) == fields
def test_common_organization_path():
    """common_organization_path renders the organizations pattern."""
    rendered = AssetServiceClient.common_organization_path("squid")
    assert rendered == "organizations/squid"
def test_parse_common_organization_path():
    """Round-trip: parse_common_organization_path inverts the builder."""
    fields = {"organization": "clam"}
    rendered = AssetServiceClient.common_organization_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_common_organization_path(rendered) == fields
def test_common_project_path():
    """common_project_path renders the projects/{project} pattern."""
    rendered = AssetServiceClient.common_project_path("whelk")
    assert rendered == "projects/whelk"
def test_parse_common_project_path():
    """Round-trip: parse_common_project_path inverts common_project_path."""
    fields = {"project": "octopus"}
    rendered = AssetServiceClient.common_project_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_common_project_path(rendered) == fields
def test_common_location_path():
    """common_location_path renders the project/location pattern."""
    rendered = AssetServiceClient.common_location_path("oyster", "nudibranch")
    assert rendered == "projects/oyster/locations/nudibranch"
def test_parse_common_location_path():
    """Round-trip: parse_common_location_path inverts common_location_path."""
    fields = {"project": "cuttlefish", "location": "mussel"}
    rendered = AssetServiceClient.common_location_path(**fields)
    # Check that the path construction is reversible.
    assert AssetServiceClient.parse_common_location_path(rendered) == fields
def test_client_with_default_client_info():
    """client_info is forwarded to _prep_wrapped_messages for both the
    client constructor and a directly-instantiated transport."""
    client_info = gapic_v1.client_info.ClientInfo()

    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = AssetServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client context closes the gRPC channel exactly once."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Exiting the sync client context closes the transport channel once."""
    # Map transport name -> attribute holding its channel. Renamed from
    # `transports` so the imported `transports` module is not shadowed.
    channel_attrs = {
        "grpc": "_grpc_channel",
    }

    for transport, close_name in channel_attrs.items():
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    # Renamed from `transports` so the imported `transports` module is not
    # shadowed inside this function.
    transport_names = [
        "grpc",
    ]
    for transport in transport_names:
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport),
        (AssetServiceAsyncClient, transports.AssetServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in client options is exchanged for API-key credentials and
    passed to the transport constructor."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-asset | tests/unit/gapic/asset_v1/test_asset_service.py | Python | apache-2.0 | 162,124 | [
"Octopus"
] | e5d77f78a834d5b14b902c240b2c6bfdc83613bb948b24897b7ce0fd91fbecb0 |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metropolis-Hastings Monte Carlo.
NOTE: these functions operate on batches of MCMC configurations and should not
be vmapped.
"""
from ferminet import constants
import jax
from jax import lax
from jax import numpy as jnp
def _harmonic_mean(x, atoms):
"""Calculates the harmonic mean of each electron distance to the nuclei.
Args:
x: electron positions. Shape (batch, nelectrons, 1, ndim). Note the third
dimension is already expanded, which allows for avoiding additional
reshapes in the MH algorithm.
atoms: atom positions. Shape (natoms, ndim)
Returns:
Array of shape (batch, nelectrons, 1, 1), where the (i, j, 0, 0) element is
the harmonic mean of the distance of the j-th electron of the i-th MCMC
configuration to all atoms.
"""
ae = x - atoms[None, ...]
r_ae = jnp.linalg.norm(ae, axis=-1, keepdims=True)
return 1.0 / jnp.mean(1.0 / r_ae, axis=-2, keepdims=True)
def _log_prob_gaussian(x, mu, sigma):
"""Calculates the log probability of Gaussian with diagonal covariance.
Args:
x: Positions. Shape (batch, nelectron, 1, ndim) - as used in mh_update.
mu: means of Gaussian distribution. Same shape as or broadcastable to x.
sigma: standard deviation of the distribution. Same shape as or
broadcastable to x.
Returns:
Log probability of Gaussian distribution with shape as required for
mh_update - (batch, nelectron, 1, 1).
"""
numer = jnp.sum(-0.5 * ((x - mu)**2) / (sigma**2), axis=[1, 2, 3])
denom = x.shape[-1] * jnp.sum(jnp.log(sigma), axis=[1, 2, 3])
return numer - denom
def mh_update(params,
              f,
              x1,
              key,
              lp_1,
              num_accepts,
              stddev=0.02,
              atoms=None,
              i=0):
  """Performs one Metropolis-Hastings step using an all-electron move.

  Args:
    params: Wavefuncttion parameters.
    f: Callable with signature f(params, x) which returns the log of the
      wavefunction (i.e. the sqaure root of the log probability of x).
    x1: Initial MCMC configurations. Shape (batch, nelectrons*ndim).
    key: RNG state.
    lp_1: log probability of f evaluated at x1 given parameters params.
    num_accepts: Number of MH move proposals accepted.
    stddev: width of Gaussian move proposal.
    atoms: If not None, atom positions. Shape (natoms, 3). If present, then the
      Metropolis-Hastings move proposals are drawn from a Gaussian distribution,
      N(0, (h_i stddev)^2), where h_i is the harmonic mean of distances between
      the i-th electron and the atoms, otherwise the move proposal drawn from
      N(0, stddev^2).
    i: Ignored.

  Returns:
    (x, key, lp, num_accepts), where:
      x: Updated MCMC configurations.
      key: RNG state.
      lp: log probability of f evaluated at x.
      num_accepts: update running total of number of accepted MH moves.
  """
  del i  # electron index ignored for all-electron moves
  key, subkey = jax.random.split(key)
  if atoms is None:  # symmetric proposal, same stddev everywhere
    x2 = x1 + stddev * jax.random.normal(subkey, shape=x1.shape)  # proposal
    lp_2 = 2. * f(params, x2)  # log prob of proposal; factor 2: p = |psi|^2
    ratio = lp_2 - lp_1
  else:  # asymmetric proposal, stddev propto harmonic mean of nuclear distances
    n = x1.shape[0]
    # Expand to (batch, nelectrons, 1, 3) so per-electron widths broadcast.
    x1 = jnp.reshape(x1, [n, -1, 1, 3])
    hmean1 = _harmonic_mean(x1, atoms)  # harmonic mean of distances to nuclei

    x2 = x1 + stddev * hmean1 * jax.random.normal(subkey, shape=x1.shape)
    lp_2 = 2. * f(params, x2)  # log prob of proposal
    hmean2 = _harmonic_mean(x2, atoms)  # needed for probability of reverse jump

    lq_1 = _log_prob_gaussian(x1, x2, stddev * hmean1)  # forward probability
    lq_2 = _log_prob_gaussian(x2, x1, stddev * hmean2)  # reverse probability
    # Metropolis-Hastings ratio with asymmetric proposal correction.
    ratio = lp_2 + lq_2 - lp_1 - lq_1

    # Flatten back to (batch, nelectrons*ndim) before the accept/reject select.
    x1 = jnp.reshape(x1, [n, -1])
    x2 = jnp.reshape(x2, [n, -1])
  key, subkey = jax.random.split(key)
  rnd = jnp.log(jax.random.uniform(subkey, shape=lp_1.shape))
  cond = ratio > rnd
  # Per-walker accept/reject; cond is broadcast over the position dimension.
  x_new = jnp.where(cond[..., None], x2, x1)
  lp_new = jnp.where(cond, lp_2, lp_1)
  num_accepts += jnp.sum(cond)

  return x_new, key, lp_new, num_accepts
def mh_one_electron_update(params,
                           f,
                           x1,
                           key,
                           lp_1,
                           num_accepts,
                           stddev=0.02,
                           atoms=None,
                           i=0):
  """Performs one Metropolis-Hastings step for a single electron.

  Args:
    params: Wavefuncttion parameters.
    f: Callable with signature f(params, x) which returns the log of the
      wavefunction (i.e. the sqaure root of the log probability of x).
    x1: Initial MCMC configurations. Shape (batch, nelectrons*ndim).
    key: RNG state.
    lp_1: log probability of f evaluated at x1 given parameters params.
    num_accepts: Number of MH move proposals accepted.
    stddev: width of Gaussian move proposal.
    atoms: Ignored. Asymmetric move proposals are not implemented for
      single-electron moves.
    i: index of electron to move.

  Returns:
    (x, key, lp, num_accepts), where:
      x: Updated MCMC configurations.
      key: RNG state.
      lp: log probability of f evaluated at x.
      num_accepts: update running total of number of accepted MH moves.

  Raises:
    NotImplementedError: if atoms is supplied.
  """
  key, subkey = jax.random.split(key)
  n = x1.shape[0]
  # Expand to (batch, nelectrons, 1, 3) so a single electron can be indexed.
  x1 = jnp.reshape(x1, [n, -1, 1, 3])
  nelec = x1.shape[1]
  ii = i % nelec  # wrap the loop counter onto a valid electron index
  if atoms is None:  # symmetric proposal, same stddev everywhere
    # Perturb only electron ii; all other electrons are unchanged.
    x2 = x1.at[:, ii].add(stddev *
                          jax.random.normal(subkey, shape=x1[:, ii].shape))
    lp_2 = 2. * f(params, x2)  # log prob of proposal; factor 2: p = |psi|^2
    ratio = lp_2 - lp_1
  else:  # asymmetric proposal, stddev propto harmonic mean of nuclear distances
    raise NotImplementedError('Still need to work out reverse probabilities '
                              'for asymmetric moves.')

  # Flatten back to (batch, nelectrons*ndim) before the accept/reject select.
  x1 = jnp.reshape(x1, [n, -1])
  x2 = jnp.reshape(x2, [n, -1])
  key, subkey = jax.random.split(key)
  rnd = jnp.log(jax.random.uniform(subkey, shape=lp_1.shape))
  cond = ratio > rnd
  # Per-walker accept/reject of the single-electron move.
  x_new = jnp.where(cond[..., None], x2, x1)
  lp_new = jnp.where(cond, lp_2, lp_1)
  num_accepts += jnp.sum(cond)

  return x_new, key, lp_new, num_accepts
def make_mcmc_step(batch_network,
                   batch_per_device,
                   steps=10,
                   atoms=None,
                   one_electron_moves=False):
  """Creates the MCMC step function.

  Args:
    batch_network: function, signature (params, x), which evaluates the log of
      the wavefunction (square root of the log probability distribution) at x
      given params. Inputs and outputs are batched.
    batch_per_device: Batch size per device.
    steps: Number of MCMC moves to attempt in a single call to the MCMC step
      function.
    atoms: atom positions. If given, an asymmetric move proposal is used based
      on the harmonic mean of electron-atom distances for each electron.
      Otherwise the (conventional) normal distribution is used.
    one_electron_moves: If true, attempt to move one electron at a time.
      Otherwise, attempt one all-electron move per MCMC step.

  Returns:
    Callable which performs the set of MCMC steps.
  """
  # Choose the per-move update once so the jitted step closes over a constant.
  inner_fun = mh_one_electron_update if one_electron_moves else mh_update

  @jax.jit
  def mcmc_step(params, data, key, width):
    """Performs a set of MCMC steps.

    Args:
      params: parameters to pass to the network.
      data: (batched) MCMC configurations to pass to the network.
      key: RNG state.
      width: standard deviation to use in the move proposal.

    Returns:
      (data, pmove), where data is the updated MCMC configurations, key the
      updated RNG state and pmove the average probability a move was accepted.
    """
    def step_fn(i, x):
      # x is the loop carry: (configurations, key, logprob, num_accepts).
      return inner_fun(
          params, batch_network, *x, stddev=width, atoms=atoms, i=i)

    # Configurations are flattened (x, y, z) triples per electron.
    nelec = data.shape[-1] // 3
    # One-electron mode needs nelec sub-moves to visit every electron once
    # per nominal step.
    nsteps = nelec * steps if one_electron_moves else steps
    logprob = 2. * batch_network(params, data)
    data, key, _, num_accepts = lax.fori_loop(0, nsteps, step_fn,
                                              (data, key, logprob, 0.))
    pmove = jnp.sum(num_accepts) / (nsteps * batch_per_device)
    # presumably averages the acceptance rate across devices -- TODO confirm
    # against ferminet.constants.pmean.
    pmove = constants.pmean(pmove)
    return data, pmove

  return mcmc_step
| deepmind/ferminet | ferminet/mcmc.py | Python | apache-2.0 | 9,058 | [
"Gaussian"
] | ff991c5625a806d84d3583e44af73b0646c6b231cbc788646d67d554a3783307 |
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file position.py
A few adjustments to the Position classes at the Python layer.
"""
from . import _galsim
def Position_repr(self):
    """Return an eval-style representation, e.g. "PositionD(x=1.0, y=2.0")."""
    return "{0}(x={1}, y={2})".format(self.__class__.__name__, self.x, self.y)
def Position_str(self):
    """Return a compact human-readable form, e.g. "(1.0, 2.0)"."""
    return "({0}, {1})".format(self.x, self.y)
def Position_getinitargs(self):
    """Return the (x, y) constructor arguments; used for pickle support."""
    return (self.x, self.y)
# Monkey-patch the C++-layer position classes with Python-friendly repr/str
# and pickle support, and install a shared docstring on both classes.
for Class in (_galsim.PositionD, _galsim.PositionI):
    Class.__repr__ = Position_repr
    Class.__str__ = Position_str
    Class.__getinitargs__ = Position_getinitargs
    Class.__doc__ = """A class for representing 2D positions on the plane.

    PositionD describes positions with floating point values in `x` and `y`.
    PositionI described positions with integer values in `x` and `y`.

    Initialization
    --------------

    For the float-valued position class, example inits include:

        >>> pos = galsim.PositionD(x=0.5, y=-0.5)
        >>> pos = galsim.PositionD(0.5, -0.5)

    And for the integer-valued position class, example inits include:

        >>> pos = galsim.PositionI(x=45, y=13)
        >>> pos = galsim.PositionI(45, 13)

    Attributes
    ----------
    For an instance `pos` as instantiated above, `pos.x` and `pos.y` store the x and y values of the
    position.

    Arithmetic
    ----------
    Most arithmetic that makes sense for a position is allowed:

        >>> pos1 + pos2
        >>> pos1 - pos2
        >>> pos * x
        >>> pos / x
        >>> -pos
        >>> pos1 += pos2
        >>> pos1 -= pos2
        >>> pos *= x
        >>> pos -= x

    Note though that the types generally need to match.  For example, you cannot multiply
    a PositionI by a float or add a PositionI to a PositionD.
    """

del Class  # cleanup public namespace
| mardom/GalSim | galsim/position.py | Python | gpl-3.0 | 2,525 | [
"Galaxy"
] | 300c094094736480594b75712fd9a20ae0ad68842ecc54d5eede26b91fbe3d04 |
"""
See more here: http://www.pymolwiki.org/index.php/center_of_mass
DESCRIPTION
Places a pseudoatom at the center of mass
Author: Sean Law
Michigan State University
slaw (at) msu . edu
SEE ALSO
pseudoatom, get_com
"""
from __future__ import print_function
from pymol import cmd
def com(selection, state=None, mass=None, object=None, quiet=0, **kwargs):
    """Place a pseudoatom at the center of mass of a selection and show it.

    Args:
        selection: PyMOL atom selection string.
        state: if given, compute the COM for that single state; otherwise a
            COM pseudoatom is created for every state.
        mass: if not None, the COM is mass-weighted (see get_com).
        object: name of the pseudoatom object to create; auto-generated from
            the selection when omitted.
        quiet: suppress printing of the computed coordinates when truthy.
        **kwargs: forwarded to cmd.pseudoatom.
    """
    quiet = int(quiet)
    if object is None:
        try:
            object = cmd.get_legal_name(selection)
            object = cmd.get_unused_name(object + "_COM", 0)
        except AttributeError:
            # Older PyMOL builds lack get_legal_name/get_unused_name.
            object = 'COM'
    cmd.delete(object)

    if state is not None:
        x, y, z = get_com(selection, mass=mass, quiet=quiet)
        if not quiet:
            print("[%f %f %f]" % (x, y, z))
        cmd.pseudoatom(object, pos=[x, y, z], **kwargs)
        cmd.show("spheres", object)
    else:
        for i in range(cmd.count_states()):
            x, y, z = get_com(selection, mass=mass, state=i + 1, quiet=quiet)
            if not quiet:
                # BUG FIX: the previous format string had three %f
                # placeholders but four arguments (state index plus x, y, z),
                # which raised TypeError whenever it ran.
                print("State %d: [%f, %f, %f]" % (i + 1, x, y, z))
            cmd.pseudoatom(object, pos=[x, y, z], state=i + 1, **kwargs)
        cmd.show("spheres", 'last ' + object)

cmd.extend("com", com)
def get_com(selection, state=1, mass=None, quiet=1):
    """
    DESCRIPTION

    Calculate the center of mass of a selection, optionally mass-weighted.

    Author: Sean Law
    Michigan State University
    slaw (at) msu . edu
    """
    quiet = int(quiet)
    if mass != None and not quiet:
        print("Calculating mass-weighted COM")

    model = cmd.get_model(selection, int(state))
    totals = [0.0, 0.0, 0.0]
    total_weight = 0.0
    for atom in model.atom:
        # Unit weight gives the plain centroid; atomic mass gives the true COM.
        weight = 1.0 if mass is None else atom.get_mass()
        total_weight += weight
        for axis in range(3):
            totals[axis] += weight * atom.coord[axis]
    # NOTE(review): an empty selection raises ZeroDivisionError, matching the
    # original implementation's behavior.
    return [component / total_weight for component in totals]

cmd.extend("get_com", get_com)
# vi:expandtab:sw=3 | joy13975/elfin | pymol_scripts/extensions/deprecated/center_of_mass.py | Python | mit | 2,243 | [
"PyMOL"
] | e00f46e3ae34b3bbc73cd6699c5dacc2234deaa223d50d55e279c9081ed6a07b |
#
# basic run through the PDB with dynamically-sized labels
#
from glob import glob
import threading
import time
from pymol import cmd
import sys, os, os.path
import traceback
ent_dir = "pdb"

def load():
    """Load every entry under ent_dir, label CA/P atoms with dynamic-size
    labels, and step through all states of multi-state entries.

    Progress is written to the real stderr (sys.__stderr__) so it is visible
    even when PyMOL redirects sys.stderr.
    """
    cmd.set("valence")
    # Use the ent_dir constant instead of a duplicated hard-coded "pdb" path.
    paths = glob(ent_dir + "/*/*")
    cmd.set("label_size", -1.0)
    for path in paths:  # avoid shadowing the builtins `list` and `file`
        try:
            cmd.delete('pdb')
            cmd.load(path, 'pdb')
            cmd.set_title('pdb', 1, os.path.split(path)[-1])
            cmd.rewind()
            cmd.orient('pdb')
            cmd.label("polymer and (name ca or elem P)",
                      "'//%s/%s/%s`%s/%s'%(segi,chain,resn,resi,name)")
            cmd.refresh()
            sys.__stderr__.write(".")
            sys.__stderr__.flush()
            n = cmd.count_states()
            if n > 1:
                cmd.rewind()
                sys.__stderr__.write(path + "\n")
                sys.__stderr__.flush()
                for a in range(1, n + 1):
                    cmd.forward()
                    cmd.refresh()
        except Exception:
            # Keep going through the remaining entries; this is a stress test.
            traceback.print_exc()

cmd.feedback('disable', 'symmetry objectmolecule executive', 'everything')
load()
| gratefulfrog/lib | python/pymol/pymol_path/test/inp/B09.py | Python | gpl-2.0 | 1,119 | [
"PyMOL"
] | 87789fdab649e2097a47df15be2ef5d6f583287b3bf5de0e4723011efa010abb |
"""Github setting 'Protect this branch' was disabled for a repo."""
from streamalert.shared.rule import rule
@rule(logs=['ghe:general'])
def github_disable_protect_this_branch(rec):
    """
    author: @mimeframe
    description: Github setting 'Protect this branch' was disabled for a repo.
                 When unchecking this top-level option, it also disables
                 'Require pull request reviews before merging',
                 'Require review from Code Owners', and all other branch protections
                 like status checks.
    repro_steps: (a) Visit /<org>/<repo>/settings/branches/<branch>
                 (b) Uncheck 'Protect this branch'
                 (c) Click 'Save Changes'
    reference: https://help.github.com/articles/configuring-protected-branches/
    """
    # 'action' is the GitHub Enterprise audit-log event name; assumes every
    # ghe:general record carries that key -- TODO confirm against the schema.
    return rec['action'] == 'protected_branch.destroy'
| airbnb/streamalert | rules/community/github/github_disable_protect_this_branch.py | Python | apache-2.0 | 874 | [
"VisIt"
] | d17049a11a5be547953cf4ce3cb978bb311f24b207ee655e8d8f27e39d0c8d40 |
import numpy as np
from ase import Atoms
from ase.calculators.emt import EMT
from ase.constraints import FixAtoms, FixBondLength
from ase.db import connect
from ase.io import read
from ase.structure import molecule
from ase.test import must_raise
# Exercise both the JSON and SQLite database back ends with the same sequence
# of reserve/write/select/update operations.
for name in ['y2.json', 'y2.db']:
    c = connect(name)
    print(name, c)
    # Reserve a row, delete it via a matching select, then reserve again.
    id = c.reserve(abc=7)
    c.delete([d.id for d in c.select(abc=7)])
    id = c.reserve(abc=7)
    assert c[id].abc == 7
    a = c.get_atoms(id)
    c.write(Atoms())
    # CH4 with constraints and an EMT calculator; forces are computed once
    # and later compared against what round-trips through the database.
    ch4 = molecule('CH4', calculator=EMT())
    ch4.constraints = [FixAtoms(indices=[1]),
                       FixBondLength(0, 2)]
    f1 = ch4.get_forces()
    print(f1)
    c.delete([d.id for d in c.select(C=1)])
    # Complex-valued data must survive storage and retrieval.
    chi = np.array([1 + 0.5j, 0.5])
    id = c.write(ch4, data={'1-butyne': 'bla-bla', 'chi': chi})
    row = c.get(id)
    print(row.data['1-butyne'], row.data.chi)
    assert (row.data.chi == chi).all()
    assert len(c.get_atoms(C=1).constraints) == 2
    # Constrained forces should sum to ~zero and match the originals.
    f2 = c.get(C=1).forces
    assert abs(f2.sum(0)).max() < 1e-14
    f3 = c.get_atoms(C=1).get_forces()
    assert abs(f1 - f3).max() < 1e-14
    # Reading via ase.io with the name@id syntax must give the same forces.
    a = read(name + '@' + str(id))
    f4 = a.get_forces()
    assert abs(f1 - f4).max() < 1e-14
    # Dict values are rejected for key-value pairs; plain strings are fine.
    with must_raise(ValueError):
        c.update(id, abc={'a': 42})
    c.update(id, grr='hmm')
    row = c.get(C=1)
    assert row.id == id
    assert (row.data.chi == chi).all()
    # Lists are likewise rejected as key-value pair values.
    with must_raise(ValueError):
        c.write(ch4, foo=['bar', 2])
"ASE"
] | 3d5ffa7d0ab76fb5cf8f38ef6651e52b4560e441ffc4ea6fadaa413fb283f39c |
"""
An example showing the Julia set displayed as a z-warped surface.
The Julia set is a fractal (see http://en.wikipedia.org/wiki/Julia_set
). We display it here in a canyon-like view using mlab's surf function:
:func:`mayavi.mlab.surf`.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
from mayavi import mlab
import numpy as np
# Calculate the Julia set on a grid
# Open (broadcastable) grid covering the complex rectangle [-1.5, 0.5] x [-1, 1].
x, y = np.ogrid[-1.5:0.5:500j, -1:1:500j]
z = x + 1j * y
julia = np.zeros(z.shape)
# Iterate z <- z^2 + c with c = -0.70176 - 0.3842j; points that escape
# (|z|^2 > 4) at iteration i contribute 1/(2+i), so later escapes add less.
for i in range(50):
    z = z ** 2 - 0.70176 - 0.3842j
    julia += 1 / float(2 + i) * (z * np.conj(z) > 4)

# Display it
mlab.figure(size=(400, 300))
# warp_scale='auto' turns the 2D escape-time array into a z-warped surface.
mlab.surf(julia, colormap='gist_earth', warp_scale='auto', vmax=1.5)
# A view into the "Canyon"
mlab.view(65, 27, 322, [30., -13.7, 136])
mlab.show()
| dmsurti/mayavi | examples/mayavi/mlab/julia_set.py | Python | bsd-3-clause | 832 | [
"Mayavi"
] | c186bf89cba8e172563ae33882c20ba15a0c684b4343d67f7fded8a4a5539e62 |
import itertools as it
import difflib
class Order:
    """Classify the spatial/temporal ordering a participant gave to a set of
    stage elements (days of the week, parts of the day, timeline direction)."""

    def order_for(self, stage, elements):
        """Remember the elements, then dispatch to the stage's case method."""
        self.elements = elements
        return stage.visit(self)

    def case_days_of_week(self, stage):
        """Return 'monday_first', 'sunday_first' or 'not_ordered'."""
        week = ['monday', 'tuesday', 'wednesday', 'thursday',
                'friday', 'saturday', 'sunday']
        if self.elements == week:
            return 'monday_first'
        if self.elements == week[-1:] + week[:-1]:
            return 'sunday_first'
        return 'not_ordered'

    def case_parts_of_day(self, stage):
        """Decide rotation direction from what follows 'morning' in the cycle."""
        rotation = it.dropwhile(lambda part: part != 'morning',
                                it.cycle(self.elements))
        next(rotation)  # consume 'morning' itself
        after_morning = next(rotation)
        return 'counterclockwise' if after_morning == 'afternoon' else 'clockwise'

    def case_timeline(self, stage):
        """Map the stage rotation angle to a reading direction."""
        angle = stage.rotation()
        return 'right_left' if 90 <= angle <= 270 else 'left_right'

    @staticmethod
    def matching_score(shown, selected):
        """Similarity ratio (0..1) between the shown and selected sequences."""
        assert len(shown) == len(selected)
        return difflib.SequenceMatcher(a=shown, b=selected).ratio()
| alepulver/my-thesis | results-tables/aggregators/order.py | Python | mit | 1,510 | [
"VisIt"
] | 0c175f0ae68e67601c986b940cc9b85dd69857f76f879541a4f37d3c0dc69d80 |
#! /usr/bin/env python3
#
# Affiche un fichier VRML 2 + capture
# RoBo - mai 2006
#
import vtk
import sys
#filename = "brain_bis_Rempli.wrl"
# Require exactly one argument: the VRML (.wrl) file to display.
if len(sys.argv) != 2:
    print('usage: ShowVRML.py file.wrl')
    sys.exit()
filename = sys.argv[1]

# Standard VTK pipeline: renderer -> render window -> interactor.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)

# Import the VRML scene into the render window.
importer = vtk.vtkVRMLImporter()
importer.SetRenderWindow(renWin)
importer.SetFileName(filename)
importer.Read()

iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

importer.GetRenderer().SetBackground(0.1, 0.2, 0.4)
importer.GetRenderWindow().SetSize(600, 600)

renWin.Render()

# capture png (x2): render at 2x magnification and save "<input>.png".
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(2)
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(renderLarge.GetOutputPort())
writer.SetFileName(sys.argv[1] + ".png")
writer.Write()

# Hand control to the interactive window.
iren.Start()
| rboman/progs | metafor/biomec/vrml/ShowVRML.py | Python | apache-2.0 | 902 | [
"VTK"
] | faa41a8b9718e7ca07b6a3b2a1f8f92352cd7b3c02565acedbbf17848ebb38b5 |
#!/usr/bin/env python
import pysam
| yunlongliukm/chm1_scripts | MeasureeMapQV.py | Python | mit | 37 | [
"pysam"
] | 596be566692c78557c7b56bb56c7f1637c30e7b2d49af1b55492b342e53af522 |
# Copyright (C) 2013 by Ben Morris (ben@bendmorris.com)
# Based on Bio.Nexus, copyright 2005-2008 by Frank Kauff & Cymon J. Cox
# and Bio.Phylo.Newick, copyright 2009 by Eric Talevich.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""I/O function wrappers for the RDF/CDAO file format.
This is an RDF format that conforms to the Comparative Data Analysis Ontology (CDAO).
See: http://www.evolutionaryontology.org/cdao
This module requires the librdf Python bindings (http://www.librdf.org)
The CDAOIO.Parser, in addition to parsing text files, can also parse directly
from a triple store that implements the Redland storage interface; similarly,
the CDAOIO.Writer can store triples in a triple store instead of serializing
them to a file.
"""
__docformat__ = "restructuredtext en"
from Bio._py3k import StringIO
from Bio.Phylo import CDAO
from ._cdao_owl import cdao_elements, cdao_namespaces, resolve_uri
import os
class CDAOError(Exception):
    """Raised when CDAO object construction cannot continue."""
# CDAO support needs rdflib >= 3.2.1; reject both a missing install and the
# known-too-old release strings.
try:
    import rdflib
    rdfver = rdflib.__version__
    if rdfver[0] in ["1", "2"] or (rdfver in ["3.0.0", "3.1.0", "3.2.0"]):
        raise CDAOError(
            'Support for CDAO tree format requires RDFlib v3.2.1 or later.')
except ImportError:
    raise CDAOError('Support for CDAO tree format requires RDFlib.')
# Prefix -> base-URI map used both for qUri() resolution and for binding
# prefixes on parsed/serialized graphs; extended with the CDAO namespaces.
RDF_NAMESPACES = {
    'owl': 'http://www.w3.org/2002/07/owl#',
    'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
    'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
}
RDF_NAMESPACES.update(cdao_namespaces)
# pad node ids with zeroes until they're at least this length
ZEROES = 8
def qUri(x):
    """Expand a prefixed name (e.g. 'cdao:has_Parent') using RDF_NAMESPACES."""
    return resolve_uri(x, namespaces=RDF_NAMESPACES)
def format_label(x):
    """Return the label with every underscore turned into a space."""
    return " ".join(x.split("_"))
# ---------------------------------------------------------
# Public API
def parse(handle, **kwargs):
    """Iterate over the trees in a CDAO file handle.

    Keyword arguments are forwarded to Parser.parse (e.g. parse_format).

    :returns: generator of Bio.Phylo.CDAO.Tree objects.
    """
    return Parser(handle).parse(**kwargs)
def write(trees, handle, plain=False, **kwargs):
    """Write trees in CDAO format to the given file handle.

    NOTE(review): Writer.write has no explicit return, so this returns None
    rather than the advertised count -- confirm against callers.

    :returns: number of trees written.
    """
    return Writer(trees).write(handle, plain=plain, **kwargs)
# ---------------------------------------------------------
# Input
class Parser(object):
    """Parse a CDAO tree given a file handle.

    The handle is parsed into an rdflib Graph, node metadata is collected in
    one pass over the triples, and the tree is then rebuilt top-down from
    each root.
    """

    def __init__(self, handle=None):
        self.handle = handle
        self.graph = None
        self.node_info = None
        # Maps a parent node URI to the list of its child node URIs.
        self.children = {}
        self.rooted = False

    @classmethod
    def from_string(cls, treetext):
        """Alternate constructor: parse from an in-memory string."""
        handle = StringIO(treetext)
        return cls(handle)

    def parse(self, **kwargs):
        """Parse the text stream this object was initialized with."""
        self.parse_handle_to_graph(**kwargs)
        return self.parse_graph()

    def parse_handle_to_graph(self, rooted=False,
                              parse_format='turtle', context=None, **kwargs):
        """Parse self.handle into the RDF graph self.graph.

        A 'base_uri' keyword overrides the default publicID, which is derived
        from the handle's file name.
        """
        if self.graph is None:
            self.graph = rdflib.Graph()
        graph = self.graph
        for k, v in RDF_NAMESPACES.items():
            graph.bind(k, v)
        self.rooted = rooted
        if 'base_uri' in kwargs:
            base_uri = kwargs['base_uri']
        else:
            base_uri = "file://" + os.path.abspath(self.handle.name)
        graph.parse(file=self.handle, publicID=base_uri, format=parse_format)
        return self.parse_graph(graph, context=context)

    def parse_graph(self, graph=None, context=None):
        """Generator that yields CDAO.Tree instances from an RDF model."""
        if graph is None:
            graph = self.graph
        # look up branch lengths/TUs for all nodes
        self.get_node_info(graph, context=context)
        for root_node in self.tree_roots:
            clade = self.parse_children(root_node)
            yield CDAO.Tree(root=clade, rooted=self.rooted)

    def new_clade(self, node):
        """Returns a CDAO.Clade object for a given named node."""
        result = self.node_info[node]
        kwargs = {}
        if 'branch_length' in result:
            kwargs['branch_length'] = result['branch_length']
        if 'label' in result:
            # Underscores in stored labels stand for spaces.
            kwargs['name'] = result['label'].replace('_', ' ')
        if 'confidence' in result:
            kwargs['confidence'] = result['confidence']
        clade = CDAO.Clade(**kwargs)
        return clade

    def get_node_info(self, graph, context=None):
        """Creates a dictionary containing information about all nodes in the tree."""
        self.node_info = {}
        self.obj_info = {}
        self.children = {}
        self.nodes = set()
        self.tree_roots = set()
        # Predicate URI -> short attribute name under which its object is
        # stored in obj_info.
        assignments = {
            qUri('cdao:has_Parent'): 'parent',
            qUri('cdao:belongs_to_Edge_as_Child'): 'edge',
            qUri('cdao:has_Annotation'): 'annotation',
            qUri('cdao:has_Value'): 'value',
            qUri('cdao:represents_TU'): 'tu',
            qUri('rdfs:label'): 'label',
            qUri('cdao:has_Support_Value'): 'confidence',
        }
        for s, v, o in graph:
            # process each RDF triple in the graph sequentially
            s, v, o = str(s), str(v), str(o)
            if s not in self.obj_info:
                self.obj_info[s] = {}
            this = self.obj_info[s]
            try:
                # if the predicate is one we care about, store information for
                # later
                this[assignments[v]] = o
            except KeyError:
                pass
            if v == qUri('rdf:type'):
                if o in (qUri('cdao:AncestralNode'), qUri('cdao:TerminalNode')):
                    # this is a tree node; store it in set of all nodes
                    self.nodes.add(s)
            if v == qUri('cdao:has_Root'):
                # this is a tree; store its root in set of all tree roots
                self.tree_roots.add(o)
        for node in self.nodes:
            # for each node, look up all information needed to create a
            # CDAO.Clade
            self.node_info[node] = {}
            node_info = self.node_info[node]
            obj = self.obj_info[node]
            if 'edge' in obj:
                # if this object points to an edge, we need a branch length from
                # the annotation on that edge
                edge = self.obj_info[obj['edge']]
                if 'annotation' in edge:
                    annotation = self.obj_info[edge['annotation']]
                    if 'value' in annotation:
                        node_info['branch_length'] = float(annotation['value'])
            if 'tu' in obj:
                # if this object points to a TU, we need the label of that TU
                tu = self.obj_info[obj['tu']]
                if 'label' in tu:
                    node_info['label'] = tu['label']
            if 'parent' in obj:
                # store this node as a child of its parent, if it has one,
                # so that the tree can be traversed from parent to children
                parent = obj['parent']
                if parent not in self.children:
                    self.children[parent] = []
                self.children[parent].append(node)

    def parse_children(self, node):
        """Traverse the tree to create a nested clade structure.

        Return a CDAO.Clade, and calls itself recursively for each child,
        traversing the entire tree and creating a nested structure of CDAO.Clade
        objects.
        """
        clade = self.new_clade(node)
        children = self.children[node] if node in self.children else []
        clade.clades = [
            self.parse_children(child_node) for child_node in children]
        return clade
# ---------------------------------------------------------
# Output
class Writer(object):
    """Based on the writer in Bio.Nexus.Trees (str, to_string).

    Serializes trees to CDAO triples, numbering nodes, edges and TUs with
    monotonically increasing zero-padded counters.
    """
    prefixes = RDF_NAMESPACES

    def __init__(self, trees):
        self.trees = trees
        self.node_counter = 0
        self.edge_counter = 0
        self.tu_counter = 0
        self.tree_counter = 0

    def write(self, handle, tree_uri='', record_complete_ancestry=False,
              rooted=False, **kwargs):
        """Write this instance's trees to a file handle."""
        self.rooted = rooted
        self.record_complete_ancestry = record_complete_ancestry

        if tree_uri and not tree_uri.endswith('/'):
            tree_uri += '/'

        trees = self.trees

        # Emit the turtle preamble: optional @base, all prefixes, ontology.
        if tree_uri:
            handle.write('@base <%s>\n' % tree_uri)
        for k, v in self.prefixes.items():
            handle.write('@prefix %s: <%s> .\n' % (k, v))

        handle.write('<%s> a owl:Ontology .\n' % self.prefixes['cdao'])

        for tree in trees:
            self.tree_counter += 1
            # NOTE(review): looks like a missing '% self.tree_counter' here;
            # tree_uri is never read afterwards in this class -- confirm.
            self.tree_uri = 'tree%s'

            first_clade = tree.clade
            statements = self.process_clade(first_clade, root=tree)
            for stmt in statements:
                self.add_stmt_to_handle(handle, stmt)

    def add_stmt_to_handle(self, handle, stmt):
        """Serialize one (subject, predicate, object) triple as a turtle line."""
        # apply URI prefixes
        stmt_strings = []
        for n, part in enumerate(stmt):
            if isinstance(part, rdflib.URIRef):
                node_uri = str(part)
                changed = False
                for prefix, uri in self.prefixes.items():
                    if node_uri.startswith(uri):
                        node_uri = node_uri.replace(uri, '%s:' % prefix, 1)
                        if node_uri == 'rdf:type':
                            # turtle shorthand for rdf:type
                            node_uri = 'a'
                        changed = True
                if changed or ':' in node_uri:
                    stmt_strings.append(node_uri)
                else:
                    stmt_strings.append('<%s>' % node_uri)
            elif isinstance(part, rdflib.Literal):
                stmt_strings.append(part.n3())
            else:
                stmt_strings.append(str(part))
        handle.write('%s .\n' % ' '.join(stmt_strings))

    def process_clade(self, clade, parent=None, root=False):
        """recursively generate triples describing a tree of clades"""
        self.node_counter += 1
        clade.uri = 'node%s' % str(self.node_counter).zfill(ZEROES)
        if parent:
            clade.ancestors = parent.ancestors + [parent.uri]
        else:
            clade.ancestors = []

        # Shorthands: nUri makes a plain URIRef, pUri resolves a prefixed name.
        nUri = lambda s: rdflib.URIRef(s)
        pUri = lambda s: rdflib.URIRef(qUri(s))
        tree_id = nUri('')

        statements = []

        if root is not False:
            # create a cdao:RootedTree with reference to the tree root
            tree_type = pUri('cdao:RootedTree') if self.rooted else pUri(
                'cdao:UnrootedTree')
            statements += [
                (tree_id, pUri('rdf:type'), tree_type),
                (tree_id, pUri('cdao:has_Root'), nUri(clade.uri)),
            ]

            try:
                tree_attributes = root.attributes
            except AttributeError:
                tree_attributes = []
            for predicate, obj in tree_attributes:
                statements.append((tree_id, predicate, obj))

        if clade.name:
            # create TU
            self.tu_counter += 1
            tu_uri = 'tu%s' % str(self.tu_counter).zfill(ZEROES)

            statements += [
                (nUri(tu_uri), pUri('rdf:type'), pUri('cdao:TU')),
                (nUri(clade.uri), pUri(
                    'cdao:represents_TU'), nUri(tu_uri)),
                (nUri(tu_uri), pUri('rdfs:label'),
                 rdflib.Literal(format_label(clade.name))),
            ]

            try:
                tu_attributes = clade.tu_attributes
            except AttributeError:
                tu_attributes = []
            for predicate, obj in tu_attributes:
                yield (nUri(tu_uri), predicate, obj)

        # create this node
        node_type = 'cdao:TerminalNode' if clade.is_terminal(
        ) else 'cdao:AncestralNode'
        statements += [
            (nUri(clade.uri), pUri('rdf:type'), pUri(node_type)),
            (nUri(clade.uri), pUri(
                'cdao:belongs_to_Tree'), tree_id),
        ]

        if parent is not None:
            # create edge from the parent node to this node
            self.edge_counter += 1
            edge_uri = 'edge%s' % str(self.edge_counter).zfill(ZEROES)
            statements += [
                (nUri(edge_uri), pUri('rdf:type'), pUri('cdao:DirectedEdge')),
                (nUri(edge_uri), pUri(
                    'cdao:belongs_to_Tree'), tree_id),
                (nUri(edge_uri), pUri('cdao:has_Parent_Node'),
                 nUri(parent.uri)),
                (nUri(edge_uri), pUri('cdao:has_Child_Node'),
                 nUri(clade.uri)),
                (nUri(clade.uri), pUri(
                    'cdao:belongs_to_Edge_as_Child'), nUri(edge_uri)),
                (nUri(clade.uri), pUri('cdao:has_Parent'),
                 nUri(parent.uri)),
                (nUri(parent.uri), pUri(
                    'cdao:belongs_to_Edge_as_Parent'), nUri(edge_uri)),
            ]

            if hasattr(clade, 'confidence') and clade.confidence is not None:
                confidence = rdflib.Literal(
                    clade.confidence, datatype='http://www.w3.org/2001/XMLSchema#decimal')

                statements += [(nUri(clade.uri),
                                pUri('cdao:has_Support_Value'), confidence)]

            if self.record_complete_ancestry and len(clade.ancestors) > 0:
                statements += [(nUri(clade.uri), pUri('cdao:has_Ancestor'), nUri(ancestor))
                               for ancestor in clade.ancestors]

            if clade.branch_length is not None:
                # add branch length
                edge_ann_uri = 'edge_annotation%s' % str(
                    self.edge_counter).zfill(ZEROES)

                branch_length = rdflib.Literal(clade.branch_length, datatype=rdflib.URIRef(
                    'http://www.w3.org/2001/XMLSchema#decimal'))
                statements += [
                    (nUri(edge_ann_uri), pUri('rdf:type'),
                     pUri('cdao:EdgeLength')),
                    (nUri(edge_uri), pUri('cdao:has_Annotation'),
                     nUri(edge_ann_uri)),
                    (nUri(edge_ann_uri),
                     pUri('cdao:has_Value'), branch_length),
                ]

            try:
                edge_attributes = clade.edge_attributes
            except AttributeError:
                edge_attributes = []
            for predicate, obj in edge_attributes:
                yield (nUri(edge_uri), predicate, obj)

        for stmt in statements:
            yield stmt

        try:
            clade_attributes = clade.attributes
        except AttributeError:
            clade_attributes = []
        for predicate, obj in clade_attributes:
            yield (nUri(clade.uri), predicate, obj)

        if not clade.is_terminal():
            for new_clade in clade.clades:
                for stmt in self.process_clade(new_clade, parent=clade, root=False):
                    yield stmt
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/Phylo/CDAOIO.py | Python | gpl-2.0 | 15,371 | [
"Biopython"
] | 04da70a981b0b060c17936326fde1a97884ea03863628e6f20cfc23e9074e84f |
# Orca
#
# Copyright 2010 Joanmarie Diggs.
# Copyright 2014-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs." \
"Copyright (c) 2014-2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
import urllib
from orca import debug
from orca import input_event
from orca import orca
from orca import orca_state
from orca import script_utilities
from orca import settings
from orca import settings_manager
_settingsManager = settings_manager.getManager()
class Utilities(script_utilities.Utilities):
    """Web-content specialization of Orca's script utilities.

    Adds per-document caret-context tracking plus many per-object caches
    (keyed by hash(obj)) that are flushed on page/content changes.
    """

    def __init__(self, script):
        super().__init__(script)
        self._currentAttrs = {}
        # Per-document caret contexts: key -> [obj, offset].
        self._caretContexts = {}
        # The following dicts cache expensive per-object lookups, keyed by
        # hash(obj); see e.g. inDocumentContent() and _getTag().
        self._inDocumentContent = {}
        self._isTextBlockElement = {}
        self._isGridDescendant = {}
        self._isLayoutOnly = {}
        self._isMath = {}
        self._mathNestingLevel = {}
        self._isOffScreenLabel = {}
        self._hasNoSize = {}
        self._hasLongDesc = {}
        self._isClickableElement = {}
        self._isAnchor = {}
        self._isLandmark = {}
        self._isLiveRegion = {}
        self._isLink = {}
        self._isNonNavigablePopup = {}
        self._isNonEntryTextWidget = {}
        self._isUselessImage = {}
        self._inferredLabels = {}
        self._text = {}
        self._tag = {}
        self._treatAsDiv = {}
        # Cached "contents" lists for the current presentation position.
        self._currentObjectContents = None
        self._currentSentenceContents = None
        self._currentLineContents = None
        self._currentWordContents = None
        self._currentCharacterContents = None

    def _cleanupContexts(self):
        """Drop cached caret contexts whose object is now defunct."""
        toRemove = []
        for key, [obj, offset] in self._caretContexts.items():
            if self.isZombie(obj):
                toRemove.append(key)
        for key in toRemove:
            self._caretContexts.pop(key, None)

    def clearCachedObjects(self):
        """Flush all per-object caches and stale caret contexts."""
        debug.println(debug.LEVEL_INFO, "WEB: cleaning up cached objects")
        self._inDocumentContent = {}
        self._isTextBlockElement = {}
        self._isGridDescendant = {}
        self._isLayoutOnly = {}
        self._isMath = {}
        self._mathNestingLevel = {}
        self._isOffScreenLabel = {}
        self._hasNoSize = {}
        self._hasLongDesc = {}
        self._isClickableElement = {}
        self._isAnchor = {}
        self._isLandmark = {}
        self._isLiveRegion = {}
        self._isLink = {}
        self._isNonNavigablePopup = {}
        self._isNonEntryTextWidget = {}
        self._isUselessImage = {}
        self._inferredLabels = {}
        self._tag = {}
        self._treatAsDiv = {}
        self._cleanupContexts()

    def clearContentCache(self):
        """Flush the caches tied to the current content position/text."""
        self._currentObjectContents = None
        self._currentSentenceContents = None
        self._currentLineContents = None
        self._currentWordContents = None
        self._currentCharacterContents = None
        self._currentAttrs = {}
        self._text = {}
    def inDocumentContent(self, obj=None):
        """Return True if obj (default: the locus of focus) is inside a web
        document; results are cached per object."""
        if not obj:
            obj = orca_state.locusOfFocus

        rv = self._inDocumentContent.get(hash(obj))
        if rv is not None:
            return rv

        document = self.getDocumentForObject(obj)
        rv = document is not None
        self._inDocumentContent[hash(obj)] = rv
        return rv

    def getDocumentForObject(self, obj):
        """Return the document accessible containing obj (or obj itself), or
        None if obj is not within a document."""
        if not obj:
            return None

        roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB, pyatspi.ROLE_EMBEDDED]

        def isDocument(x):
            try:
                return x and x.getRole() in roles
            except:
                # Defunct accessibles raise here; treat them as non-documents.
                msg = "WEB: Exception getting role for %s" % x
                debug.println(debug.LEVEL_INFO, msg)
                return False

        if isDocument(obj):
            return obj

        return pyatspi.findAncestor(obj, isDocument)

    def _getDocumentsEmbeddedBy(self, frame):
        """Return the document accessibles a frame EMBEDS (AT-SPI relation)."""
        isEmbeds = lambda r: r.getRelationType() == pyatspi.RELATION_EMBEDS
        relations = list(filter(isEmbeds, frame.getRelationSet()))
        if not relations:
            return []

        relation = relations[0]
        targets = [relation.getTarget(i) for i in range(relation.getNTargets())]
        if not targets:
            return []

        roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
        isDocument = lambda x: x and x.getRole() in roles
        return list(filter(isDocument, targets))

    def documentFrame(self, obj=None):
        """Return the showing document frame, preferring the active window's
        single embedded document; fall back to the ancestor lookup."""
        isShowing = lambda x: x and x.getState().contains(pyatspi.STATE_SHOWING)

        try:
            windows = [child for child in self._script.app]
        except:
            msg = "WEB: Exception getting children for app %s" % self._script.app
            debug.println(debug.LEVEL_INFO, msg)
            windows = []

        if orca_state.activeWindow in windows:
            windows = [orca_state.activeWindow]

        for window in windows:
            documents = self._getDocumentsEmbeddedBy(window)
            documents = list(filter(isShowing, documents))
            if len(documents) == 1:
                return documents[0]

        return self.getDocumentForObject(obj or orca_state.locusOfFocus)

    def documentFrameURI(self):
        """Return the current document's URL ('DocURL'), or None."""
        documentFrame = self.documentFrame()
        if documentFrame and not self.isZombie(documentFrame):
            document = documentFrame.queryDocument()
            return document.getAttributeValue('DocURL')

        return None
    def setCaretPosition(self, obj, offset):
        """Move Orca's caret context (and, where possible, the real caret and
        focus) to (obj, offset), honoring focus/browse mode settings."""
        if self._script.flatReviewContext:
            self._script.toggleFlatReviewMode()

        obj, offset = self.findFirstCaretContext(obj, offset)
        self.setCaretContext(obj, offset, documentFrame=None)
        if self._script.focusModeIsSticky():
            return

        try:
            state = obj.getState()
        except:
            msg = "WEB: Exception getting state for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return

        orca.setLocusOfFocus(None, obj, notifyScript=False)
        if state.contains(pyatspi.STATE_FOCUSABLE):
            try:
                obj.queryComponent().grabFocus()
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return
            except:
                msg = "WEB: Exception grabbing focus on %s" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return

        text = self.queryNonEmptyText(obj)
        if text:
            text.setCaretOffset(offset)

        if self._script.useFocusMode(obj) != self._script.inFocusMode():
            self._script.togglePresentationMode(None)

        obj.clearCache()

        # TODO - JD: This is private.
        self._script._saveFocusedObjectInfo(obj)

    def getNextObjectInDocument(self, obj, documentFrame):
        """Return the object following obj in document order, or None."""
        if not obj:
            return None

        # An explicit FLOWS_TO relation overrides structural order.
        for relation in obj.getRelationSet():
            if relation.getRelationType() == pyatspi.RELATION_FLOWS_TO:
                return relation.getTarget(0)

        if obj == documentFrame:
            obj, offset = self.getCaretContext(documentFrame)
            for child in documentFrame:
                if self.characterOffsetInParent(child) > offset:
                    return child

        if obj and obj.childCount:
            return obj[0]

        # Otherwise walk up until a following sibling exists.
        nextObj = None
        while obj and not nextObj:
            index = obj.getIndexInParent() + 1
            if 0 < index < obj.parent.childCount:
                nextObj = obj.parent[index]
            elif obj.parent != documentFrame:
                obj = obj.parent
            else:
                break

        return nextObj
def getPreviousObjectInDocument(self, obj, documentFrame):
    """Return the object before obj in document order, or None.

    Prefers an explicit FLOWS_FROM relation; otherwise takes the
    previous sibling (or the parent's previous sibling) and descends
    to its deepest last descendant.
    """
    if not obj:
        return None
    for relation in obj.getRelationSet():
        if relation.getRelationType() == pyatspi.RELATION_FLOWS_FROM:
            return relation.getTarget(0)
    if obj == documentFrame:
        obj, offset = self.getCaretContext(documentFrame)
        for child in documentFrame:
            if self.characterOffsetInParent(child) < offset:
                return child
    index = obj.getIndexInParent() - 1
    if not 0 <= index < obj.parent.childCount:
        # No previous sibling: step up and use the parent's previous sibling.
        obj = obj.parent
        index = obj.getIndexInParent() - 1
    previousObj = obj.parent[index]
    # Descend to the deepest last child of the candidate.
    while previousObj and previousObj.childCount:
        previousObj = previousObj[previousObj.childCount - 1]
    return previousObj
def getTopOfFile(self):
    """Return the first caret context (obj, offset) of the document."""
    frame = self.documentFrame()
    return self.findFirstCaretContext(frame, 0)
def getBottomOfFile(self):
    """Return [obj, offset] for the last caret context of the document."""
    obj = self.getLastObjectInDocument(self.documentFrame())
    text = self.queryNonEmptyText(obj)
    offset = text.characterCount - 1 if text else 0
    # Keep advancing until nextContext() has nowhere left to go.
    while obj:
        followingObj, followingOffset = self.nextContext(obj, offset)
        if not followingObj:
            break
        obj, offset = followingObj, followingOffset
    return [obj, offset]
def getLastObjectInDocument(self, documentFrame):
    """Return the last object in documentFrame, in document order."""
    try:
        current = documentFrame[documentFrame.childCount - 1]
    except:
        current = documentFrame
    # Walk forward until getNextObjectInDocument() stops making progress.
    while current:
        following = self.getNextObjectInDocument(current, documentFrame)
        if not following or following == current:
            break
        current = following
    return current
def _getTag(self, obj):
    """Return obj's markup 'tag' object attribute (cached), or None."""
    cached = self._tag.get(hash(obj))
    if cached is not None:
        return cached
    try:
        attrs = dict(attr.split(':', 1) for attr in obj.getAttributes())
    except:
        return None
    tag = attrs.get('tag')
    self._tag[hash(obj)] = tag
    return tag
def inFindToolbar(self, obj=None):
    """Return True if obj (or the locus of focus) is in the find toolbar.

    Children of an autocomplete are explicitly excluded.
    """
    candidate = obj or orca_state.locusOfFocus
    parent = candidate.parent if candidate else None
    if parent and parent.getRole() == pyatspi.ROLE_AUTOCOMPLETE:
        return False
    return super().inFindToolbar(candidate)
def isEmpty(self, obj):
    """Return True if obj is a text block element with no presentable text."""
    if not self.isTextBlockElement(obj):
        return False
    text = self.queryNonEmptyText(obj, False)
    return text is None
def isHidden(self, obj):
    """Return True if obj exposes the 'hidden' object attribute as true.

    Object attributes from AT-SPI are strings, so the value must be
    compared against 'true': the previous attrs.get('hidden', False)
    returned the raw string, making 'hidden:false' truthy.
    """
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        # Match the file's best-effort style: unqueryable means not hidden.
        return False
    return attrs.get('hidden') == 'true'
def isTextArea(self, obj):
    """Return True if obj behaves as a text area; links never qualify."""
    return not self.isLink(obj) and super().isTextArea(obj)
def isReadOnlyTextArea(self, obj):
    """Return True if obj is a focusable, non-editable entry.

    NOTE: This method is deliberately more conservative than isTextArea.
    """
    if obj.getRole() != pyatspi.ROLE_ENTRY:
        return False
    state = obj.getState()
    return state.contains(pyatspi.STATE_FOCUSABLE) \
        and not state.contains(pyatspi.STATE_EDITABLE)
def setCaretOffset(self, obj, characterOffset):
    """Move the caret to (obj, characterOffset) and refresh braille."""
    self.setCaretPosition(obj, characterOffset)
    self._script.updateBraille(obj)
def nextContext(self, obj=None, offset=-1, skipSpace=False):
    """Return the caret context (obj, offset) after the given one.

    Defaults to the current caret context; optionally skips whitespace.
    """
    if not obj:
        obj, offset = self.getCaretContext()
    newObj, newOffset = self.findNextCaretInOrder(obj, offset)
    if (newObj, newOffset) == (obj, offset):
        # No progress; step once more to avoid returning the input.
        newObj, newOffset = self.findNextCaretInOrder(newObj, newOffset)
    if skipSpace:
        text = self.queryNonEmptyText(newObj)
        while text and text.getText(newOffset, newOffset + 1).isspace():
            newObj, newOffset = self.findNextCaretInOrder(newObj, newOffset)
            text = self.queryNonEmptyText(newObj)
    return newObj, newOffset
def previousContext(self, obj=None, offset=-1, skipSpace=False):
    """Return the caret context (obj, offset) before the given one.

    Defaults to the current caret context; optionally skips whitespace.
    """
    if not obj:
        obj, offset = self.getCaretContext()
    newObj, newOffset = self.findPreviousCaretInOrder(obj, offset)
    if (newObj, newOffset) == (obj, offset):
        # No progress; step once more to avoid returning the input.
        newObj, newOffset = self.findPreviousCaretInOrder(newObj, newOffset)
    if skipSpace:
        text = self.queryNonEmptyText(newObj)
        while text and text.getText(newOffset, newOffset + 1).isspace():
            newObj, newOffset = self.findPreviousCaretInOrder(newObj, newOffset)
            text = self.queryNonEmptyText(newObj)
    return newObj, newOffset
def lastContext(self, root):
    """Return the last caret context (obj, offset) contained within root."""
    offset = 0
    text = self.queryNonEmptyText(root)
    if text:
        offset = text.characterCount - 1
    def _isInRoot(o):
        # True when o is root itself or a descendant of root.
        return o == root or pyatspi.utils.findAncestor(o, lambda x: x == root)
    obj = root
    # Advance until the next context would leave root.
    while obj:
        lastobj, lastoffset = self.nextContext(obj, offset)
        if not (lastobj and _isInRoot(lastobj)):
            break
        obj, offset = lastobj, lastoffset
    return obj, offset
def contextsAreOnSameLine(self, a, b):
    """Return True if contexts a and b, each (obj, offset), share a line."""
    if a == b:
        return True
    objA, offsetA = a
    objB, offsetB = b
    extentsA = self.getExtents(objA, offsetA, offsetA + 1)
    extentsB = self.getExtents(objB, offsetB, offsetB + 1)
    return self.extentsAreOnSameLine(extentsA, extentsB)
@staticmethod
def extentsAreOnSameLine(a, b, pixelDelta=5):
    """Return True if extents a and b ([x, y, width, height]) share a line.

    A zero-sized extent is on the line if its y falls within the other's
    vertical range. Otherwise the vertical ranges must overlap and the
    vertical midpoints must be within pixelDelta pixels.
    """
    if a == b:
        return True
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    if aw == 0 and ah == 0:
        return by <= ay <= by + bh
    if bw == 0 and bh == 0:
        return ay <= by <= ay + ah
    # The vertical ranges must actually overlap.
    if max(ay, by) >= min(ay + ah, by + bh):
        return False
    # And the midlines must be close enough.
    return abs((ay + ah / 2) - (by + bh / 2)) <= pixelDelta
@staticmethod
def getExtents(obj, startOffset, endOffset):
    """Return [x, y, width, height] for the text range or the object.

    Uses the text interface's range extents when obj has text;
    otherwise falls back to component extents — of the parent for
    combo-box/list-box children. Returns [0, 0, 0, 0] on failure.
    """
    if not obj:
        return [0, 0, 0, 0]
    try:
        text = obj.queryText()
        if text.characterCount:
            return list(text.getRangeExtents(startOffset, endOffset, 0))
    except NotImplementedError:
        # No text interface: fall through to component extents.
        pass
    except:
        msg = "WEB: Exception getting range extents for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return [0, 0, 0, 0]
    role = obj.getRole()
    parentRole = obj.parent.getRole()
    if role in [pyatspi.ROLE_MENU, pyatspi.ROLE_LIST_ITEM] \
       and parentRole in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_LIST_BOX]:
        # Use the container's extents for popup children.
        try:
            ext = obj.parent.queryComponent().getExtents(0)
        except NotImplementedError:
            msg = "WEB: %s does not implement the component interface" % obj.parent
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]
        except:
            msg = "WEB: Exception getting extents for %s" % obj.parent
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]
    else:
        try:
            ext = obj.queryComponent().getExtents(0)
        except NotImplementedError:
            msg = "WEB: %s does not implement the component interface" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]
        except:
            msg = "WEB: Exception getting extents for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]
    return [ext.x, ext.y, ext.width, ext.height]
def expandEOCs(self, obj, startOffset=0, endOffset=-1):
    """Return obj's text with each embedded object character replaced by
    the recursively expanded text of the corresponding child."""
    if not self.inDocumentContent(obj):
        return ""
    text = self.queryNonEmptyText(obj)
    if not text:
        return ""
    string = text.getText(startOffset, endOffset)
    if self.EMBEDDED_OBJECT_CHARACTER in string:
        # If we're not getting the full text of this object, but
        # rather a substring, we need to figure out the offset of
        # the first child within this substring.
        childOffset = 0
        for child in obj:
            if self.characterOffsetInParent(child) >= startOffset:
                break
            childOffset += 1
        toBuild = list(string)
        count = toBuild.count(self.EMBEDDED_OBJECT_CHARACTER)
        for i in range(count):
            # Replace the i-th embedded marker with child i's expansion.
            index = toBuild.index(self.EMBEDDED_OBJECT_CHARACTER)
            try:
                child = obj[i + childOffset]
            except:
                continue
            childText = self.expandEOCs(child)
            if not childText:
                childText = ""
            toBuild[index] = "%s " % childText
        string = "".join(toBuild).strip()
    return string
def substring(self, obj, startOffset, endOffset):
    """Return obj's text in [startOffset, endOffset), or '' if none."""
    if not self.inDocumentContent(obj):
        return super().substring(obj, startOffset, endOffset)
    text = self.queryNonEmptyText(obj)
    if not text:
        return ""
    return text.getText(startOffset, endOffset)
def textAttributes(self, acc, offset, get_defaults=False):
    """Return text attributes at offset, with a one-offset cache per object."""
    cachedForAcc = self._currentAttrs.get(hash(acc)) or {}
    if offset in cachedForAcc:
        return cachedForAcc.get(offset)
    attrs = super().textAttributes(acc, offset, get_defaults)
    # Only the most recent offset is cached for each object.
    self._currentAttrs[hash(acc)] = {offset: attrs}
    return attrs
def findObjectInContents(self, obj, offset, contents):
    """Return the index in contents of the [obj, start, end, ...] entry
    whose range contains offset, or -1 if there is no such entry."""
    if not obj or not contents:
        return -1
    offset = max(0, offset)
    candidates = [item for item in contents if item[0] == obj]
    hits = [item for item in candidates if item[1] <= offset < item[2]]
    if hits and hits[0] and hits[0] in contents:
        return contents.index(hits[0])
    return -1
def isNonEntryTextWidget(self, obj):
    """Return True if obj is a widget (button, check box, tab, etc.)
    whose text is a label rather than entry content. Cached per object.

    NOTE: when the role matches neither branch below, rv remains None
    (the value of the failed cache lookup); None is then cached and
    returned, and the value is recomputed on every call.
    """
    rv = self._isNonEntryTextWidget.get(hash(obj))
    if rv is not None:
        return rv
    roles = [pyatspi.ROLE_CHECK_BOX,
             pyatspi.ROLE_CHECK_MENU_ITEM,
             pyatspi.ROLE_MENU,
             pyatspi.ROLE_MENU_ITEM,
             pyatspi.ROLE_PAGE_TAB,
             pyatspi.ROLE_RADIO_MENU_ITEM,
             pyatspi.ROLE_RADIO_BUTTON,
             pyatspi.ROLE_PUSH_BUTTON,
             pyatspi.ROLE_TOGGLE_BUTTON]
    role = obj.getRole()
    if role in roles:
        rv = True
    elif role in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_TABLE_CELL]:
        # These count as widgets only when they are not text blocks.
        rv = not self.isTextBlockElement(obj)
    self._isNonEntryTextWidget[hash(obj)] = rv
    return rv
def queryNonEmptyText(self, obj, excludeNonEntryTextWidgets=True):
    """Return obj's text interface if it has presentable text, else None.

    Results are cached in self._text. Outside live regions, None is
    returned for table rows and tool bars, for non-entry text widgets
    (when excluded), and for hidden or off-screen-label objects.
    """
    if hash(obj) in self._text:
        return self._text.get(hash(obj))
    try:
        rv = obj.queryText()
        characterCount = rv.characterCount
    except:
        rv = None
    else:
        if not characterCount:
            rv = None
    if not self.isLiveRegion(obj):
        doNotQuery = [pyatspi.ROLE_TABLE_ROW,
                      pyatspi.ROLE_TOOL_BAR]
        if rv and obj.getRole() in doNotQuery:
            rv = None
        if rv and excludeNonEntryTextWidgets and self.isNonEntryTextWidget(obj):
            rv = None
        if rv and (self.isHidden(obj) or self.isOffScreenLabel(obj)):
            rv = None
    self._text[hash(obj)] = rv
    return rv
def _treatTextObjectAsWhole(self, obj):
    """Return True if obj's text should be presented as a single unit."""
    wholeRoles = (pyatspi.ROLE_CHECK_BOX,
                  pyatspi.ROLE_CHECK_MENU_ITEM,
                  pyatspi.ROLE_MENU,
                  pyatspi.ROLE_MENU_ITEM,
                  pyatspi.ROLE_RADIO_MENU_ITEM,
                  pyatspi.ROLE_RADIO_BUTTON,
                  pyatspi.ROLE_PUSH_BUTTON,
                  pyatspi.ROLE_TOGGLE_BUTTON)
    role = obj.getRole()
    if role in wholeRoles:
        return True
    # Table cells qualify only when they are focus-mode widgets.
    return role == pyatspi.ROLE_TABLE_CELL and self.isFocusModeWidget(obj)
def __findRange(self, text, offset, start, end, boundary):
    """Manually compute (string, start, end) for the range at offset.

    Fallback for broken getTextAtOffset() results (see _getTextAtOffset):
    splits the full text into sentence or line spans via regex, then
    narrows to word or same-line-word spans as boundary requires.

    The regex patterns are now raw strings; the originals relied on
    invalid (deprecated) string escapes such as "\\S" — same patterns,
    no DeprecationWarning.
    """
    # We should not have to do any of this. Seriously. This is why
    # We can't have nice things.
    allText = text.getText(0, -1)
    extents = list(text.getRangeExtents(offset, offset + 1, 0))
    def _inThisSpan(span):
        return span[0] <= offset <= span[1]
    def _onThisLine(span):
        rangeExtents = list(text.getRangeExtents(span[0], span[0] + 1, 0))
        return self.extentsAreOnSameLine(extents, rangeExtents)
    spans = []
    charCount = text.characterCount
    if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START:
        spans = [m.span() for m in re.finditer(r"\S*[^\.\?\!]+((?<!\w)[\.\?\!]+(?!\w)|\S*)", allText)]
    elif boundary is not None:
        spans = [m.span() for m in re.finditer(r"[^\n\r]+", allText)]
    if not spans:
        spans = [(0, charCount)]
    rangeStart, rangeEnd = 0, charCount
    for span in spans:
        if _inThisSpan(span):
            rangeStart, rangeEnd = span[0], span[1] + 1
            break
    string = allText[rangeStart:rangeEnd]
    if string and boundary in [pyatspi.TEXT_BOUNDARY_SENTENCE_START, None]:
        return string, rangeStart, rangeEnd
    # Word spans exclude whitespace and the embedded object character.
    words = [m.span() for m in re.finditer(r"[^\s\ufffc]+", string)]
    words = list(map(lambda x: (x[0] + rangeStart, x[1] + rangeStart), words))
    if boundary == pyatspi.TEXT_BOUNDARY_WORD_START:
        spans = list(filter(_inThisSpan, words))
    if boundary == pyatspi.TEXT_BOUNDARY_LINE_START:
        spans = list(filter(_onThisLine, words))
    if spans:
        rangeStart, rangeEnd = spans[0][0], spans[-1][1] + 1
        string = allText[rangeStart:rangeEnd]
    return string, rangeStart, rangeEnd
def _attemptBrokenTextRecovery(self):
    # Whether _getTextAtOffset() should sanity-check getTextAtOffset()
    # results and fall back to __findRange(). Default is off; presumably
    # toolkit-specific subclasses override this — TODO confirm.
    return False
def _getTextAtOffset(self, obj, offset, boundary):
    """Return (string, start, end) for obj's text at offset via boundary.

    Wraps getTextAtOffset() with special handling for character/None
    boundaries and non-editable sentence navigation. When broken-text
    recovery is enabled, sanity-checks the toolkit's answer and falls
    back to __findRange(). Every outcome is logged for debugging.
    """
    if not obj:
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
            " String: '', Start: 0, End: 0. (obj is None)" % (offset, obj, boundary)
        debug.println(debug.LEVEL_INFO, msg)
        return '', 0, 0
    text = self.queryNonEmptyText(obj)
    if not text:
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
            " String: '', Start: 0, End: 1. (queryNonEmptyText() returned None)" \
            % (offset, obj, boundary)
        debug.println(debug.LEVEL_INFO, msg)
        return '', 0, 1
    if boundary == pyatspi.TEXT_BOUNDARY_CHAR:
        string, start, end = text.getText(offset, offset + 1), offset, offset + 1
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
            " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end
    if not boundary:
        # No boundary: everything from offset to the end of the text.
        string, start, end = text.getText(offset, -1), offset, text.characterCount
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
            " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end
    if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START \
       and not obj.getState().contains(pyatspi.STATE_EDITABLE):
        # List items, headings, and word-free non-blocks are one sentence.
        allText = text.getText(0, -1)
        if obj.getRole() in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_HEADING] \
           or not (re.search("\w", allText) and self.isTextBlockElement(obj)):
            string, start, end = allText, 0, text.characterCount
            s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
            msg = "WEB: Results for text at offset %i for %s using %s:\n" \
                " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
            debug.println(debug.LEVEL_INFO, msg)
            return string, start, end
    offset = max(0, offset)
    string, start, end = text.getTextAtOffset(offset, boundary)
    # The above should be all that we need to do, but....
    if not self._attemptBrokenTextRecovery():
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
            " String: '%s', Start: %i, End: %i.\n" \
            " Not checking for broken text." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end
    needSadHack = False
    # Asking again at the returned start must reproduce the same result.
    testString, testStart, testEnd = text.getTextAtOffset(start, boundary)
    if (string, start, end) != (testString, testStart, testEnd):
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        s2 = testString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset for %s using %s.\n" \
            " For offset %i - String: '%s', Start: %i, End: %i.\n" \
            " For offset %i - String: '%s', Start: %i, End: %i.\n" \
            " The bug is the above results should be the same.\n" \
            " This very likely needs to be fixed by the toolkit." \
            % (obj, boundary, offset, s1, start, end, start, s2, testStart, testEnd)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True
    elif not string and 0 <= offset < text.characterCount:
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        s2 = text.getText(0, -1).replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
            " String: '%s', Start: %i, End: %i.\n" \
            " The bug is no text reported for a valid offset.\n" \
            " Character count: %i, Full text: '%s'.\n" \
            " This very likely needs to be fixed by the toolkit." \
            % (offset, obj, boundary, s1, start, end, text.characterCount, s2)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True
    elif not (start <= offset < end):
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
            " String: '%s', Start: %i, End: %i.\n" \
            " The bug is the range returned is outside of the offset.\n" \
            " This very likely needs to be fixed by the toolkit." \
            % (offset, obj, boundary, s1, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True
    if needSadHack:
        # Compute the range ourselves from the full text.
        sadString, sadStart, sadEnd = self.__findRange(text, offset, start, end, boundary)
        s = sadString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "HACK: Attempting to recover from above failure.\n" \
            " String: '%s', Start: %i, End: %i." % (s, sadStart, sadEnd)
        debug.println(debug.LEVEL_INFO, msg)
        return sadString, sadStart, sadEnd
    s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
    msg = "WEB: Results for text at offset %i for %s using %s:\n" \
        " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
    debug.println(debug.LEVEL_INFO, msg)
    return string, start, end
def _getContentsForObj(self, obj, offset, boundary):
    """Return [[obj, start, end, string]] for the text at offset.

    Math on a line boundary yields the top-level math object. If the
    character at offset is an embedded object, recurses into that child.
    Otherwise trims the result to the run of non-embedded-object
    characters containing offset.
    """
    if not obj:
        return []
    if boundary == pyatspi.TEXT_BOUNDARY_LINE_START and self.isMath(obj):
        if self.isMathTopLevel(obj):
            math = obj
        else:
            math = self.getMathAncestor(obj)
        return [[math, 0, 1, '']]
    string, start, end = self._getTextAtOffset(obj, offset, boundary)
    if not string:
        return [[obj, start, end, string]]
    stringOffset = offset - start
    try:
        char = string[stringOffset]
    except:
        pass
    else:
        if char == self.EMBEDDED_OBJECT_CHARACTER:
            # Descend into the embedded child at this offset.
            childIndex = self.getChildIndex(obj, offset)
            try:
                child = obj[childIndex]
            except:
                pass
            else:
                return self._getContentsForObj(child, 0, boundary)
    # Restrict to the run of real characters around stringOffset.
    ranges = [m.span() for m in re.finditer("[^\ufffc]+", string)]
    strings = list(filter(lambda x: x[0] <= stringOffset <= x[1], ranges))
    if len(strings) == 1:
        rangeStart, rangeEnd = strings[0]
        start += rangeStart
        string = string[rangeStart:rangeEnd]
        end = start + len(string)
    return [[obj, start, end, string]]
def getSentenceContentsAtOffset(self, obj, offset, useCache=True):
    """Return the [obj, start, end, string] contents of the sentence at
    (obj, offset), extended across object boundaries.

    Editable, focused objects are returned as-is. Results may be cached
    in self._currentSentenceContents.
    """
    if not obj:
        return []
    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentSentenceContents) != -1:
            return self._currentSentenceContents
    boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
    objects = self._getContentsForObj(obj, offset, boundary)
    state = obj.getState()
    if state.contains(pyatspi.STATE_EDITABLE) \
       and state.contains(pyatspi.STATE_FOCUSED):
        return objects
    def _treatAsSentenceEnd(x):
        # True when x's text either exhausts its object or ends with
        # sentence-terminating punctuation, in a text block element.
        xObj, xStart, xEnd, xString = x
        if not self.isTextBlockElement(xObj):
            return False
        text = self.queryNonEmptyText(xObj)
        if text and 0 < text.characterCount <= xEnd:
            return True
        if 0 <= xStart <= 5:
            xString = " ".join(xString.split()[1:])
        match = re.search("\S[\.\!\?]+(\s|\Z)", xString)
        return match is not None
    # Check for things in the same sentence before this object.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    while firstObj and firstString:
        if firstStart == 0 and self.isTextBlockElement(firstObj):
            break
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(lambda x: x not in objects, onLeft))
        endsOnLeft = list(filter(_treatAsSentenceEnd, onLeft))
        if endsOnLeft:
            # Drop everything up to and including the last sentence end.
            i = onLeft.index(endsOnLeft[-1])
            onLeft = onLeft[i+1:]
        if not onLeft:
            break
        objects[0:0] = onLeft
        firstObj, firstStart, firstEnd, firstString = objects[0]
    # Check for things in the same sentence after this object.
    while not _treatAsSentenceEnd(objects[-1]):
        lastObj, lastStart, lastEnd, lastString = objects[-1]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(lambda x: x not in objects, onRight))
        if not onRight:
            break
        objects.extend(onRight)
    if useCache:
        self._currentSentenceContents = objects
    return objects
def getCharacterAtOffset(self, obj, offset):
    """Return the single character at offset in obj, or ''."""
    text = self.queryNonEmptyText(obj)
    if not text:
        return ""
    return text.getText(offset, offset + 1)
def getCharacterContentsAtOffset(self, obj, offset, useCache=True):
    """Return the [obj, start, end, string] contents for the character
    at (obj, offset), optionally using/updating the character cache."""
    if not obj:
        return []
    offset = max(0, offset)
    if useCache and self.findObjectInContents(
            obj, offset, self._currentCharacterContents) != -1:
        return self._currentCharacterContents
    contents = self._getContentsForObj(obj, offset, pyatspi.TEXT_BOUNDARY_CHAR)
    if useCache:
        self._currentCharacterContents = contents
    return contents
def getWordContentsAtOffset(self, obj, offset, useCache=True):
    """Return the [obj, start, end, string] contents of the word at
    (obj, offset), extended across object boundaries on the same line.

    Results may be cached in self._currentWordContents.
    """
    if not obj:
        return []
    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentWordContents) != -1:
            return self._currentWordContents
    boundary = pyatspi.TEXT_BOUNDARY_WORD_START
    objects = self._getContentsForObj(obj, offset, boundary)
    extents = self.getExtents(obj, offset, offset + 1)
    def _include(x):
        # Accept new, non-empty contents whose extents share our line.
        if x in objects:
            return False
        xObj, xStart, xEnd, xString = x
        if xStart == xEnd or not xString:
            return False
        xExtents = self.getExtents(xObj, xStart, xStart + 1)
        return self.extentsAreOnSameLine(extents, xExtents)
    # Check for things in the same word to the left of this object.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    while prevObj and firstString:
        text = self.queryNonEmptyText(prevObj)
        if not text or text.getText(pOffset, pOffset + 1).isspace():
            break
        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(_include, onLeft))
        if not onLeft:
            break
        objects[0:0] = onLeft
        firstObj, firstStart, firstEnd, firstString = objects[0]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    # Check for things in the same word to the right of this object.
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    while lastObj and lastString and not lastString[-1].isspace():
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break
        objects.extend(onRight)
        lastObj, lastStart, lastEnd, lastString = objects[-1]
    # We want to treat the list item marker as its own word.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    if firstStart == 0 and firstObj.getRole() == pyatspi.ROLE_LIST_ITEM:
        objects = [objects[0]]
    if useCache:
        self._currentWordContents = objects
    return objects
def getObjectContentsAtOffset(self, obj, offset=0, useCache=True):
    """Return the [obj, start, end, string] contents spanning all of obj
    starting at offset, gathered from obj and its descendants.

    Results may be cached in self._currentObjectContents.
    """
    if not obj:
        return []
    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentObjectContents) != -1:
            return self._currentObjectContents
    objIsLandmark = self.isLandmark(obj)
    def _isInObject(x):
        # Recursively check whether x is obj or one of its descendants.
        if not x:
            return False
        if x == obj:
            return True
        return _isInObject(x.parent)
    def _include(x):
        if x in objects:
            return False
        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False
        # Do not cross into a different landmark.
        if objIsLandmark and self.isLandmark(xObj) and obj != xObj:
            return False
        return _isInObject(xObj)
    objects = self._getContentsForObj(obj, offset, None)
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    while nextObj:
        onRight = self._getContentsForObj(nextObj, nOffset, None)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break
        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    if useCache:
        self._currentObjectContents = objects
    return objects
def _contentIsSubsetOf(self, contentA, contentB):
objA, startA, endA, stringA = contentA
objB, startB, endB, stringB = contentB
if objA == objB:
setA = set(range(startA, endA))
setB = set(range(startB, endB))
return setA.issubset(setB)
return False
def getLineContentsAtOffset(self, obj, offset, layoutMode=None, useCache=True):
    """Return the [obj, start, end, string] contents of the line at
    (obj, offset).

    In layout mode, the line is extended left and right with contents
    whose extents fall on the same visual line. Results may be cached
    in self._currentLineContents.

    Fix: compare layoutMode with `is None` rather than `== None`.
    """
    if not obj:
        return []
    text = self.queryNonEmptyText(obj)
    if text and offset == text.characterCount:
        offset -= 1
    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentLineContents) != -1:
            return self._currentLineContents
    if layoutMode is None:
        layoutMode = _settingsManager.getSetting('layoutMode')
    objects = []
    extents = self.getExtents(obj, offset, offset + 1)
    def _include(x):
        # Accept new, non-empty contents on the same visual line.
        if x in objects:
            return False
        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False
        xExtents = self.getExtents(xObj, xStart, xStart + 1)
        if self.isMathTopLevel(xObj):
            # Math can be tall; allow a delta of the line height.
            onSameLine = self.extentsAreOnSameLine(extents, xExtents, extents[3])
        else:
            onSameLine = self.extentsAreOnSameLine(extents, xExtents)
        return onSameLine
    boundary = pyatspi.TEXT_BOUNDARY_LINE_START
    objects = self._getContentsForObj(obj, offset, boundary)
    if not layoutMode:
        if useCache:
            self._currentLineContents = objects
        return objects
    firstObj, firstStart, firstEnd, firstString = objects[0]
    if (extents[2] == 0 and extents[3] == 0) or self.isMath(firstObj):
        extents = self.getExtents(firstObj, firstStart, firstEnd)
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    # Check for things on the same line to the left of this object.
    while prevObj:
        text = self.queryNonEmptyText(prevObj)
        if text and text.getText(pOffset, pOffset + 1) in [" ", "\xa0"]:
            prevObj, pOffset = self.findPreviousCaretInOrder(prevObj, pOffset)
        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(_include, onLeft))
        if not onLeft:
            break
        if self._contentIsSubsetOf(objects[0], onLeft[-1]):
            objects.pop(0)
        objects[0:0] = onLeft
        firstObj, firstStart = objects[0][0], objects[0][1]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    # Check for things on the same line to the right of this object.
    while nextObj:
        text = self.queryNonEmptyText(nextObj)
        if text and text.getText(nOffset, nOffset + 1) in [" ", "\xa0"]:
            nextObj, nOffset = self.findNextCaretInOrder(nextObj, nOffset)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break
        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    if useCache:
        self._currentLineContents = objects
    return objects
def getPreviousLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line above the one at (obj, offset),
    defaulting to the current caret context. Returns [] on failure."""
    if obj is None:
        obj, offset = self.getCaretContext()
    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie" % obj
        debug.println(debug.LEVEL_INFO, msg)
    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)
    if not (line and line[0]):
        return []
    firstObj, firstOffset = line[0][0], line[0][1]
    msg = "WEB: First context on line is: %s, %i" % (firstObj, firstOffset)
    debug.println(debug.LEVEL_INFO, msg)
    # Step to the context just before this line's first context.
    obj, offset = self.previousContext(firstObj, firstOffset, True)
    if not obj and firstObj:
        # The context may be stale; retry once with caches cleared.
        msg = "WEB: Previous context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.previousContext(firstObj, firstOffset, True)
    msg = "WEB: Previous context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []
    return contents
def getNextLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line below the one at (obj, offset),
    defaulting to the current caret context. Returns [] on failure."""
    if obj is None:
        obj, offset = self.getCaretContext()
    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie" % obj
        debug.println(debug.LEVEL_INFO, msg)
    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)
    if not (line and line[0]):
        return []
    # Inside math, skip past the entire math expression.
    math = self.getMathAncestor(obj)
    if math:
        lastObj, lastOffset = self.lastContext(math)
    else:
        lastObj, lastOffset = line[-1][0], line[-1][2] - 1
    msg = "WEB: Last context on line is: %s, %i" % (lastObj, lastOffset)
    debug.println(debug.LEVEL_INFO, msg)
    obj, offset = self.nextContext(lastObj, lastOffset, True)
    if not obj and lastObj:
        # The context may be stale; retry once with caches cleared.
        msg = "WEB: Next context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.nextContext(lastObj, lastOffset, True)
    msg = "WEB: Next context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []
    return contents
def isFocusModeWidget(self, obj):
    """Return True if obj is a widget that should trigger focus mode."""
    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    if state.contains(pyatspi.STATE_EDITABLE) \
       or state.contains(pyatspi.STATE_EXPANDABLE):
        return True
    focusRoles = (pyatspi.ROLE_COMBO_BOX,
                  pyatspi.ROLE_ENTRY,
                  pyatspi.ROLE_LIST_BOX,
                  pyatspi.ROLE_LIST_ITEM,
                  pyatspi.ROLE_MENU,
                  pyatspi.ROLE_MENU_ITEM,
                  pyatspi.ROLE_CHECK_MENU_ITEM,
                  pyatspi.ROLE_RADIO_MENU_ITEM,
                  pyatspi.ROLE_PAGE_TAB,
                  pyatspi.ROLE_PASSWORD_TEXT,
                  pyatspi.ROLE_PROGRESS_BAR,
                  pyatspi.ROLE_SLIDER,
                  pyatspi.ROLE_SPIN_BUTTON,
                  pyatspi.ROLE_TOOL_BAR,
                  pyatspi.ROLE_TABLE_CELL,
                  pyatspi.ROLE_TABLE_ROW,
                  pyatspi.ROLE_TABLE,
                  pyatspi.ROLE_TREE_TABLE,
                  pyatspi.ROLE_TREE)
    if role in focusRoles and not self.isTextBlockElement(obj):
        return True
    if self.isGridDescendant(obj):
        return True
    return False
def isTextBlockElement(self, obj):
    """Return True if obj is a non-interactive block of document text
    (paragraph, section, heading, etc.). Cached per object."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isTextBlockElement.get(hash(obj))
    if rv is not None:
        return rv
    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    textBlockElements = [pyatspi.ROLE_CAPTION,
                         pyatspi.ROLE_COLUMN_HEADER,
                         pyatspi.ROLE_DOCUMENT_FRAME,
                         pyatspi.ROLE_DOCUMENT_WEB,
                         pyatspi.ROLE_FOOTER,
                         pyatspi.ROLE_FORM,
                         pyatspi.ROLE_HEADING,
                         pyatspi.ROLE_LABEL,
                         pyatspi.ROLE_LIST,
                         pyatspi.ROLE_LIST_ITEM,
                         pyatspi.ROLE_PANEL,
                         pyatspi.ROLE_PARAGRAPH,
                         pyatspi.ROLE_ROW_HEADER,
                         pyatspi.ROLE_SECTION,
                         pyatspi.ROLE_TEXT,
                         pyatspi.ROLE_TABLE_CELL]
    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        textBlockElements.append(pyatspi.ROLE_STATIC)
    except:
        pass
    if not role in textBlockElements:
        rv = False
    elif state.contains(pyatspi.STATE_EDITABLE):
        rv = False
    elif role in [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]:
        rv = True
    elif not state.contains(pyatspi.STATE_FOCUSABLE) and not state.contains(pyatspi.STATE_FOCUSED):
        rv = True
    else:
        rv = False
    self._isTextBlockElement[hash(obj)] = rv
    return rv
def treatAsDiv(self, obj):
    """Return True if obj is a list whose first child is not a list item,
    i.e. a list being used as a generic container. Cached per object."""
    cached = self._treatAsDiv.get(hash(obj))
    if cached is not None:
        return cached
    try:
        role = obj.getRole()
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting role and childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    result = role == pyatspi.ROLE_LIST \
        and not (childCount and obj[0].getRole() == pyatspi.ROLE_LIST_ITEM)
    self._treatAsDiv[hash(obj)] = result
    return result
def speakMathSymbolNames(self, obj=None):
    """Return True if symbol names should be spoken, i.e. obj (or the
    locus of focus) is math content."""
    target = obj or orca_state.locusOfFocus
    return self.isMath(target)
def isInMath(self):
    """Return True if the locus of focus is math content."""
    focus = orca_state.locusOfFocus
    return self.isMath(focus)
def isMath(self, obj):
    """Return True if obj's tag is any MathML element. Cached per object."""
    rv = self._isMath.get(hash(obj))
    if rv is not None:
        return rv
    mathTags = {'math',
                'maction',
                'maligngroup',
                'malignmark',
                'menclose',
                'merror',
                'mfenced',
                'mfrac',
                'mglyph',
                'mi',
                'mlabeledtr',
                'mlongdiv',
                'mmultiscripts',
                'mn',
                'mo',
                'mover',
                'mpadded',
                'mphantom',
                'mprescripts',
                'mroot',
                'mrow',
                'ms',
                'mscarries',
                'mscarry',
                'msgroup',
                'msline',
                'mspace',
                'msqrt',
                'msrow',
                'mstack',
                'mstyle',
                'msub',
                'msup',
                'msubsup',
                'mtable',
                'mtd',
                'mtext',
                'mtr',
                'munder',
                'munderover'}
    rv = self._getTag(obj) in mathTags
    self._isMath[hash(obj)] = rv
    return rv
def isNoneElement(self, obj):
    """Return True if obj's markup tag is 'none'."""
    tag = self._getTag(obj)
    return tag == 'none'
def isMathLayoutOnly(self, obj):
    """Return True if obj is a layout-only MathML element."""
    tag = self._getTag(obj)
    return tag in ('mrow', 'mstyle', 'merror', 'mpadded')
def isMathMultiline(self, obj):
    """Return True if obj is a multi-line MathML element."""
    tag = self._getTag(obj)
    return tag in ('mtable', 'mstack', 'mlongdiv')
def isMathEnclose(self, obj):
    """Return True if obj is a MathML menclose element."""
    tag = self._getTag(obj)
    return tag == 'menclose'
def isMathFenced(self, obj):
    """Return True if obj is a MathML mfenced element."""
    tag = self._getTag(obj)
    return tag == 'mfenced'
def isMathFraction(self, obj):
    """Return True if obj is a MathML mfrac element."""
    tag = self._getTag(obj)
    return tag == 'mfrac'
def isMathFractionWithoutBar(self, obj):
    """Return True if obj is an mfrac whose linethickness attribute
    contains no nonzero digit (i.e. the fraction bar is invisible)."""
    if not self.isMathFraction(obj):
        return False
    try:
        attrs = dict(attr.split(':', 1) for attr in obj.getAttributes())
    except:
        return False
    thickness = attrs.get('linethickness')
    if not thickness:
        return False
    return not any(ch.isnumeric() and ch != '0' for ch in thickness)
def isMathPhantom(self, obj):
    """Return True if obj is a MathML mphantom element."""
    tag = self._getTag(obj)
    return tag == 'mphantom'
def isMathRoot(self, obj):
    """Return True if obj is a square or nth root element."""
    return self.isMathSquareRoot(obj) or self.isMathNthRoot(obj)
def isMathNthRoot(self, obj):
    """Return True if obj is a MathML mroot element."""
    tag = self._getTag(obj)
    return tag == 'mroot'
def isMathMultiScript(self, obj):
return self._getTag(obj) == 'mmultiscripts'
def _isMathPrePostScriptSeparator(self, obj):
return self._getTag(obj) == 'mprescripts'
def isMathSubOrSuperScript(self, obj):
return self._getTag(obj) in ['msub', 'msup', 'msubsup']
def isMathTable(self, obj):
return self._getTag(obj) == 'mtable'
def isMathTableRow(self, obj):
return self._getTag(obj) in ['mtr', 'mlabeledtr']
def isMathTableCell(self, obj):
return self._getTag(obj) == 'mtd'
def isMathUnderOrOverScript(self, obj):
return self._getTag(obj) in ['mover', 'munder', 'munderover']
def _isMathSubElement(self, obj):
return self._getTag(obj) == 'msub'
def _isMathSupElement(self, obj):
return self._getTag(obj) == 'msup'
def _isMathSubsupElement(self, obj):
return self._getTag(obj) == 'msubsup'
def _isMathUnderElement(self, obj):
return self._getTag(obj) == 'munder'
def _isMathOverElement(self, obj):
return self._getTag(obj) == 'mover'
def _isMathUnderOverElement(self, obj):
return self._getTag(obj) == 'munderover'
def isMathSquareRoot(self, obj):
return self._getTag(obj) == 'msqrt'
def isMathToken(self, obj):
return self._getTag(obj) in ['mi', 'mn', 'mo', 'mtext', 'ms', 'mspace']
def isMathTopLevel(self, obj):
    """Returns True if obj is a top-level <math> element."""
    return obj.getRole() == pyatspi.ROLE_MATH

def getMathAncestor(self, obj):
    """Returns the top-level math ancestor of obj, or None if obj is not math."""
    if not self.isMath(obj):
        return None

    if self.isMathTopLevel(obj):
        return obj

    return pyatspi.findAncestor(obj, self.isMathTopLevel)

def getMathDenominator(self, obj):
    """Returns the denominator (second child) of the mfrac obj, or None."""
    if not self.isMathFraction(obj):
        return None

    return obj[1]

def getMathNumerator(self, obj):
    """Returns the numerator (first child) of the mfrac obj, or None."""
    if not self.isMathFraction(obj):
        return None

    return obj[0]

def getMathRootBase(self, obj):
    """Returns the base of a root: the first child for mroot; obj itself
    for msqrt (all of an msqrt's children form the base); else None."""
    if self.isMathNthRoot(obj):
        return obj[0]

    if self.isMathSquareRoot(obj):
        return obj

    return None

def getMathRootIndex(self, obj):
    """Returns the index (second child) of the mroot obj, or None."""
    if not self.isMathNthRoot(obj):
        return None

    try:
        return obj[1]
    except:
        pass

    return None

def getMathScriptBase(self, obj):
    """Returns the base (first child) of a scripted element, or None."""
    if self.isMathSubOrSuperScript(obj) \
       or self.isMathUnderOrOverScript(obj) \
       or self.isMathMultiScript(obj):
        return obj[0]

    return None

def getMathScriptSubscript(self, obj):
    """Returns the subscript child of an msub/msubsup obj, or None."""
    if self._isMathSubElement(obj) or self._isMathSubsupElement(obj):
        return obj[1]

    return None

def getMathScriptSuperscript(self, obj):
    """Returns the superscript child of an msup/msubsup obj, or None."""
    if self._isMathSupElement(obj):
        return obj[1]

    if self._isMathSubsupElement(obj):
        return obj[2]

    return None

def getMathScriptUnderscript(self, obj):
    """Returns the underscript child of an munder/munderover obj, or None."""
    if self._isMathUnderElement(obj) or self._isMathUnderOverElement(obj):
        return obj[1]

    return None

def getMathScriptOverscript(self, obj):
    """Returns the overscript child of an mover/munderover obj, or None."""
    if self._isMathOverElement(obj):
        return obj[1]

    if self._isMathUnderOverElement(obj):
        return obj[2]

    return None

def _getMathPrePostScriptSeparator(self, obj):
    """Returns obj's mprescripts child, which separates post- from
    pre-scripts in an mmultiscripts element, or None."""
    for child in obj:
        if self._isMathPrePostScriptSeparator(child):
            return child

    return None

def getMathPrescripts(self, obj):
    """Returns the children after the mprescripts separator, or []."""
    separator = self._getMathPrePostScriptSeparator(obj)
    if not separator:
        return []

    index = separator.getIndexInParent()
    return [obj[i] for i in range(index+1, obj.childCount)]

def getMathPostscripts(self, obj):
    """Returns the children between the base and the mprescripts
    separator (or all children after the base if there is no separator)."""
    separator = self._getMathPrePostScriptSeparator(obj)
    if separator:
        index = separator.getIndexInParent()
    else:
        index = obj.childCount

    # Child 0 is the script base, so postscripts start at child 1.
    return [obj[i] for i in range(1, index)]

def getMathEnclosures(self, obj):
    """Returns the list of notation values of the menclose obj, or []."""
    if not self.isMathEnclose(obj):
        return []

    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return []

    # 'longdiv' is the default notation per the MathML spec.
    return attrs.get('notation', 'longdiv').split()

def getMathFencedSeparators(self, obj):
    """Returns the list of separator characters of the mfenced obj
    (default [','])."""
    if not self.isMathFenced(obj):
        return ['']

    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return ['']

    return list(attrs.get('separators', ','))

def getMathFences(self, obj):
    """Returns the [open, close] fence characters of the mfenced obj
    (defaults '(' and ')')."""
    if not self.isMathFenced(obj):
        return ['', '']

    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return ['', '']

    return [attrs.get('open', '('), attrs.get('close', ')')]
def getMathNestingLevel(self, obj, test=None):
    """Returns the number of ancestors of obj satisfying test (by default,
    ancestors with the same tag as obj). Result is cached per object."""
    rv = self._mathNestingLevel.get(hash(obj))
    if rv is not None:
        return rv

    if not test:
        test = lambda x: self._getTag(x) == self._getTag(obj)

    rv = -1
    ancestor = obj
    while ancestor:
        ancestor = pyatspi.findAncestor(ancestor, test)
        rv += 1

    self._mathNestingLevel[hash(obj)] = rv
    return rv

def filterContentsForPresentation(self, contents, inferLabels=False):
    """Returns contents with items that should not be presented removed.

    Arguments:
    - contents: a list of [obj, startOffset, endOffset, string] quadruples.
    - inferLabels: if True, also filter out content serving as the
      inferred label of any widget in contents.
    """
    def _include(x):
        obj, start, end, string = x
        if not obj:
            return False

        # Skip empty text blocks, anchors, zero-sized and off-screen
        # objects, decorative images, and labels whose target is
        # already present in contents.
        if (self.isTextBlockElement(obj) and not string.strip()) \
           or self.isAnchor(obj) \
           or self.hasNoSize(obj) \
           or self.isOffScreenLabel(obj) \
           or self.isUselessImage(obj) \
           or self.isLabellingContents(x, contents):
            return False

        # Radio buttons and check boxes present their inferred label as
        # part of the widget itself, so the label is always redundant.
        widget = self.isInferredLabelForContents(x, contents)
        alwaysFilter = [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]
        if widget and (inferLabels or widget.getRole() in alwaysFilter):
            return False

        return True

    return list(filter(_include, contents))
def needsSeparator(self, lastChar, nextChar):
    """Returns True if a separator should be spoken between the two
    adjacent characters, based on spacing and punctuation rules."""
    if lastChar.isspace() or nextChar.isspace():
        return False

    opening = ("(", "[", "{", "<")
    closing = (".", "?", "!", ":", ",", ";", ")", "]", "}", ">")

    # A closing mark before, or an opening mark after, warrants a pause.
    if lastChar in closing or nextChar in opening:
        return True

    # Inside a bracket pair, or right before closing punctuation: no pause.
    if lastChar in opening or nextChar in closing:
        return False

    return lastChar.isalnum()
def supportsSelectionAndTable(self, obj):
    """Returns True if obj implements both the Table and Selection
    interfaces (i.e. behaves like an interactive grid)."""
    interfaces = pyatspi.listInterfaces(obj)
    return 'Table' in interfaces and 'Selection' in interfaces

def isGridDescendant(self, obj):
    """Returns True if obj has a selectable-table ancestor. Cached."""
    if not obj:
        return False

    rv = self._isGridDescendant.get(hash(obj))
    if rv is not None:
        return rv

    rv = pyatspi.findAncestor(obj, self.supportsSelectionAndTable) is not None
    self._isGridDescendant[hash(obj)] = rv
    return rv

def isLayoutOnly(self, obj):
    """Returns True if obj exists for layout purposes only. Cached.

    Math is never treated as layout-only; everything else defers to the
    default script utilities.
    """
    if not obj:
        return False

    rv = self._isLayoutOnly.get(hash(obj))
    if rv is not None:
        return rv

    if self.isMath(obj):
        rv = False
    else:
        rv = super().isLayoutOnly(obj)

    self._isLayoutOnly[hash(obj)] = rv
    return rv
def isOffScreenLabel(self, obj):
    """Returns True if obj labels something and is positioned off screen
    (negative coordinates), a common visually-hidden-label technique.
    Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isOffScreenLabel.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    isLabelFor = lambda x: x.getRelationType() == pyatspi.RELATION_LABEL_FOR
    try:
        relationSet = obj.getRelationSet()
    except:
        pass
    else:
        relations = list(filter(isLabelFor, relationSet))
        if relations:
            try:
                text = obj.queryText()
                end = text.characterCount
            except:
                # No text interface; check the extents of one character.
                end = 1
            x, y, width, height = self.getExtents(obj, 0, end)
            if x < 0 or y < 0:
                rv = True

    self._isOffScreenLabel[hash(obj)] = rv
    return rv

def isInferredLabelForContents(self, content, contents):
    """Returns the widget in contents whose inferred label text matches
    content's string, or None.

    Arguments:
    - content: an [obj, startOffset, endOffset, string] quadruple.
    - contents: a list of such quadruples to search for labelled widgets.
    """
    obj, start, end, string = content
    objs = list(filter(self.shouldInferLabelFor, [x[0] for x in contents]))
    if not objs:
        return None

    for o in objs:
        label, sources = self.inferLabelFor(o)
        if obj in sources and label.strip() == string.strip():
            return o

    return None
def isLabellingContents(self, content, contents):
    """Returns the labelled target in contents for the label in content,
    or None if content is not a label for anything in contents.

    Arguments:
    - content: an [obj, startOffset, endOffset, string] quadruple.
    - contents: a list of such quadruples to search for the label target.
    """
    obj, start, end, string = content
    if obj.getRole() != pyatspi.ROLE_LABEL:
        return None

    relationSet = obj.getRelationSet()
    if not relationSet:
        return None

    for relation in relationSet:
        if relation.getRelationType() == pyatspi.RELATION_LABEL_FOR:
            for i in range(0, relation.getNTargets()):
                target = relation.getTarget(i)
                # Use a distinct name here: the original code reused
                # 'content', silently shadowing this method's parameter.
                for candidate in contents:
                    if candidate[0] == target:
                        return target

    return None
def isAnchor(self, obj):
    """Returns True if obj is a named anchor: a link with no action, no
    text, and no focusability (i.e. purely a navigation target). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isAnchor.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    if obj.getRole() == pyatspi.ROLE_LINK \
       and not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not 'Action' in pyatspi.listInterfaces(obj) \
       and not self.queryNonEmptyText(obj):
        rv = True

    self._isAnchor[hash(obj)] = rv
    return rv

def isClickableElement(self, obj):
    """Returns True if obj is not focusable but exposes a 'click' action
    (e.g. an element with an onclick handler). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isClickableElement.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    if not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not self.isFocusModeWidget(obj):
        try:
            action = obj.queryAction()
            names = [action.getName(i) for i in range(action.nActions)]
        except NotImplementedError:
            rv = False
        else:
            rv = "click" in names

    self._isClickableElement[hash(obj)] = rv
    return rv

def isLandmark(self, obj):
    """Returns True if obj is an ARIA landmark: either by role, or by an
    xml-roles attribute listed in settings.ariaLandmarks. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isLandmark.get(hash(obj))
    if rv is not None:
        return rv

    if obj.getRole() == pyatspi.ROLE_LANDMARK:
        rv = True
    else:
        try:
            attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
        except:
            attrs = {}
        rv = attrs.get('xml-roles') in settings.ariaLandmarks

    self._isLandmark[hash(obj)] = rv
    return rv

def isLiveRegion(self, obj):
    """Returns True if obj carries a container-live object attribute
    (i.e. is inside an ARIA live region). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isLiveRegion.get(hash(obj))
    if rv is not None:
        return rv

    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        attrs = {}
    rv = 'container-live' in attrs

    self._isLiveRegion[hash(obj)] = rv
    return rv
def isLink(self, obj):
    """Returns True if obj should be treated as a navigable link.
    Named anchors are excluded; a text child whose name duplicates its
    link parent's name is treated as the link itself. Cached."""
    if not obj:
        return False

    rv = self._isLink.get(hash(obj))
    if rv is not None:
        return rv

    role = obj.getRole()

    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        if role == pyatspi.ROLE_STATIC:
            role = pyatspi.ROLE_TEXT
    except:
        pass

    if role == pyatspi.ROLE_LINK and not self.isAnchor(obj):
        rv = True
    elif role == pyatspi.ROLE_TEXT \
         and obj.parent.getRole() == pyatspi.ROLE_LINK \
         and obj.name and obj.name == obj.parent.name:
        rv = True

    # NOTE(review): if neither branch matched, rv is still None (not
    # False); callers appear to rely on truthiness only -- confirm.
    self._isLink[hash(obj)] = rv
    return rv

def isNonNavigablePopup(self, obj):
    """Returns True if obj is a popup (currently: a tooltip) that caret
    navigation should not enter. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isNonNavigablePopup.get(hash(obj))
    if rv is not None:
        return rv

    role = obj.getRole()
    if role == pyatspi.ROLE_TOOL_TIP:
        rv = True

    # NOTE(review): rv may be None rather than False here; truthiness only.
    self._isNonNavigablePopup[hash(obj)] = rv
    return rv

def isUselessImage(self, obj):
    """Returns True if obj is an image with nothing worth presenting:
    no name/description/children, not clickable, no longdesc, not a real
    link target, no image description, and small size. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isUselessImage.get(hash(obj))
    if rv is not None:
        return rv

    rv = True
    if obj.getRole() != pyatspi.ROLE_IMAGE:
        rv = False
    if rv and (obj.name or obj.description or obj.childCount):
        rv = False
    if rv and (self.isClickableElement(obj) or self.hasLongDesc(obj)):
        rv = False
    if rv and obj.parent.getRole() == pyatspi.ROLE_LINK:
        # An image inside a real (non-javascript) link is the link body.
        uri = self.uri(obj.parent)
        if uri and not uri.startswith('javascript'):
            rv = False
    if rv and 'Image' in pyatspi.listInterfaces(obj):
        image = obj.queryImage()
        if image.imageDescription:
            rv = False
        else:
            # Images larger than 25x25 are assumed to carry content.
            width, height = image.getImageSize()
            if width > 25 and height > 25:
                rv = False

    self._isUselessImage[hash(obj)] = rv
    return rv
def hasLongDesc(self, obj):
    """Returns True if obj exposes a 'showlongdesc' action (i.e. has a
    longdesc). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._hasLongDesc.get(hash(obj))
    if rv is not None:
        return rv

    try:
        action = obj.queryAction()
    except NotImplementedError:
        rv = False
    else:
        names = [action.getName(i) for i in range(action.nActions)]
        rv = "showlongdesc" in names

    self._hasLongDesc[hash(obj)] = rv
    return rv

def inferLabelFor(self, obj):
    """Returns (label string, source objects) inferred for obj via the
    script's label inference, or (None, []) if inference should not be
    attempted. Cached."""
    if not self.shouldInferLabelFor(obj):
        return None, []

    rv = self._inferredLabels.get(hash(obj))
    if rv is not None:
        return rv

    rv = self._script.labelInference.infer(obj, False)
    self._inferredLabels[hash(obj)] = rv
    return rv

def shouldInferLabelFor(self, obj):
    """Returns True if label inference should be attempted for obj: an
    unnamed, unlabelled form field in document content."""
    try:
        name = obj.name
    except:
        msg = "WEB: Exception getting name for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
    else:
        if name:
            return False

    if self._script.inSayAll():
        return False

    if not self.inDocumentContent():
        return False

    try:
        role = obj.getRole()
    except:
        msg = "WEB: Exception getting role for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False

    # TODO - JD: This is private.
    if self._script._lastCommandWasCaretNav \
       and role not in [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]:
        return False

    roles = [pyatspi.ROLE_CHECK_BOX,
             pyatspi.ROLE_COMBO_BOX,
             pyatspi.ROLE_ENTRY,
             pyatspi.ROLE_LIST_BOX,
             pyatspi.ROLE_PASSWORD_TEXT,
             pyatspi.ROLE_RADIO_BUTTON]
    if role not in roles:
        return False

    if self.displayedLabel(obj):
        return False

    return True
def isSpinnerEntry(self, obj):
    """Returns True if obj is the editable text inside a spin button."""
    if not self.inDocumentContent(obj):
        return False

    # TODO - JD: Ideally, things that look and act like spinners (such number inputs)
    # would look and act like platform native spinners. That's not true for Gecko. And
    # the only thing that's funkier is what we get from WebKitGtk. Try to at least get
    # the two engines into alignment before migrating Epiphany support to the web script.
    if obj.getState().contains(pyatspi.STATE_EDITABLE) \
       and obj.parent.getRole() == pyatspi.ROLE_SPIN_BUTTON:
        return True

    return False

def eventIsSpinnerNoise(self, event):
    """Returns True if event is a text change caused by spinning a spin
    button's value with Up/Down, which should not be presented as typing."""
    if event.type.startswith("object:text-changed") and self.isSpinnerEntry(event.source):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey in ["Down", "Up"]:
            return True

    return False

def treatEventAsSpinnerValueChange(self, event):
    """Returns True if this caret-moved event is really an Up/Down value
    change in the spinner entry holding the caret context."""
    if event.type.startswith("object:text-caret-moved") and self.isSpinnerEntry(event.source):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey in ["Down", "Up"]:
            obj, offset = self.getCaretContext()
            return event.source == obj

    return False

def eventIsStatusBarNoise(self, event):
    """Returns True if event is a status bar text/name update outside
    document content, which should not be presented."""
    if self.inDocumentContent(event.source):
        return False

    eType = event.type
    if eType.startswith("object:text-") or eType.endswith("accessible-name"):
        return event.source.getRole() == pyatspi.ROLE_STATUS_BAR

    return False

def eventIsAutocompleteNoise(self, event):
    """Returns True if event is editable-text noise caused by traversing
    autocomplete suggestions (list box, menu, or combo box items)."""
    if not self.inDocumentContent(event.source):
        return False

    isListBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_LIST_BOX
    isMenuItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_MENU
    isComboBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_COMBO_BOX

    if event.source.getState().contains(pyatspi.STATE_EDITABLE) \
       and event.type.startswith("object:text-"):
        obj, offset = self.getCaretContext()
        if isListBoxItem(obj) or isMenuItem(obj):
            return True

        if obj == event.source and isComboBoxItem(obj):
            lastKey, mods = self.lastKeyAndModifiers()
            if lastKey in ["Down", "Up"]:
                return True

    return False

def textEventIsDueToInsertion(self, event):
    """Returns True if event is a text event on the focused editable
    object caused by the user typing a printable character."""
    if not event.type.startswith("object:text-"):
        return False

    if not self.inDocumentContent(event.source) \
       or not event.source.getState().contains(pyatspi.STATE_EDITABLE) \
       or not event.source == orca_state.locusOfFocus:
        return False

    if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
        inputEvent = orca_state.lastNonModifierKeyEvent
        return inputEvent and inputEvent.isPrintableKey()

    return False

def textEventIsForNonNavigableTextObject(self, event):
    """Returns True if event is a text event for an object we treat as a
    single whole rather than navigating within it."""
    if not event.type.startswith("object:text-"):
        return False

    return self._treatTextObjectAsWhole(event.source)

# TODO - JD: As an experiment, we're stopping these at the event manager.
# If that works, this can be removed.
def eventIsEOCAdded(self, event):
    """Returns True if event inserted an embedded object character."""
    if not self.inDocumentContent(event.source):
        return False

    if event.type.startswith("object:text-changed:insert"):
        return self.EMBEDDED_OBJECT_CHARACTER in event.any_data

    return False
def caretMovedToSamePageFragment(self, event):
    """Returns True if event is a caret move into a same-page fragment
    (the focused link's URI equals the document frame's URI)."""
    if not event.type.startswith("object:text-caret-moved"):
        return False

    return self.uri(orca_state.locusOfFocus) == self.documentFrameURI()
@staticmethod
def getHyperlinkRange(obj):
    """Returns obj's (startIndex, endIndex) within its parent's text via
    the hyperlink interface; (-1, -1) on failure."""
    try:
        hyperlink = obj.queryHyperlink()
        start, end = hyperlink.startIndex, hyperlink.endIndex
    except NotImplementedError:
        msg = "WEB: %s does not implement the hyperlink interface" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return -1, -1
    except:
        msg = "WEB: Exception getting hyperlink indices for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return -1, -1

    return start, end

def characterOffsetInParent(self, obj):
    """Returns the character offset of obj in its parent's text, or -1."""
    start, end, length = self._rangeInParentWithLength(obj)
    return start

def _rangeInParentWithLength(self, obj):
    """Returns (start, end, parent text length) for obj within its
    parent; (-1, -1, 0) if the parent has no non-empty text."""
    if not obj:
        return -1, -1, 0

    text = self.queryNonEmptyText(obj.parent)
    if not text:
        return -1, -1, 0

    start, end = self.getHyperlinkRange(obj)
    return start, end, text.characterCount

@staticmethod
def getChildIndex(obj, offset):
    """Returns the index of the child embedded at offset in obj's text,
    or -1 if there is none (or no hypertext support)."""
    try:
        hypertext = obj.queryHypertext()
    except NotImplementedError:
        msg = "WEB: %s does not implement the hypertext interface" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return -1
    except:
        msg = "WEB: Exception querying hypertext interface for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return -1

    return hypertext.getLinkIndex(offset)

def getChildAtOffset(self, obj, offset):
    """Returns the child object embedded at offset in obj's text, or None."""
    index = self.getChildIndex(obj, offset)
    if index == -1:
        return None

    try:
        child = obj[index]
    except:
        return None

    return child
def hasNoSize(self, obj):
    """Returns True if obj has zero width or zero height (or its extents
    cannot be queried). Result is cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    cached = self._hasNoSize.get(hash(obj))
    if cached is not None:
        return cached

    try:
        extents = obj.queryComponent().getExtents(0)
    except:
        # No component interface (or a dead object): treat as sizeless.
        result = True
    else:
        result = not (extents.width and extents.height)

    self._hasNoSize[hash(obj)] = result
    return result
def doNotDescendForCaret(self, obj):
    """Returns True if caret navigation should not descend into obj's
    children (dead/hidden objects, or native widget containers)."""
    if not obj or self.isZombie(obj):
        return True

    try:
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return True

    if not childCount:
        return True

    if self.isHidden(obj) or self.isOffScreenLabel(obj):
        return True

    if self.isTextBlockElement(obj):
        return False

    doNotDescend = [pyatspi.ROLE_COMBO_BOX,
                    pyatspi.ROLE_LIST_BOX,
                    pyatspi.ROLE_MENU_BAR,
                    pyatspi.ROLE_MENU,
                    pyatspi.ROLE_MENU_ITEM,
                    pyatspi.ROLE_PUSH_BUTTON,
                    pyatspi.ROLE_TOGGLE_BUTTON,
                    pyatspi.ROLE_TOOL_BAR,
                    pyatspi.ROLE_TOOL_TIP,
                    pyatspi.ROLE_TREE,
                    pyatspi.ROLE_TREE_TABLE]
    return obj.getRole() in doNotDescend

def _searchForCaretContext(self, obj):
    """Descends from obj following text caret offsets to find the
    innermost (object, offset) caret context; (None, -1) if none found."""
    contextObj, contextOffset = None, -1
    while obj:
        try:
            offset = obj.queryText().caretOffset
        except:
            obj = None
        else:
            contextObj, contextOffset = obj, offset
            childIndex = self.getChildIndex(obj, offset)
            if childIndex >= 0 and obj.childCount:
                obj = obj[childIndex]
            else:
                break

    if contextObj:
        return self.findNextCaretInOrder(contextObj, max(-1, contextOffset - 1))

    return None, -1

def _getCaretContextViaLocusOfFocus(self):
    """Returns (locusOfFocus, its caret offset); offset is 0 if there is
    no text interface, -1 on any other failure."""
    obj = orca_state.locusOfFocus
    try:
        offset = obj.queryText().caretOffset
    except NotImplementedError:
        offset = 0
    except:
        offset = -1

    return obj, offset

def getCaretContext(self, documentFrame=None):
    """Returns the (object, offset) caret context for documentFrame,
    computing and caching it if not already cached."""
    if not documentFrame or self.isZombie(documentFrame):
        documentFrame = self.documentFrame()

    if not documentFrame:
        return self._getCaretContextViaLocusOfFocus()

    # Contexts are keyed on the document's parent, presumably so they
    # survive recreation of the document frame itself.
    context = self._caretContexts.get(hash(documentFrame.parent))
    if context:
        return context

    obj, offset = self._searchForCaretContext(documentFrame)
    self.setCaretContext(obj, offset, documentFrame)

    return obj, offset

def clearCaretContext(self, documentFrame=None):
    """Drops the cached caret context (and content cache) for the frame."""
    self.clearContentCache()
    documentFrame = documentFrame or self.documentFrame()
    if not documentFrame:
        return

    parent = documentFrame.parent
    self._caretContexts.pop(hash(parent), None)

def setCaretContext(self, obj=None, offset=-1, documentFrame=None):
    """Caches (obj, offset) as the caret context for documentFrame."""
    documentFrame = documentFrame or self.documentFrame()
    if not documentFrame:
        return

    parent = documentFrame.parent
    self._caretContexts[hash(parent)] = obj, offset

def findFirstCaretContext(self, obj, offset):
    """Returns the first valid (object, offset) caret context at or
    after (obj, offset), descending through embedded object characters."""
    try:
        role = obj.getRole()
    except:
        msg = "WEB: Exception getting first caret context for %s %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return None, -1

    lookInChild = [pyatspi.ROLE_LIST,
                   pyatspi.ROLE_TABLE,
                   pyatspi.ROLE_TABLE_ROW]
    if role in lookInChild and obj.childCount:
        msg = "WEB: First caret context for %s, %i will look in child %s" % (obj, offset, obj[0])
        debug.println(debug.LEVEL_INFO, msg)
        return self.findFirstCaretContext(obj[0], 0)

    text = self.queryNonEmptyText(obj)
    if not text:
        if self.isTextBlockElement(obj) or self.isAnchor(obj):
            nextObj, nextOffset = self.nextContext(obj, offset)
            if nextObj:
                msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, nextObj, nextOffset)
                debug.println(debug.LEVEL_INFO, msg)
                return nextObj, nextOffset

        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, 0)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, 0

    if offset >= text.characterCount:
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, text.characterCount)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, text.characterCount

    allText = text.getText(0, -1)
    offset = max(0, offset)
    if allText[offset] != self.EMBEDDED_OBJECT_CHARACTER:
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, offset

    child = self.getChildAtOffset(obj, offset)
    if not child:
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, None, -1)
        debug.println(debug.LEVEL_INFO, msg)
        return None, -1

    return self.findFirstCaretContext(child, 0)
def findNextCaretInOrder(self, obj=None, offset=-1):
    """Returns the next (object, offset) in document order after
    (obj, offset) that can contain the caret; (None, -1) at the end."""
    if not obj:
        obj, offset = self.getCaretContext()

    if not obj or not self.inDocumentContent(obj):
        return None, -1

    if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
        text = self.queryNonEmptyText(obj)
        if text:
            allText = text.getText(0, -1)
            for i in range(offset + 1, len(allText)):
                child = self.getChildAtOffset(obj, i)
                if child and not self.isZombie(child) and not self.isAnchor(child) \
                   and not self.isUselessImage(child):
                    return self.findNextCaretInOrder(child, -1)
                if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                    return obj, i
        elif not self.doNotDescendForCaret(obj) and obj.childCount:
            return self.findNextCaretInOrder(obj[0], -1)
        elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj):
            return obj, 0

    # If we're here, start looking up the tree, up to the document.
    documentFrame = self.documentFrame()
    if self.isSameObject(obj, documentFrame):
        return None, -1

    while obj.parent:
        parent = obj.parent
        if self.isZombie(parent):
            # The parent is dead; try to locate its living replacement.
            replicant = self.findReplicant(self.documentFrame(), parent)
            if replicant and not self.isZombie(replicant):
                parent = replicant
            elif parent.parent:
                obj = parent
                continue
            else:
                break

        # If obj occupies a single character in its parent, resume the
        # search from that character position in the parent.
        start, end, length = self._rangeInParentWithLength(obj)
        if start + 1 == end and 0 <= start < end <= length:
            return self.findNextCaretInOrder(parent, start)

        index = obj.getIndexInParent() + 1
        try:
            parentChildCount = parent.childCount
        except:
            msg = "WEB: Exception getting childCount for %s" % parent
            debug.println(debug.LEVEL_INFO, msg)
        else:
            if 0 <= index < parentChildCount:
                return self.findNextCaretInOrder(parent[index], -1)
        obj = parent

    return None, -1
def findPreviousCaretInOrder(self, obj=None, offset=-1):
    """Returns the previous (object, offset) in document order before
    (obj, offset) that can contain the caret; (None, -1) at the start."""
    if not obj:
        obj, offset = self.getCaretContext()

    if not obj or not self.inDocumentContent(obj):
        return None, -1

    if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
        text = self.queryNonEmptyText(obj)
        if text:
            allText = text.getText(0, -1)
            if offset == -1 or offset > len(allText):
                offset = len(allText)
            for i in range(offset - 1, -1, -1):
                child = self.getChildAtOffset(obj, i)
                if child and not self.isZombie(child) and not self.isAnchor(child) \
                   and not self.isUselessImage(child):
                    return self.findPreviousCaretInOrder(child, -1)
                if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                    return obj, i
        elif not self.doNotDescendForCaret(obj) and obj.childCount:
            return self.findPreviousCaretInOrder(obj[obj.childCount - 1], -1)
        elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj):
            return obj, 0

    # If we're here, start looking up the tree, up to the document.
    documentFrame = self.documentFrame()
    if self.isSameObject(obj, documentFrame):
        return None, -1

    while obj.parent:
        parent = obj.parent
        if self.isZombie(parent):
            # The parent is dead; try to locate its living replacement.
            replicant = self.findReplicant(self.documentFrame(), parent)
            if replicant and not self.isZombie(replicant):
                parent = replicant
            elif parent.parent:
                obj = parent
                continue
            else:
                break

        # If obj occupies a single character in its parent, resume the
        # search from that character position in the parent.
        start, end, length = self._rangeInParentWithLength(obj)
        if start + 1 == end and 0 <= start < end <= length:
            return self.findPreviousCaretInOrder(parent, start)

        index = obj.getIndexInParent() - 1
        try:
            parentChildCount = parent.childCount
        except:
            msg = "WEB: Exception getting childCount for %s" % parent
            debug.println(debug.LEVEL_INFO, msg)
        else:
            if 0 <= index < parentChildCount:
                return self.findPreviousCaretInOrder(parent[index], -1)
        obj = parent

    return None, -1
def handleAsLiveRegion(self, event):
    """Returns True if event should be presented as a live region update
    (the setting is enabled and the source is a live region)."""
    return bool(_settingsManager.getSetting('inferLiveRegions')) \
        and self.isLiveRegion(event.source)
def getPageSummary(self, obj):
    """Returns [headings, forms, tables, visited links, unvisited links,
    percentRead] counts for the document containing obj.

    Arguments:
    - obj: an object in the document of interest.
    """
    docframe = self.documentFrame(obj)
    col = docframe.queryCollection()
    headings = 0
    forms = 0
    tables = 0
    vlinks = 0
    uvlinks = 0
    percentRead = None  # Not computed here; always None.

    stateset = pyatspi.StateSet()
    roles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK, pyatspi.ROLE_TABLE,
             pyatspi.ROLE_FORM]
    rule = col.createMatchRule(stateset.raw(), col.MATCH_NONE,
                               "", col.MATCH_NONE,
                               roles, col.MATCH_ANY,
                               "", col.MATCH_NONE,
                               False)
    matches = col.getMatches(rule, col.SORT_ORDER_CANONICAL, 0, True)
    col.freeMatchRule(rule)

    # Use a distinct loop variable: the original code reused 'obj',
    # silently shadowing this method's parameter.
    for match in matches:
        role = match.getRole()
        if role == pyatspi.ROLE_HEADING:
            headings += 1
        elif role == pyatspi.ROLE_FORM:
            forms += 1
        elif role == pyatspi.ROLE_TABLE and not self.isLayoutOnly(match):
            tables += 1
        elif role == pyatspi.ROLE_LINK:
            if match.getState().contains(pyatspi.STATE_VISITED):
                vlinks += 1
            else:
                uvlinks += 1

    return [headings, forms, tables, vlinks, uvlinks, percentRead]
| pvagner/orca | src/orca/scripts/web/script_utilities.py | Python | lgpl-2.1 | 85,572 | [
"ORCA"
] | d10d2b798065bf26854fda048822996e17037edb858979fafc4788f8485962cb |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of AlgaeDICE.
Based on the publication "AlgaeDICE: Policy Gradient from Arbitrary Experience"
by Ofir Nachum, Bo Dai, Ilya Kostrikov, Yinlam Chow, Lihong Li, Dale Schuurmans.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
import algae_dice.keras_utils as keras_utils
ds = tfp.distributions
LOG_STD_MIN = -5
LOG_STD_MAX = 2
def soft_update(net, target_net, tau=0.005):
  """Polyak-averages net's variables into target_net in place.

  Args:
    net: Source network.
    target_net: Target network receiving the soft update.
    tau: Interpolation factor in [0, 1]; 1.0 copies net exactly.
  """
  for source_var, target_var in zip(net.variables, target_net.variables):
    target_var.assign(source_var * tau + target_var * (1 - tau))
class Actor(tf.keras.Model):
  """Gaussian policy with TanH squashing."""

  def __init__(self, state_dim, action_dim):
    """Creates an actor.

    Args:
      state_dim: State size.
      action_dim: Action size.
    """
    super(Actor, self).__init__()
    # The trunk outputs means and log-stddevs concatenated, hence the
    # final layer has 2 * action_dim units.
    self.trunk = tf.keras.Sequential([
        tf.keras.layers.Dense(
            256,
            input_shape=(state_dim,),
            activation=tf.nn.relu,
            kernel_initializer='orthogonal'),
        tf.keras.layers.Dense(
            256, activation=tf.nn.relu, kernel_initializer='orthogonal'),
        tf.keras.layers.Dense(2 * action_dim, kernel_initializer='orthogonal')
    ])

  def get_dist_and_mode(self, states):
    """Returns a tf.Distribution over actions for the given states, and
    the mode of that distribution.

    Args:
      states: A batch of states.
    """
    out = self.trunk(states)
    mu, log_std = tf.split(out, num_or_size_splits=2, axis=1)
    mode = tf.nn.tanh(mu)

    # Smoothly squash log_std into [LOG_STD_MIN, LOG_STD_MAX].
    log_std = tf.nn.tanh(log_std)
    log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (log_std + 1)
    std = tf.exp(log_std)

    # Tanh-squashed Gaussian: samples lie in (-1, 1).
    # NOTE(review): tfp.bijectors.Affine is deprecated in recent TFP
    # releases (Shift + Scale replace it) -- confirm the pinned version.
    dist = ds.TransformedDistribution(
        distribution=ds.Normal(loc=0., scale=1.),
        bijector=tfp.bijectors.Chain([
            tfp.bijectors.Tanh(),
            tfp.bijectors.Affine(shift=mu, scale_diag=std),
        ]),
        event_shape=[mu.shape[-1]],
        batch_shape=[mu.shape[0]])

    return dist, mode

  @tf.function
  def get_log_prob(self, states, actions):
    """Evaluates log probs for actions conditioned on states.

    Args:
      states: A batch of states.
      actions: A batch of actions to evaluate log probs on.

    Returns:
      Log probabilities of actions.
    """
    dist, _ = self.get_dist_and_mode(states)
    log_probs = dist.log_prob(actions)
    log_probs = tf.expand_dims(log_probs, -1)  # To avoid broadcasting
    return log_probs

  @tf.function
  def call(self, states):
    """Computes actions for given inputs.

    Args:
      states: A batch of states.

    Returns:
      A mode action, a sampled action and log probability of the sampled action.
    """
    dist, mode = self.get_dist_and_mode(states)
    samples = dist.sample()
    log_probs = dist.log_prob(samples)
    log_probs = tf.expand_dims(log_probs, -1)  # To avoid broadcasting
    return mode, samples, log_probs
class Critic(tf.keras.Model):
  """A critic network that estimates a single Q-function."""

  def __init__(self, state_dim, action_dim):
    """Creates networks.

    Args:
      state_dim: State size.
      action_dim: Action size.
    """
    super(Critic, self).__init__()
    self.critic = tf.keras.Sequential([
        tf.keras.layers.Dense(
            256,
            input_shape=(state_dim + action_dim,),
            activation=tf.nn.relu,
            kernel_initializer='orthogonal'),
        tf.keras.layers.Dense(
            256, activation=tf.nn.relu, kernel_initializer='orthogonal'),
        tf.keras.layers.Dense(1, kernel_initializer='orthogonal')
    ])

  @tf.function
  def call(self, states, actions):
    """Returns a Q-value estimate for given states and actions.

    Args:
      states: A batch of states.
      actions: A batch of actions.

    Returns:
      A single estimate of the Q-values.
    """
    x = tf.concat([states, actions], -1)
    q = self.critic(x)
    return q
class DoubleCritic(tf.keras.Model):
  """A critic network that estimates two Q-functions (double-Q)."""

  def __init__(self, state_dim, action_dim):
    """Creates networks.

    Args:
      state_dim: State size.
      action_dim: Action size.
    """
    super(DoubleCritic, self).__init__()

    def make_critic_net():
      # Both critic heads share the same architecture; only the
      # (independently initialized) weights differ.
      return tf.keras.Sequential([
          tf.keras.layers.Dense(
              256,
              input_shape=(state_dim + action_dim,),
              activation=tf.nn.relu,
              kernel_initializer='orthogonal'),
          tf.keras.layers.Dense(
              256, activation=tf.nn.relu, kernel_initializer='orthogonal'),
          tf.keras.layers.Dense(1, kernel_initializer='orthogonal')
      ])

    self.critic1 = make_critic_net()
    self.critic2 = make_critic_net()

  @tf.function
  def call(self, states, actions):
    """Returns Q-value estimates for given states and actions.

    Args:
      states: A batch of states.
      actions: A batch of actions.

    Returns:
      Two estimates of Q-values.
    """
    x = tf.concat([states, actions], -1)
    q1 = self.critic1(x)
    q2 = self.critic2(x)
    return q1, q2
class ALGAE(object):
"""Class performing algae training."""
def __init__(self,
             state_dim,
             action_dim,
             log_interval,
             actor_lr=1e-3,
             critic_lr=1e-3,
             alpha_init=1.0,
             learn_alpha=True,
             algae_alpha=1.0,
             use_dqn=True,
             use_init_states=True,
             exponent=2.0):
  """Creates networks.

  Args:
    state_dim: State size.
    action_dim: Action size.
    log_interval: Log losses every N steps.
    actor_lr: Actor learning rate.
    critic_lr: Critic learning rate.
    alpha_init: Initial temperature value for causal entropy regularization.
    learn_alpha: Whether to learn alpha or not.
    algae_alpha: Algae regularization weight.
    use_dqn: Whether to use double networks for target value.
    use_init_states: Whether to use initial states in objective.
    exponent: Exponent p of function f(x) = |x|^p / p.

  Raises:
    ValueError: If exponent is not greater than 1.
  """
  self.actor = Actor(state_dim, action_dim)
  self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=actor_lr)
  self.avg_actor_loss = tf.keras.metrics.Mean('actor_loss', dtype=tf.float32)
  self.avg_alpha_loss = tf.keras.metrics.Mean('alpha_loss', dtype=tf.float32)
  self.avg_actor_entropy = tf.keras.metrics.Mean(
      'actor_entropy', dtype=tf.float32)
  self.avg_alpha = tf.keras.metrics.Mean('alpha', dtype=tf.float32)
  self.avg_lambda = tf.keras.metrics.Mean('lambda', dtype=tf.float32)
  self.use_init_states = use_init_states

  if use_dqn:
    self.critic = DoubleCritic(state_dim, action_dim)
    self.critic_target = DoubleCritic(state_dim, action_dim)
  else:
    self.critic = Critic(state_dim, action_dim)
    self.critic_target = Critic(state_dim, action_dim)
  # tau=1.0 makes the target an exact copy of the online critic.
  soft_update(self.critic, self.critic_target, tau=1.0)
  self._lambda = tf.Variable(0.0, trainable=True)
  self.critic_optimizer = tf.keras.optimizers.Adam(learning_rate=critic_lr)
  self.avg_critic_loss = tf.keras.metrics.Mean(
      'critic_loss', dtype=tf.float32)

  # Temperature is parameterized by its log so alpha stays positive.
  self.log_alpha = tf.Variable(tf.math.log(alpha_init), trainable=True)
  self.learn_alpha = learn_alpha
  self.alpha_optimizer = tf.keras.optimizers.Adam()

  self.log_interval = log_interval
  self.algae_alpha = algae_alpha
  self.use_dqn = use_dqn
  self.exponent = exponent
  if self.exponent <= 1:
    # Fixed typo in the original message ("greather").
    raise ValueError('Exponent must be greater than 1, but received %f.' %
                     self.exponent)
  # f(x) = |x|^p / p and its derivative f'(x) = |x|^(p-1), evaluated
  # only on non-negative residuals (clipped below at 0).
  self.f = lambda resid: tf.pow(tf.abs(resid), self.exponent) / self.exponent
  clip_resid = lambda resid: tf.clip_by_value(resid, 0.0, 1e6)
  self.fgrad = lambda resid: tf.pow(clip_resid(resid), self.exponent - 1)
@property
def alpha(self):
  """Current (positive) entropy temperature, recovered from its log form."""
  log_value = self.log_alpha
  return tf.exp(log_value)
def critic_mix(self, s, a):
  """Blends online and target critic outputs (5% online / 95% target)."""
  if not self.use_dqn:
    # Single-critic case: mix the two scalar estimates directly.
    return self.critic(s, a) * 0.05 + self.critic_target(s, a) * 0.95
  # Double-critic case: clip the target with an element-wise min, then mix
  # each online head against the shared clipped target.
  t1, t2 = self.critic_target(s, a)
  clipped_target = tf.minimum(t1, t2)
  online_q1, online_q2 = self.critic(s, a)
  mix = lambda q: q * 0.05 + clipped_target * 0.95
  return mix(online_q1), mix(online_q2)
def fit_critic(self, states, actions, next_states, rewards, masks, discount,
               init_states):
  """Updates critic parameters.

  Minimizes the regularized objective built from f(Bellman residual), where
  f(x) = |x|^p / p (see `self.f`).  The Lagrange multiplier `self._lambda`
  is optimized jointly with the critic variables.

  Args:
    states: A batch of states.
    actions: A batch of actions.
    next_states: A batch of next states.
    rewards: A batch of rewards.
    masks: A batch of masks indicating the end of the episodes.
    discount: An MDP discount factor.
    init_states: A batch of init states from the MDP.

  Returns:
    Critic loss.
  """
  # Policy samples are drawn outside the tape, so no gradient reaches the
  # actor from this update.
  _, init_actions, _ = self.actor(init_states)
  _, next_actions, next_log_probs = self.actor(next_states)
  with tf.GradientTape(watch_accessed_variables=False) as tape:
    # Only the critic variables and the multiplier are differentiated.
    tape.watch(self.critic.variables + [self._lambda])
    if self.use_dqn:
      target_q1, target_q2 = self.critic_mix(next_states, next_actions)
      # Entropy-regularized Bellman targets for both critic heads.
      target_q1 = target_q1 - self.alpha * next_log_probs
      target_q2 = target_q2 - self.alpha * next_log_probs
      target_q1 = rewards + discount * masks * target_q1
      target_q2 = rewards + discount * masks * target_q2
      q1, q2 = self.critic(states, actions)
      init_q1, init_q2 = self.critic(init_states, init_actions)
      if discount == 1:
        # Undiscounted case: the residual is shifted by lambda + algae_alpha
        # and lambda itself is linearly regularized instead of the
        # init-state term.
        critic_loss1 = tf.reduce_mean(
            self.f(self._lambda + self.algae_alpha + target_q1 - q1) -
            self.algae_alpha * self._lambda)
        critic_loss2 = tf.reduce_mean(
            self.f(self._lambda + self.algae_alpha + target_q2 - q2) -
            self.algae_alpha * self._lambda)
      else:
        critic_loss1 = tf.reduce_mean(
            self.f(target_q1 - q1) +
            (1 - discount) * init_q1 * self.algae_alpha)
        critic_loss2 = tf.reduce_mean(
            self.f(target_q2 - q2) +
            (1 - discount) * init_q2 * self.algae_alpha)
      critic_loss = (critic_loss1 + critic_loss2)
    else:
      target_q = self.critic_mix(next_states, next_actions)
      target_q = target_q - self.alpha * next_log_probs
      target_q = rewards + discount * masks * target_q
      q = self.critic(states, actions)
      init_q = self.critic(init_states, init_actions)
      if discount == 1:
        critic_loss = tf.reduce_mean(
            self.f(self._lambda + self.algae_alpha + target_q - q) -
            self.algae_alpha * self._lambda)
      else:
        critic_loss = tf.reduce_mean(
            self.f(target_q - q) + (1 - discount) * init_q * self.algae_alpha)
  critic_grads = tape.gradient(critic_loss,
                               self.critic.variables + [self._lambda])
  self.critic_optimizer.apply_gradients(
      zip(critic_grads, self.critic.variables + [self._lambda]))
  return critic_loss
def fit_actor(self, states, actions, next_states, rewards, masks, discount,
              target_entropy, init_states):
  """Updates actor parameters and the entropy temperature alpha.

  The actor optimizes the same f(residual) objective as the critic via the
  chain rule: grad f(x) = f'(x) * grad x, with f'(x) precomputed by
  `self.fgrad` under stop_gradient.

  Args:
    states: A batch of states.
    actions: A batch of actions.
    next_states: A batch of next states.
    rewards: A batch of rewards.
    masks: A batch of masks indicating the end of the episodes.
    discount: An MDP discount factor.
    target_entropy: Target entropy value for alpha.
    init_states: A batch of init states from the MDP.

  Returns:
    Actor loss, alpha loss, and the per-sample entropy estimate
    (-log pi(a|s) for the sampled next actions).
  """
  with tf.GradientTape(watch_accessed_variables=False) as tape:
    # Only actor variables are differentiated here.
    tape.watch(self.actor.variables)
    _, init_actions, _ = self.actor(init_states)
    _, next_actions, next_log_probs = self.actor(next_states)
    if self.use_dqn:
      target_q1, target_q2 = self.critic_mix(next_states, next_actions)
      target_q1 = target_q1 - self.alpha * next_log_probs
      target_q2 = target_q2 - self.alpha * next_log_probs
      target_q1 = rewards + discount * masks * target_q1
      target_q2 = rewards + discount * masks * target_q2
      q1, q2 = self.critic(states, actions)
      init_q1, init_q2 = self.critic(init_states, init_actions)
      if discount == 1:
        # f'(shifted residual) is treated as a constant weight on the
        # residual so the actor gradient matches d/dtheta f(residual).
        actor_loss1 = -tf.reduce_mean(
            tf.stop_gradient(
                self.fgrad(self._lambda + self.algae_alpha + target_q1 - q1))
            * (target_q1 - q1))
        actor_loss2 = -tf.reduce_mean(
            tf.stop_gradient(
                self.fgrad(self._lambda + self.algae_alpha + target_q2 - q2))
            * (target_q2 - q2))
      else:
        actor_loss1 = -tf.reduce_mean(
            tf.stop_gradient(self.fgrad(target_q1 - q1)) * (target_q1 - q1) +
            (1 - discount) * init_q1 * self.algae_alpha)
        actor_loss2 = -tf.reduce_mean(
            tf.stop_gradient(self.fgrad(target_q2 - q2)) * (target_q2 - q2) +
            (1 - discount) * init_q2 * self.algae_alpha)
      actor_loss = (actor_loss1 + actor_loss2) / 2.0
    else:
      target_q = self.critic_mix(next_states, next_actions)
      target_q = target_q - self.alpha * next_log_probs
      target_q = rewards + discount * masks * target_q
      q = self.critic(states, actions)
      init_q = self.critic(init_states, init_actions)
      if discount == 1:
        actor_loss = -tf.reduce_mean(
            tf.stop_gradient(
                self.fgrad(self._lambda + self.algae_alpha + target_q - q)) *
            (target_q - q))
      else:
        actor_loss = -tf.reduce_mean(
            tf.stop_gradient(self.fgrad(target_q - q)) * (target_q - q) +
            (1 - discount) * init_q * self.algae_alpha)
    # Orthogonal regularization on the actor trunk weights.
    actor_loss += keras_utils.orthogonal_regularization(self.actor.trunk)
  actor_grads = tape.gradient(actor_loss, self.actor.variables)
  self.actor_optimizer.apply_gradients(zip(actor_grads, self.actor.variables))
  with tf.GradientTape(watch_accessed_variables=False) as tape:
    tape.watch([self.log_alpha])
    # Temperature loss: pushes the policy entropy towards target_entropy.
    alpha_loss = tf.reduce_mean(self.alpha *
                                (-next_log_probs - target_entropy))
  if self.learn_alpha:
    alpha_grads = tape.gradient(alpha_loss, [self.log_alpha])
    self.alpha_optimizer.apply_gradients(zip(alpha_grads, [self.log_alpha]))
  return actor_loss, alpha_loss, -next_log_probs
@tf.function
def train(self,
          replay_buffer_iter,
          init_replay_buffer,
          discount=0.99,
          tau=0.005,
          target_entropy=0,
          actor_update_freq=2):
  """Performs a single training step for critic and actor.

  Args:
    replay_buffer_iter: An tensorflow graph iteratable object for sampling
      transitions.
    init_replay_buffer: An tensorflow graph iteratable object for sampling
      init states.
    discount: A discount used to compute returns.
    tau: A soft updates discount.
    target_entropy: A target entropy for alpha.
    actor_update_freq: A frequency of the actor network updates.

  Returns:
    None. Losses are accumulated in the avg_* metrics and emitted as
    TensorBoard summaries every `log_interval` steps.
  """
  states, actions, next_states, rewards, masks = next(replay_buffer_iter)[0]
  if self.use_init_states:
    init_states = next(init_replay_buffer)[0]
  else:
    # Fall back to using the sampled batch states as "initial" states.
    init_states = states
  critic_loss = self.fit_critic(states, actions, next_states, rewards, masks,
                                discount, init_states)
  # NOTE(review): critic summaries below are logged at a fixed step of 0;
  # presumably this should be the optimizer iteration (as in the actor
  # branch) -- confirm.
  step = 0
  self.avg_critic_loss(critic_loss)
  if tf.equal(self.critic_optimizer.iterations % self.log_interval, 0):
    train_measurements = [
        ('train/critic_loss', self.avg_critic_loss.result()),
    ]
    for (label, value) in train_measurements:
      tf.summary.scalar(label, value, step=step)
    keras_utils.my_reset_states(self.avg_critic_loss)
  # Delayed actor update (every actor_update_freq critic steps).
  if tf.equal(self.critic_optimizer.iterations % actor_update_freq, 0):
    actor_loss, alpha_loss, entropy = self.fit_actor(states, actions,
                                                     next_states, rewards,
                                                     masks, discount,
                                                     target_entropy,
                                                     init_states)
    # Soft update of the target critic towards the online critic.
    soft_update(self.critic, self.critic_target, tau=tau)
    self.avg_actor_loss(actor_loss)
    self.avg_alpha_loss(alpha_loss)
    self.avg_actor_entropy(entropy)
    self.avg_alpha(self.alpha)
    self.avg_lambda(self._lambda)
    if tf.equal(self.actor_optimizer.iterations % self.log_interval, 0):
      train_measurements = [
          ('train/actor_loss', self.avg_actor_loss.result()),
          ('train/alpha_loss', self.avg_alpha_loss.result()),
          ('train/actor entropy', self.avg_actor_entropy.result()),
          ('train/alpha', self.avg_alpha.result()),
          ('train/lambda', self.avg_lambda.result()),
      ]
      for (label, value) in train_measurements:
        tf.summary.scalar(label, value, step=self.critic_optimizer.iterations)
      keras_utils.my_reset_states(self.avg_actor_loss)
      keras_utils.my_reset_states(self.avg_alpha_loss)
      keras_utils.my_reset_states(self.avg_actor_entropy)
      keras_utils.my_reset_states(self.avg_alpha)
      keras_utils.my_reset_states(self.avg_lambda)
def evaluate(self, env, num_episodes=10, max_episode_steps=None):
  """Evaluates the policy.

  Args:
    env: Environment to evaluate the policy on.
    num_episodes: A number of episodes to average the policy on.
    max_episode_steps: Max steps in an episode.

  Returns:
    Averaged reward and averaged number of steps per episode.
  """
  reward_sum = 0
  step_sum = 0
  for _ in range(num_episodes):
    obs = env.reset()
    steps_this_episode = 0
    while True:
      # The actor returns (mean_action, sampled_action, log_prob)-style
      # outputs; only the first element is used for evaluation.
      act, _, _ = self.actor(np.array([obs]))
      obs, reward, done, _ = env.step(act[0].numpy())
      reward_sum += reward
      step_sum += 1
      steps_this_episode += 1
      # Truncate the episode once the step budget is exhausted.
      if (max_episode_steps is not None and
          steps_this_episode == max_episode_steps):
        done = True
      if done:
        break
  return reward_sum / num_episodes, step_sum / num_episodes
| google-research/google-research | algae_dice/algae.py | Python | apache-2.0 | 19,381 | [
"Gaussian"
] | bf6b77dc6e8c21e4d36669483e090f536a5ce9237a558f49ae1b121ebc30b1c9 |
# -*- coding: utf-8 -*-
"""
=====================
Cython related magics
=====================
Magic command interface for interactive work with Cython
.. note::
The ``Cython`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
Usage
=====
To enable the magics below, execute ``%load_ext cythonmagic``.
``%%cython``
{CYTHON_DOC}
``%%cython_inline``
{CYTHON_INLINE_DOC}
``%%cython_pyximport``
{CYTHON_PYXIMPORT_DOC}
Author:
* Brian Granger
Parts of this code were taken from Cython.inline.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import imp
import io
import os
import re
import sys
import time
try:
reload
except NameError: # Python 3
from imp import reload
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from IPython.core import display
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.utils import py3compat
from IPython.utils.path import get_ipython_cache_dir
from IPython.utils.text import dedent
import Cython
from Cython.Compiler.Errors import CompileError
from Cython.Build.Dependencies import cythonize
@magics_class
class CythonMagics(Magics):
    """IPython cell magics for compiling and importing Cython code:
    ``%%cython``, ``%%cython_inline`` and ``%%cython_pyximport``."""

    def __init__(self, shell):
        super(CythonMagics,self).__init__(shell)
        # Modules previously imported via %%cython_pyximport, kept so a
        # re-run of the cell can reload() them.
        self._reloads = {}
        # Maps compilation keys to generated module names (see cython()).
        self._code_cache = {}

    def _import_all(self, module):
        # Push every name not starting with '__' from the compiled module
        # into the user's interactive namespace.
        for k,v in module.__dict__.items():
            if not k.startswith('__'):
                self.shell.push({k:v})

    @cell_magic
    def cython_inline(self, line, cell):
        """Compile and run a Cython code cell using Cython.inline.

        This magic simply passes the body of the cell to Cython.inline
        and returns the result. If the variables `a` and `b` are defined
        in the user's namespace, here is a simple example that returns
        their sum::

            %%cython_inline
            return a+b

        For most purposes, we recommend the usage of the `%%cython` magic.
        """
        locs = self.shell.user_global_ns
        globs = self.shell.user_ns
        # NOTE(review): the globals/locals assignments above look swapped
        # relative to their names -- confirm against Cython.inline's
        # signature before changing.
        return Cython.inline(cell, locals=locs, globals=globs)

    @cell_magic
    def cython_pyximport(self, line, cell):
        """Compile and import a Cython code cell using pyximport.

        The contents of the cell are written to a `.pyx` file in the current
        working directory, which is then imported using `pyximport`. This
        magic requires a module name to be passed::

            %%cython_pyximport modulename
            def f(x):
                return 2.0*x

        The compiled module is then imported and all of its symbols are
        injected into the user's namespace. For most purposes, we recommend
        the usage of the `%%cython` magic.
        """
        module_name = line.strip()
        if not module_name:
            raise ValueError('module name must be given')
        fname = module_name + '.pyx'
        with io.open(fname, 'w', encoding='utf-8') as f:
            f.write(cell)
        # Install pyximport lazily (once per session) with reload support so
        # re-running the cell picks up the new source.
        if 'pyximport' not in sys.modules:
            import pyximport
            pyximport.install(reload_support=True)
        if module_name in self._reloads:
            module = self._reloads[module_name]
            reload(module)
        else:
            __import__(module_name)
            module = sys.modules[module_name]
            self._reloads[module_name] = module
        self._import_all(module)

    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        '-c', '--compile-args', action='append', default=[],
        help="Extra flags to pass to compiler via the `extra_compile_args` "
             "Extension flag (can be specified multiple times)."
    )
    @magic_arguments.argument(
        '--link-args', action='append', default=[],
        help="Extra flags to pass to linker via the `extra_link_args` "
             "Extension flag (can be specified multiple times)."
    )
    @magic_arguments.argument(
        '-l', '--lib', action='append', default=[],
        help="Add a library to link the extension against (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-n', '--name',
        help="Specify a name for the Cython module."
    )
    @magic_arguments.argument(
        '-L', dest='library_dirs', metavar='dir', action='append', default=[],
        help="Add a path to the list of libary directories (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-I', '--include', action='append', default=[],
        help="Add a path to the list of include directories (can be specified "
             "multiple times)."
    )
    @magic_arguments.argument(
        '-+', '--cplus', action='store_true', default=False,
        help="Output a C++ rather than C file."
    )
    @magic_arguments.argument(
        '-f', '--force', action='store_true', default=False,
        help="Force the compilation of a new module, even if the source has been "
             "previously compiled."
    )
    @magic_arguments.argument(
        '-a', '--annotate', action='store_true', default=False,
        help="Produce a colorized HTML version of the source."
    )
    @cell_magic
    def cython(self, line, cell):
        """Compile and import everything from a Cython code cell.

        The contents of the cell are written to a `.pyx` file in the
        directory `IPYTHONDIR/cython` using a filename with the hash of the
        code. This file is then cythonized and compiled. The resulting module
        is imported and all of its symbols are injected into the user's
        namespace. The usage is similar to that of `%%cython_pyximport` but
        you don't have to pass a module name::

            %%cython
            def f(x):
                return 2.0*x

        To compile OpenMP codes, pass the required `--compile-args`
        and `--link-args`. For example with gcc::

            %%cython --compile-args=-fopenmp --link-args=-fopenmp
            ...
        """
        args = magic_arguments.parse_argstring(self.cython, line)
        # Cython requires the source to end with a newline.
        code = cell if cell.endswith('\n') else cell+'\n'
        lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
        quiet = True
        # The key determines the generated module name: the same code under
        # the same interpreter and Cython version reuses the cached module.
        key = code, sys.version_info, sys.executable, Cython.__version__

        if not os.path.exists(lib_dir):
            os.makedirs(lib_dir)

        if args.force:
            # Force a new module name by adding the current time to the
            # key which is hashed to determine the module name.
            key += time.time(),

        if args.name:
            module_name = py3compat.unicode_to_str(args.name)
        else:
            module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
        module_path = os.path.join(lib_dir, module_name + self.so_ext)

        have_module = os.path.isfile(module_path)
        need_cythonize = not have_module

        if args.annotate:
            html_file = os.path.join(lib_dir, module_name + '.html')
            if not os.path.isfile(html_file):
                need_cythonize = True

        if need_cythonize:
            c_include_dirs = args.include
            # Heuristic: if the cell mentions numpy anywhere, make its C
            # headers available to the compiler.
            if 'numpy' in code:
                import numpy
                c_include_dirs.append(numpy.get_include())
            pyx_file = os.path.join(lib_dir, module_name + '.pyx')
            pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())
            with io.open(pyx_file, 'w', encoding='utf-8') as f:
                f.write(code)
            extension = Extension(
                name = module_name,
                sources = [pyx_file],
                include_dirs = c_include_dirs,
                library_dirs = args.library_dirs,
                extra_compile_args = args.compile_args,
                extra_link_args = args.link_args,
                libraries = args.lib,
                language = 'c++' if args.cplus else 'c',
            )
            build_extension = self._get_build_extension()
            try:
                opts = dict(
                    quiet=quiet,
                    annotate = args.annotate,
                    force = True,
                )
                build_extension.extensions = cythonize([extension], **opts)
            except CompileError:
                # Cython has already printed the compile errors; just abort.
                return

        # Note: `not have_module` implies need_cythonize, so build_extension
        # and pyx_file are guaranteed to exist here.
        if not have_module:
            build_extension.build_temp = os.path.dirname(pyx_file)
            build_extension.build_lib = lib_dir
            build_extension.run()
            self._code_cache[key] = module_name

        module = imp.load_dynamic(module_name, module_path)
        self._import_all(module)

        if args.annotate:
            try:
                with io.open(html_file, encoding='utf-8') as f:
                    annotated_html = f.read()
            except IOError as e:
                # File could not be opened. Most likely the user has a version
                # of Cython before 0.15.1 (when `cythonize` learned the
                # `force` keyword argument) and has already compiled this
                # exact source without annotation.
                print('Cython completed successfully but the annotated '
                      'source could not be read.', file=sys.stderr)
                print(e, file=sys.stderr)
            else:
                return display.HTML(self.clean_annotated_html(annotated_html))

    @property
    def so_ext(self):
        """The extension suffix for compiled modules."""
        try:
            return self._so_ext
        except AttributeError:
            # Computed lazily, and cached, from a throwaway build_ext command.
            self._so_ext = self._get_build_extension().get_ext_filename('')
            return self._so_ext

    def _clear_distutils_mkpath_cache(self):
        """clear distutils mkpath cache

        prevents distutils from skipping re-creation of dirs that have been removed
        """
        try:
            from distutils.dir_util import _path_created
        except ImportError:
            pass
        else:
            _path_created.clear()

    def _get_build_extension(self):
        """Return a fresh, finalized distutils `build_ext` command object."""
        self._clear_distutils_mkpath_cache()
        dist = Distribution()
        config_files = dist.find_config_files()
        try:
            # Drop setup.cfg -- presumably to avoid picking up unrelated
            # build options from the current directory; confirm.
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        dist.parse_config_files(config_files)
        build_extension = build_ext(dist)
        build_extension.finalize_options()
        return build_extension

    @staticmethod
    def clean_annotated_html(html):
        """Clean up the annotated HTML source.

        Strips the link to the generated C or C++ file, which we do not
        present to the user.
        """
        r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
        html = '\n'.join(l for l in html.splitlines() if not r.match(l))
        return html
# Substitute the magics' (dedented) docstrings into the {PLACEHOLDER}s of the
# module docstring at import time.
__doc__ = __doc__.format(
    # rST doesn't see the -+ flag as part of an option list, so we
    # hide it from the module-level docstring.
    CYTHON_DOC = dedent(CythonMagics.cython.__doc__\
        .replace('-+, --cplus','--cplus ')),
    CYTHON_INLINE_DOC = dedent(CythonMagics.cython_inline.__doc__),
    CYTHON_PYXIMPORT_DOC = dedent(CythonMagics.cython_pyximport.__doc__),
)
def load_ipython_extension(ip):
    """Load the extension in IPython by registering the Cython magics."""
    magics = CythonMagics
    ip.register_magics(magics)
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/IPython/extensions/cythonmagic.py | Python | bsd-3-clause | 11,907 | [
"Brian"
] | 62198f8cda9cf04a067d0ddcbe639216f5aeade249ce829d7f1f46e7606ea351 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import gtk
import sys
from logging import info, warn
from zeroinstall import _, translation
from zeroinstall import SafeException
from zeroinstall.support import tasks, pretty_size
from zeroinstall.injector import download, iface_cache
from iface_browser import InterfaceBrowser
import dialog
from zeroinstall.gtkui import gtkutils
from zeroinstall.gtkui import help_box
ngettext = translation.ngettext
SHOW_PREFERENCES = 0
class MainWindow:
    """Main GTK window for the 0launch GUI: shows the dependency tree,
    download progress and the Run/Download/Select action button."""
    progress = None                 # gtk progress bar widget
    progress_area = None            # container shown only while downloading
    browser = None                  # InterfaceBrowser (the component tree)
    window = None                   # top-level dialog widget
    cancel_download_and_run = None  # tasks.Blocker to abort a pending run
    driver = None
    comment = None                  # label for status messages (see set_message)
    systray_icon = None
    systray_icon_blocker = None     # triggered when the tray icon is clicked

    def __init__(self, driver, widgets, download_only, select_only = False):
        """Build the window from the Glade `widgets` template.

        `download_only` / `select_only` choose the action button's label
        (Run / Download / Select)."""
        self.driver = driver
        self.select_only = select_only

        def update_ok_state():
            # OK is only clickable once the solver has a complete solution.
            self.window.set_response_sensitive(gtk.RESPONSE_OK, driver.solver.ready)
            if driver.solver.ready and self.window.get_focus() is None:
                run_button.grab_focus()
        driver.watchers.append(update_ok_state)

        self.window = widgets.get_widget('main')
        self.window.set_default_size(gtk.gdk.screen_width() * 2 / 5, 300)
        self.progress = widgets.get_widget('progress')
        self.progress_area = widgets.get_widget('progress_area')
        self.comment = widgets.get_widget('comment')

        widgets.get_widget('stop').connect('clicked', lambda b: driver.config.handler.abort_all_downloads())

        self.refresh_button = widgets.get_widget('refresh')

        # Tree view
        self.browser = InterfaceBrowser(driver, widgets)

        prefs = widgets.get_widget('preferences')
        self.window.get_action_area().set_child_secondary(prefs, True)

        # Glade won't let me add this to the template!
        if select_only:
            run_button = dialog.MixedButton(_("_Select"), gtk.STOCK_EXECUTE, button = gtk.ToggleButton())
        elif download_only:
            run_button = dialog.MixedButton(_("_Download"), gtk.STOCK_EXECUTE, button = gtk.ToggleButton())
        else:
            run_button = dialog.MixedButton(_("_Run"), gtk.STOCK_EXECUTE, button = gtk.ToggleButton())
        self.window.add_action_widget(run_button, gtk.RESPONSE_OK)
        run_button.show_all()
        run_button.set_can_default(True)
        self.run_button = run_button

        run_button.grab_focus()

        def response(dialog, resp):
            # Dialog button dispatcher (Cancel / OK / Help / Preferences).
            if resp in (gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT):
                self.window.destroy()
                sys.exit(1)
            elif resp == gtk.RESPONSE_OK:
                # A second click while a run is pending cancels the first.
                if self.cancel_download_and_run:
                    self.cancel_download_and_run.trigger()
                if run_button.get_active():
                    self.cancel_download_and_run = tasks.Blocker("cancel downloads")
                    self.download_and_run(run_button, self.cancel_download_and_run)
            elif resp == gtk.RESPONSE_HELP:
                gui_help.display()
            elif resp == SHOW_PREFERENCES:
                import preferences
                preferences.show_preferences(driver.config, notify_cb = lambda: driver.solve_with_downloads())
        self.window.connect('response', response)
        self.window.realize()   # Make busy pointer work, even with --systray

    def destroy(self):
        """Close and dispose of the window."""
        self.window.destroy()

    def show(self):
        """Make the window visible."""
        self.window.show()

    def set_response_sensitive(self, response, sensitive):
        """Enable/disable one of the dialog's response buttons."""
        self.window.set_response_sensitive(response, sensitive)

    # NOTE(review): 'async' became a reserved word in Python 3.7, so
    # `tasks.async` only parses on Python 2 / early Python 3.
    @tasks.async
    def download_and_run(self, run_button, cancelled):
        """Download any uncached implementations (unless select-only), then
        write the selections document to stdout and exit successfully."""
        try:
            if not self.select_only:
                downloaded = self.driver.download_uncached_implementations()

                if downloaded:
                    # We need to wait until everything is downloaded...
                    blockers = [downloaded, cancelled]
                    yield blockers
                    tasks.check(blockers)

                    if cancelled.happened:
                        return

                uncached = self.driver.get_uncached_implementations()
            else:
                uncached = None     # (we don't care)

            if uncached:
                missing = '\n- '.join([_('%(iface_name)s %(impl_version)s') % {'iface_name': iface.get_name(), 'impl_version': impl.get_version()} for iface, impl in uncached])
                dialog.alert(self.window, _('Not all downloads succeeded; cannot run program.\n\nFailed to get:') + '\n- ' + missing)
            else:
                sels = self.driver.solver.selections
                doc = sels.toDOM()
                reply = doc.toxml('utf-8')
                if sys.version_info[0] > 2:
                    stdout = sys.stdout.buffer
                else:
                    stdout = sys.stdout
                # Length-prefixed XML selections document, written to stdout
                # -- presumably consumed by the invoking 0launch process.
                stdout.write(('Length:%8x\n' % len(reply)).encode('utf-8') + reply)
                self.window.destroy()
                sys.exit(0)         # Success
        except SystemExit:
            raise
        except download.DownloadAborted as ex:
            run_button.set_active(False)
            # Don't bother reporting this to the user
        except Exception as ex:
            run_button.set_active(False)
            self.report_exception(ex)

    def update_download_status(self, only_update_visible = False):
        """Called at regular intervals while there are downloads in progress,
        and once at the end. Update the display."""
        monitored_downloads = self.driver.config.handler.monitored_downloads
        self.browser.update_download_status(only_update_visible)

        if not monitored_downloads:
            # Nothing in flight: hide the bar and restore the cursor.
            self.progress_area.hide()
            self.window.get_window().set_cursor(None)
            return

        if not self.progress_area.get_property('visible'):
            self.progress_area.show()
            self.window.get_window().set_cursor(gtkutils.get_busy_pointer())

        any_known = False
        done = total = self.driver.config.handler.total_bytes_downloaded  # Completed downloads
        n_downloads = self.driver.config.handler.n_completed_downloads
        # Now add downloads in progress...
        for x in monitored_downloads:
            if x.status != download.download_fetching: continue
            n_downloads += 1
            if x.expected_size:
                any_known = True
            so_far = x.get_bytes_downloaded_so_far()
            total += x.expected_size or max(4096, so_far)  # Guess about 4K for feeds/icons
            done += so_far

        progress_text = '%s / %s' % (pretty_size(done), pretty_size(total))
        self.progress.set_text(
            ngettext('Downloading one file (%(progress)s)',
                     'Downloading %(number)d files (%(progress)s)', n_downloads)
            % {'progress': progress_text, 'number': n_downloads})

        if total == 0 or (n_downloads < 2 and not any_known):
            # No sizes known yet: show indeterminate activity.
            self.progress.pulse()
        else:
            self.progress.set_fraction(float(done) / total)

    def set_message(self, message):
        """Show `message` in bold in the comment label."""
        import pango
        self.comment.set_text(message)
        attrs = pango.AttrList()
        attrs.insert(pango.AttrWeight(pango.WEIGHT_BOLD, end_index = len(message)))
        self.comment.set_attributes(attrs)
        self.comment.show()

    def use_systray_icon(self):
        """Show a notification-area icon instead of the main window (used
        while checking for updates in the background)."""
        try:
            if sys.version_info[0] > 2:
                self.systray_icon = gtk.StatusIcon.new_from_icon_name("zeroinstall")
            else:
                self.systray_icon = gtk.status_icon_new_from_icon_name("zeroinstall")
        except Exception as ex:
            info(_("No system tray support: %s"), ex)
        else:
            root_iface = iface_cache.iface_cache.get_interface(self.driver.requirements.interface_uri)
            self.systray_icon.set_tooltip(_('Checking for updates for %s') % root_iface.get_name())
            self.systray_icon.connect('activate', self.remove_systray_icon)
            self.systray_icon_blocker = tasks.Blocker('Tray icon clicked')

    def remove_systray_icon(self, i = None):
        """Hide the tray icon and show the main window (icon was clicked)."""
        assert self.systray_icon, i
        self.show()
        self.systray_icon.set_visible(False)
        self.systray_icon = None
        self.systray_icon_blocker.trigger()
        self.systray_icon_blocker = None

    def report_exception(self, ex, tb = None):
        """Log `ex` and report it to the user -- via the blinking tray icon
        when minimised there, otherwise with an alert box."""
        if not isinstance(ex, SafeException):
            if isinstance(ex, AssertionError):
                # Assertions often don't say that they're errors (and are frequently
                # blank).
                ex = repr(ex)
            if tb is None:
                warn(ex, exc_info = True)
            else:
                warn(ex, exc_info = (type(ex), ex, tb))
        if self.systray_icon:
            self.systray_icon.set_blinking(True)
            self.systray_icon.set_tooltip(str(ex) + '\n' + _('(click for details)'))
        else:
            dialog.alert(self.window, str(ex) or repr(ex))
# Static help dialog, shown when the dialog emits gtk.RESPONSE_HELP.
gui_help = help_box.HelpBox(_("Injector Help"),
(_('Overview'), '\n' +
_("""A program is made up of many different components, typically written by different \
groups of people. Each component is available in multiple versions. Zero Install is \
used when starting a program. Its job is to decide which implementation of each required \
component to use.
Zero Install starts with the program you want to run (like 'The Gimp') and chooses an \
implementation (like 'The Gimp 2.2.0'). However, this implementation \
will in turn depend on other components, such as 'GTK' (which draws the menus \
and buttons). Thus, it must choose implementations of \
each dependency (each of which may require further components, and so on).""")),
(_('List of components'), '\n' +
_("""The main window displays all these components, and the version of each chosen \
implementation. The top-most one represents the program you tried to run, and each direct \
child is a dependency. The 'Fetch' column shows the amount of data that needs to be \
downloaded, or '(cached)' if it is already on this computer.
If you are happy with the choices shown, click on the Download (or Run) button to \
download (and run) the program.""")),
(_('Choosing different versions'), '\n' +
_("""To control which implementations (versions) are chosen you can click on Preferences \
and adjust the network policy and the overall stability policy. These settings affect \
all programs run using Zero Install.
Alternatively, you can edit the policy of an individual component by clicking on the \
button at the end of its line in the table and choosing "Show Versions" from the menu. \
See that dialog's help text for more information.""") + '\n'),
(_('Reporting bugs'), '\n' +
_("""To report a bug, right-click over the component which you think contains the problem \
and choose 'Report a Bug...' from the menu. If you don't know which one is the cause, \
choose the top one (i.e. the program itself). The program's author can reassign the \
bug if necessary, or switch to using a different version of the library.""") + '\n'),
(_('The cache'), '\n' +
_("""Each version of a program that is downloaded is stored in the Zero Install cache. This \
means that it won't need to be downloaded again each time you run the program. The \
"0store manage" command can be used to view the cache.""") + '\n'),
)
| dsqmoore/0install | zeroinstall/0launch-gui/mainwindow.py | Python | lgpl-2.1 | 10,052 | [
"VisIt"
] | 595de48104383e4b9ea3900670eb05143e37eea8b84c310ba72ff10c685df9dd |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
'GBTRegressor', 'GBTRegressionModel',
'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
'IsotonicRegression', 'IsotonicRegressionModel',
'LinearRegression', 'LinearRegressionModel',
'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
'RandomForestRegressor', 'RandomForestRegressionModel']
@inherit_doc
class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept,
HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth,
JavaMLWritable, JavaMLReadable):
"""
Linear regression.
The learning objective is to minimize the squared error, with regularization.
The specific squared error loss function used is: L = 1/2n ||A coefficients - y||^2^
This supports multiple types of regularization:
* none (a.k.a. ordinary least squares)
* L2 (ridge regression)
* L1 (Lasso)
* L2 + L1 (elastic net)
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, 2.0, Vectors.dense(1.0)),
... (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
>>> lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight")
>>> model = lr.fit(df)
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> abs(model.transform(test0).head().prediction - (-1.0)) < 0.001
True
>>> abs(model.coefficients[0] - 1.0) < 0.001
True
>>> abs(model.intercept - 0.0) < 0.001
True
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> abs(model.transform(test1).head().prediction - 1.0) < 0.001
True
>>> lr.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> lr.save(lr_path)
>>> lr2 = LinearRegression.load(lr_path)
>>> lr2.getMaxIter()
5
>>> model_path = temp_path + "/lr_model"
>>> model.save(model_path)
>>> model2 = LinearRegressionModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
>>> model.numFeatures
1
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
"""
super(LinearRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.LinearRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6)
kwargs = self._input_kwargs
self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
        Sets params for linear regression.
        """
        # Only keywords the caller explicitly passed are forwarded to _set,
        # so defaults established in __init__/_setDefault are not clobbered.
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        """Wrap the fitted JVM model in a :class:`LinearRegressionModel`."""
        return LinearRegressionModel(java_model)
class LinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`LinearRegression`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        # All accessors delegate to the wrapped JVM model via Py4J.
        return self._call_java("coefficients")

    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        if self.hasSummary:
            java_lrt_summary = self._call_java("summary")
            return LinearRegressionTrainingSummary(java_lrt_summary)
        else:
            # A summary only exists on a model produced directly by fit();
            # models reloaded from disk do not carry one.
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental

    Linear regression results evaluated on a dataset.

    .. versionadded:: 2.0.0
    """
    # Every property below is a read-only view onto the JVM-side summary
    # object, fetched lazily via Py4J on each access.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the predicted value of
        the label at each instance.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")

    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")

    @property
    @since("2.0.0")
    def explainedVariance(self):
        r"""
        Returns the explained variance regression score.
        explainedVariance = 1 - variance(y - \hat{y}) / variance(y)

        .. seealso:: `Wikipedia explain variation \
            <http://en.wikipedia.org/wiki/Explained_variation>`_

        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("explainedVariance")

    @property
    @since("2.0.0")
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function
        corresponding to the expected value of the absolute error
        loss or l1-norm loss.

        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanAbsoluteError")

    @property
    @since("2.0.0")
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function
        corresponding to the expected value of the squared error
        loss or quadratic loss.

        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("meanSquaredError")

    @property
    @since("2.0.0")
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the
        square root of the mean squared error.

        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("rootMeanSquaredError")

    @property
    @since("2.0.0")
    def r2(self):
        """
        Returns R^2^, the coefficient of determination.

        .. seealso:: `Wikipedia coefficient of determination \
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`

        .. note:: This ignores instance weights (setting all to 1.0) from
            `LinearRegression.weightCol`. This will change in later Spark
            versions.
        """
        return self._call_java("r2")

    @property
    @since("2.0.0")
    def residuals(self):
        """
        Residuals (label - predicted value)
        """
        return self._call_java("residuals")

    @property
    @since("2.0.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions
        """
        return self._call_java("numInstances")

    @property
    @since("2.0.0")
    def devianceResiduals(self):
        """
        The weighted residuals, the usual residuals rescaled by the
        square root of the instance weights.
        """
        return self._call_java("devianceResiduals")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.

        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.

        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.

        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.

        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.

        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.

        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    .. note:: Experimental

    Linear regression training results. Currently, the training summary ignores the
    training weights except for the objective trace.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        This value is only available when using the "l-bfgs" solver.

        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("objectiveHistory")

    @property
    @since("2.0.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        This value is only available when using the "l-bfgs" solver.

        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("totalIterations")
@inherit_doc
class IsotonicRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                         HasWeightCol, JavaMLWritable, JavaMLReadable):
    """
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True

    .. versionadded:: 1.6.0
    """

    # NOTE: fixed a missing space in the concatenated description below; it
    # previously rendered as "...(true) orantitonic/decreasing...".
    isotonic = \
        Param(Params._dummy(), "isotonic",
              "whether the output sequence should be isotonic/increasing (true) or " +
              "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
    featureIndex = \
        Param(Params._dummy(), "featureIndex",
              "The index of the feature if featuresCol is a vector column, no effect otherwise.",
              typeConverter=TypeConverters.toInt)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0)
        """
        super(IsotonicRegression, self).__init__()
        # Create the JVM-side estimator; fitting is delegated to it via Py4J.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        self._setDefault(isotonic=True, featureIndex=0)
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0)
        Set the params for IsotonicRegression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in an :class:`IsotonicRegressionModel`."""
        return IsotonicRegressionModel(java_model)

    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)

    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)

    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)

    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
class IsotonicRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`IsotonicRegression`.

    .. versionadded:: 1.6.0
    """

    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries in increasing order for which predictions are known.
        """
        # Delegates to the wrapped JVM model via Py4J.
        return self._call_java("boundaries")

    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index, monotone because of isotonic
        regression.
        """
        return self._call_java("predictions")
class TreeEnsembleParams(DecisionTreeParams):
    """
    Mixin for Decision Tree-based ensemble algorithms parameters.
    """

    subsamplingRate = Param(Params._dummy(), "subsamplingRate", "Fraction of the training data " +
                            "used for learning each decision tree, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)

    def __init__(self):
        super(TreeEnsembleParams, self).__init__()

    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)

    @since("1.4.0")
    def getSubsamplingRate(self):
        """
        Gets the value of subsamplingRate or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)
class TreeRegressorParams(Params):
    """
    Private class to track supported impurity measures.
    """

    # Only "variance" is supported for regression trees.
    supportedImpurities = ["variance"]
    impurity = Param(Params._dummy(), "impurity",
                     "Criterion used for information gain calculation (case-insensitive). " +
                     "Supported options: " +
                     ", ".join(supportedImpurities), typeConverter=TypeConverters.toString)

    def __init__(self):
        super(TreeRegressorParams, self).__init__()

    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)

    @since("1.4.0")
    def getImpurity(self):
        """
        Gets the value of impurity or its default value.
        """
        return self.getOrDefault(self.impurity)
class RandomForestParams(TreeEnsembleParams):
    """
    Private class to track supported random forest parameters.
    """

    supportedFeatureSubsetStrategies = ["auto", "all", "onethird", "sqrt", "log2"]
    numTrees = Param(Params._dummy(), "numTrees", "Number of trees to train (>= 1).",
                     typeConverter=TypeConverters.toInt)
    featureSubsetStrategy = \
        Param(Params._dummy(), "featureSubsetStrategy",
              "The number of features to consider for splits at each tree node. Supported " +
              "options: " + ", ".join(supportedFeatureSubsetStrategies) + ", (0.0-1.0], [1-n].",
              typeConverter=TypeConverters.toString)

    def __init__(self):
        super(RandomForestParams, self).__init__()

    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)

    @since("1.4.0")
    def getNumTrees(self):
        """
        Gets the value of numTrees or its default value.
        """
        return self.getOrDefault(self.numTrees)

    @since("1.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)

    @since("1.4.0")
    def getFeatureSubsetStrategy(self):
        """
        Gets the value of featureSubsetStrategy or its default value.
        """
        return self.getOrDefault(self.featureSubsetStrategy)
class GBTParams(TreeEnsembleParams):
    """
    Private class to track supported GBT params.
    """
    # Loss functions supported by GBT regression (case-insensitive).
    supportedLossTypes = ["squared", "absolute"]
@inherit_doc
class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                            DecisionTreeParams, TreeRegressorParams, HasCheckpointInterval,
                            HasSeed, JavaMLWritable, JavaMLReadable, HasVarianceCol):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
    >>> model = dt.fit(df)
    >>> model.depth
    1
    >>> model.numNodes
    3
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtr_path = temp_path + "/dtr"
    >>> dt.save(dtr_path)
    >>> dt2 = DecisionTreeRegressor.load(dtr_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtr_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeRegressionModel.load(model_path)
    >>> model.numNodes == model2.numNodes
    True
    >>> model.depth == model2.depth
    True
    >>> model.transform(test1).head().variance
    0.0

    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
                 seed=None, varianceCol=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", seed=None, varianceCol=None)
        """
        super(DecisionTreeRegressor, self).__init__()
        # Create the JVM-side estimator; fitting is delegated to it via Py4J.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance")
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", seed=None, varianceCol=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", seed=None, varianceCol=None)
        Sets params for the DecisionTreeRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in a :class:`DecisionTreeRegressionModel`."""
        return DecisionTreeRegressionModel(java_model)
@inherit_doc
class DecisionTreeModel(JavaModel, JavaPredictionModel):
    """
    Abstraction for Decision Tree models.

    .. versionadded:: 1.5.0
    """

    @property
    @since("1.5.0")
    def numNodes(self):
        """Return number of nodes of the decision tree."""
        # Delegates to the wrapped JVM model via Py4J.
        return self._call_java("numNodes")

    @property
    @since("1.5.0")
    def depth(self):
        """Return depth of the decision tree."""
        return self._call_java("depth")

    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")

    def __repr__(self):
        return self._call_java("toString")
@inherit_doc
class TreeEnsembleModel(JavaModel):
    """
    (private abstraction)

    Represents a tree ensemble model.
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        return [DecisionTreeModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("2.0.0")
    def getNumTrees(self):
        """Number of trees in ensemble."""
        return self._call_java("getNumTrees")

    @property
    @since("1.5.0")
    def treeWeights(self):
        """Return the weights for each tree"""
        return list(self._call_java("javaTreeWeights"))

    @property
    @since("2.0.0")
    def totalNumNodes(self):
        """Total number of nodes, summed over all trees in the ensemble."""
        return self._call_java("totalNumNodes")

    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")

    def __repr__(self):
        return self._call_java("toString")
@inherit_doc
class DecisionTreeRegressionModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`DecisionTreeRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.

        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.

        .. note:: Feature importance for single decision trees can have high variance due to
              correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
              to determine feature importance instead.
        """
        return self._call_java("featureImportances")
@inherit_doc
class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
                            RandomForestParams, TreeRegressorParams, HasCheckpointInterval,
                            JavaMLWritable, JavaMLReadable):
    """
    `Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> rf = RandomForestRegressor(numTrees=2, maxDepth=2, seed=42)
    >>> model = rf.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> allclose(model.treeWeights, [1.0, 1.0])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.numFeatures
    1
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
    >>> model.getNumTrees
    2
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    0.5
    >>> rfr_path = temp_path + "/rfr"
    >>> rf.save(rfr_path)
    >>> rf2 = RandomForestRegressor.load(rfr_path)
    >>> rf2.getNumTrees()
    2
    >>> model_path = temp_path + "/rfr_model"
    >>> model.save(model_path)
    >>> model2 = RandomForestRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True

    .. versionadded:: 1.4.0
    """

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                 featureSubsetStrategy="auto"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                 featureSubsetStrategy="auto")
        """
        super(RandomForestRegressor, self).__init__()
        # Create the JVM-side estimator; fitting is delegated to it via Py4J.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance", subsamplingRate=1.0, numTrees=20,
                         featureSubsetStrategy="auto")
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto")
        Sets params for the RandomForestRegressor.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in a :class:`RandomForestRegressionModel`."""
        return RandomForestRegressionModel(java_model)
class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
                                  JavaMLReadable):
    """
    Model fitted by :class:`RandomForestRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Overrides TreeEnsembleModel.trees to wrap each member in the
        # regression-specific model class.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        return self._call_java("featureImportances")
@inherit_doc
class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                   GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
                   JavaMLReadable, TreeRegressorParams):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxIter=5, maxDepth=2, seed=42)
    >>> print(gbt.getImpurity())
    variance
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "/gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]

    .. versionadded:: 1.4.0
    """

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(GBTParams.supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance")
        """
        super(GBTRegressor, self).__init__()
        # Create the JVM-side estimator; fitting is delegated to it via Py4J.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                         checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
                         impurity="variance")
        # _input_kwargs is populated by the @keyword_only decorator.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance")
        Sets params for Gradient Boosted Tree Regression.
        """
        # BUGFIX: the parameter was previously misspelled `impuriy`, so
        # setParams(impurity=...) would be captured under a param name that
        # does not exist and `_set` would reject it; `__init__` used the
        # correct spelling, making the two signatures inconsistent.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in a :class:`GBTRegressionModel`."""
        return GBTRegressionModel(java_model)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`GBTRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Overrides TreeEnsembleModel.trees to wrap each member in the
        # regression-specific model class.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class AFTSurvivalRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasFitIntercept, HasMaxIter, HasTol, HasAggregationDepth,
JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Accelerated Failure Time (AFT) Model Survival Regression
Fit a parametric AFT survival regression model based on the Weibull distribution
of the survival time.
.. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0), 1.0),
... (0.0, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
>>> aftsr = AFTSurvivalRegression()
>>> model = aftsr.fit(df)
>>> model.predict(Vectors.dense(6.3))
1.0
>>> model.predictQuantiles(Vectors.dense(6.3))
DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
>>> model.transform(df).show()
+-----+---------+------+----------+
|label| features|censor|prediction|
+-----+---------+------+----------+
| 1.0| [1.0]| 1.0| 1.0|
| 0.0|(1,[],[])| 0.0| 1.0|
+-----+---------+------+----------+
...
>>> aftsr_path = temp_path + "/aftsr"
>>> aftsr.save(aftsr_path)
>>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
>>> aftsr2.getMaxIter()
100
>>> model_path = temp_path + "/aftsr_model"
>>> model.save(model_path)
>>> model2 = AFTSurvivalRegressionModel.load(model_path)
>>> model.coefficients == model2.coefficients
True
>>> model.intercept == model2.intercept
True
>>> model.scale == model2.scale
True
.. versionadded:: 1.6.0
"""
censorCol = Param(Params._dummy(), "censorCol",
"censor column name. The value of this column could be 0 or 1. " +
"If the value is 1, it means the event has occurred i.e. " +
"uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
quantileProbabilities = \
Param(Params._dummy(), "quantileProbabilities",
"quantile probabilities array. Values of the quantile probabilities array " +
"should be in the range (0, 1) and the array should be non-empty.",
typeConverter=TypeConverters.toListFloat)
quantilesCol = Param(Params._dummy(), "quantilesCol",
"quantiles column name. This column will output quantiles of " +
"corresponding quantileProbabilities if it is set.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2)
"""
super(AFTSurvivalRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
self._setDefault(censorCol="censor",
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
maxIter=100, tol=1E-6)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
quantileProbabilities=list([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]),
quantilesCol=None, aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
quantilesCol=None, aggregationDepth=2):
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
    # Wrap the fitted JVM model object in its Python companion class.
    return AFTSurvivalRegressionModel(java_model)
@since("1.6.0")
def setCensorCol(self, value):
    """
    Sets the value of :py:attr:`censorCol`.
    """
    # Delegates to Params._set; presumably returns self for call chaining,
    # per the pyspark setter convention — confirm against pyspark.ml.param.
    return self._set(censorCol=value)
@since("1.6.0")
def getCensorCol(self):
    """
    Gets the value of censorCol or its default value.
    """
    # getOrDefault falls back to the value registered via _setDefault.
    return self.getOrDefault(self.censorCol)
@since("1.6.0")
def setQuantileProbabilities(self, value):
    """
    Sets the value of :py:attr:`quantileProbabilities`.
    """
    # The Param's TypeConverters.toListFloat coerces ``value`` to a float list.
    return self._set(quantileProbabilities=value)
@since("1.6.0")
def getQuantileProbabilities(self):
    """
    Gets the value of quantileProbabilities or its default value.
    """
    return self.getOrDefault(self.quantileProbabilities)
@since("1.6.0")
def setQuantilesCol(self, value):
    """
    Sets the value of :py:attr:`quantilesCol`.
    """
    return self._set(quantilesCol=value)
@since("1.6.0")
def getQuantilesCol(self):
    """
    Gets the value of quantilesCol or its default value.
    """
    # No default is registered for quantilesCol in _setDefault, so this
    # presumably raises if the param was never set — TODO confirm.
    return self.getOrDefault(self.quantilesCol)
class AFTSurvivalRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`AFTSurvivalRegression`.

    .. versionadded:: 1.6.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("1.6.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("1.6.0")
    def scale(self):
        """
        Model scale parameter.
        """
        return self._call_java("scale")

    @since("2.0.0")
    def predictQuantiles(self, features):
        """
        Predicted Quantiles
        """
        # ``features`` is forwarded unchanged to the JVM model; presumably an
        # ML Vector for a single instance — confirm against the Scala API.
        return self._call_java("predictQuantiles", features)

    @since("2.0.0")
    def predict(self, features):
        """
        Predicted value
        """
        return self._call_java("predict", features)
@inherit_doc
class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, HasPredictionCol,
                                  HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol,
                                  HasSolver, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Generalized Linear Regression.

    Fit a Generalized Linear Model specified by giving a symbolic description of the linear
    predictor (link function) and a description of the error distribution (family). It supports
    "gaussian", "binomial", "poisson", "gamma" and "tweedie" as family. Valid link functions for
    each family are listed below. The first link function of each family is the default one.

    * "gaussian" -> "identity", "log", "inverse"

    * "binomial" -> "logit", "probit", "cloglog"

    * "poisson"  -> "log", "identity", "sqrt"

    * "gamma"    -> "inverse", "identity", "log"

    * "tweedie"  -> power link function specified through "linkPower". \
                    The default link power in the tweedie family is 1 - variancePower.

    .. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(0.0, 0.0)),
    ...     (1.0, Vectors.dense(1.0, 2.0)),
    ...     (2.0, Vectors.dense(0.0, 0.0)),
    ...     (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
    >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
    >>> model = glr.fit(df)
    >>> transformed = model.transform(df)
    >>> abs(transformed.head().prediction - 1.5) < 0.001
    True
    >>> abs(transformed.head().p - 1.5) < 0.001
    True
    >>> model.coefficients
    DenseVector([1.5..., -1.0...])
    >>> model.numFeatures
    2
    >>> abs(model.intercept - 1.5) < 0.001
    True
    >>> glr_path = temp_path + "/glr"
    >>> glr.save(glr_path)
    >>> glr2 = GeneralizedLinearRegression.load(glr_path)
    >>> glr.getFamily() == glr2.getFamily()
    True
    >>> model_path = temp_path + "/glr_model"
    >>> model.save(model_path)
    >>> model2 = GeneralizedLinearRegressionModel.load(model_path)
    >>> model.intercept == model2.intercept
    True
    >>> model.coefficients[0] == model2.coefficients[0]
    True

    .. versionadded:: 2.0.0
    """

    # Param declarations; ``Params._dummy()`` is the placeholder parent used
    # by the shared-params machinery.
    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson, gamma and tweedie.",
                   typeConverter=TypeConverters.toString)
    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)
    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)
    variancePower = Param(Params._dummy(), "variancePower", "The power in the variance function " +
                          "of the Tweedie distribution which characterizes the relationship " +
                          "between the variance and mean of the distribution. Only applicable " +
                          "for the Tweedie family. Supported values: 0 and [1, Inf).",
                          typeConverter=TypeConverters.toFloat)
    linkPower = Param(Params._dummy(), "linkPower", "The index in the power link function. " +
                      "Only applicable to the Tweedie family.",
                      typeConverter=TypeConverters.toFloat)

    @keyword_only
    def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                 variancePower=0.0, linkPower=None):
        """
        __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                 variancePower=0.0, linkPower=None)
        """
        super(GeneralizedLinearRegression, self).__init__()
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        # Effective defaults live here; the signature defaults are documentation.
        self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls",
                         variancePower=0.0)
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None,
                  variancePower=0.0, linkPower=None):
        """
        setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None, \
                  variancePower=0.0, linkPower=None)
        Sets params for generalized linear regression.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        # Wrap the fitted JVM model object in its Python companion class.
        return GeneralizedLinearRegressionModel(java_model)

    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.0.0")
    def getFamily(self):
        """
        Gets the value of family or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @since("2.0.0")
    def getLinkPredictionCol(self):
        """
        Gets the value of linkPredictionCol or its default value.
        """
        return self.getOrDefault(self.linkPredictionCol)

    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)

    @since("2.0.0")
    def getLink(self):
        """
        Gets the value of link or its default value.
        """
        return self.getOrDefault(self.link)

    @since("2.2.0")
    def setVariancePower(self, value):
        """
        Sets the value of :py:attr:`variancePower`.
        """
        return self._set(variancePower=value)

    @since("2.2.0")
    def getVariancePower(self):
        """
        Gets the value of variancePower or its default value.
        """
        return self.getOrDefault(self.variancePower)

    @since("2.2.0")
    def setLinkPower(self, value):
        """
        Sets the value of :py:attr:`linkPower`.
        """
        return self._set(linkPower=value)

    @since("2.2.0")
    def getLinkPower(self):
        """
        Gets the value of linkPower or its default value.
        """
        return self.getOrDefault(self.linkPower)
class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable,
                                       JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`GeneralizedLinearRegression`.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        # Guard first: calling "summary" on the JVM side without one available
        # would fail with an opaque Py4J error.
        if self.hasSummary:
            java_glrt_summary = self._call_java("summary")
            return GeneralizedLinearRegressionTrainingSummary(java_glrt_summary)
        else:
            raise RuntimeError("No training summary available for this %s" %
                               self.__class__.__name__)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental

    Generalized linear regression results evaluated on a dataset.

    .. versionadded:: 2.0.0
    """

    # All properties below delegate to the wrapped JVM summary object.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")

    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")

    @since("2.0.0")
    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.

        :param residualsType: The type of residuals which should be returned.
                              Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)

    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")

    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")

    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")

    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    .. note:: Experimental

    Generalized linear regression training results.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")

    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")
if __name__ == "__main__":
    # Run this module's doctests against a local 2-core SparkSession.
    import doctest
    import sys
    import tempfile
    from shutil import rmtree

    import pyspark.ml.regression
    from pyspark.sql import SparkSession

    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always remove the scratch directory used by the save/load doctests.
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # Use sys.exit rather than the site-injected exit() builtin, which is
        # absent under ``python -S`` and not intended for use in programs.
        sys.exit(-1)
| MLnick/spark | python/pyspark/ml/regression.py | Python | apache-2.0 | 60,886 | [
"Gaussian"
] | 4e067acb0dcb5347082d5dc61541cfd22a32dcb00a9bb7106ac5fbf9de2f91ae |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
from mantid.api import PythonAlgorithm, AlgorithmFactory, IMDHistoWorkspaceProperty, PropertyMode, WorkspaceProperty, Progress
from mantid.kernel import (Direction, EnabledWhenProperty, PropertyCriterion, Property, StringListValidator, FloatArrayBoundedValidator,
FloatArrayProperty, FloatBoundedValidator)
from mantid.geometry import SpaceGroupFactory
from mantid import logger
import numpy as np
from scipy import ndimage, signal as ssignal
class DeltaPDF3D(PythonAlgorithm):
    """Mantid algorithm computing the 3D-deltaPDF of an HKL MDHistoWorkspace.

    PyExec optionally crops to a sphere, applies a window function, removes
    Bragg peaks ('Punch and fill' or 'KAREN'), then FFTs the signal into a
    new MDHistoWorkspace.
    """

    def category(self):
        return 'Diffraction\\Utility'

    def name(self):
        return 'DeltaPDF3D'

    def summary(self):
        return 'Calculates the 3D-deltaPDF from a HKL workspace'

    def PyInit(self):
        # Workspaces, Bragg-removal method, window function, and the
        # punch-and-fill / crop-sphere / KAREN option groups.
        self.declareProperty(IMDHistoWorkspaceProperty("InputWorkspace", "",
                                                       optional=PropertyMode.Mandatory,
                                                       direction=Direction.Input),
                             "Input Workspace with HKL dimensions centered on zero.")
        self.declareProperty(WorkspaceProperty("IntermediateWorkspace", "",
                                               optional=PropertyMode.Optional,
                                               direction=Direction.Output),
                             "The resulting workspace after reflection removal and filters applied. What is the input of the FFT.")
        self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
                                               optional=PropertyMode.Mandatory,
                                               direction=Direction.Output),
                             "Output Workspace")
        self.declareProperty("Method", 'KAREN', StringListValidator(['None', 'Punch and fill', 'KAREN']), "Bragg peak removal method")
        self.declareProperty("WindowFunction", 'Blackman', StringListValidator(['None', 'Gaussian', 'Blackman', 'Tukey', 'Kaiser']),
                             "Apply a window function to the data")
        self.declareProperty("WindowParameter", defaultValue=0.5, validator=FloatBoundedValidator(0.),
                             doc="Parameter for window function, depends on window type, see algorithm docs")
        # Punch and fill
        condition = EnabledWhenProperty("Method", PropertyCriterion.IsEqualTo, 'Punch and fill')
        self.declareProperty("Shape", "sphere", doc="Shape to punch out reflections",
                             validator=StringListValidator(['sphere', 'cube']))
        self.setPropertySettings("Shape", condition)
        val_min_zero = FloatArrayBoundedValidator()
        val_min_zero.setLower(0.)
        self.declareProperty(FloatArrayProperty("Size", [0.2], validator=val_min_zero),
                             "Width of cube/diameter of sphere used to remove reflections, in (HKL) (one or three values)")
        self.setPropertySettings("Size", condition)
        self.declareProperty("SpaceGroup", "",
                             doc="Space group for reflection removal, either full name or number. If empty all HKL's will be removed.")
        self.setPropertySettings("SpaceGroup", condition)
        self.declareProperty("Convolution", True, "Apply convolution to fill in removed reflections")
        self.setPropertySettings("Convolution", condition)
        self.declareProperty("ConvolutionWidth", 2.0, validator=FloatBoundedValidator(0.),
                             doc="Width of gaussian convolution in pixels")
        self.setPropertySettings("ConvolutionWidth", condition)
        self.declareProperty("CropSphere", False, "Limit min/max q values. Can help with edge effects.")
        condition = EnabledWhenProperty("CropSphere", PropertyCriterion.IsNotDefault)
        self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero),
                             "HKL values below which will be removed (one or three values)")
        self.setPropertySettings("SphereMin", condition)
        self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero),
                             "HKL values above which will be removed (one or three values)")
        self.setPropertySettings("SphereMax", condition)
        self.declareProperty("FillValue", Property.EMPTY_DBL, "Value to replace with outside sphere")
        self.setPropertySettings("FillValue", condition)
        # KAREN
        self.declareProperty("KARENWidth", 7, "Size of filter window")
        # Reflections
        self.setPropertyGroup("Shape","Punch and fill")
        self.setPropertyGroup("Size","Punch and fill")
        self.setPropertyGroup("SpaceGroup","Punch and fill")
        # Sphere
        self.setPropertyGroup("CropSphere","Cropping to a sphere")
        self.setPropertyGroup("SphereMin","Cropping to a sphere")
        self.setPropertyGroup("SphereMax","Cropping to a sphere")
        self.setPropertyGroup("FillValue","Cropping to a sphere")
        # Convolution
        self.setPropertyGroup("Convolution","Convolution")
        self.setPropertyGroup("ConvolutionWidth","Convolution")

    def validateInputs(self):
        # Returns a {property name: message} dict; empty means valid.
        issues = dict()
        inWS = self.getProperty("InputWorkspace").value
        dimX=inWS.getXDimension()
        dimY=inWS.getYDimension()
        dimZ=inWS.getZDimension()
        if dimX.name != '[H,0,0]' or dimY.name != '[0,K,0]' or dimZ.name != '[0,0,L]':
            issues['InputWorkspace'] = 'dimensions must be [H,0,0], [0,K,0] and [0,0,L]'
        for d in range(inWS.getNumDims()):
            dim = inWS.getDimension(d)
            if not np.isclose(dim.getMaximum(), -dim.getMinimum(), atol=1e-5):
                issues['InputWorkspace'] = 'dimensions must be centered on zero'
        if self.getProperty("Convolution").value and self.getProperty("Method").value == 'Punch and fill':
            try:
                import astropy  # noqa
            except ImportError:
                issues["Convolution"] = 'python-astropy required to do convolution'
        size = self.getProperty("Size").value
        if len(size) != 1 and len(size) != 3:
            issues["Size"] = 'Must provide 1 or 3 sizes'
        if self.getProperty("SpaceGroup").value:
            space_group=self.getProperty("SpaceGroup").value
            # Accept either a space-group number or a symbol.
            try:
                if not SpaceGroupFactory.isSubscribedNumber(int(space_group)):
                    issues["SpaceGroup"] = 'Space group number is not valid'
            except ValueError:
                if not SpaceGroupFactory.isSubscribedSymbol(space_group):
                    issues["SpaceGroup"] = 'Space group name is not valid'
        sphereMin = self.getProperty("SphereMin").value
        if len(sphereMin) != 1 and len(sphereMin) != 3:
            issues["SphereMin"] = 'Must provide 1 or 3 diameters'
        sphereMax = self.getProperty("SphereMax").value
        if len(sphereMax) != 1 and len(sphereMax) != 3:
            issues["SphereMax"] = 'Must provide 1 or 3 diameters'
        if self.getProperty("WindowFunction").value == 'Tukey':
            # NOTE(review): scipy.signal.tukey moved to scipy.signal.windows in
            # newer scipy releases — confirm the supported scipy range.
            try:
                ssignal.tukey
            except AttributeError:
                issues["WindowFunction"] = 'Tukey window requires scipy >= 0.16.0'
        return issues

    def PyExec(self):
        progress = Progress(self, 0.0, 1.0, 5)
        inWS = self.getProperty("InputWorkspace").value
        signal = inWS.getSignalArray().copy()
        if self.getProperty("CropSphere").value:
            signal = self._crop_sphere(signal, inWS.getXDimension(), inWS.getYDimension(), inWS.getZDimension())
        window_function = self.getProperty("WindowFunction").value
        if window_function != 'None':
            # (sic) "paramater": the single window-shape parameter.
            paramater = self.getProperty("WindowParameter").value
            _, _, Xbins, _ = self._get_dim_params(inWS.getXDimension())
            _, _, Ybins, _ = self._get_dim_params(inWS.getYDimension())
            _, _, Zbins, _ = self._get_dim_params(inWS.getZDimension())
            if window_function == 'Gaussian':
                progress.report("Applying Gaussian window")
                window = self._gaussian_window((Xbins, Ybins, Zbins), paramater)
            elif window_function == 'Blackman':
                progress.report("Applying Blackman window")
                window = self._blackman_window((Xbins, Ybins, Zbins))
            elif window_function == 'Tukey':
                progress.report("Applying Tukey window")
                window = self._tukey_window((Xbins, Ybins, Zbins), paramater)
            elif window_function == 'Kaiser':
                progress.report("Applying Kaiser window")
                window = self._kaiser_window((Xbins, Ybins, Zbins), paramater)
            signal = np.multiply(signal, window)
        if self.getProperty("Method").value == 'Punch and fill':
            progress.report("Removing Reflections")
            signal = self._punch_and_fill(signal, inWS.getXDimension(), inWS.getYDimension(), inWS.getZDimension())
            if self.getProperty("Convolution").value:
                progress.report("Convoluting signal")
                signal = self._convolution(signal)
        elif self.getProperty("Method").value == 'KAREN':
            progress.report("Running KAREN")
            signal = self._karen(signal, self.getProperty("KARENWidth").value)
        if self.getPropertyValue("IntermediateWorkspace"):
            # Expose the pre-FFT signal for inspection if requested.
            cloneWS_alg = self.createChildAlgorithm("CloneMDWorkspace", enableLogging=False)
            cloneWS_alg.setProperty("InputWorkspace",inWS)
            cloneWS_alg.execute()
            signalOutWS = cloneWS_alg.getProperty("OutputWorkspace").value
            signalOutWS.setSignalArray(signal)
            self.setProperty("IntermediateWorkspace", signalOutWS)
        # Do FFT
        progress.report("Running FFT")
        # Replace any remaining nan's or inf's with 0
        # Otherwise you end up with a lot of nan's
        signal[np.isnan(signal)]=0
        signal[np.isinf(signal)]=0
        signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal)))
        number_of_bins = signal.shape
        # CreateMDHistoWorkspace expects Fortran `column-major` ordering
        signal = signal.real.flatten('F')
        createWS_alg = self.createChildAlgorithm("CreateMDHistoWorkspace", enableLogging=False)
        createWS_alg.setProperty("SignalInput", signal)
        createWS_alg.setProperty("ErrorInput", signal**2)
        createWS_alg.setProperty("Dimensionality", 3)
        createWS_alg.setProperty("Extents", self._calc_new_extents(inWS))
        createWS_alg.setProperty("NumberOfBins", number_of_bins)
        createWS_alg.setProperty("Names", 'x,y,z')
        createWS_alg.setProperty("Units", 'a,b,c')
        createWS_alg.execute()
        outWS = createWS_alg.getProperty("OutputWorkspace").value
        # Copy first experiment info
        if inWS.getNumExperimentInfo() > 0:
            outWS.copyExperimentInfos(inWS)
        progress.report()
        self.setProperty("OutputWorkspace", outWS)

    def _punch_and_fill(self, signal, dimX, dimY, dimZ):  # noqa
        # Mask (set to NaN) the Bragg reflections allowed by the space group,
        # either as cubes or ellipsoids around integer HKL positions.
        Xmin, Xmax, _, Xwidth = self._get_dim_params(dimX)
        Ymin, Ymax, _, Ywidth = self._get_dim_params(dimY)
        Zmin, Zmax, _, Zwidth = self._get_dim_params(dimZ)
        X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)
        size = self.getProperty("Size").value
        if len(size)==1:
            size = np.repeat(size, 3)
        size/=2.0  # We want radii or half box width
        cut_shape = self.getProperty("Shape").value
        space_group = self.getProperty("SpaceGroup").value
        if space_group:
            check_space_group = True
            try:
                space_group=SpaceGroupFactory.subscribedSpaceGroupSymbols(int(space_group))[0]
            except ValueError:
                pass
            logger.information('Using space group: '+space_group)
            sg=SpaceGroupFactory.createSpaceGroup(space_group)
        else:
            check_space_group = False
        if cut_shape == 'cube':
            for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
                for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
                    for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
                        if not check_space_group or sg.isAllowedReflection([h,k,l]):
                            signal[int((h-size[0]-Xmin)/Xwidth+1):int((h+size[0]-Xmin)/Xwidth),
                                   int((k-size[1]-Ymin)/Ywidth+1):int((k+size[1]-Ymin)/Ywidth),
                                   int((l-size[2]-Zmin)/Zwidth+1):int((l+size[2]-Zmin)/Zwidth)]=np.nan
        else:  # sphere
            mask=((X-np.round(X))**2/size[0]**2 + (Y-np.round(Y))**2/size[1]**2 + (Z-np.round(Z))**2/size[2]**2 < 1)
            # Unmask invalid reflections
            if check_space_group:
                for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
                    for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
                        for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
                            if not sg.isAllowedReflection([h,k,l]):
                                mask[int((h-0.5-Xmin)/Xwidth+1):int((h+0.5-Xmin)/Xwidth),
                                     int((k-0.5-Ymin)/Ywidth+1):int((k+0.5-Ymin)/Ywidth),
                                     int((l-0.5-Zmin)/Zwidth+1):int((l+0.5-Zmin)/Zwidth)]=False
            signal[mask]=np.nan
        return signal

    def _crop_sphere(self, signal, dimX, dimY, dimZ):
        # NaN-out everything inside the SphereMin ellipsoid and replace
        # everything outside the SphereMax ellipsoid with FillValue/NaN.
        X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)
        sphereMin = self.getProperty("SphereMin").value
        if sphereMin[0] < Property.EMPTY_DBL:
            if len(sphereMin)==1:
                sphereMin = np.repeat(sphereMin, 3)
            signal[X**2/sphereMin[0]**2 + Y**2/sphereMin[1]**2 + Z**2/sphereMin[2]**2 < 1]=np.nan
        sphereMax = self.getProperty("SphereMax").value
        if sphereMax[0] < Property.EMPTY_DBL:
            if len(sphereMax)==1:
                sphereMax = np.repeat(sphereMax, 3)
            if self.getProperty("FillValue").value == Property.EMPTY_DBL:
                fill_value = np.nan
            else:
                fill_value = self.getProperty("FillValue").value
            signal[X**2/sphereMax[0]**2 + Y**2/sphereMax[1]**2 + Z**2/sphereMax[2]**2 > 1]=fill_value
        return signal

    def _get_XYZ_ogrid(self, dimX, dimY, dimZ):
        """
        Returns X, Y and Z as ogrid
        """
        Xmin, Xmax, Xbins, _ = self._get_dim_params(dimX)
        Ymin, Ymax, Ybins, _ = self._get_dim_params(dimY)
        Zmin, Zmax, Zbins, _ = self._get_dim_params(dimZ)
        # Grid points are placed at bin centres (midpoint of boundary pairs).
        return np.ogrid[(dimX.getX(0)+dimX.getX(1))/2:(dimX.getX(Xbins)+dimX.getX(Xbins-1))/2:Xbins*1j,
                        (dimY.getX(0)+dimY.getX(1))/2:(dimY.getX(Ybins)+dimY.getX(Ybins-1))/2:Ybins*1j,
                        (dimZ.getX(0)+dimZ.getX(1))/2:(dimZ.getX(Zbins)+dimZ.getX(Zbins-1))/2:Zbins*1j]

    def _get_dim_params(self, dim):
        """
        Return the min, max, number_of_bins and bin_width of dim
        """
        return dim.getMinimum(), dim.getMaximum(), dim.getNBins(), dim.getBinWidth()

    def _convolution(self, signal):
        from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
        # Build a separable 3D gaussian kernel from the 1D kernel via
        # broadcasting.
        G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
        G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
        try:
            logger.debug('Trying astropy.convolution.convolve_fft for convolution')
            return convolve_fft(signal, G3D)  # Faster but will fail with large signal and kernel arrays
        except ValueError:
            logger.debug('Using astropy.convolution.convolve for convolution')
            return convolve(signal, G3D)

    def _calc_new_extents(self, inWS):
        # Calculate new extents for fft space
        extents=''
        for d in range(inWS.getNumDims()):
            dim = inWS.getDimension(d)
            if dim.getNBins() == 1:
                # Single-bin axes have no FFT frequencies; centre on zero.
                fft_dim = 1./(dim.getMaximum()-dim.getMinimum())
                extents+=str(-fft_dim/2.)+','+str(fft_dim/2.)+','
            else:
                fft_dim=np.fft.fftshift(np.fft.fftfreq(dim.getNBins(), (dim.getMaximum()-dim.getMinimum())/dim.getNBins()))
                extents+=str(fft_dim[0])+','+str(fft_dim[-1])+','
        return extents[:-1]

    def _karen(self, signal, width):
        """
        Bragg peaks are located as outliers in some moving window
        Outliers are defined as values more than 3sigma away from the median
        Sigma is estimated using 1.4826*MAD
        Returns median+2.2*MAD of window for values detected to be outliers
        Input dataset (dset) and window width (x)
        Input an odd window or the window will be asymmetric and stuff breaks
        """
        # NOTE(review): scipy.ndimage.filters is a deprecated alias of
        # scipy.ndimage in newer scipy — confirm the supported scipy range.
        med = ndimage.filters.median_filter(signal, size=width, mode='nearest')  # Get median of input data set
        mad = ndimage.filters.median_filter(np.abs(signal-med), size=width, mode='nearest')  # Get median absolute deviation (MAD)
        asigma = np.abs(mad*3*1.4826)  # Absolute value of approximate sigma
        mask = np.logical_or(signal < (med-asigma), signal > (med+asigma))  # Check if value is outlier based on MAD
        signal[mask] = (med+2.2*mad)[mask]  # Return median+2.2*MAD if value is outlier
        return signal

    def _gaussian_window(self, width, sigma):
        """
        Generates a gaussian window

        sigma is based on the data being in a range 0 to 1
        """
        return (ssignal.gaussian(width[0], sigma*width[0]).reshape((-1,1,1)) *
                ssignal.gaussian(width[1], sigma*width[1]).reshape((-1,1)) *
                ssignal.gaussian(width[2], sigma*width[2]))

    def _blackman_window(self, width):
        """
        Generates a blackman window
        """
        return np.blackman(width[0]).reshape((-1,1,1)) * np.blackman(width[1]).reshape((-1,1)) * np.blackman(width[2])

    def _tukey_window(self, width, alpha):
        """
        Generates a tukey window

        0 <= alpha <=1
        alpha = 0 becomes rectangular
        alpha = 1 becomes a Hann window
        """
        return (ssignal.tukey(width[0], alpha).reshape((-1,1,1)) *
                ssignal.tukey(width[1], alpha).reshape((-1,1)) *
                ssignal.tukey(width[2], alpha))

    def _kaiser_window(self, width, beta):
        """
        Generates a kaiser window

        beta Window shape
        0    Rectangular
        5    Similar to a Hamming
        6    Similar to a Hann
        8.6  Similar to a Blackman
        """
        return np.kaiser(width[0], beta).reshape((-1,1,1)) * np.kaiser(width[1], beta).reshape((-1,1)) * np.kaiser(width[2], beta)
AlgorithmFactory.subscribe(DeltaPDF3D)
| mganeva/mantid | Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py | Python | gpl-3.0 | 19,144 | [
"Gaussian"
] | 3271fb66084a38a4c8214b82b00a4fdbd095170bd97a2389144613bf2ccda855 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016 Brian C. Lane <bcl@redhat.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
# pylint: disable=W0611
"""some functions that may be useful for various checkers
"""
import functools
import itertools
import re
import sys
import string
import warnings
import six
from six.moves import map, builtins # pylint: disable=redefined-builtin
import astroid
from astroid import scoped_nodes
# Name of the builtins module ('__builtin__' on PY2, 'builtins' on PY3).
BUILTINS_NAME = builtins.__name__

# astroid node types that represent comprehensions.
COMP_NODE_TYPES = (astroid.ListComp, astroid.SetComp,
                   astroid.DictComp, astroid.GeneratorExp)
PY3K = sys.version_info[0] == 3

# Module that holds the builtin exceptions differs between Python 2 and 3.
if not PY3K:
    EXCEPTIONS_MODULE = "exceptions"
else:
    EXCEPTIONS_MODULE = "builtins"

# Fully-qualified names of the abc abstract decorators.
ABC_METHODS = set(('abc.abstractproperty', 'abc.abstractmethod',
                   'abc.abstractclassmethod', 'abc.abstractstaticmethod'))

# Special-method names, spelled per the running major version where needed.
ITER_METHOD = '__iter__'
NEXT_METHOD = 'next' if six.PY2 else '__next__'
GETITEM_METHOD = '__getitem__'
CONTAINS_METHOD = '__contains__'
KEYS_METHOD = 'keys'
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
#          can work with any number of arguments from that tuple,
#          although it's best to implement it in order to accept
#          all of them.
#
# Fix: the 0-parameter tuple previously listed '__neg__', '__pos__',
# '__abs__', '__complex__', '__int__' and '__float__' twice; the
# duplicates were redundant (same name -> same arity) and are removed.
_SPECIAL_METHODS_PARAMS = {
    None: ('__new__', '__init__', '__call__'),

    0: ('__del__', '__repr__', '__str__', '__bytes__', '__hash__', '__bool__',
        '__dir__', '__len__', '__length_hint__', '__iter__', '__reversed__',
        '__neg__', '__pos__', '__abs__', '__invert__', '__complex__', '__int__',
        '__float__', '__index__', '__enter__', '__aenter__', '__getnewargs_ex__',
        '__getnewargs__', '__getstate__', '__reduce__', '__copy__',
        '__unicode__', '__nonzero__', '__await__', '__aiter__', '__anext__'),

    1: ('__format__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__',
        '__ge__', '__getattr__', '__getattribute__', '__delattr__',
        '__delete__', '__instancecheck__', '__subclasscheck__',
        '__getitem__', '__missing__', '__delitem__', '__contains__',
        '__add__', '__sub__', '__mul__', '__truediv__', '__floordiv__',
        '__mod__', '__divmod__', '__lshift__', '__rshift__', '__and__',
        '__xor__', '__or__', '__radd__', '__rsub__', '__rmul__', '__rtruediv__',
        '__rmod__', '__rdivmod__', '__rpow__', '__rlshift__', '__rrshift__',
        '__rand__', '__rxor__', '__ror__', '__iadd__', '__isub__', '__imul__',
        '__itruediv__', '__ifloordiv__', '__imod__', '__ilshift__',
        '__irshift__', '__iand__', '__ixor__', '__ior__', '__ipow__',
        '__setstate__', '__reduce_ex__', '__deepcopy__', '__cmp__',
        '__matmul__', '__rmatmul__', '__div__'),

    2: ('__setattr__', '__get__', '__set__', '__setitem__'),

    3: ('__exit__', '__aexit__'),

    (0, 1): ('__round__', ),
}

# Inverted view: special-method name -> expected parameter count/spec.
SPECIAL_METHODS_PARAMS = {
    name: params
    for params, methods in _SPECIAL_METHODS_PARAMS.items()
    for name in methods
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
class NoSuchArgumentError(Exception):
    """Raised when a requested call argument (by position or keyword)
    cannot be found in a function call node."""
    pass
def is_inside_except(node):
    """Return true when *node* sits inside the name of an except handler."""
    # Climb the ancestor chain until the parent is an ExceptHandler or
    # there is nothing left to climb.
    ancestor = node
    while ancestor and not isinstance(ancestor.parent, astroid.ExceptHandler):
        ancestor = ancestor.parent
    # Only a hit on the handler's bound name counts.
    return ancestor and ancestor is ancestor.parent.name
def get_all_elements(node):
    """Recursively yield every atom found in nested lists and tuples."""
    if not isinstance(node, (astroid.Tuple, astroid.List)):
        # Leaf node: emit it as-is.
        yield node
    else:
        for element in node.elts:
            for atom in get_all_elements(element):
                yield atom
def clobber_in_except(node):
    """Check whether an assignment node inside an except handler clobbers
    an existing variable.

    Returns (True, args for W0623) if the assignment clobbers an existing
    variable, (False, None) otherwise.
    """
    if isinstance(node, astroid.AssignAttr):
        return (True, (node.attrname, 'object %r' % (node.expr.as_string(),)))
    if isinstance(node, astroid.AssignName):
        name = node.name
        if is_builtin(name):
            # Rebinding a builtin name is always a clobber.
            return (True, (name, 'builtins'))
        stmts = node.lookup(name)[1]
        # A previous binding in an outer scope (not a plain assignment or
        # another except handler) gets clobbered here.
        if stmts and not isinstance(stmts[0].assign_type(),
                                    (astroid.Assign, astroid.AugAssign,
                                     astroid.ExceptHandler)):
            return (True, (name, 'outer scope (line %s)' % stmts[0].fromlineno))
    return (False, None)
def is_super(node):
    """Return True if the node references the "super" builtin function."""
    return (getattr(node, 'name', None) == 'super'
            and node.root().name == BUILTINS_NAME)
def is_error(node):
    """Return true if the function does nothing but raising an exception."""
    # Any direct Raise child qualifies the function as an "error" stub.
    return any(isinstance(child, astroid.Raise)
               for child in node.get_children())
def is_raising(body):
    """Return true if the given statement list raises an exception."""
    return any(isinstance(stmt, astroid.Raise) for stmt in body)
# Snapshot of the builtins module namespace; is_builtin() checks names
# against this dict instead of the live module.
builtins = builtins.__dict__.copy()
# Names treated as builtins even though they are not in the namespace.
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')
def is_builtin_object(node):
    """Returns True if the given node is an object from the __builtin__ module."""
    if not node:
        # Preserve the short-circuit value for falsy inputs.
        return node
    return node.root().name == BUILTINS_NAME
def is_builtin(name):
    """Return true if <name> could be considered as a builtin defined by
    python.
    """
    if name in builtins:
        return True
    return name in SPECIAL_BUILTINS
def is_defined_before(var_node):
    """return True if the variable node is defined by a parent node (list,
    set, dict, or generator comprehension, lambda) or in a previous sibling
    node on the same line (statement_defining ; statement_using)
    """
    varname = var_node.name
    _node = var_node.parent
    # Phase 1: walk up the ancestor chain looking for an enclosing
    # construct that binds `varname`.
    while _node:
        if isinstance(_node, COMP_NODE_TYPES):
            # Comprehension scope: any name assigned inside counts.
            for ass_node in _node.nodes_of_class(astroid.AssignName):
                if ass_node.name == varname:
                    return True
        elif isinstance(_node, astroid.For):
            # For loop: only the loop target binds names.
            for ass_node in _node.target.nodes_of_class(astroid.AssignName):
                if ass_node.name == varname:
                    return True
        elif isinstance(_node, astroid.With):
            for expr, ids in _node.items:
                if expr.parent_of(var_node):
                    # The name is used inside the context expression itself,
                    # before the `as` binding takes effect.
                    break
                if (ids and
                        isinstance(ids, astroid.AssignName) and
                        ids.name == varname):
                    return True
        elif isinstance(_node, (astroid.Lambda, astroid.FunctionDef)):
            if _node.args.is_argument(varname):
                # If the name is found inside a default value
                # of a function, then let the search continue
                # in the parent's tree.
                if _node.args.parent_of(var_node):
                    try:
                        _node.args.default_value(varname)
                        _node = _node.parent
                        continue
                    except astroid.NoDefault:
                        pass
                return True
            if getattr(_node, 'name', None) == varname:
                return True
            # Function boundaries stop the lexical search.
            break
        elif isinstance(_node, astroid.ExceptHandler):
            if isinstance(_node.name, astroid.AssignName):
                ass_node = _node.name
                if ass_node.name == varname:
                    return True
        _node = _node.parent
    # possibly multiple statements on the same line using semi colon separator
    # Phase 2: scan earlier sibling statements that share the same line.
    stmt = var_node.statement()
    _node = stmt.previous_sibling()
    lineno = stmt.fromlineno
    while _node and _node.fromlineno == lineno:
        for ass_node in _node.nodes_of_class(astroid.AssignName):
            if ass_node.name == varname:
                return True
        for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
            # `name[1]` is the alias (import x as y); fall back to the
            # plain imported name when no alias is given.
            if varname in [name[1] or name[0] for name in imp_node.names]:
                return True
        _node = _node.previous_sibling()
    return False
def is_func_default(node):
    """Return true if the given Name node is used in a function default
    argument's value.
    """
    scope = node.scope()
    if not isinstance(scope, astroid.FunctionDef):
        return False
    # Identity check: the very same node must appear in a default value.
    return any(name_node is node
               for default in scope.args.defaults
               for name_node in default.nodes_of_class(astroid.Name))
def is_func_decorator(node):
    """Return true if the name is used inside a function decorator."""
    current = node.parent
    while current is not None:
        if isinstance(current, astroid.Decorators):
            return True
        # Statements, lambdas and comprehension scopes end the search.
        boundary = (current.is_statement or
                    isinstance(current, astroid.Lambda) or
                    isinstance(current, (scoped_nodes.ComprehensionScope,
                                         scoped_nodes.ListComp)))
        if boundary:
            break
        current = current.parent
    return False
def is_ancestor_name(frame, node):
    """Return True if `frame` is a astroid.Class node with `node` in the
    subtree of its bases attribute
    """
    try:
        bases = frame.bases
    except AttributeError:
        # Not a class-like node: no bases to inspect.
        return False
    return any(node in base.nodes_of_class(astroid.Name) for base in bases)
def assign_parent(node):
    """Return the first ancestor that is not an AssName, Tuple or List node."""
    transparent = (astroid.AssignName, astroid.Tuple, astroid.List)
    while node and isinstance(node, transparent):
        node = node.parent
    return node
def overrides_a_method(class_node, name):
    """Return True if <name> is a method overridden from an ancestor."""
    return any(name in ancestor and
               isinstance(ancestor[name], astroid.FunctionDef)
               for ancestor in class_node.ancestors())
def check_messages(*messages):
    """Decorator storing the message ids handled by a checker method.

    The ids are attached to the function as `checks_msgs`; the function
    itself is returned unchanged.
    """
    def decorate(method):
        method.checks_msgs = messages
        return method
    return decorate
class IncompleteFormatString(Exception):
    """Raised when a format string ends in the middle of a format specifier."""
    pass
class UnsupportedFormatCharacter(Exception):
    """Raised when a format character in a format string is not one of the
    supported format characters.
    """
    def __init__(self, index):
        # Keep the offset of the offending character for error reporting.
        Exception.__init__(self, index)
        self.index = index
def parse_format_string(format_string):
    """Parses a format string, returning a tuple of (keys, num_args), where keys
    is the set of mapping keys in the format string, and num_args is the number
    of arguments required by the format string. Raises
    IncompleteFormatString or UnsupportedFormatCharacter if a
    parse error occurs."""
    keys = set()
    num_args = 0
    # Advance the cursor by one; the string must not end mid-specifier.
    def next_char(i):
        i += 1
        if i == len(format_string):
            raise IncompleteFormatString
        return (i, format_string[i])
    i = 0
    while i < len(format_string):
        char = format_string[i]
        if char == '%':
            i, char = next_char(i)
            # Parse the mapping key (optional).
            key = None
            if char == '(':
                # Keys may contain nested parentheses; track the depth.
                depth = 1
                i, char = next_char(i)
                key_start = i
                while depth != 0:
                    if char == '(':
                        depth += 1
                    elif char == ')':
                        depth -= 1
                    i, char = next_char(i)
                key_end = i - 1
                key = format_string[key_start:key_end]
            # Parse the conversion flags (optional).
            while char in '#0- +':
                i, char = next_char(i)
            # Parse the minimum field width (optional).
            if char == '*':
                # '*' consumes one extra positional argument.
                num_args += 1
                i, char = next_char(i)
            else:
                while char in string.digits:
                    i, char = next_char(i)
            # Parse the precision (optional).
            if char == '.':
                i, char = next_char(i)
                if char == '*':
                    num_args += 1
                    i, char = next_char(i)
                else:
                    while char in string.digits:
                        i, char = next_char(i)
            # Parse the length modifier (optional).
            if char in 'hlL':
                i, char = next_char(i)
            # Parse the conversion type (mandatory).
            # %a (ascii()) conversions only exist on Python 3; PY3K is a
            # module-level constant defined elsewhere in this file.
            if PY3K:
                flags = 'diouxXeEfFgGcrs%a'
            else:
                flags = 'diouxXeEfFgGcrs%'
            if char not in flags:
                raise UnsupportedFormatCharacter(i)
            if key:
                keys.add(key)
            elif char != '%':
                # '%%' is a literal percent and consumes no argument.
                num_args += 1
        i += 1
    return keys, num_args
def is_attr_protected(attrname):
    """return True if attribute name is protected (start with _ and some other
    details), False otherwise.
    """
    if attrname[0] != '_':
        return False
    if attrname == '_':
        return False
    # Dunder names (__x__) are special methods, not protected attributes.
    return not (attrname.startswith('__') and attrname.endswith('__'))
def node_frame_class(node):
    """return klass node for a method node (or a staticmethod or a
    classmethod), return null otherwise
    """
    frame = node.frame()
    # Climb through enclosing frames until a class definition (or root).
    while frame is not None and not isinstance(frame, astroid.ClassDef):
        parent = frame.parent
        frame = None if parent is None else parent.frame()
    return frame
def is_attr_private(attrname):
    """Check that attribute name is private (at least two leading underscores,
    at most one trailing underscore)
    """
    # re.match caches compiled patterns internally, so no explicit
    # re.compile is needed here.
    return re.match('^_{2,}.*[^_]+_?$', attrname)
def get_argument_from_call(callfunc_node, position=None, keyword=None):
    """Returns the specified argument from a function call.

    :param astroid.Call callfunc_node: Node representing a function call to check.
    :param int position: position of the argument.
    :param str keyword: the keyword of the argument.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: astroid.Name
    :raises ValueError: if both position and keyword are None.
    :raises NoSuchArgumentError: if no argument at the provided position or with
    the provided keyword.
    """
    if position is None and keyword is None:
        raise ValueError('Must specify at least one of: position or keyword.')
    # Positional lookup first; an out-of-range index simply falls through
    # to the keyword lookup.
    if position is not None:
        try:
            return callfunc_node.args[position]
        except IndexError:
            pass
    if keyword and callfunc_node.keywords:
        for kwarg in callfunc_node.keywords:
            if kwarg.arg == keyword:
                return kwarg.value
    raise NoSuchArgumentError
def inherit_from_std_ex(node):
    """Return true if the given class node is subclass of
    exceptions.Exception.
    """
    if (node.name in ('Exception', 'BaseException')
            and node.root().name == EXCEPTIONS_MODULE):
        return True
    # Otherwise, recurse over the whole ancestor chain.
    return any(inherit_from_std_ex(ancestor)
               for ancestor in node.ancestors(recurs=True))
def error_of_type(handler, error_type):
    """Check if the given exception handler catches the given error_type.

    The *handler* parameter is a node, representing an ExceptHandler node.
    The *error_type* can be an exception, such as AttributeError,
    the name of an exception, or it can be a tuple of errors.
    The function will return True if the handler catches any of the
    given errors.
    """
    def stringify_error(error):
        if isinstance(error, six.string_types):
            return error
        return error.__name__

    if not isinstance(error_type, tuple):
        error_type = (error_type, )
    expected_errors = {stringify_error(error) for error in error_type}
    if not handler.type:
        # A bare except catches everything; when specific errors were
        # requested we deliberately treat it as not matching.
        return False
    return handler.catch(expected_errors)
def decorated_with_property(node):
    """ Detect if the given function node is decorated with a property.

    Returns True when any decorator infers to the builtin `property`
    class (or a subclass of it), False otherwise.
    """
    if not node.decorators:
        return False
    for decorator in node.decorators.nodes:
        if not isinstance(decorator, astroid.Name):
            continue
        try:
            for infered in decorator.infer():
                if isinstance(infered, astroid.ClassDef):
                    if (infered.root().name == BUILTINS_NAME and
                            infered.name == 'property'):
                        return True
                    for ancestor in infered.ancestors():
                        if (ancestor.name == 'property' and
                                ancestor.root().name == BUILTINS_NAME):
                            return True
        except astroid.InferenceError:
            pass
    # Fix: previously the function fell off the end here and returned
    # None; return an explicit False for a consistent boolean interface.
    return False
def decorated_with(func, qnames):
    """Determine if the `func` node has a decorator with the qualified name `qname`."""
    decorators = func.decorators.nodes if func.decorators else []
    for decorator_node in decorators:
        dec = safe_infer(decorator_node)
        if dec and dec.qname() in qnames:
            return True
    # Fix: return an explicit False instead of the implicit None
    # fall-through, for a consistent boolean interface.
    return False
def unimplemented_abstract_methods(node, is_abstract_cb=None):
    """
    Get the unimplemented abstract methods for the given *node*.
    A method can be considered abstract if the callback *is_abstract_cb*
    returns a ``True`` value. The check defaults to verifying that
    a method is decorated with abstract methods.
    The function will work only for new-style classes. For old-style
    classes, it will simply return an empty dictionary.
    For the rest of them, it will return a dictionary of abstract method
    names and their inferred objects.
    """
    if is_abstract_cb is None:
        is_abstract_cb = functools.partial(
            decorated_with, qnames=ABC_METHODS)
    visited = {}
    try:
        # Walk the MRO from the most distant ancestor down, so that a
        # concrete override in a subclass removes the abstract entry.
        mro = reversed(node.mro())
    except NotImplementedError:
        # Old style class, it will not have a mro.
        return {}
    except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
        # to figure this out here.
        return {}
    for ancestor in mro:
        for obj in ancestor.values():
            infered = obj
            if isinstance(obj, astroid.AssignName):
                infered = safe_infer(obj)
                if not infered:
                    # Might be an abstract function,
                    # but since we don't have enough information
                    # in order to take this decision, we're taking
                    # the *safe* decision instead.
                    if obj.name in visited:
                        del visited[obj.name]
                    continue
                if not isinstance(infered, astroid.FunctionDef):
                    # Reassigned to a non-function: no longer abstract.
                    if obj.name in visited:
                        del visited[obj.name]
            if isinstance(infered, astroid.FunctionDef):
                # It's critical to use the original name,
                # since after inferring, an object can be something
                # else than expected, as in the case of the
                # following assignment.
                #
                # class A:
                #     def keys(self): pass
                #     __iter__ = keys
                abstract = is_abstract_cb(infered)
                if abstract:
                    visited[obj.name] = infered
                elif not abstract and obj.name in visited:
                    del visited[obj.name]
    return visited
def _import_node_context(node):
    """Return the ExceptHandler or TryExcept wrapping *node*, or None."""
    wrappers = (astroid.ExceptHandler, astroid.TryExcept)
    current = node
    while current and not isinstance(current.parent, wrappers):
        current = current.parent
    if current and isinstance(current.parent, wrappers):
        return current.parent
    return None
def is_from_fallback_block(node):
    """Check if the given node is from a fallback import block."""
    context = _import_node_context(node)
    if not context:
        return False
    if isinstance(context, astroid.ExceptHandler):
        # The node lives in an except handler: the alternative imports
        # are the ones in the try body.
        other_body = context.parent.body
        handlers = context.parent.handlers
    else:
        # The node lives in the try body: the alternatives are in the
        # handlers' bodies.
        other_body = itertools.chain.from_iterable(
            handler.body for handler in context.handlers)
        handlers = context.handlers
    has_fallback_imports = any(isinstance(import_node, (astroid.ImportFrom, astroid.Import))
                               for import_node in other_body)
    ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
    return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(handlers, exception):
    """Return True if any of the given handlers catches *exception*."""
    return any(error_of_type(handler, (exception, ))
               for handler in handlers)
def node_ignores_exception(node, exception):
    """Check if the node is in a TryExcept which handles the given exception."""
    wrappers = (astroid.ExceptHandler, astroid.TryExcept)
    current = node
    while current and not isinstance(current.parent, wrappers):
        current = current.parent
    if not current or not isinstance(current.parent, astroid.TryExcept):
        return False
    return _except_handlers_ignores_exception(current.parent.handlers,
                                              exception)
def class_is_abstract(node):
    """return true if the given class node should be considered as an abstract
    class
    """
    # Only methods defined directly on this class count.
    return any(method.is_abstract(pass_is_abstract=False)
               for method in node.methods()
               if method.parent.frame() is node)
def _hasattr(value, attr):
    """Return True when the inferred *value* exposes attribute *attr*."""
    try:
        value.getattr(attr)
    except astroid.NotFoundError:
        return False
    return True
def is_comprehension(node):
    """Return True for list/set/dict comprehensions and generator nodes."""
    return isinstance(node, (astroid.ListComp, astroid.SetComp,
                             astroid.DictComp, astroid.GeneratorExp))
def _supports_mapping_protocol(value):
    # Mappings must provide both __getitem__ and keys().
    return _hasattr(value, GETITEM_METHOD) and _hasattr(value, KEYS_METHOD)
def _supports_membership_test_protocol(value):
    # `in` tests use __contains__ when available.
    return _hasattr(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value):
    # Iteration falls back to __getitem__ when __iter__ is missing.
    return _hasattr(value, ITER_METHOD) or _hasattr(value, GETITEM_METHOD)
def _supports_subscript_protocol(value):
    # Subscripting only requires __getitem__.
    return _hasattr(value, GETITEM_METHOD)
def _is_abstract_class_name(name):
lname = name.lower()
is_mixin = lname.endswith('mixin')
is_abstract = lname.startswith('abstract')
is_base = lname.startswith('base') or lname.endswith('base')
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node):
    """Return True when *node* is lexically nested inside an abstract class."""
    current = node
    while current is not None:
        if isinstance(current, astroid.ClassDef) and class_is_abstract(current):
            return True
        # Fall back on a name-based heuristic.
        name = getattr(current, 'name', None)
        if name is not None and _is_abstract_class_name(name):
            return True
        current = current.parent
    return False
def is_iterable(value):
    """Return True when inference suggests that *value* can be iterated."""
    if isinstance(value, astroid.ClassDef):
        if not has_known_bases(value):
            # Unknown bases: give the benefit of the doubt.
            return True
        # classobj can only be iterable if it has an iterable metaclass
        meta = value.metaclass()
        if meta is not None and _supports_iteration_protocol(meta):
            return True
    if isinstance(value, astroid.Instance):
        if not has_known_bases(value):
            return True
        if _supports_iteration_protocol(value):
            return True
    return False
def is_mapping(value):
    """Return True when inference suggests that *value* behaves as a mapping."""
    if isinstance(value, astroid.ClassDef):
        if not has_known_bases(value):
            # Unknown bases: give the benefit of the doubt.
            return True
        # classobj can only be a mapping if it has a metaclass is mapping
        meta = value.metaclass()
        if meta is not None and _supports_mapping_protocol(meta):
            return True
    if isinstance(value, astroid.Instance):
        if not has_known_bases(value):
            return True
        if _supports_mapping_protocol(value):
            return True
    return False
def supports_membership_test(value):
    """Return True when *value* likely supports the `in` operator."""
    if isinstance(value, astroid.ClassDef):
        if not has_known_bases(value):
            return True
        meta = value.metaclass()
        if meta is not None and _supports_membership_test_protocol(meta):
            return True
    if isinstance(value, astroid.Instance):
        if not has_known_bases(value):
            return True
        if _supports_membership_test_protocol(value):
            return True
    # Membership tests fall back to plain iteration.
    return is_iterable(value)
def supports_subscript(value):
    """Return True when *value* likely supports subscripting (value[x])."""
    if isinstance(value, astroid.ClassDef):
        if not has_known_bases(value):
            return True
        meta = value.metaclass()
        if meta is not None and _supports_subscript_protocol(meta):
            return True
    if isinstance(value, astroid.Instance):
        if not has_known_bases(value):
            return True
        if _supports_subscript_protocol(value):
            return True
    return False
# TODO(cpopa): deprecate these or leave them as aliases?
def safe_infer(node, context=None):
    """Return the inferred value for the given node.
    Return None if inference failed or if there is some ambiguity (more than
    one node has been inferred).
    """
    try:
        inferit = node.infer(context=context)
        value = next(inferit)
    except astroid.InferenceError:
        return
    try:
        # Probe for a second inference result: if one exists, the node
        # is ambiguous and None is returned.
        next(inferit)
        return # None if there is ambiguity on the inferred node
    except astroid.InferenceError:
        return # there is some kind of ambiguity
    except StopIteration:
        # Exactly one result: the inference is unambiguous.
        return value
def has_known_bases(klass, context=None):
    """Return true if all base classes of a class could be inferred."""
    try:
        # The result is cached on the node itself after the first call.
        return klass._all_bases_known
    except AttributeError:
        pass
    for base in klass.bases:
        result = safe_infer(base, context=context)
        # TODO: check for A->B->A->B pattern in class structure too?
        # A base fails the check if it cannot be inferred to a class, is
        # the class itself (trivial cycle), or recursively has unknown
        # bases.
        if (not isinstance(result, astroid.ClassDef) or
                result is klass or
                not has_known_bases(result, context=context)):
            klass._all_bases_known = False
            return False
    klass._all_bases_known = True
    return True
| axbaretto/beam | sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/checkers/utils.py | Python | apache-2.0 | 27,930 | [
"Brian"
] | 1f29a4dd55294d1502d0f3e36a2f5f56253f47b85e3b35a5e7397ea9e4bd8f5d |
# -*- coding: utf-8 -*-
#
# hl_api_info.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to get information on NEST.
"""
from .hl_api_helper import *
@check_stack
def sysinfo():
    """
    Print information on the platform on which NEST was compiled.
    """
    # Delegate to the SLI `sysinfo` command.
    sr("sysinfo")
@check_stack
def version():
    """
    Return the NEST version.
    """
    # Fetch kernelname and version from the SLI statusdict and join them
    # into a single "name version" string.
    sr("statusdict [[ /kernelname /version ]] get")
    return " ".join(spp())
@check_stack
def authors():
    """
    Print the authors of NEST.
    """
    # Delegate to the SLI `authors` command.
    sr("authors")
@check_stack
def helpdesk(browser="firefox"):
    """
    Open the NEST helpdesk in the given browser. The default browser is firefox.
    """
    # Configure the helpdesk command with the chosen browser, then run it.
    sr("/helpdesk << /command (%s) >> SetOptions" % browser)
    sr("helpdesk")
@check_stack
def help(obj=None, pager="less"):
    """
    Show the help page for the given object using the given pager. The
    default pager is less.
    """
    if obj is not None:
        sr("/page << /command (%s) >> SetOptions" % pager)
        sr("/%s help" % obj)
        return
    # No object given: print a short orientation text instead.
    for line in (
            "Type 'nest.helpdesk()' to access the online documentation in a browser.",
            "Type 'nest.help(object)' to get help on a NEST object or command.",
            "",
            "Type 'nest.Models()' to see a list of available models in NEST.",
            "",
            "Type 'nest.authors()' for information about the makers of NEST.",
            "Type 'nest.sysinfo()' to see details on the system configuration.",
            "Type 'nest.version()' for information about the NEST version.",
            "",
            "For more information visit http://www.nest-simulator.org."):
        print(line)
@check_stack
def get_verbosity():
    """
    Return verbosity level of NEST's messages.
    """
    # spp() retrieves the result that `verbosity` left on the SLI stack.
    sr('verbosity')
    return spp()
@check_stack
def set_verbosity(level):
    """
    Change verbosity level for NEST's messages. level is a string and
    can be one of M_FATAL, M_ERROR, M_WARNING, or M_INFO.
    """
    # `level` is interpolated verbatim into the SLI command line.
    sr("%s setverbosity" % level)
@check_stack
def get_argv():
    """
    Return argv as seen by NEST. This is similar to Python sys.argv
    but might have changed after MPI initialization.
    """
    sr('statusdict')
    status = spp()
    return status['argv']
@check_stack
def message(level, sender, text):
    """
    Print a message using NEST's message system.
    """
    # Push the three arguments onto the SLI stack in order, then invoke
    # the `message` command.
    for arg in (level, sender, text):
        sps(arg)
    sr('message')
@check_stack
def SetStatus(nodes, params, val=None):
    """
    Set the parameters of nodes (identified by global ids) or
    connections (identified by handles as returned by
    GetConnections()) to params, which may be a single dictionary or a
    list of dictionaries. If val is given, params has to be the name
    of an attribute, which is set to val on the nodes/connections. val
    can be a single value or a list of the same size as nodes.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")
    # This was added to ensure that the function is a nop (instead of,
    # for instance, raising an exception) when applied to an empty list,
    # which is an artifact of the API operating on lists, rather than
    # relying on language idioms, such as comprehensions
    #
    if len(nodes) == 0:
        return
    # (attribute, value) form: build the per-node status dictionaries.
    if val is not None and is_literal(params):
        if is_iterable(val) and not isinstance(val, (uni_str, dict)):
            params = [{params: x} for x in val]
        else:
            params = {params: val}
    # Expand a single dict to one entry per node.
    params = broadcast(params, len(nodes), (dict,), "params")
    if len(nodes) != len(params):
        raise TypeError("status dict must be a dict, or list of dicts of length 1 or len(nodes)")
    # Connections are pushed with pcd(), plain node id lists with sps().
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)
    # Pair each node with its dict on the SLI side and apply SetStatus.
    sps(params)
    sr('2 arraystore')
    sr('Transpose { arrayload pop SetStatus } forall')
@check_stack
def GetStatus(nodes, keys=None):
    """
    Return the parameter dictionaries of the given list of nodes
    (identified by global ids) or connections (identified
    by handles as returned by GetConnections()). If keys is given, a
    list of values is returned instead. keys may also be a list, in
    which case the returned list contains lists of values.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")
    # Applying GetStatus to an empty list is a deliberate no-op.
    if len(nodes) == 0:
        return nodes
    if keys is None:
        cmd = '{ GetStatus } Map'
    elif is_literal(keys):
        # Single key: extract that entry from every status dictionary.
        cmd = '{{ GetStatus /{0} get }} Map'.format(keys)
    elif is_iterable(keys):
        # Multiple keys: extract a list of values per node/connection.
        keys_str = " ".join("/{0}".format(x) for x in keys)
        cmd = '{{ GetStatus }} Map {{ [ [ {0} ] ] get }} Map'.format(keys_str)
    else:
        raise TypeError("keys should be either a string or an iterable")
    # Connections are pushed with pcd(), plain node id lists with sps().
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)
    sr(cmd)
    return spp()
| zifeo/nest-simulator | pynest/nest/lib/hl_api_info.py | Python | gpl-2.0 | 5,653 | [
"VisIt"
] | 3a02a0ceb397fe1ccdbe09cad1fe57d5a03c540bf10970bd6d633d529686ebb4 |
import os
import sys
import json
import logging
import argparse
from typing import Iterable, TextIO
from collections import defaultdict
import dinopy
import networkx
from phasm.io import gfa
from phasm.io.sequences import FastaSource
from phasm.typing import ReadMapping, LocalAlignment, AlignmentsT
from phasm.overlapper import ExactOverlapper
from phasm.assembly_graph import (build_assembly_graph, clean_graph,
remove_transitive_edges, remove_tips,
make_symmetric, merge_unambiguous_paths,
average_coverage_path, build_bubblechains,
identify_contigs, remove_diamond_tips)
from phasm.filter import (ContainedReads, MaxOverhang, MinReadLength,
MinOverlapLength)
from phasm.phasing import BubbleChainPhaser
from phasm.utils import DebugDataLogger
logger = logging.getLogger()
def overlap(args):
    """Compute pairwise exact overlaps between reads and emit them as GFA2."""
    args.output.write(gfa.gfa_header())
    overlapper = ExactOverlapper()
    fr = dinopy.FastaReader(args.fasta_input)
    logger.info("Building suffix tree and searching for pairwise overlaps...")
    for entry in fr.entries():
        name = entry.name.decode('utf-8')
        seq = entry.sequence.decode('utf-8')
        # Each read becomes a GFA2 segment; the sequence itself is not
        # stored ("*" placeholder).
        args.output.write(gfa.gfa_line("S", name, entry.length, "*"))
        # Index both orientations so reverse-complement overlaps are found.
        overlapper.add_sequence(name + "+", seq)
        overlapper.add_sequence(name + "-", dinopy.reverse_complement(seq))
    overlaps = overlapper.overlaps(args.min_length)
    logger.info("Writing to GFA2...")
    # Each overlap becomes a GFA2 edge ("E") record.
    for aread, bread, astart, aend, bstart, bend in overlaps:
        args.output.write(gfa.gfa_line(
            "E", "*", aread, bread, astart, aend, bstart, bend, "*"))
    logger.info("Done.")
def layout(args):
    """Build the assembly graph from a GFA2 alignments file, filter the
    alignments, clean the graph and write the result.
    """
    logger.info("======== STAGE 1: Build Assembly Graph =========")
    logger.info("Pass [1/2] of GFA2 file to import reads (segments)...")
    reads = {}
    with open(args.gfa_file) as f:
        reads = gfa.gfa2_parse_segments(f)
    logger.info("Read %d reads from GFA2 file.", len(reads))
    logger.info("Pass [2/2] of GFA2 file to import local alignments "
                "and build assembly graph...")
    read_alignments = defaultdict(dict)
    # Generator that records every alignment (in both orientations)
    # while passing it through unchanged.
    def alignment_recorder(la_iter: Iterable[LocalAlignment]) -> Iterable[
            LocalAlignment]:
        nonlocal read_alignments
        for la in la_iter:
            a_read, b_read = la.get_oriented_reads()
            read_alignments[a_read][b_read] = la
            read_alignments[b_read][a_read] = la.switch()
            yield la
    filters = [ContainedReads()]
    if args.min_read_length:
        filters.append(MinReadLength(args.min_read_length))
    if args.min_overlap_length:
        filters.append(MinOverlapLength(args.min_overlap_length))
    filters.append(MaxOverhang(args.max_overhang_abs,
                               args.max_overhang_rel))
    if args.metadata:
        # Enable per-filter debug recording so it can be dumped below.
        for f in filters:
            f.debug = True
    with open(args.gfa_file) as gfa_file:
        la_iter = map(gfa.gfa2_line_to_la(reads),
                      (l for l in gfa_file if l.startswith('E')))
        la_iter = alignment_recorder(la_iter)
        # Keep only alignments that pass every filter.
        la_iter = filter(lambda x: all(f(x) for f in filters), la_iter)
        g = build_assembly_graph(la_iter)
        logger.info("Built initial assembly graph with %d nodes and %d "
                    "edges.",
                    networkx.number_of_nodes(g),
                    networkx.number_of_edges(g))
    for f in filters:
        # NOTE(review): `filtered` accumulates removed-edge degrees below
        # but the log statement reports `f.filtered` instead — the local
        # accumulation appears unused; confirm intended behaviour.
        filtered = f.filtered
        if (f.debug_data or f.nodes_to_remove) and args.metadata:
            json.dump({
                'filter': f.__class__.__name__,
                'la_filtered': [
                    (str(r1), str(r2)) for r1, r2 in f.debug_data
                ],
                'reads_filtered': [str(r) for r in f.nodes_to_remove]
            }, args.metadata)
            args.metadata.write("\n")
        if f.nodes_to_remove:
            # Remove both orientations of each filtered read.
            for read in f.nodes_to_remove:
                orig = read.with_orientation('-')
                reverse = read.with_orientation('+')
                if orig in g:
                    filtered += g.degree(orig)
                    g.remove_node(orig)
                if reverse in g:
                    filtered += g.degree(reverse)
                    g.remove_node(reverse)
        logger.info("Filter %s removed %d alignments.",
                    f.__class__.__name__, f.filtered)
    # Free up memory
    del filters
    logger.info("Final graph: %d nodes and %d edges.",
                networkx.number_of_nodes(g),
                networkx.number_of_edges(g))
    logger.info("======== STAGE 2: Graph Cleaning =========")
    num_asymm_edges = 0
    edges_to_remove = remove_transitive_edges(g, args.length_fuzz)
    if args.metadata:
        json.dump({
            'filter': 'TransitiveReduction',
            'la_filtered': [
                (str(r1), str(r2)) for r1, r2 in edges_to_remove
            ],
            'reads_filtered': []
        }, args.metadata)
        args.metadata.write("\n")
    logger.info("Removing %d transitive edges...", len(edges_to_remove))
    g.remove_edges_from(edges_to_remove)
    num_asymm_edges += make_symmetric(g)
    logger.info("Removing tips...")
    num_in_tips, num_out_tips = remove_tips(g, args.max_tip_length,
                                            args.max_tip_length_bases)
    num_asymm_edges += make_symmetric(g)
    logger.info("Removing isolated nodes...")
    num_isolated_nodes = clean_graph(g)
    logger.info("Removed %d tip edges, %d isolated nodes, %d asymmetric "
                "edges.",
                num_in_tips+num_out_tips, num_isolated_nodes, num_asymm_edges)
    num_diamond_tips = remove_diamond_tips(g)
    logger.info("Removed %d diamond tips", num_diamond_tips)
    # Second tip-removal round after diamond tips have been removed.
    logger.info("Removing tips (stage 2)...")
    num_in_tips, num_out_tips = remove_tips(g, args.max_tip_length)
    num_asymm_edges = make_symmetric(g)
    num_isolated_nodes = clean_graph(g)
    logger.info("Removed %d tip edges, %d isolated nodes, "
                "%d asymmetric edges.", num_in_tips+num_out_tips,
                num_isolated_nodes, num_asymm_edges)
    logger.info("Merging unambiguous paths...")
    num_nodes_merged = merge_unambiguous_paths(g)
    logger.info("Merged %d nodes.", num_nodes_merged)
    logger.info("Done.")
    logger.info("Calculating average coverage for each edge...")
    for u, v in g.edges_iter():
        g[u][v]['avg_coverage'] = average_coverage_path(g, read_alignments,
                                                       [u, v])
    logger.info("Writing graph...")
    if not args.output:
        args.output = [sys.stdout]
    for f in args.output:
        if f == sys.stdout:
            gfa.write_graph(f, g, args.gfa_version)
        else:
            # Pick the output format from the file extension.
            ext = f.name[f.name.rfind('.')+1:]
            if ext == 'gfa':
                gfa.write_graph(f, g, args.gfa_version)
            elif ext == 'graphml':
                networkx.write_graphml(g, f, encoding='unicode')
            else:
                logger.error("File extension '%s' not recognised, ignoring "
                             "output file %s.", ext, f.name)
def _write_graphs(g, output_dir, filename_tpl, formats):
    """Write graph *g* once per requested format into *output_dir*.

    `filename_tpl` is the file name prefix (ending in '.'); the format's
    extension is appended.  Formats starting with 'gfa' carry their
    version as the last character (e.g. 'gfa2').
    """
    for file_format in formats:
        if file_format.startswith('gfa'):
            version = int(file_format[-1])
            path = os.path.join(output_dir, filename_tpl + "gfa")
            with open(path, "w") as out:
                gfa.write_graph(out, g, version)
        else:
            path = os.path.join(output_dir, filename_tpl + "graphml")
            with open(path, "w") as out:
                networkx.write_graphml(g, out, encoding='unicode')
def chain(args):
    """Split the reconstructed assembly graph into bubble chains and plain
    contigs, writing each piece in the requested output formats.
    """
    logger.info("Readig reads and fragments part of the assembly graph...")
    graph_reads = {}
    with open(args.graph_gfa) as f:
        graph_reads = gfa.gfa2_parse_segments_with_fragments(f)
    logger.info("Reconstructing assembly graph...")
    with open(args.graph_gfa) as f:
        g = gfa.gfa2_reconstruct_assembly_graph(f, graph_reads)
    logger.info("Done.")
    logger.info("Enumerate weakly connected components in the graph...")
    num_components = 0
    num_bubblechains = 0
    num_contigs = 0
    # Validate the comma-separated format list; unknown entries are
    # dropped with a warning.
    allowed_formats = {'gfa1', 'gfa2', 'graphml'}
    formats = []
    for file_format in args.format.split(','):
        file_format = file_format.strip()
        if file_format in allowed_formats:
            formats.append(file_format)
        else:
            logger.warning("File format '%s' not recognised, ignoring.",
                           file_format)
    if not formats:
        logger.critical("No valid file formats specified.")
        sys.exit(1)
    os.makedirs(args.output_dir, exist_ok=True)
    for i, component in enumerate(
            networkx.weakly_connected_component_subgraphs(g, copy=False)):
        logger.info("Connected component %d with %d nodes and %d edges.",
                    i, networkx.number_of_nodes(component),
                    networkx.number_of_edges(component))
        bubblechain_nodes = set()
        for j, bubblechain in enumerate(build_bubblechains(component)):
            logger.info("Found bubblechain #%d with %d nodes and %d edges",
                        j, networkx.number_of_nodes(bubblechain),
                        networkx.number_of_edges(bubblechain))
            # Tag the nodes so graph viewers can colour by bubble chain.
            for n in bubblechain.nodes_iter():
                component.node[n]['bubblechain'] = j
            bubblechain_nodes.update(bubblechain.nodes_iter())
            filename_tpl = "component{}.bubblechain{}.".format(i, j)
            _write_graphs(bubblechain, args.output_dir, filename_tpl,
                          formats)
            num_bubblechains += 1
        logger.info("Building contigs not part of a bubble chain...")
        for j, path in enumerate(identify_contigs(
                component, bubblechain_nodes, args.min_length)):
            # Update attribute for visualisation in Cytoscape (or other
            # graph viewer)
            if len(path) > 1:
                for u, v in g.node_path_edges(path):
                    component[u][v]['contig'] = j
            filename_tpl = "component{}.contig{}.".format(i, j)
            _write_graphs(g.subgraph(path), args.output_dir, filename_tpl,
                          formats)
            num_contigs += 1
        # Also write the whole component for reference.
        filename_tpl = "component{}.".format(i)
        _write_graphs(component, args.output_dir, filename_tpl, formats)
        num_components += 1
    logger.info("Built %d bubblechains and %d contigs from %d weakly "
                "connected components.",
                num_bubblechains, num_contigs, num_components)
def _get_read_alignments(f: TextIO, reads: ReadMapping) -> AlignmentsT:
    """Collect every pairwise local alignment from an open GFA2 file.

    Only edge ('E') records are parsed. The result maps each oriented
    read to a dict of its aligned partners; each alignment is stored in
    both directions (the reverse orientation via ``la.switch()``).
    """
    logger.info("Pass 2 of alignments GFA2 file to import all pairwise local "
                "alignments...")
    alignments = defaultdict(dict)
    parse_la = gfa.gfa2_line_to_la(reads)
    for line in f:
        if not line.startswith('E'):
            continue
        la = parse_la(line)
        a_read, b_read = la.get_oriented_reads()
        alignments[a_read][b_read] = la
        alignments[b_read][a_read] = la.switch()
    logger.info("Done.")
    return alignments
def phase(args):
    """Phase bubble chains/contigs and write one FASTA entry per haplotype.

    Pass 1 over the alignments GFA2 file imports the original reads
    (segments). The pairwise local alignments themselves are imported
    lazily (pass 2) only when the first bubble chain is encountered.
    Each subgraph GFA2 file is reconstructed into an assembly graph and
    phased with ``BubbleChainPhaser``; resulting sequences go to
    ``args.output``.

    Fix: corrected the "Readig" typo in a user-visible log message.
    """
    # Original reads are used for assembly graph reconstruction below
    logger.info("Pass 1 of alignments GFA2 file to import all original reads "
                "(segments)...")
    with open(args.alignments_gfa) as f:
        reads = gfa.gfa2_parse_segments(f)
    logger.info("Read %d reads from GFA2 file.", len(reads))
    # Setup sequence source (indexed FASTA with the raw read sequences)
    sequence_src = FastaSource(args.reads_fasta)
    # Loaded on demand when the first bubble chain is seen (see below)
    read_alignments = None
    debug_data_log = DebugDataLogger(args.debug_data)
    with dinopy.FastaWriter(args.output, force_overwrite=True) as fw:
        for gfa_file in args.subgraphs:
            logger.info("Subgraph %s", gfa_file)
            logger.info("Reading reads and fragments part of assembly graph...")
            with open(gfa_file) as f:
                graph_reads = gfa.gfa2_parse_segments_with_fragments(f)
            logger.info("Reconstructing assembly graph...")
            with open(gfa_file) as f:
                g = gfa.gfa2_reconstruct_assembly_graph(f, graph_reads, reads)
            g.sequence_src = sequence_src
            logger.info("Done.")
            logger.info("Start phasing process, ploidy %d...", args.ploidy)
            phaser = BubbleChainPhaser(g, args.ploidy, args.min_spanning_reads,
                                       args.max_bubble_size, args.threshold,
                                       args.prune_factor, args.max_candidates,
                                       args.max_prune_rounds,
                                       args.prune_step_size,
                                       debug_data_log=debug_data_log)
            # Base name for FASTA entry IDs: subgraph filename sans extension
            id_base = os.path.basename(gfa_file[:gfa_file.rfind('.')])
            if len(phaser.bubbles) == 0:
                logger.info("No bubbles found, simple contig path with %d "
                            "nodes.", g.number_of_nodes())
                # This is just a simple "contig" path (linear non-branching
                # path)
                if g.number_of_nodes() == 1:
                    seq = g.get_sequence(g.nodes()[0])
                else:
                    seq = g.sequence_for_path(
                        g.node_path_edges(networkx.topological_sort(g),
                                          data=True),
                        edge_len=g.edge_len
                    )
                fw.write_entry((seq, id_base.encode('ascii')))
            else:
                logger.info("Bubble chain with %d bubbles",
                            len(phaser.bubbles))
                if not read_alignments:
                    # Lazy pass 2: only needed when phasing actual bubbles
                    with open(args.alignments_gfa) as f:
                        read_alignments = _get_read_alignments(f, reads)
                for i, (haploblock, include_last) in enumerate(
                        phaser.phase(read_alignments)):
                    # Output the DNA sequence for each haplotype
                    logger.info("Haploblock %d, building DNA sequences for "
                                "each haplotype...", i)
                    for j, haplotype in enumerate(haploblock.haplotypes):
                        seq = g.sequence_for_path(
                            g.node_path_edges(haplotype, data=True),
                            include_last=include_last
                        )
                        if haploblock.from_large_bubble:
                            name = "{}.largebubble{}".format(id_base, i)
                        else:
                            name = "{}.haploblock{}.{}".format(id_base, i, j)
                        fw.write_entry((seq, name.encode('utf-8')))
                        if haploblock.from_large_bubble:
                            # Only first haplotype is filled
                            break
                    debug_data_log.haploblock(
                        haploblock, "{}.haploblock{}".format(
                            id_base, i)
                    )
            logger.info("Done with %s", gfa_file)
def main():
    """Entry point: build the CLI, configure logging and dispatch to the
    selected subcommand (overlap / layout / chain / phase).

    Fixes:
    - ``--gfa-version`` now has ``type=int``; without it argparse compared
      the raw command-line string ("1"/"2") against the integer choices
      ``(1, 2)`` and rejected every explicitly given value.
    - Corrected several typos in user-facing help strings.
    """
    parser = argparse.ArgumentParser(
        description="PHASM: Haplotype-aware de novo genome assembly.")
    parser.set_defaults(func=None)
    parser.add_argument(
        '-v', '--verbose', action='count', default=0, required=False,
        help="Increase verbosity level, number of levels: 0, 1, 2"
    )
    subparsers = parser.add_subparsers()
    # -------------------------------------------------------------------------
    # Overlap command
    # -------------------------------------------------------------------------
    overlap_parser = subparsers.add_parser(
        'overlap', help="Find pairwise exact overlaps between reads with a "
                        "given minimum length."
    )
    overlap_parser.set_defaults(func=overlap)
    overlap_parser.add_argument(
        '-l', '--min-length', type=int, required=False, default=1000,
        help="The minimum overlap length between two reads (default: 1000)."
    )
    overlap_parser.add_argument(
        '-o', '--output', type=argparse.FileType('w'), required=False,
        default=sys.stdout,
        help="The output file. Defaults to standard output."
    )
    overlap_parser.add_argument(
        'fasta_input', help="The fasta file with reads.")
    # -------------------------------------------------------------------------
    # Layout command
    # -------------------------------------------------------------------------
    layout_parser = subparsers.add_parser(
        'layout', help="Build an assembly graph based on pairwise local "
                       "alignments."
    )
    layout_parser.set_defaults(func=layout)
    alignment_group = layout_parser.add_argument_group(
        'Alignment and read filtering')
    graph_cleaning_group = layout_parser.add_argument_group('Graph cleaning')
    layout_io_group = layout_parser.add_argument_group('Input/output')
    alignment_group.add_argument(
        '-l', '--min-read-length', type=int, required=False, default=0,
        metavar="LENGTH",
        help="Filter reads smaller than the given length (default: disabled)"
    )
    alignment_group.add_argument(
        '-s', '--min-overlap-length', type=int, required=False, default=0,
        metavar="LENGTH",
        help="Minimum length of the overlap between two reads, otherwise "
             "this alignment is ignored. Default is disabled, because it's "
             "something that's usually handled by your overlapper."
    )
    alignment_group.add_argument(
        '-a', '--max-overhang-abs', type=int, default=1000, required=False,
        metavar="LENGTH",
        help="Max absolute overhang length (default: 1000)."
    )
    alignment_group.add_argument(
        '-r', '--max-overhang-rel', type=float, default=0.8, required=False,
        metavar="FRACTION",
        help="Max overhang length as fraction of the overlap length (default: "
             "0.8)."
    )
    graph_cleaning_group.add_argument(
        '-t', '--max-tip-length', type=int, default=4, required=False,
        metavar="NUM",
        help="Maximum number of edges of a path to be called a tip "
             "(default: 4)."
    )
    graph_cleaning_group.add_argument(
        '-T', '--max-tip-length-bases', type=int, default=5000, required=False,
        help="The maximum length (in bases instead of edges) of a tip "
             "(default 5000)."
    )
    graph_cleaning_group.add_argument(
        '-F', '--length-fuzz', type=int, default=1000, required=False,
        metavar="LENGTH",
        help="Transitive reduction length fuzz parameter (default: 1000). "
             "See Myers (2005). "
    )
    # type=int is essential here: argparse would otherwise compare the raw
    # string argument against the integer choices and always reject it.
    layout_io_group.add_argument(
        '-g', '--gfa-version', type=int, choices=(1, 2), default=2,
        help="Which GFA version to use when writing a graph to a GFA file "
             "(default GFA2)."
    )
    layout_io_group.add_argument(
        '-o', '--output', type=argparse.FileType('w'), default=[],
        metavar="FILE", action="append",
        help="Output file (default stdout). If a filename is given, it checks "
             "the file extension for output type. Supported file extensions "
             "are 'graphml' and 'gfa'. This option can be used multiple times "
             "to write multiple files."
    )
    layout_io_group.add_argument(
        '-M', '--metadata', type=argparse.FileType('w'), default=None,
        metavar="JSON_FILE", required=False,
        help="Output a bit of debug data to the given JSON file. Optional. "
             "This will increase memory usage when enabled."
    )
    layout_io_group.add_argument(
        'gfa_file', help="Input GFA2 file with all pairwise local alignments."
    )
    # ------------------------------------------------------------------------
    # Chain command
    # ------------------------------------------------------------------------
    chain_parser = subparsers.add_parser(
        'chain', help="Identify and build bubblechains")
    chain_parser.set_defaults(func=chain)
    chain_parser.add_argument(
        '-l', '--min-length', type=int, required=False, default=5000,
        help="Some paths in the assembly graph are not part of a bubble chain,"
             " PHASM will not try to phase these paths but outputs them as "
             "'normal' contigs. With this flag you can specify the minimum "
             "length of a contig to be included (default: 5000)."
    )
    chain_parser.add_argument(
        '-f', '--format', default="gfa2",
        help="Comma separated list of output formats. Supported: gfa1, gfa2, "
             "graphml (default: only GFA2). If multiple formats given, each "
             "bubble chain will get a file in each specified format. This "
             "allows you for example to both write GFA2 and GraphML files "
             "at the same time."
    )
    chain_parser.add_argument(
        '-o', '--output-dir',
        help="Output directory. If the directory does not exist, it will "
             "be created. All identified bubble chains will be written to a "
             "separate file in this directory."
    )
    chain_parser.add_argument(
        'graph_gfa',
        help="The assembly graph in GFA2 format. Other graph formats are not "
             "supported. Note that this is a different file than the GFA2 file"
             " with pairwise local alignments."
    )
    # ------------------------------------------------------------------------
    # Phase command
    # ------------------------------------------------------------------------
    phase_parser = subparsers.add_parser(
        'phase',
        help="Phase a bubblechain and output DNA sequences for each haplotype"
             " in FASTA format."
    )
    phase_parser.set_defaults(func=phase)
    phase_parser.add_argument(
        '-p', '--ploidy', type=int, required=True,
        help="The ploidy level."
    )
    phase_parser.add_argument(
        '-s', '--min-spanning-reads', type=int, default=3, required=False,
        help="If there are fewer spanning reads between two bubbles than the "
             "given number then PHASM will start a new haploblock."
    )
    phase_parser.add_argument(
        '-b', '--max-bubble-size', type=int, default=10, required=False,
        help="The maximum number of simple paths through a bubble. If a "
             "bubble contains more paths from its entrance to exit than the "
             "given number, it is considered too big, and a new haploblock "
             "will be created. The bubble itself will be phased on its own "
             "and not in conjunction with other bubbles. Especially for "
             "larger ploidies you may want to lower this number a bit, as the "
             "number of k-tuples is p^k, where p is the number of paths. "
             "Default value is 10."
    )
    phase_parser.add_argument(
        '-t', '--threshold', type=float, default=1e-3, required=False,
        help="The minimum relative likelihood of a candidate haplotype set "
             "to be considered for any following bubbles (default: 1e-3)."
    )
    phase_parser.add_argument(
        '-d', '--prune-factor', type=float, default=0.1, required=False,
        help="Any candidate haplotype set with a relative likelihood lower "
             "than the given prune factor times the top scoring candidate "
             "will be pruned (default: 0.1)."
    )
    phase_parser.add_argument(
        '-c', '--max-candidates', type=int, default=500, required=False,
        help="At each bubble, limit the number of candidate haplotype sets. "
             "If there are more candidates than the given number even after "
             "an initial pruning step, we prune more stringently another "
             "time, until the number of candidates falls below the "
             "given number (default 500). The maximum number of pruning "
             "rounds can be specified with the option '-r'."
    )
    phase_parser.add_argument(
        '-r', '--max-prune-rounds', type=int, default=9, required=False,
        help="Maximum number of pruning rounds if the number of candidate "
             "haplotype sets is too high (default: 9)."
    )
    phase_parser.add_argument(
        '-S', '--prune-step-size', type=float, default=0.1, required=False,
        help="With each pruning round, increase the prune factor with the "
             "given number (default: 0.1)."
    )
    phase_parser.add_argument(
        '-D', '--debug-data', type=argparse.FileType('w'), default=None,
        required=False,
        help="Output another file containing loads of debug data produced "
             "during the phasing process (optional)."
    )
    # NOTE(review): default sys.stdout is a *text* stream while
    # FileType('wb') opens files in binary mode — confirm dinopy's
    # FastaWriter accepts it, or consider sys.stdout.buffer.
    phase_parser.add_argument(
        '-o', '--output', type=argparse.FileType('wb'), default=sys.stdout,
        help="Output file (default: stdout)."
    )
    phase_parser.add_argument(
        'reads_fasta',
        help="The FASTA file with all your reads. A FASTA index file should "
             "be present."
    )
    phase_parser.add_argument(
        'alignments_gfa',
        help="The GFA2 file with all pairwise local alignments (used to create"
             " the initial assembly graph). This is a different file than your"
             " bubblechain GFA2 file."
    )
    phase_parser.add_argument(
        'subgraphs', nargs='+',
        help="The bubblechain/contig graph GFA2 file(s). If given multiple "
             "files, these files will be processed sequentially, but the DNA "
             "sequences will be written to the same file."
    )
    # ------------------------------------------------------------------------
    # Argument parsing
    # ------------------------------------------------------------------------
    args = parser.parse_args()
    # Setup logging: CLI logger at INFO, library logger at WARNING; -v/-vv
    # raise the levels step by step.
    logger.setLevel(logging.INFO)
    phasm_logger = logging.getLogger('phasm')
    phasm_logger.setLevel(logging.WARNING)
    formatter = logging.Formatter("{asctime} - {levelname}:{name}:{message}",
                                  style="{")
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    if args.verbose > 0:
        phasm_logger.setLevel(logging.INFO)
    if args.verbose > 1:
        logger.setLevel(logging.DEBUG)
        phasm_logger.setLevel(logging.DEBUG)
    if not args.func:
        # No subcommand given: show usage instead of failing.
        parser.print_help()
    else:
        args.func(args)
| AbeelLab/phasm | phasm/cli/assembler.py | Python | mit | 26,901 | [
"Cytoscape"
] | 67b920ff4d950334f322b5e02a483958b09647785faad78c9d3be766c966d625 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
    """Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
    homepage = "http://github.com/lh3/bwa"
    url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
    # Known versions with their archive checksums. 0.7.12 has no release
    # tarball, so it is fetched from the auto-generated GitHub archive.
    version('0.7.17', '82cba7ef695538e6a38b9d4156837381')
    version('0.7.16a', 'c5115c9a5ea0406848500e4b23a7708c')
    version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
    version('0.7.13', 'f094f609438511766c434178a3635ab4')
    version('0.7.12', 'e24a587baaad411d5da89516ad7a261a',
            url='https://github.com/lh3/bwa/archive/0.7.12.tar.gz')
    depends_on('zlib')
    def install(self, spec, prefix):
        """Patch the Makefile to use Spack's zlib, run make, and copy the
        binary, docs and man page into the installation prefix by hand."""
        # Point the Makefile's include/link flags at the Spack-provided zlib
        # rather than whatever the system would pick up.
        filter_file(r'^INCLUDES=',
                    "INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
        filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
                    'Makefile')
        make()
        # Manual installation of each artifact into the prefix.
        mkdirp(prefix.bin)
        install('bwa', join_path(prefix.bin, 'bwa'))
        set_executable(join_path(prefix.bin, 'bwa'))
        mkdirp(prefix.doc)
        install('README.md', prefix.doc)
        install('NEWS.md', prefix.doc)
        mkdirp(prefix.man.man1)
        install('bwa.1', prefix.man.man1)
| mfherbst/spack | var/spack/repos/builtin/packages/bwa/package.py | Python | lgpl-2.1 | 2,458 | [
"BWA"
] | 8b4789bd317e744c0e260e12ff2bf62580c52743450481196d3422bc19a86ee1 |
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerFaucetOpenEnvV2(SawyerXYZEnv):
    """Sawyer task: turn a faucet handle open (V2 reward formulation).

    The goal position is offset from the (randomly placed) faucet base by
    the handle length; success is bringing the handle point within
    ``_target_radius`` (0.07 m) of that goal.
    """
    def __init__(self):
        # Gripper workspace bounds (x, y, z), in the robot frame.
        hand_low = (-0.5, 0.40, -0.15)
        hand_high = (0.5, 1, 0.5)
        # Sampling range for the faucet base position on reset.
        obj_low = (-0.05, 0.8, 0.0)
        obj_high = (0.05, 0.85, 0.0)
        # Horizontal offset from the faucet base to the goal point.
        self._handle_length = 0.175
        # Distance to the goal within which the task counts as solved.
        self._target_radius = 0.07
        super().__init__(
            self.model_name,
            hand_low=hand_low,
            hand_high=hand_high,
        )
        self.init_config = {
            'obj_init_pos': np.array([0, 0.8, 0.0]),
            'hand_init_pos': np.array([0., .4, .2]),
        }
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self._random_reset_space = Box(
            np.array(obj_low),
            np.array(obj_high),
        )
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
    @property
    def model_name(self):
        """Path to the MJCF model describing the faucet scene."""
        return full_v2_path_for('sawyer_xyz/sawyer_faucet.xml')
    @_assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward and an info dict of task metrics for `obs`."""
        (reward, tcp_to_obj, _, target_to_obj, object_grasped,
         in_place) = self.compute_reward(action, obs)
        info = {
            # Success when the handle point is within 0.07 m of the target.
            'success': float(target_to_obj <= 0.07),
            'near_object': float(tcp_to_obj <= 0.01),
            # No grasping is required for this task, so it always "succeeds".
            'grasp_success': 1.,
            'grasp_reward': object_grasped,
            'in_place_reward': in_place,
            'obj_to_target': target_to_obj,
            'unscaled_reward': reward,
        }
        return reward, info
    @property
    def _target_site_config(self):
        # Only the "open" goal site is active; the "close" site is parked
        # far away so it is not rendered near the scene.
        return [('goal_open', self._target_pos),
                ('goal_close', np.array([10., 10., 10.]))]
    def _get_pos_objects(self):
        # Handle position with a small fixed z offset (empirical tweak).
        return self._get_site_pos('handleStartOpen') + np.array(
            [0., 0., -0.01])
    def _get_quat_objects(self):
        # Orientation of the faucet base body.
        return self.sim.data.get_body_xquat('faucetBase')
    def reset_model(self):
        self._reset_hand()
        # Compute faucet position
        self.obj_init_pos = self._get_state_rand_vec() if self.random_init \
            else self.init_config['obj_init_pos']
        # Set mujoco body to computed position
        self.sim.model.body_pos[self.model.body_name2id(
            'faucetBase')] = self.obj_init_pos
        # Goal: handle swung to +x by the handle length, slightly raised.
        self._target_pos = self.obj_init_pos + np.array(
            [+self._handle_length, .0, .125])
        return self._get_obs()
    def _reset_hand(self):
        super()._reset_hand()
        self.reachCompleted = False
    def compute_reward(self, action, obs):
        """Reward = scaled mix of reaching the handle and moving it to the
        target; the action itself is unused (state-based reward)."""
        del action
        # Handle point from the observation, with fixed empirical offsets.
        obj = obs[4:7] + np.array([-.04, .0, .03])
        tcp = self.tcp_center
        target = self._target_pos.copy()
        # How far the handle still is from the open position, normalised
        # against its initial distance.
        target_to_obj = (obj - target)
        target_to_obj = np.linalg.norm(target_to_obj)
        target_to_obj_init = (self.obj_init_pos - target)
        target_to_obj_init = np.linalg.norm(target_to_obj_init)
        in_place = reward_utils.tolerance(
            target_to_obj,
            bounds=(0, self._target_radius),
            margin=abs(target_to_obj_init - self._target_radius),
            sigmoid='long_tail',
        )
        # Reaching term: gripper center close to the handle point.
        faucet_reach_radius = 0.01
        tcp_to_obj = np.linalg.norm(obj - tcp)
        tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
        reach = reward_utils.tolerance(
            tcp_to_obj,
            bounds=(0, faucet_reach_radius),
            margin=abs(tcp_to_obj_init - faucet_reach_radius),
            sigmoid='gaussian',
        )
        # Gripper opening is irrelevant for this task.
        tcp_opened = 0
        object_grasped = reach
        reward = 2 * reach + 3 * in_place
        reward *= 2
        # Full reward once the handle is within the success radius.
        reward = 10 if target_to_obj <= self._target_radius else reward
        return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped,
                in_place)
| rlworkgroup/metaworld | metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_open_v2.py | Python | mit | 4,112 | [
"Gaussian"
] | f741c1f8ec3f223de89512d342abd89ff1887564ba66c741b46953f3bd77b44f |
# Licensed under an MIT open source license - see LICENSE
"""
Utility functions for fil-finder package
"""
import itertools
import numpy as np
from scipy import optimize as op
import thread
import threading
import time
import os
def removearray(l, arr):
    '''
    Removes the first occurrence of `arr` from the list `l`, comparing
    with np.array_equal. Raises ValueError when no element matches.
    Code adapted from
    http://stackoverflow.com/questions/3157374/
    how-do-you-remove-a-numpy-array-from-a-list-of-numpy-arrays
    '''
    for pos, candidate in enumerate(l):
        if np.array_equal(candidate, arr):
            del l[pos]
            return
    raise ValueError('Array not contained in this list.')
def weighted_av(items, weight):
    '''
    Weighted mean of `items` using `weight`, ignoring NaN weights.

    Falls back to the plain arithmetic mean when every weight is NaN
    (or the weight sequence is empty), and returns None when the finite
    weights sum to zero.

    Fix: the previous implementation computed the NaN mask from the
    *already filtered* weights and applied it to `items`, which raises
    an IndexError on modern NumPy whenever any weight is NaN. The mask
    is now built once from the raw weights and applied to both arrays.
    '''
    weight = np.asarray(weight, dtype=float)
    valid = ~np.isnan(weight)
    if not valid.any():
        # No usable weights at all: unweighted mean of the raw items.
        return sum(items) / len(items)
    filtered_items = np.asarray(items, dtype=float)[valid]
    weight = weight[valid]
    denom = weight.sum()
    num = np.dot(filtered_items, weight)
    return (num / denom) if denom != 0 else None
def raw_input_with_timeout(prompt, timeout=30.0):
    '''
    Manual input with a timeout. Code from
    http://stackoverflow.com/questions/2933399/how-to-set-time-limit-on-input.

    Returns the entered string, or None if the timer fired first.
    Python 2 only (print statement, raw_input, `thread` module).
    NOTE: the prompt is displayed twice (once by print, once by
    raw_input itself) — kept as in the original.
    '''
    print prompt
    # The timer interrupts the main thread, which surfaces in the
    # blocking raw_input() below as KeyboardInterrupt.
    timer = threading.Timer(timeout, thread.interrupt_main)
    astring = None
    try:
        timer.start()
        astring = raw_input(prompt)
    except KeyboardInterrupt:
        # Timed out: fall through with astring still None.
        pass
    # Cancel the pending interrupt when input arrived in time.
    timer.cancel()
    return astring
def find_nearest(array, value):
    '''Return the element of `array` closest to `value`.'''
    return array[np.argmin(np.abs(array - value))]
def timeit(method):
    '''
    Timing decorator from
    https://www.andreas-jung.com/contents/
    a-python-decorator-for-measuring-the-execution-time-of-methods.

    Wraps `method` so that each call prints the method name, its
    arguments and the elapsed wall-clock seconds (Python 2 print
    statement), then returns the method's result unchanged.
    '''
    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        print '%r (%r, %r) %2.2f sec' % \
            (method.__name__, args, kw, te - ts)
        return result
    return timed
##########################################################################
# 2D Gaussian Fit Code from
# http://www.scipy.org/Cookbook/FittingData
# (functions twodgaussian,moments,fit2dgaussian)
##########################################################################
def twodgaussian(h, cx, cy, wx, wy, b):
    '''
    Return a callable f(x, y) evaluating a 2D Gaussian with amplitude
    `h`, centre (`cx`, `cy`), widths (`wx`, `wy`) and baseline `b`.
    '''
    wx = float(wx)
    wy = float(wy)

    def gauss(x, y):
        dx = (cx - x) / wx
        dy = (cy - y) / wy
        return h * np.exp(-(dx ** 2. + dy ** 2.) / 2) + b

    return gauss
def moments(data):
    '''
    Initial 2D-Gaussian parameter estimates (h, x, y, wx, wy, b) from
    image moments, per the SciPy cookbook recipe. The width estimates
    subtract the *other* axis' centroid, exactly as in the original
    cookbook code.
    '''
    mass = data.sum()
    xi, yi = np.indices(data.shape)
    xc = (xi * data).sum() / mass
    yc = (yi * data).sum() / mass
    col = data[:, int(yc)]
    wx = np.sqrt(np.abs((np.arange(col.size) - yc) ** 2 * col).sum() / col.sum())
    row = data[int(xc), :]
    wy = np.sqrt(np.abs((np.arange(row.size) - xc) ** 2 * row).sum() / row.sum())
    base = abs(np.median(data.ravel()))
    amp = data.max() - base
    return amp, xc, yc, wx, wy, base
def fit2dgaussian(data):
    '''
    Least-squares fit of a 2D Gaussian to `data`, seeded with moment
    estimates. Returns (fit_params, fit_errors); when the covariance
    cannot be estimated (bad fit), |params| is used as the error.
    '''
    guess = moments(data)

    def residuals(p):
        return np.ravel(twodgaussian(*p)(*np.indices(data.shape)) - data)

    fit, cov = op.leastsq(residuals, guess, maxfev=(1000 * len(data)),
                          full_output=True)[:2]
    if cov is None:  # Bad fit
        fiterr = np.abs(fit)
    else:
        fiterr = np.sqrt(np.diag(cov))
    return fit, fiterr
##########################################################################
# Simple fcns used throughout module
##########################################################################
def chunks(l, n):
    '''Split sequence `l` into consecutive pieces of length `n` (last may be shorter).'''
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
def eight_con():
    '''3x3 array of ones: the 8-connectivity structuring element.'''
    return np.full((3, 3), 1.0)
def distance(x, x1, y, y1):
    '''Euclidean distance between points (x, y) and (x1, y1).'''
    dx, dy = x - x1, y - y1
    return np.sqrt(dx ** 2.0 + dy ** 2.0)
def padwithzeros(vector, pad_width, iaxis, kwargs):
    '''
    np.pad callable-mode helper: zero out the leading and trailing pad
    regions of `vector` in place and return it.
    '''
    before, after = pad_width
    vector[:before] = 0
    vector[-after:] = 0
    return vector
def padwithnans(vector, pad_width, iaxis, kwargs):
    '''
    np.pad callable-mode helper: fill the leading and trailing pad
    regions of `vector` with NaN in place and return it.

    Fix: uses np.nan instead of the np.NaN alias, which was removed in
    NumPy 2.0.
    NOTE(review): as in the zero-padding twin above, a trailing pad
    width of 0 would make the -0: slice cover the whole array — assumed
    never to happen in practice; confirm with callers.
    '''
    vector[:pad_width[0]] = np.nan
    vector[-pad_width[1]:] = np.nan
    return vector
def round_figs(x, n):
    '''Round `x` to `n` significant figures.'''
    digits = int(n - np.ceil(np.log10(abs(x))))
    return round(x, digits)
def shifter(l, n):
    '''Rotate sequence `l` left by `n` positions.'''
    tail, head = l[n:], l[:n]
    return tail + head
def product_gen(n):
    '''
    Yield all strings over alphabet `n` in length order:
    first every 1-character string, then every 2-character string, etc.
    '''
    repeat = 1
    while True:
        for combo in itertools.product(n, repeat=repeat):
            yield "".join(combo)
        repeat += 1
def planck(T, freq):
    '''
    Planck spectral radiance B_nu(T) at frequency `freq` (Hz) and
    temperature `T` (K), with hard-coded truncated constants:
    h = 6.63e-34 J s and k = 1.38e-23 J/K. The division by 9e16 is
    presumably c**2 with c = 3e8 m/s — TODO confirm intended
    units/normalisation.
    '''
    return ((2.0 * (6.63 * 10 ** (-34)) * freq ** 3) / (9 * 10 ** 16)) *\
        (1 / (np.expm1((6.63 * 10 ** (-34) * freq) / (1.38 * 10 ** (-23) * float(T)))))
def dens_func(B, kappa, I):
    '''
    Column-density estimate from intensity `I`, Planck function value
    `B` and opacity `kappa`. kappa is rescaled by 100 and the constant
    4787 converts the result into solar masses per parsec (per the
    original inline comment); the 10**20 factor presumably rescales
    B's units — TODO confirm against the calling code.
    '''
    kappa = 100 * kappa
    return (I / (B * 10 ** 20)) * (1 / (kappa)) * 4787  # into sol.mass/pc
def red_chisq(data, fit, nparam, sd):
    '''
    Reduced chi-squared of `fit` against `data` with per-point
    uncertainty `sd`, using N - nparam - 1 degrees of freedom.
    '''
    dof = data.shape[0] - nparam - 1
    resid = (fit - data) / sd
    return np.sum(resid ** 2.) / float(dof)
def try_mkdir(name):
    '''
    Checks if a folder exists under the current working directory, and
    makes it if it doesn't.
    '''
    target = os.path.join(os.getcwd(), name)
    if not os.path.isdir(target):
        os.mkdir(target)
| dcolombo/FilFinder | fil_finder/utilities.py | Python | mit | 4,848 | [
"Gaussian"
] | 78bb0c14335f9bc32f678b6d28e716430fed8fb9d1afdc0f007e71db7ea2f2ed |
"""
This module calculates corrections for the species listed below, fitted to the experimental and computed
entries given to the CorrectionCalculator constructor.
"""
import os
import warnings
from collections import OrderedDict
from typing import Dict, List, Tuple, Union, Optional
import numpy as np
import plotly.graph_objects as go
from monty.serialization import loadfn
from scipy.optimize import curve_fit
from pymatgen.core import yaml
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.reaction_calculator import ComputedReaction
from pymatgen.analysis.structure_analyzer import sulfide_type
def _func(x, *m):
"""
Helper function for curve_fit.
"""
return np.dot(x, m)
class CorrectionCalculator:
"""
A CorrectionCalculator contains experimental and computed entries which it uses to compute corrections.
It graphs residual errors after applying the computed corrections and creates the MPCompatibility.yaml
file the Correction classes use.
Attributes:
species: list of species that corrections are being calculated for
exp_compounds: list of dictionaries which each contain a compound's formula and experimental data
calc_compounds: dictionary of ComputedEntry objects
corrections: list of corrections in same order as species list
corrections_std_error: list of the variances of the corrections in same order as species list
corrections_dict: dictionary of format {'species': (value, uncertainty)} for easier correction lookup
"""
def __init__(
self,
species: List[str] = [
"oxide",
"peroxide",
"superoxide",
"S",
"F",
"Cl",
"Br",
"I",
"N",
"Se",
"Si",
"Sb",
"Te",
"V",
"Cr",
"Mn",
"Fe",
"Co",
"Ni",
"W",
"Mo",
"H",
],
max_error: float = 0.1,
allow_unstable: Union[float, bool] = 0.1,
exclude_polyanions: List[str] = [
"SO4",
"SO3",
"CO3",
"NO3",
"NO2",
"OCl3",
"ClO3",
"ClO4",
"HO",
"ClO",
"SeO3",
"TiO3",
"TiO4",
"WO4",
"SiO3",
"SiO4",
"Si2O5",
"PO3",
"PO4",
"P2O7",
],
) -> None:
"""
Initializes a CorrectionCalculator.
Args:
species: list of species to calculate corrections for
max_error: maximum tolerable relative uncertainty in experimental energy.
Compounds with relative uncertainty greater than this value will be excluded from the fit
allow_unstable: whether unstable entries are to be included in the fit. If True, all compounds will
be included regardless of their energy above hull. If False or a float, compounds with
energy above hull greater than the given value (defaults to 0.1 eV/atom) will be
excluded
exclude_polyanions: a list of polyanions that contain additional sources of error that may negatively
influence the quality of the fitted corrections. Compounds with these polyanions
will be excluded from the fit
"""
self.species = species
self.max_error = max_error
if not allow_unstable:
self.allow_unstable = 0.1
else:
self.allow_unstable = allow_unstable
self.exclude_polyanions = exclude_polyanions
self.corrections: List[float] = []
self.corrections_std_error: List[float] = []
self.corrections_dict: Dict[str, Tuple[float, float]] = {} # {'species': (value, uncertainty)}
# to help the graph_residual_error_per_species() method differentiate between oxygen containing compounds
if "oxide" in self.species:
self.oxides: List[str] = []
if "peroxide" in self.species:
self.peroxides: List[str] = []
if "superoxide" in self.species:
self.superoxides: List[str] = []
if "S" in self.species:
self.sulfides: List[str] = []
def compute_from_files(self, exp_gz: str, comp_gz: str):
"""
Args:
exp_gz: name of .json.gz file that contains experimental data
data in .json.gz file should be a list of dictionary objects with the following keys/values:
{"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
"uncertainty": uncertainty in formation energy}
comp_gz: name of .json.gz file that contains computed entries
data in .json.gz file should be a dictionary of {chemical formula: ComputedEntry}
"""
exp_entries = loadfn(exp_gz)
calc_entries = loadfn(comp_gz)
return self.compute_corrections(exp_entries, calc_entries)
    def compute_corrections(self, exp_entries: list, calc_entries: dict) -> dict:
        """
        Computes the corrections and fills in correction, corrections_std_error, and corrections_dict.

        Args:
            exp_entries: list of dictionary objects with the following keys/values:
                         {"formula": chemical formula, "exp energy": formation energy in eV/formula unit,
                         "uncertainty": uncertainty in formation energy}
            calc_entries: dictionary of computed entries, of the form {chemical formula: ComputedEntry}

        Returns:
            dict of {specie: (correction value, standard error)}, also stored
            on self.corrections_dict.

        Raises:
            ValueError: calc_compounds is missing an entry
        """
        self.exp_compounds = exp_entries
        self.calc_compounds = calc_entries
        self.names: List[str] = []
        self.diffs: List[float] = []
        self.coeff_mat: List[List[float]] = []
        self.exp_uncer: List[float] = []
        # remove any corrections in calc_compounds
        for entry in self.calc_compounds.values():
            entry.correction = 0
        for cmpd_info in self.exp_compounds:
            # to get consistent element ordering in formula
            name = Composition(cmpd_info["formula"]).reduced_formula
            allow = True
            compound = self.calc_compounds.get(name, None)
            if not compound:
                warnings.warn(f"Compound {name} is not found in provided computed entries and is excluded from the fit")
                continue
            # filter out compounds with large uncertainties
            relative_uncertainty = abs(cmpd_info["uncertainty"] / cmpd_info["exp energy"])
            if relative_uncertainty > self.max_error:
                allow = False
                warnings.warn(
                    "Compound {} is excluded from the fit due to high experimental uncertainty ({}%)".format(
                        name, relative_uncertainty
                    )
                )
            # filter out compounds containing certain polyanions
            for anion in self.exclude_polyanions:
                if anion in name or anion in cmpd_info["formula"]:
                    allow = False
                    warnings.warn(f"Compound {name} contains the polyanion {anion} and is excluded from the fit")
                    break
            # filter out compounds that are unstable
            # (self.allow_unstable is a float cutoff, or True to skip this
            # filter entirely — bool is not an instance of float)
            if isinstance(self.allow_unstable, float):
                try:
                    eah = compound.data["e_above_hull"]
                except KeyError:
                    raise ValueError("Missing e above hull data")
                if eah > self.allow_unstable:
                    allow = False
                    warnings.warn(f"Compound {name} is unstable and excluded from the fit (e_above_hull = {eah})")
            if allow:
                comp = Composition(name)
                elems = list(comp.as_dict())
                # Build the elemental-reference reaction so the computed
                # formation energy can be compared with experiment.
                reactants = []
                for elem in elems:
                    try:
                        elem_name = Composition(elem).reduced_formula
                        reactants.append(self.calc_compounds[elem_name])
                    except KeyError:
                        raise ValueError("Computed entries missing " + elem)
                rxn = ComputedReaction(reactants, [compound])
                rxn.normalize_to(comp)
                energy = rxn.calculated_reaction_energy
                # One coefficient per fitted specie: the per-formula-unit count
                # of that specie in this compound (0 when absent).
                coeff = []
                for specie in self.species:
                    if specie == "oxide":
                        if compound.data["oxide_type"] == "oxide":
                            coeff.append(comp["O"])
                            self.oxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "peroxide":
                        if compound.data["oxide_type"] == "peroxide":
                            coeff.append(comp["O"])
                            self.peroxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "superoxide":
                        if compound.data["oxide_type"] == "superoxide":
                            coeff.append(comp["O"])
                            self.superoxides.append(name)
                        else:
                            coeff.append(0)
                    elif specie == "S":
                        if Element("S") in comp:
                            # Prefer precomputed sulfide_type; fall back to a
                            # structure-based classification when available.
                            sf_type = "sulfide"
                            if compound.data.get("sulfide_type"):
                                sf_type = compound.data["sulfide_type"]
                            elif hasattr(compound, "structure"):
                                sf_type = sulfide_type(compound.structure)
                            if sf_type == "sulfide":
                                coeff.append(comp["S"])
                                self.sulfides.append(name)
                            else:
                                coeff.append(0)
                        else:
                            coeff.append(0)
                    else:
                        try:
                            coeff.append(comp[specie])
                        except ValueError:
                            raise ValueError(f"We can't detect this specie: {specie}")
                # All quantities are normalised per atom.
                self.names.append(name)
                self.diffs.append((cmpd_info["exp energy"] - energy) / comp.num_atoms)
                self.coeff_mat.append([i / comp.num_atoms for i in coeff])
                self.exp_uncer.append((cmpd_info["uncertainty"]) / comp.num_atoms)
        # for any exp entries with no uncertainty value, assign average uncertainty value
        sigma = np.array(self.exp_uncer)
        sigma[sigma == 0] = np.nan
        with warnings.catch_warnings():
            warnings.simplefilter(
                "ignore", category=RuntimeWarning
            )  # numpy raises warning if the entire array is nan values
            mean_uncer = np.nanmean(sigma)
        sigma = np.where(np.isnan(sigma), mean_uncer, sigma)
        if np.isnan(mean_uncer):
            # no uncertainty values for any compounds, don't try to weight
            popt, self.pcov = curve_fit(_func, self.coeff_mat, self.diffs, p0=np.ones(len(self.species)))
        else:
            popt, self.pcov = curve_fit(
                _func,
                self.coeff_mat,
                self.diffs,
                p0=np.ones(len(self.species)),
                sigma=sigma,
                absolute_sigma=True,
            )
        self.corrections = popt.tolist()
        # Standard errors from the diagonal of the covariance matrix.
        self.corrections_std_error = np.sqrt(np.diag(self.pcov)).tolist()
        for i, v in enumerate(self.species):
            self.corrections_dict[v] = (
                round(self.corrections[i], 3),
                round(self.corrections_std_error[i], 4),
            )
        # set ozonide correction to 0 so that this species does not receive a correction
        # while other oxide types do
        self.corrections_dict["ozonide"] = (0, 0)
        return self.corrections_dict
def graph_residual_error(self) -> go.Figure:
    """
    Graphs the residual errors for all compounds after applying computed corrections.

    Raises:
        RuntimeError: if corrections have not been computed yet.
    """
    if len(self.corrections) == 0:
        raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
    # Residual per compound = (exp - calc energy gap) minus the fitted
    # correction, taken in absolute value (eV/atom).
    fitted = np.dot(self.coeff_mat, self.corrections)
    residuals = [abs(delta) for delta in np.subtract(self.diffs, fitted)]
    # Sort compounds by residual magnitude; labels travel with their values.
    ordering = sorted(zip(residuals, self.names.copy()))
    residuals = [err for err, _ in ordering]
    compound_labels = [label for _, label in ordering]
    n_points = len(residuals)
    fig = go.Figure(
        data=go.Scatter(
            x=np.linspace(1, n_points, n_points),
            y=residuals,
            mode="markers",
            text=compound_labels,
        ),
        layout=go.Layout(
            title=go.layout.Title(text="Residual Errors"),
            yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
        ),
    )
    residual_arr = np.array(residuals)
    original_arr = np.array(self.diffs)
    print("Residual Error:")
    print("Median = " + str(np.median(residual_arr)))
    print("Mean = " + str(np.mean(residual_arr)))
    print("Std Dev = " + str(np.std(residual_arr)))
    print("Original Error:")
    print("Median = " + str(abs(np.median(original_arr))))
    print("Mean = " + str(abs(np.mean(original_arr))))
    print("Std Dev = " + str(np.std(original_arr)))
    return fig
def graph_residual_error_per_species(self, specie: str) -> go.Figure:
    """
    Graphs the residual errors for each compound that contains specie after applying computed corrections.

    Args:
        specie: the specie/group that residual errors are being plotted for

    Raises:
        ValueError: the specie is not a valid specie that this class fits corrections for
        RuntimeError: if corrections have not been computed yet
    """
    if specie not in self.species:
        raise ValueError("not a valid specie")
    if len(self.corrections) == 0:
        raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
    abs_errors = [abs(i) for i in self.diffs - np.dot(self.coeff_mat, self.corrections)]
    labels_species = self.names.copy()
    diffs_cpy = self.diffs.copy()
    num = len(labels_species)
    if specie in ("oxide", "peroxide", "superoxide", "S"):
        # Map each anion group to the compound names it was fitted against.
        # BUG FIX: the original elif chain compared against "superoxides"
        # (plural), which could never match the guard above, so superoxide
        # plots silently used the sulfide compound list instead.
        compounds_by_group = {
            "oxide": self.oxides,
            "peroxide": self.peroxides,
            "superoxide": self.superoxides,
            "S": self.sulfides,
        }
        compounds = compounds_by_group[specie]
        # Walk backwards so deletions do not shift indices still to visit.
        for i in range(num - 1, -1, -1):
            if labels_species[i] not in compounds:
                del labels_species[i]
                del abs_errors[i]
                del diffs_cpy[i]
    else:
        # Keep only compounds whose composition actually contains the specie.
        for i in range(num - 1, -1, -1):
            if not Composition(labels_species[i])[specie]:
                del labels_species[i]
                del abs_errors[i]
                del diffs_cpy[i]
    abs_errors, labels_species = (list(t) for t in zip(*sorted(zip(abs_errors, labels_species))))  # sort by error
    num = len(abs_errors)
    fig = go.Figure(
        data=go.Scatter(
            x=np.linspace(1, num, num),
            y=abs_errors,
            mode="markers",
            text=labels_species,
        ),
        layout=go.Layout(
            title=go.layout.Title(text="Residual Errors for " + specie),
            yaxis=go.layout.YAxis(title=go.layout.yaxis.Title(text="Residual Error (eV/atom)")),
        ),
    )
    print("Residual Error:")
    print("Median = " + str(np.median(np.array(abs_errors))))
    print("Mean = " + str(np.mean(np.array(abs_errors))))
    print("Std Dev = " + str(np.std(np.array(abs_errors))))
    print("Original Error:")
    print("Median = " + str(abs(np.median(np.array(diffs_cpy)))))
    print("Mean = " + str(abs(np.mean(np.array(diffs_cpy)))))
    print("Std Dev = " + str(np.std(np.array(diffs_cpy))))
    return fig
def make_yaml(self, name: str = "MP2020", dir: Optional[str] = None) -> None:
    """
    Creates the _name_Compatibility.yaml that stores corrections as well as _name_CompatibilityUncertainties.yaml
    for correction uncertainties.

    Args:
        name: str, alternate name for the created .yaml file.
            Default: "MP2020"
        dir: str, directory in which to save the file. Pass None (default) to
            save the file in the current working directory.

    Raises:
        RuntimeError: if corrections have not been computed yet.
    """
    if len(self.corrections) == 0:
        raise RuntimeError("Please call compute_corrections or compute_from_files to calculate corrections first")
    # elements with U values
    ggaucorrection_species = ["V", "Cr", "Mn", "Fe", "Co", "Ni", "W", "Mo"]
    # Separate the fitted corrections into GGA/GGA+U mixing corrections for
    # the U-valued transition metals (applied to O and F compounds) and plain
    # composition-based corrections for everything else.
    comp_corr: "OrderedDict[str, float]" = OrderedDict()
    o: "OrderedDict[str, float]" = OrderedDict()
    f: "OrderedDict[str, float]" = OrderedDict()
    comp_corr_error: "OrderedDict[str, float]" = OrderedDict()
    o_error: "OrderedDict[str, float]" = OrderedDict()
    f_error: "OrderedDict[str, float]" = OrderedDict()
    for specie in list(self.species) + ["ozonide"]:
        if specie in ggaucorrection_species:
            # Same correction value is written under both the O and F tables.
            o[specie] = self.corrections_dict[specie][0]
            f[specie] = self.corrections_dict[specie][0]
            o_error[specie] = self.corrections_dict[specie][1]
            f_error[specie] = self.corrections_dict[specie][1]
        else:
            comp_corr[specie] = self.corrections_dict[specie][0]
            comp_corr_error[specie] = self.corrections_dict[specie][1]
    # Skeleton of the YAML document; the empty keys are filled in below.
    # NOTE(review): the nesting here was reconstructed (leading whitespace was
    # lost in transit) to match the contents[...] key accesses below — verify
    # against the original file.
    outline = """\
Name:
Corrections:
  GGAUMixingCorrections:
    O:
    F:
  CompositionCorrections:
Uncertainties:
  GGAUMixingCorrections:
    O:
    F:
  CompositionCorrections:
"""
    fn = name + "Compatibility.yaml"
    if dir:
        path = os.path.join(dir, fn)
    else:
        path = fn
    yml = yaml.YAML()
    # Teach ruamel.yaml to serialize OrderedDict like a plain mapping.
    yml.Representer.add_representer(OrderedDict, yml.Representer.represent_dict)
    yml.default_flow_style = False
    contents = yml.load(outline)
    contents["Name"] = name
    # make CommentedMap so comments can be added
    contents["Corrections"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o)
    contents["Corrections"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f)
    contents["Corrections"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr)
    contents["Uncertainties"]["GGAUMixingCorrections"]["O"] = yaml.comments.CommentedMap(o_error)
    contents["Uncertainties"]["GGAUMixingCorrections"]["F"] = yaml.comments.CommentedMap(f_error)
    contents["Uncertainties"]["CompositionCorrections"] = yaml.comments.CommentedMap(comp_corr_error)
    # Human-readable comments embedded in the emitted YAML file.
    contents["Corrections"].yaml_set_start_comment("Energy corrections in eV/atom", indent=2)
    contents["Corrections"]["GGAUMixingCorrections"].yaml_set_start_comment(
        "Composition-based corrections applied to transition metal oxides\nand fluorides to "
        + 'make GGA and GGA+U energies compatible\nwhen compat_type = "Advanced" (default)',
        indent=4,
    )
    contents["Corrections"]["CompositionCorrections"].yaml_set_start_comment(
        "Composition-based corrections applied to any compound containing\nthese species as anions",
        indent=4,
    )
    contents["Uncertainties"].yaml_set_start_comment(
        "Uncertainties corresponding to each energy correction (eV/atom)", indent=2
    )
    with open(path, "w") as file:
        yml.dump(contents, file)
| vorwerkc/pymatgen | pymatgen/entries/correction_calculator.py | Python | mit | 20,606 | [
"pymatgen"
] | a452c8f27d399e7c3536029f4d8f19dc1ff2ed1448a1649bfd24a2c98e458431 |
from datetime import datetime
from django.core.urlresolvers import reverse
from edc.audit.audit_trail import AuditTrail
from edc.subject.visit_tracking.models import BaseVisitTracking
from edc.subject.visit_tracking.settings import VISIT_REASON_NO_FOLLOW_UP_CHOICES
from ..choices import VISIT_REASON
class MaternalVisit(BaseVisitTracking):
    """Maternal visit form that links all follow-up forms."""

    history = AuditTrail()

    @property
    def registered_subject(self):
        # Delegate to the base-class lookup.
        return self.get_registered_subject()

    def __unicode__(self):
        return unicode(self.appointment)

    def get_visit_reason_choices(self):
        """Return the choices shown for the visit reason field."""
        return VISIT_REASON

    def get_visit_reason_no_follow_up_choices(self):
        """Returns the visit reasons that do not imply any data collection; that is, the subject is not available."""
        # The 2180M visit offers no "no follow-up" reasons at all.
        if self.appointment.visit_definition.code == '2180M':
            return {}
        choices = {reason: reason for reason in VISIT_REASON_NO_FOLLOW_UP_CHOICES}
        choices['vital status'] = 'vital status'
        # Death and loss-to-follow-up are handled elsewhere for this model.
        del choices['death']
        del choices['lost']
        return choices

    def save(self, *args, **kwargs):
        super(MaternalVisit, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('admin:eit_maternal_maternalvisit_change', args=(self.id,))

    class Meta:
        verbose_name = "Maternal Visit"
        app_label = "eit_maternal"
| botswana-harvard/eit | eit/apps/eit_maternal/models/maternal_visit.py | Python | gpl-3.0 | 1,498 | [
"VisIt"
] | b336c12540edb3562c8047ede11edda41b9fcb8157419f2385968f8f4f2b6551 |
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Masked autoencoder for distribution estimation."""
import numpy as np
import tensorflow as tf
class MADE(tf.keras.Model):
  """Masked autoencoder for distribution estimation (Germain et al., 2015).

  MADE takes as input a real Tensor of shape [..., length, channels] and returns
  a Tensor of shape [..., length, units] and same dtype. It masks layer weights
  to satisfy autoregressive constraints with respect to the length dimension. In
  particular, for a given ordering, each input dimension of length can be
  reconstructed from previous dimensions.

  The output's units dimension captures per-time-step representations. For
  example, setting units to 2 can parameterize the location and log-scale of an
  autoregressive Gaussian distribution.
  """

  def __init__(self,
               units,
               hidden_dims,
               input_order='left-to-right',
               hidden_order='left-to-right',
               activation=None,
               use_bias=True,
               **kwargs):
    """Constructs network.

    Args:
      units: Positive integer, dimensionality of the output space.
      hidden_dims: list with the number of hidden units per layer. It does not
        include the output layer; those number of units will always be set to
        the input dimension multiplied by `num_heads`. Each hidden unit size
        must be at least the size of length (otherwise autoregressivity is not
        possible).
      input_order: Order of degrees to the input units: 'random',
        'left-to-right', 'right-to-left', or an array of an explicit order.
        For example, 'left-to-right' builds an autoregressive model
        p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
      hidden_order: Order of degrees to the hidden units: 'random',
        'left-to-right'. If 'left-to-right', hidden units are allocated equally
        (up to a remainder term) to each degree.
      activation: Activation function.
      use_bias: Whether to use a bias.
      **kwargs: Keyword arguments of parent class.
    """
    super(MADE, self).__init__(**kwargs)
    self.units = int(units)
    self.hidden_dims = hidden_dims
    self.input_order = input_order
    self.hidden_order = hidden_order
    self.activation = tf.keras.activations.get(activation)
    self.use_bias = use_bias
    # Layers are added lazily in build(), once the input shape is known.
    self.network = tf.keras.Sequential([])

  def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    length = input_shape[-2]
    channels = input_shape[-1]
    if length is None or channels is None:
      raise ValueError('The two last dimensions of the inputs to '
                       '`MADE` should be defined. Found `None`.')
    masks = create_masks(input_dim=length,
                         hidden_dims=self.hidden_dims,
                         input_order=self.input_order,
                         hidden_order=self.hidden_order)

    # Input-to-hidden layer: [..., length, channels] -> [..., hidden_dims[0]].
    self.network.add(tf.keras.layers.Reshape([length * channels]))
    # Tile the mask so each element repeats contiguously; this is compatible
    # with the autoregressive contraints unlike naive tiling.
    mask = masks[0]
    mask = tf.tile(mask[:, tf.newaxis, :], [1, channels, 1])
    mask = tf.reshape(mask, [mask.shape[0] * channels, mask.shape[-1]])
    if self.hidden_dims:
      layer = tf.keras.layers.Dense(
          self.hidden_dims[0],
          kernel_initializer=make_masked_initializer(mask),
          kernel_constraint=make_masked_constraint(mask),
          activation=self.activation,
          use_bias=self.use_bias)
      self.network.add(layer)

    # Hidden-to-hidden layers: [..., hidden_dims[l-1]] -> [..., hidden_dims[l]].
    for l in range(1, len(self.hidden_dims)):
      layer = tf.keras.layers.Dense(
          self.hidden_dims[l],
          kernel_initializer=make_masked_initializer(masks[l]),
          kernel_constraint=make_masked_constraint(masks[l]),
          activation=self.activation,
          use_bias=self.use_bias)
      self.network.add(layer)

    # Hidden-to-output layer: [..., hidden_dims[-1]] -> [..., length, units].
    # Tile the mask so each element repeats contiguously; this is compatible
    # with the autoregressive contraints unlike naive tiling.
    if self.hidden_dims:
      mask = masks[-1]
    # NOTE(review): when hidden_dims is empty, `mask` still holds the
    # channel-tiled input mask from above, which is then tiled over units —
    # reconstructed from a whitespace-stripped copy; confirm the tile/reshape
    # lines sit outside the `if` in the original.
    mask = tf.tile(mask[..., tf.newaxis], [1, 1, self.units])
    mask = tf.reshape(mask, [mask.shape[0], mask.shape[1] * self.units])
    layer = tf.keras.layers.Dense(
        length * self.units,
        kernel_initializer=make_masked_initializer(mask),
        kernel_constraint=make_masked_constraint(mask),
        activation=None,
        use_bias=self.use_bias)
    self.network.add(layer)
    self.network.add(tf.keras.layers.Reshape([length, self.units]))
    self.built = True

  def call(self, inputs):
    return self.network(inputs)
def create_degrees(input_dim,
                   hidden_dims,
                   input_order='left-to-right',
                   hidden_order='left-to-right'):
  """Returns a list of degree vectors, one for each input and hidden layer.

  A unit with degree d can only receive input from units with degree < d. Output
  units always have the same degree as their associated input unit.

  Args:
    input_dim: Number of inputs.
    hidden_dims: list with the number of hidden units per layer. It does not
      include the output layer. Each hidden unit size must be at least the size
      of length (otherwise autoregressivity is not possible).
    input_order: Order of degrees to the input units: 'random', 'left-to-right',
      'right-to-left', or an array of an explicit order. For example,
      'left-to-right' builds an autoregressive model
      p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
    hidden_order: Order of degrees to the hidden units: 'random',
      'left-to-right'. If 'left-to-right', hidden units are allocated equally
      (up to a remainder term) to each degree.

  Returns:
    List of np.ndarray degree vectors: one for the input layer followed by one
    per hidden layer.

  Raises:
    ValueError: If `input_order` / `hidden_order` is not a recognized string,
      or an explicit `input_order` is not a permutation of 1..input_dim.
  """
  if (isinstance(input_order, str) and
      input_order not in ('random', 'left-to-right', 'right-to-left')):
    raise ValueError('Input order is not valid.')
  if hidden_order not in ('random', 'left-to-right'):
    raise ValueError('Hidden order is not valid.')

  degrees = []
  if isinstance(input_order, str):
    input_degrees = np.arange(1, input_dim + 1)
    if input_order == 'right-to-left':
      input_degrees = np.flip(input_degrees, 0)
    elif input_order == 'random':
      np.random.shuffle(input_degrees)
  else:
    input_order = np.array(input_order)
    # BUG FIX: the original check `np.all(np.sort(input_order) != ...)` only
    # raised when *every* sorted entry mismatched, so invalid orders such as
    # [1, 1, 3] passed silently. Require an exact permutation of 1..input_dim.
    if not np.array_equal(np.sort(input_order), np.arange(1, input_dim + 1)):
      raise ValueError('invalid input order')
    input_degrees = input_order
  degrees.append(input_degrees)

  for units in hidden_dims:
    if hidden_order == 'random':
      min_prev_degree = min(np.min(degrees[-1]), input_dim - 1)
      hidden_degrees = np.random.randint(
          low=min_prev_degree, high=input_dim, size=units)
    elif hidden_order == 'left-to-right':
      # Allocate hidden units round-robin over degrees 1..input_dim-1.
      hidden_degrees = (np.arange(units) % max(1, input_dim - 1) +
                        min(1, input_dim - 1))
    degrees.append(hidden_degrees)
  return degrees
def create_masks(input_dim,
                 hidden_dims,
                 input_order='left-to-right',
                 hidden_order='left-to-right'):
  """Returns a list of binary mask matrices respecting autoregressive ordering.

  Args:
    input_dim: Number of inputs.
    hidden_dims: list with the number of hidden units per layer. It does not
      include the output layer; those number of units will always be set to
      input_dim downstream. Each hidden unit size must be at least the size of
      length (otherwise autoregressivity is not possible).
    input_order: Order of degrees to the input units: 'random', 'left-to-right',
      'right-to-left', or an array of an explicit order. For example,
      'left-to-right' builds an autoregressive model
      p(x) = p(x1) p(x2 | x1) ... p(xD | x<D).
    hidden_order: Order of degrees to the hidden units: 'random',
      'left-to-right'. If 'left-to-right', hidden units are allocated equally
      (up to a remainder term) to each degree.
  """
  degrees = create_degrees(input_dim, hidden_dims, input_order, hidden_order)
  # Input-to-hidden and hidden-to-hidden masks: unit j may feed unit k
  # whenever degree(j) <= degree(k).
  masks = [
      tf.cast(lower[:, np.newaxis] <= upper, tf.float32)
      for lower, upper in zip(degrees[:-1], degrees[1:])
  ]
  # Hidden-to-output mask: strict inequality enforces autoregressivity.
  masks.append(tf.cast(degrees[-1][:, np.newaxis] < degrees[0], tf.float32))
  return masks
def make_masked_initializer(mask):
  """Returns a Glorot-uniform initializer whose output is zeroed where mask is 0."""
  glorot = tf.keras.initializers.GlorotUniform()

  def masked_initializer(shape, dtype=None):
    return mask * glorot(shape, dtype)

  return masked_initializer
def make_masked_constraint(mask):
  """Returns a weight constraint that zeroes entries where mask is 0."""

  def masked_constraint(weights):
    # tf.identity matches the original pass-through constraint exactly.
    return mask * tf.identity(weights)

  return masked_constraint
| google/edward2 | edward2/tensorflow/layers/made.py | Python | apache-2.0 | 9,581 | [
"Gaussian"
] | b7f7496e0f94d9e5074249eaa4f56ebd2eedbef47f11927e56eb17353c324ee2 |
from textwrap import dedent
import py, pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
# Shared fixture: a tmpdir with nested conftest.py files, parametrized so every
# test runs both with plain directories and with package (__init__.py) layout.
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request, tmpdir_factory):
    from _pytest.tmpdir import tmpdir
    tmpdir = tmpdir(request, tmpdir_factory)
    tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
    if request.param == "inpackage":
        tmpdir.ensure("adir/__init__.py")
        tmpdir.ensure("adir/b/__init__.py")
    return tmpdir


def ConftestWithSetinitial(path):
    # Build a plugin manager with its initial conftests loaded from `path`.
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [path])
    return conftest


def conftest_setinitial(conftest, args, confcutdir=None):
    # Minimal stand-in for the parsed command-line namespace.
    # NOTE(review): str(None) yields the string "None" when confcutdir is
    # omitted — apparently tolerated downstream; confirm.
    class Namespace:
        def __init__(self):
            self.file_or_dir = args
            self.confcutdir = str(confcutdir)
            self.noconftest = False
    conftest._set_initial_conftests(Namespace())
class TestConftestValueAccessGlobal:
    """Reading values defined in nested conftest.py files."""

    def test_basic_init(self, basedir):
        conftest = PytestPluginManager()
        p = basedir.join("adir")
        assert conftest._rget_with_confmod("a", p)[1] == 1

    def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):
        # Loading conftests incrementally should register one module per
        # newly-visited directory.
        conftest = PytestPluginManager()
        len(conftest._path2confmods)
        conftest._getconftestmodules(basedir)
        snap1 = len(conftest._path2confmods)
        #assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('adir'))
        assert len(conftest._path2confmods) == snap1 + 1
        conftest._getconftestmodules(basedir.join('b'))
        assert len(conftest._path2confmods) == snap1 + 2

    def test_value_access_not_existing(self, basedir):
        # Missing names raise KeyError rather than returning a default.
        conftest = ConftestWithSetinitial(basedir)
        with pytest.raises(KeyError):
            conftest._rget_with_confmod('a', basedir)

    def test_value_access_by_path(self, basedir):
        # The nearest enclosing conftest wins: adir/b overrides adir's value.
        conftest = ConftestWithSetinitial(basedir)
        adir = basedir.join("adir")
        assert conftest._rget_with_confmod("a", adir)[1] == 1
        assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5

    def test_value_access_with_confmod(self, basedir):
        # The returned module object points at the conftest file that
        # provided the value.
        startdir = basedir.join("adir", "b")
        startdir.ensure("xx", dir=True)
        conftest = ConftestWithSetinitial(startdir)
        mod, value = conftest._rget_with_confmod("a", startdir)
        assert value == 1.5
        path = py.path.local(mod.__file__)
        assert path.dirpath() == basedir.join("adir", "b")
        assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
    """Conftest loading copes with a dist-style dir name containing dots."""
    tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
    tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
    tmpdir.ensure("adir-1.0/b/__init__.py")
    tmpdir.ensure("adir-1.0/__init__.py")
    ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))


def test_doubledash_considered(testdir):
    """A directory whose name looks like an option is still scanned."""
    conf = testdir.mkdir("--option")
    conf.join("conftest.py").ensure()
    conftest = PytestPluginManager()
    # Passing the same dir twice must not register its conftest twice.
    conftest_setinitial(conftest, [conf.basename, conf.basename])
    l = conftest._getconftestmodules(conf)
    assert len(l) == 1


def test_issue151_load_all_conftests(testdir):
    """One conftest module is registered per top-level dir on the command line."""
    names = "code proj src".split()
    for name in names:
        p = testdir.mkdir(name)
        p.ensure("conftest.py")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, names)
    d = list(conftest._conftestpath2mod.values())
    assert len(d) == len(names)


def test_conftest_global_import(testdir):
    """`import conftest` resolves to the module _importconftest last loaded."""
    testdir.makeconftest("x=3")
    p = testdir.makepyfile("""
        import py, pytest
        from _pytest.config import PytestPluginManager
        conf = PytestPluginManager()
        mod = conf._importconftest(py.path.local("conftest.py"))
        assert mod.x == 3
        import conftest
        assert conftest is mod, (conftest, mod)
        subconf = py.path.local().ensure("sub", "conftest.py")
        subconf.write("y=4")
        mod2 = conf._importconftest(subconf)
        assert mod != mod2
        assert mod2.y == 4
        import conftest
        assert conftest is mod2, (conftest, mod)
    """)
    res = testdir.runpython(p)
    assert res.ret == 0
def test_conftestcutdir(testdir):
    """Conftests above --confcutdir are not auto-loaded, but an explicit
    import still works and propagates to sub paths."""
    conf = testdir.makeconftest("")
    p = testdir.mkdir("x")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
    l = conftest._getconftestmodules(p)
    assert len(l) == 0
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 0
    assert conf not in conftest._conftestpath2mod
    # but we can still import a conftest directly
    conftest._importconftest(conf)
    l = conftest._getconftestmodules(conf.dirpath())
    assert l[0].__file__.startswith(str(conf))
    # and all sub paths get updated properly
    l = conftest._getconftestmodules(p)
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))


def test_conftestcutdir_inplace_considered(testdir):
    """A conftest sitting exactly at confcutdir is still loaded."""
    conf = testdir.makeconftest("")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
    l = conftest._getconftestmodules(conf.dirpath())
    assert len(l) == 1
    assert l[0].__file__.startswith(str(conf))


@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
    """Only recognized test dirs (test/tests) have their conftests picked up;
    arbitrary and dot-directories are ignored."""
    sub = testdir.mkdir(name)
    subconftest = sub.ensure("conftest.py")
    conftest = PytestPluginManager()
    conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
    if name not in ('whatever', '.dotdir'):
        assert subconftest in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 1
    else:
        assert subconftest not in conftest._conftestpath2mod
        assert len(conftest._conftestpath2mod) == 0


def test_conftest_confcutdir(testdir):
    """With --confcutdir, a broken conftest above the cut is never imported."""
    testdir.makeconftest("assert 0")
    x = testdir.mkdir("x")
    x.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
    result.stdout.fnmatch_lines(["*--xyz*"])
    assert 'warning: could not load initial' not in result.stdout.str()
def test_no_conftest(testdir):
    """--noconftest skips conftest loading entirely (so the broken conftest
    does not error); without it, startup fails with a usage error."""
    testdir.makeconftest("assert 0")
    result = testdir.runpytest("--noconftest")
    assert result.ret == EXIT_NOTESTSCOLLECTED
    result = testdir.runpytest()
    assert result.ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
    """A pre-existing result.log file must not hide options added by conftest."""
    tests_dir = testdir.mkdir("tests")
    tests_dir.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".log", result="")  # Writes result.log
    result = testdir.runpytest("-h", "--resultlog", "result.log")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
    """A pre-existing junit.xml file must not hide options added by conftest."""
    tests_dir = testdir.mkdir("tests")
    tests_dir.join("conftest.py").write(py.code.Source("""
        def pytest_addoption(parser):
            parser.addoption("--xyz", action="store_true")
    """))
    testdir.makefile(ext=".xml", junit="")  # Writes junit.xml
    result = testdir.runpytest("-h", "--junitxml", "junit.xml")
    result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
    """Conftest modules are returned parent-first (root before subdir)."""
    ct1 = testdir.makeconftest("")
    sub = testdir.mkdir("sub")
    ct2 = sub.join("conftest.py")
    ct2.write("")

    # Stub the import step so the returned "modules" are just the paths,
    # making the ordering assertion trivial.
    def impct(p):
        return p
    conftest = PytestPluginManager()
    monkeypatch.setattr(conftest, '_importconftest', impct)
    assert conftest._getconftestmodules(sub) == [ct1, ct2]


def test_fixture_dependency(testdir, monkeypatch):
    """Overriding a fixture in a test module must not instantiate the
    shadowed parent fixture (or its dependencies)."""
    ct1 = testdir.makeconftest("")
    # NOTE(review): ct1 is immediately rebound; the first call only creates
    # the root conftest as a side effect.
    ct1 = testdir.makepyfile("__init__.py")
    ct1.write("")
    sub = testdir.mkdir("sub")
    sub.join("__init__.py").write("")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def not_needed():
            assert False, "Should not be called!"
        @pytest.fixture
        def foo():
            assert False, "Should not be called!"
        @pytest.fixture
        def bar(foo):
            return 'bar'
    """))
    subsub = sub.mkdir("subsub")
    subsub.join("__init__.py").write("")
    subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
        import pytest
        @pytest.fixture
        def bar():
            return 'sub bar'
        def test_event_fixture(bar):
            assert bar == 'sub bar'
    """))
    result = testdir.runpytest("sub")
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
    """A node-id target (file::test) still loads the conftest of its dir,
    so options defined there appear in --help output."""
    sub = testdir.mkdir("sub")
    sub.join("conftest.py").write(py.std.textwrap.dedent("""
        def pytest_addoption(parser):
            parser.addoption("--hello-world", action="store_true")
    """))
    p = sub.join("test_hello.py")
    p.write(py.std.textwrap.dedent("""
        import pytest
        def test_hello(found):
            assert found == 1
    """))
    result = testdir.runpytest(str(p) + "::test_hello", "-h")
    result.stdout.fnmatch_lines("""
        *--hello-world*
    """)
class TestConftestVisibility:
    """Which conftest a test sees must not depend on the cwd (issue616)."""

    def _setup_tree(self, testdir):  # for issue616
        # example mostly taken from:
        # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
        runner = testdir.mkdir("empty")
        package = testdir.mkdir("package")

        package.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-package"
        """))
        package.join("test_pkgroot.py").write(dedent("""\
            def test_pkgroot(fxtr):
                assert fxtr == "from-package"
        """))

        swc = package.mkdir("swc")
        swc.join("__init__.py").ensure()
        swc.join("conftest.py").write(dedent("""\
            import pytest
            @pytest.fixture
            def fxtr():
                return "from-swc"
        """))
        swc.join("test_with_conftest.py").write(dedent("""\
            def test_with_conftest(fxtr):
                assert fxtr == "from-swc"
        """))

        snc = package.mkdir("snc")
        snc.join("__init__.py").ensure()
        snc.join("test_no_conftest.py").write(dedent("""\
            def test_no_conftest(fxtr):
                assert fxtr == "from-package"   # No local conftest.py, so should
                                                # use value from parent dir's
        """))
        print ("created directory structure:")
        for x in testdir.tmpdir.visit():
            print ("   " + x.relto(testdir.tmpdir))

        return {
            "runner": runner,
            "package": package,
            "swc": swc,
            "snc": snc}

    # N.B.: "swc" stands for "subdir with conftest.py"
    #       "snc" stands for "subdir no [i.e. without] conftest.py"
    @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
        # Effective target: package/..
        ("runner", "..", 3),
        ("package", "..", 3),
        ("swc", "../..", 3),
        ("snc", "../..", 3),

        # Effective target: package
        ("runner", "../package", 3),
        ("package", ".", 3),
        ("swc", "..", 3),
        ("snc", "..", 3),

        # Effective target: package/swc
        ("runner", "../package/swc", 1),
        ("package", "./swc", 1),
        ("swc", ".", 1),
        ("snc", "../swc", 1),

        # Effective target: package/snc
        ("runner", "../package/snc", 1),
        ("package", "./snc", 1),
        ("swc", "../snc", 1),
        ("snc", ".", 1),
    ])
    @pytest.mark.issue616
    def test_parsefactories_relative_node_ids(
            self, testdir, chdir, testarg, expect_ntests_passed):
        # Run the same target from several working directories; the pass
        # count must depend only on the effective target.
        dirs = self._setup_tree(testdir)
        print("pytest run in cwd: %s" % (
              dirs[chdir].relto(testdir.tmpdir)))
        print("pytestarg        : %s" % (testarg))
        print("expected pass    : %s" % (expect_ntests_passed))
        with dirs[chdir].as_cwd():
            reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
            reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize('confcutdir,passed,error', [
    ('.', 2, 0),
    ('src', 1, 1),
    (None, 1, 1),
])
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
    """Test that conftest files are detected only up to a ini file, unless
    an explicit --confcutdir option is given.
    """
    root = testdir.tmpdir
    src = root.join('src').ensure(dir=1)
    src.join('pytest.ini').write('[pytest]')
    src.join('conftest.py').write(py.code.Source("""
        import pytest
        @pytest.fixture
        def fix1(): pass
    """))
    src.join('test_foo.py').write(py.code.Source("""
        def test_1(fix1):
            pass
        def test_2(out_of_reach):
            pass
    """))
    # The fixture above the ini file is only reachable when --confcutdir
    # moves the cut to the root.
    root.join('conftest.py').write(py.code.Source("""
        import pytest
        @pytest.fixture
        def out_of_reach(): pass
    """))

    args = [str(src)]
    if confcutdir:
        args = ['--confcutdir=%s' % root.join(confcutdir)]
    result = testdir.runpytest(*args)
    match = ''
    if passed:
        match += '*%d passed*' % passed
    if error:
        match += '*%d error*' % error
    result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
    """Objects with a hostile __getattr__ in a conftest must not break
    collection (issue 1073)."""
    testdir.makeconftest("""
        class DontTouchMe:
            def __getattr__(self, x):
                raise Exception('cant touch me')

        x = DontTouchMe()
    """)
    testdir.makepyfile("""
        def test_some():
            pass
    """)
    res = testdir.runpytest()
    assert res.ret == 0
| ionelmc/pytest | testing/test_conftest.py | Python | mit | 14,181 | [
"VisIt"
] | c878f95ac5c595b2f3779941f28d238e4283c66d82fe91529fc74c4d00a27cb5 |
"""
pygments.lexers.teal
~~~~~~~~~~~~~~~~~~~~
Lexer for TEAL.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include, words
from pygments.token import Comment, Name, Number, String, Text, Keyword
__all__ = ['TealLexer']
class TealLexer(RegexLexer):
    """
    For the `Transaction Execution Approval Language (TEAL)
    <https://developer.algorand.org/docs/reference/teal/specification/>`

    For more information about the grammar, see:
    https://github.com/algorand/go-algorand/blob/master/data/transactions/logic/assembler.go

    .. versionadded:: 2.9
    """
    name = 'teal'
    aliases = ['teal']
    filenames = ['*.teal']

    # Transaction fields, asset/app parameters, OnCompletion values and
    # globals recognised by the TEAL assembler; only matched in operand
    # position (the 'function-args' state).
    keywords = words({
        'Sender', 'Fee', 'FirstValid', 'FirstValidTime', 'LastValid', 'Note',
        'Lease', 'Receiver', 'Amount', 'CloseRemainderTo', 'VotePK',
        'SelectionPK', 'VoteFirst', 'VoteLast', 'VoteKeyDilution', 'Type',
        'TypeEnum', 'XferAsset', 'AssetAmount', 'AssetSender', 'AssetReceiver',
        'AssetCloseTo', 'GroupIndex', 'TxID', 'ApplicationID', 'OnCompletion',
        'ApplicationArgs', 'NumAppArgs', 'Accounts', 'NumAccounts',
        'ApprovalProgram', 'ClearStateProgram', 'RekeyTo', 'ConfigAsset',
        'ConfigAssetTotal', 'ConfigAssetDecimals', 'ConfigAssetDefaultFrozen',
        'ConfigAssetUnitName', 'ConfigAssetName', 'ConfigAssetURL',
        'ConfigAssetMetadataHash', 'ConfigAssetManager', 'ConfigAssetReserve',
        'ConfigAssetFreeze', 'ConfigAssetClawback', 'FreezeAsset',
        'FreezeAssetAccount', 'FreezeAssetFrozen',
        'NoOp', 'OptIn', 'CloseOut', 'ClearState', 'UpdateApplication',
        'DeleteApplication',
        'MinTxnFee', 'MinBalance', 'MaxTxnLife', 'ZeroAddress', 'GroupSize',
        'LogicSigVersion', 'Round', 'LatestTimestamp', 'CurrentApplicationID',
        'AssetBalance', 'AssetFrozen',
        'AssetTotal', 'AssetDecimals', 'AssetDefaultFrozen', 'AssetUnitName',
        'AssetName', 'AssetURL', 'AssetMetadataHash', 'AssetManager',
        'AssetReserve', 'AssetFreeze', 'AssetClawback',
    }, suffix = r'\b')

    # An operand runs to whitespace, or stops right before a '//' comment.
    identifier = r'[^ \t\n]+(?=\/\/)|[^ \t\n]+'
    newline = r'\r?\n'

    tokens = {
        'root': [
            include('whitespace'),
            # pragmas match specifically on the space character
            (r'^#pragma .*' + newline, Comment.Directive),
            # labels must be followed by a space,
            # but anything after that is ignored
            ('(' + identifier + ':' + ')' + '([ \t].*)',
             bygroups(Name.Label, Comment.Single)),
            # Anything else at line start is an opcode; its operands are
            # lexed in the 'function-args' state until end of line.
            (identifier, Name.Function, 'function-args'),
        ],
        'function-args': [
            include('whitespace'),
            (r'"', String, 'string'),
            (r'(b(?:ase)?(?:32|64) ?)(\(?[a-zA-Z0-9+/=]+\)?)',
             bygroups(String.Affix, String.Other)),
            (r'[A-Z2-7]{58}', Number),  # address
            (r'0x[\da-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            (keywords, Keyword),
            # NOTE(review): pygments' standard token is Name.Attribute
            # (singular); 'Name.Attributes' auto-creates a non-standard
            # subtoken most styles won't colorize — confirm this is intended.
            (identifier, Name.Attributes),  # branch targets
            (newline, Text, '#pop'),
        ],
        'string': [
            (r'\\(?:["nrt\\]|x\d\d)', String.Escape),
            (r'[^\\\"\n]+', String),
            (r'"', String, '#pop'),
        ],
        'whitespace': [
            (r'[ \t]+', Text),
            (r'//[^\n]+', Comment.Single),
        ],
    }
| dscorbett/pygments | pygments/lexers/teal.py | Python | bsd-2-clause | 3,495 | [
"ASE"
] | 6e03db37ff54b5a8c7eb37e9ea58f895b46449e917fbf189684befc9017e7616 |
import numpy as np
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as u
# Thermodynamic state for the NPT equilibration run.
eu = u.kilocalories_per_mole  # NOTE(review): appears unused below -- confirm before removing
temperature = 288 * u.kelvin
pressure = 1.0 * u.atmosphere
friction = 1.0 / u.picosecond  # Langevin collision rate
# Load the solvated structure and build an AMBER99SB/TIP3P system with PME
# electrostatics, a 0.95 nm nonbonded cutoff, and H-angle constraints.
pdb = app.PDBFile('resi-1-15.pdb')
forcefield = app.ForceField('amber99sb.xml',"tip3p.xml")
system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.PME,nonbondedCutoff=0.95 * u.nanometer, constraints=app.HAngles)
# Constant pressure via a Monte Carlo barostat (must be added before the
# Simulation is constructed so it is part of the System).
barostat = mm.MonteCarloBarostat(pressure, temperature)
system.addForce(barostat)
# Langevin dynamics with a 1 fs timestep.
integrator = mm.LangevinIntegrator(temperature, friction, 0.001*u.picoseconds)
simulation = app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
# Write a snapshot to equil.pdb every 10,000 steps; run 100,000 steps total.
simulation.reporters.append(app.PDBReporter("equil.pdb", 10000))
print("running")
simulation.step(100000)
| kyleabeauchamp/AlphaSynuclein | python/equilibrate.py | Python | apache-2.0 | 824 | [
"OpenMM"
] | 053fc8dc0b8690619bc40d6ceb73336175e983dda342db5c6fb119c964ad5202 |
# Copyright 2010-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Testing Bio.TogoWS online code.
"""
from __future__ import print_function
import unittest
from Bio._py3k import StringIO
import requires_internet
requires_internet.check()
# We want to test these:
from Bio import TogoWS
# In order to check any sequences returned
from Bio import SeqIO
from Bio.SeqUtils.CheckSum import seguid
from Bio import Medline
#####################################################################
class TogoFields(unittest.TestCase):
    """Check which databases and entry fields the TogoWS service advertises."""

    def test_invalid_database(self):
        """Check asking for fields of invalid database fails"""
        self.assertRaises(IOError, TogoWS._get_fields,
                          "http://togows.dbcls.jp/entry/invalid?fields")

    def test_databases(self):
        """Check supported databases"""
        supported = set(TogoWS._get_entry_dbs())
        required = {'nuccore', 'nucest', 'nucgss',
                    'nucleotide', 'protein', 'gene',
                    'homologene', 'snp',
                    'mesh', 'pubmed',  # 'embl',
                    'uniprot', 'uniparc', 'uniref100',
                    'uniref90', 'uniref50', 'ddbj',
                    'dad', 'pdb', 'compound', 'drug',
                    'enzyme', 'genes', 'glycan',
                    'orthology', 'reaction', 'module',
                    'pathway'}
        missing = sorted(required.difference(supported))
        self.assertTrue(required.issubset(supported),
                        "Missing DB: %s" % ", ".join(missing))

    def test_pubmed(self):
        """Check supported fields for pubmed database"""
        supported = set(TogoWS._get_entry_fields("pubmed"))
        required = {'abstract', 'au', 'authors', 'doi', 'mesh', 'so', 'title'}
        self.assertTrue(required.issubset(supported), supported)

    def test_ncbi_protein(self):
        """Check supported fields for NCBI protein database"""
        supported = set(TogoWS._get_entry_fields("ncbi-protein"))
        required = {'entry_id', 'length', 'strand',
                    'moltype', 'linearity', 'division',
                    'date', 'definition', 'accession',
                    'accessions', 'version', 'versions',
                    'acc_version', 'gi', 'keywords',
                    'organism', 'common_name',
                    'taxonomy', 'comment', 'seq'}
        self.assertTrue(required.issubset(supported), supported)

    def test_ddbj(self):
        """Check supported fields for ddbj database"""
        supported = set(TogoWS._get_entry_fields("ddbj"))
        required = {'entry_id', 'length', 'strand',
                    'moltype', 'linearity', 'division',
                    'date', 'definition', 'accession',
                    'accessions', 'version', 'versions',
                    'acc_version', 'gi', 'keywords',
                    'organism', 'common_name',
                    'taxonomy', 'comment', 'seq'}
        self.assertTrue(required.issubset(supported), supported)

    # def test_embl(self):
    #     """Check supported fields for embl database"""
    #     fields = set(TogoWS._get_entry_fields("embl"))
    #     self.assertTrue(fields.issuperset(["definition", "entry_id", "seq"]),
    #                     fields)

    def test_uniprot(self):
        """Check supported fields for uniprot database"""
        supported = set(TogoWS._get_entry_fields("uniprot"))
        required = {"definition", "entry_id", "seq"}
        self.assertTrue(required.issubset(supported), supported)

    def test_pdb(self):
        """Check supported fields for pdb database"""
        supported = set(TogoWS._get_entry_fields("pdb"))
        required = {"accession", "chains", "keywords", "models"}
        self.assertTrue(required.issubset(supported), supported)
class TogoEntry(unittest.TestCase):
    """Tests for the TogoWS entry retrieval interface (requires internet)."""

    def test_pubmed_16381885(self):
        """Bio.TogoWS.entry("pubmed", "16381885")"""
        # Gives Medline plain text
        handle = TogoWS.entry("pubmed", "16381885")
        data = Medline.read(handle)
        handle.close()
        self.assertEqual(data["TI"],
                         'From genomics to chemical genomics: new developments in KEGG.')
        self.assertEqual(data["AU"], ['Kanehisa M', 'Goto S', 'Hattori M',
                                      'Aoki-Kinoshita KF', 'Itoh M',
                                      'Kawashima S', 'Katayama T', 'Araki M',
                                      'Hirakawa M'])

    def test_pubmed_16381885_ti(self):
        """Bio.TogoWS.entry("pubmed", "16381885", field="title")"""
        # NOTE(review): despite the "_ti" name this uses field="title",
        # duplicating the test below -- presumably field="ti" was intended;
        # confirm against the service before changing.
        handle = TogoWS.entry("pubmed", "16381885", field="title")
        data = handle.read().strip()
        handle.close()
        self.assertEqual(data,
                         'From genomics to chemical genomics: new developments in KEGG.')

    def test_pubmed_16381885_title(self):
        """Bio.TogoWS.entry("pubmed", "16381885", field="title")"""
        handle = TogoWS.entry("pubmed", "16381885", field="title")
        data = handle.read().strip()
        handle.close()
        self.assertEqual(data,
                         'From genomics to chemical genomics: new developments in KEGG.')

    def test_pubmed_16381885_au(self):
        """Bio.TogoWS.entry("pubmed", "16381885", field="au")"""
        # Gives one name per line (i.e. \n separated), no dots
        handle = TogoWS.entry("pubmed", "16381885", field="au")
        data = handle.read().strip().split("\n")
        handle.close()
        self.assertEqual(data, ['Kanehisa M', 'Goto S', 'Hattori M',
                                'Aoki-Kinoshita KF', 'Itoh M',
                                'Kawashima S', 'Katayama T', 'Araki M',
                                'Hirakawa M'])

    def test_pubmed_16381885_authors(self):
        """Bio.TogoWS.entry("pubmed", "16381885", field="authors")"""
        # Gives names tab separated (i.e. \t separated)
        handle = TogoWS.entry("pubmed", "16381885", field="authors")
        data = handle.read().strip().split("\t")
        handle.close()
        self.assertEqual(data, ['Kanehisa, M.', 'Goto, S.', 'Hattori, M.',
                                'Aoki-Kinoshita, K. F.', 'Itoh, M.',
                                'Kawashima, S.', 'Katayama, T.', 'Araki, M.',
                                'Hirakawa, M.'])

    def test_pubmed_16381885_invalid_field(self):
        """Bio.TogoWS.entry("pubmed", "16381885", field="invalid_for_testing")"""
        self.assertRaises(ValueError, TogoWS.entry,
                          "pubmed", "16381885", field="invalid_for_testing")

    def test_pubmed_16381885_invalid_format(self):
        """Bio.TogoWS.entry("pubmed", "16381885", format="invalid_for_testing")"""
        self.assertRaises(ValueError, TogoWS.entry,
                          "pubmed", "16381885", format="invalid_for_testing")

    def test_pubmed_invalid_id(self):
        """Bio.TogoWS.entry("pubmed", "invalid_for_testing")"""
        self.assertRaises(IOError, TogoWS.entry,
                          "pubmed", "invalid_for_testing")

    def test_pubmed_16381885_and_19850725(self):
        """Bio.TogoWS.entry("pubmed", "16381885,19850725")"""
        handle = TogoWS.entry("pubmed", "16381885,19850725")
        records = list(Medline.parse(handle))
        handle.close()
        self.assertEqual(len(records), 2)
        self.assertEqual(records[0]["TI"],
                         'From genomics to chemical genomics: new developments in KEGG.')
        self.assertEqual(records[0]["AU"], ['Kanehisa M', 'Goto S',
                                            'Hattori M', 'Aoki-Kinoshita KF',
                                            'Itoh M', 'Kawashima S',
                                            'Katayama T', 'Araki M',
                                            'Hirakawa M'])
        self.assertEqual(records[1]["TI"],
                         'DDBJ launches a new archive database with analytical tools ' +
                         'for next-generation sequence data.')
        self.assertEqual(records[1]["AU"], ['Kaminuma E', 'Mashima J',
                                            'Kodama Y', 'Gojobori T',
                                            'Ogasawara O', 'Okubo K',
                                            'Takagi T', 'Nakamura Y'])

    def test_pubmed_16381885_and_19850725_authors(self):
        """Bio.TogoWS.entry("pubmed", "16381885,19850725", field="authors")"""
        handle = TogoWS.entry("pubmed", "16381885,19850725", field="authors")
        # One line per record; authors within a record are tab separated.
        names = handle.read().strip().split("\n")
        handle.close()
        self.assertEqual(2, len(names))
        names1, names2 = names
        self.assertEqual(names1.split("\t"),
                         ['Kanehisa, M.', 'Goto, S.', 'Hattori, M.',
                          'Aoki-Kinoshita, K. F.', 'Itoh, M.',
                          'Kawashima, S.', 'Katayama, T.',
                          'Araki, M.', 'Hirakawa, M.'])
        self.assertEqual(names2.split("\t"),
                         ['Kaminuma, E.', 'Mashima, J.', 'Kodama, Y.',
                          'Gojobori, T.', 'Ogasawara, O.', 'Okubo, K.',
                          'Takagi, T.', 'Nakamura, Y.'])

    def test_invalid_db(self):
        """Bio.TogoWS.entry("invalid_db", "invalid_id")"""
        self.assertRaises(ValueError, TogoWS.entry,
                          "invalid_db", "invalid_id")

    def test_ddbj_genbank_length(self):
        """Bio.TogoWS.entry("ddbj", "X52960", field="length")"""
        handle = TogoWS.entry("ddbj", "X52960", field="length")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "248")

    def test_ddbj_genbank(self):
        """Bio.TogoWS.entry("ddbj", "X52960")"""
        handle = TogoWS.entry("ddbj", "X52960")  # Returns "genbank" format
        record = SeqIO.read(handle, "gb")
        handle.close()
        self.assertEqual(record.id, "X52960.1")
        self.assertEqual(record.name, "X52960")
        self.assertEqual(len(record), 248)
        self.assertEqual(seguid(record.seq), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")

    def test_nucleotide_genbank_length(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="length")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="length")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "248")

    def test_nucleotide_genbank_seq(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="seq")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="seq")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(seguid(data), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")

    def test_nucleotide_genbank_definition(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="definition")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="definition")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "Coleus blumei viroid 1 (CbVd) RNA.")

    def test_nucleotide_genbank_accession(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="accession")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="accession")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "X52960")

    def test_nucleotide_genbank_version(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="version")"""
        # Renamed from a second test_nucleotide_genbank_accession definition,
        # which silently shadowed the accession test above so it never ran.
        handle = TogoWS.entry("nucleotide", "X52960", field="version")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "1")

    def test_nucleotide_genbank_acc_version(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="acc_version")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="acc_version")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "X52960.1")

    def test_nucleotide_genbank_organism(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="organism")"""
        handle = TogoWS.entry("nucleotide", "X52960", field="organism")
        data = handle.read().strip()  # ignore trailing \n
        handle.close()
        self.assertEqual(data, "Coleus blumei viroid 1")

    def test_ddbj_genbank_invalid_field(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", field="invalid_for_testing")"""
        self.assertRaises(ValueError, TogoWS.entry,
                          "nucleotide", "X52960", field="invalid_for_testing")

    def test_nucleotide_invalid_format(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", format="invalid_for_testing")"""
        self.assertRaises(ValueError, TogoWS.entry,
                          "nucleotide", "X52960", format="invalid_for_testing")

    def test_ddbj_gff3(self):
        """Bio.TogoWS.entry("ddbj", "X52960", format="gff")"""
        handle = TogoWS.entry("ddbj", "X52960", format="gff")
        data = handle.read()
        handle.close()
        self.assertTrue(data.startswith("##gff-version 3\nX52960\tDDBJ\t"), data)

    def test_genbank_gff3(self):
        """Bio.TogoWS.entry("nucleotide", "X52960", format="gff")"""
        # Note - Using manual URL with genbank instead of nucleotide works
        handle = TogoWS.entry("nucleotide", "X52960", format="gff")
        data = handle.read()
        handle.close()
        self.assertTrue(data.startswith("##gff-version 3\nX52960\tGenbank\t"), data)

    # def test_embl_AM905444_gff3(self):
    #     """Bio.TogoWS.entry("embl", "AM905444", format="gff")"""
    #     handle = TogoWS.entry("embl", "AM905444", format="gff")
    #     data = handle.read()
    #     handle.close()
    #     self.assertTrue(data.startswith("##gff-version 3\nAM905444\tembl\t"), data)

    # def test_embl_AM905444_seq(self):
    #     """Bio.TogoWS.entry("embl", "AM905444", field="seq")"""
    #     handle = TogoWS.entry("embl", "AM905444", field="seq")
    #     data = handle.read().strip()  # ignore any trailing \n
    #     handle.close()
    #     self.assertEqual(seguid(data), "G0HtLpwF7i4FXUaUjDUPTjok79c")

    # def test_embl_AM905444_definition(self):
    #     """Bio.TogoWS.entry("embl", "AM905444", field="definition")"""
    #     handle = TogoWS.entry("embl", "AM905444", field="definition")
    #     data = handle.read().strip()  # ignore any trailing \n
    #     handle.close()
    #     self.assertEqual(data, "Herbaspirillum seropedicae locus tag HS193.0074 for porin")

    # def test_embl_AM905444(self):
    #     """Bio.TogoWS.entry("embl", "AM905444")"""
    #     handle = TogoWS.entry("embl", "AM905444")
    #     record = SeqIO.read(handle, "embl")
    #     handle.close()
    #     self.assertTrue("AM905444" in record.id, record.id)
    #     self.assertTrue("AM905444" in record.name, record.name)
    #     self.assertTrue("porin" in record.description, record.description)
    #     self.assertEqual(len(record), 1164)
    #     self.assertEqual(seguid(record.seq), "G0HtLpwF7i4FXUaUjDUPTjok79c")

    def test_ddbj_fasta(self):
        """Bio.TogoWS.entry("ddbj", "X52960", "fasta")"""
        handle = TogoWS.entry("ddbj", "X52960", "fasta")
        record = SeqIO.read(handle, "fasta")
        handle.close()
        self.assertTrue("X52960" in record.id, record.id)
        self.assertTrue("X52960" in record.name, record.name)
        self.assertEqual(len(record), 248)
        self.assertEqual(seguid(record.seq), "Ktxz0HgMlhQmrKTuZpOxPZJ6zGU")

    def test_uniprot_swiss(self):
        """Bio.TogoWS.entry("uniprot", ["A1AG1_HUMAN","A1AG1_MOUSE"])"""
        # Returns "swiss" format:
        handle = TogoWS.entry("uniprot", ["A1AG1_HUMAN", "A1AG1_MOUSE"])
        record1, record2 = SeqIO.parse(handle, "swiss")
        handle.close()
        self.assertEqual(record1.id, "P02763")
        self.assertEqual(record1.name, "A1AG1_HUMAN")
        self.assertEqual(len(record1), 201)
        self.assertEqual(seguid(record1.seq), "LHDJJ6oC7gUXo8CC7Xn6EUeA8Gk")
        self.assertEqual(record2.id, "Q60590")
        self.assertEqual(record2.name, "A1AG1_MOUSE")
        self.assertEqual(len(record2), 207)
        self.assertEqual(seguid(record2.seq), "FGcj+RFQhP2gRusCmwPFty5PJT0")

    def test_nucleotide_fasta(self):
        """Bio.TogoWS.entry("nucleotide", "6273291", "fasta")"""
        handle = TogoWS.entry("nucleotide", "6273291", "fasta")
        record = SeqIO.read(handle, "fasta")
        handle.close()
        self.assertTrue("6273291" in record.id, record.id)
        self.assertTrue("6273291" in record.name, record.name)
        self.assertEqual(len(record), 902)
        self.assertEqual(seguid(record.seq), "bLhlq4mEFJOoS9PieOx4nhGnjAQ")

    def test_protein_fasta(self):
        """Bio.TogoWS.entry("protein", "16130152", "fasta")"""
        handle = TogoWS.entry("protein", "16130152", "fasta")
        record = SeqIO.read(handle, "fasta")
        handle.close()
        # Could use assertIn but requires Python 2.7+
        self.assertTrue("16130152" in record.id, record.id)
        self.assertTrue("16130152" in record.name, record.name)
        self.assertTrue("porin protein" in record.description, record.description)
        self.assertEqual(len(record), 367)
        self.assertEqual(seguid(record.seq), "fCjcjMFeGIrilHAn6h+yju267lg")
class TogoSearch(unittest.TestCase):
    """Search tests."""

    def test_bad_args_just_limit(self):
        """Reject Bio.TogoWS.search(...) with just limit"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", limit=10)

    def test_bad_args_just_offset(self):
        """Reject Bio.TogoWS.search(...) with just offset"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", offset=10)

    def test_bad_args_zero_limit(self):
        """Reject Bio.TogoWS.search(...) with zero limit"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", offset=1, limit=0)

    def test_bad_args_zero_offset(self):
        """Reject Bio.TogoWS.search(...) with zero offset"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", offset=0, limit=10)

    def test_bad_args_non_int_offset(self):
        """Reject Bio.TogoWS.search(...) with non-integer offset"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", offset="test", limit=10)

    def test_bad_args_non_int_limit(self):
        """Reject Bio.TogoWS.search(...) with non-integer limit"""
        self.assertRaises(ValueError, TogoWS.search,
                          "pubmed", "lung+cancer", offset=1, limit="lots")

    def test_pubmed_search_togows(self):
        """Bio.TogoWS.search_iter("pubmed", "TogoWS") etc"""
        self.check("pubmed", "TogoWS", ["20472643"])

    def test_pubmed_search_bioruby(self):
        """Bio.TogoWS.search_iter("pubmed", "BioRuby") etc"""
        self.check("pubmed", "BioRuby", ["22994508", "22399473",
                                         "20739307", "20015970", "14693808"])

    def test_pubmed_search_porin(self):
        """Bio.TogoWS.search_iter("pubmed", "human porin") etc

        Count was 357 at time of writing, this was choosen to
        be larger than the default chunk size for iteration,
        but still not too big to download the full list.
        """
        self.check("pubmed", "human porin", ["21189321", "21835183"])

    def test_pdb_search_porin(self):
        """Bio.TogoWS.search_iter("pdb", "porin") etc

        Count was about 161 at time of writing.
        """
        self.check("pdb", "porin", ["2j1n", "2vqg", "3m8b", "2k0l"])

    # def test_embl_search_porin(self):
    #     """Bio.TogoWS.search_iter("embl", "human pore", limit=200) etc
    #
    #     Count was about 297 at time of writing.
    #     """
    #     self.check("embl", "human pore", limit=200)

    def test_uniprot_search_lung_cancer(self):
        """Bio.TogoWS.search_iter("uniprot", "terminal+lung+cancer", limit=150) etc

        Search count was 211 at time of writing, a bit large to
        download all the results in a unit test. Want to use a limit
        larger than the batch size (100) to ensure at least two
        batches.
        """
        self.check("uniprot", "terminal+lung+cancer", limit=150)

    def check(self, database, search_term, expected_matches=None, limit=None):
        """Run a search and verify the count, iteration, and expected hits.

        Either ``expected_matches`` (IDs that must appear in the results) or
        ``limit`` may be given, but not both, since TogoWS makes no promises
        about result ordering.
        """
        # Use a None sentinel rather than a mutable [] default argument.
        if expected_matches is None:
            expected_matches = []
        if expected_matches and limit:
            raise ValueError("Bad test - TogoWS makes no promises about order")
        search_count = TogoWS.search_count(database, search_term)
        if expected_matches and search_count < len(expected_matches):
            raise ValueError("Only %i matches, expected at least %i"
                             % (search_count, len(expected_matches)))
        if search_count > 5000 and not limit:
            print("%i results, skipping" % search_count)
            return
        if limit:
            count = min(search_count, limit)
        else:
            count = search_count
        # Iteration should find everything... unless a limit is used
        search_iter = list(TogoWS.search_iter(database, search_term, limit))
        self.assertEqual(count, len(search_iter))
        for match in expected_matches:
            self.assertTrue(match in search_iter,
                            "Expected %s in results but not" % match)
class TogoConvert(unittest.TestCase):
    """Conversion tests."""

    def test_invalid_format(self):
        """Check convert file format checking."""
        dummy = "PLACEHOLDER"
        self.assertRaises(ValueError, TogoWS.convert,
                          StringIO(dummy), "genbank", "invalid_for_testing")
        self.assertRaises(ValueError, TogoWS.convert,
                          StringIO(dummy), "invalid_for_testing", "fasta")

    def test_genbank_to_fasta(self):
        """Conversion of GenBank to FASTA."""
        filename = "GenBank/NC_005816.gb"
        original = SeqIO.read(filename, "gb")
        with open(filename) as in_handle:
            converted = SeqIO.read(TogoWS.convert(in_handle, "genbank", "fasta"),
                                   "fasta")
        self.assertEqual(str(original.seq), str(converted.seq))

    # GenBank-to-EMBL conversion check is currently disabled:
    # def test_genbank_to_embl(self):
    #     """Conversion of GenBank to EMBL."""
    #     filename = "GenBank/NC_005816.gb"
    #     old = SeqIO.read(filename, "gb")
    #     with open(filename) as handle:
    #         new = SeqIO.read(TogoWS.convert(handle, "genbank", "embl"), "embl")
    #     self.assertEqual(str(old.seq), str(new.seq))
if __name__ == "__main__":
    # Verbose runner so each (slow, online) test is listed as it executes.
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_TogoWS.py | Python | gpl-2.0 | 23,363 | [
"Biopython"
] | 6941ae768599c5a5bbe92730da596d0597ad2ad895ec975b15df52cfe5283533 |
# Copyright 2017-2020 The GPflow Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Type, Union, cast
import numpy as np
import tensorflow as tf
from .. import kernels
from .. import mean_functions as mfn
from ..base import TensorType
from ..covariances import Kuf
from ..inducing_variables import InducingVariables
from ..probability_distributions import DiagonalGaussian, Gaussian, MarkovGaussian
from ..quadrature import mvnquad
from . import dispatch
from .expectations import ExpectationObject, PackedExpectationObject, quadrature_expectation
# Shorthand for registering quadrature fallbacks on the multipledispatch
# table.  NOTE(review): not referenced elsewhere in this module (decorators
# below use the full dotted path) -- confirm external use before removing.
register = dispatch.quadrature_expectation.register
# type(None), used in the dispatch registrations to mean "no inducing variable".
NoneType: Type[None] = type(None)
# No public runtime type exists for Ellipsis here, so fall back to Any.
EllipsisType = Any
def get_eval_func(
    obj: ExpectationObject,
    inducing_variable: Optional[InducingVariables],
    slice: Union[slice, EllipsisType, None] = None,
) -> Callable[[TensorType], tf.Tensor]:
    """
    Build the callable (kernel or mean-function evaluation) whose expectation
    is to be taken, optionally restricted with ``slice``.

    Which callable is returned depends on the type of ``obj`` and on whether
    an inducing variable is supplied.
    """
    if slice is None:
        slice = ...

    if inducing_variable is not None:
        # Only the kernel + inducing-variable combination is meaningful here.
        valid_combo = isinstance(inducing_variable, InducingVariables) and isinstance(
            obj, kernels.Kernel
        )
        if not valid_combo:
            raise TypeError("If `inducing_variable` is supplied, `obj` must be a kernel.")

        def eval_kuf(x: TensorType) -> tf.Tensor:
            return tf.transpose(Kuf(inducing_variable, obj, x))[slice]

        return eval_kuf

    if isinstance(obj, mfn.MeanFunction):

        def eval_mean(x: TensorType) -> tf.Tensor:
            return obj(x)[slice]

        return eval_mean

    if isinstance(obj, kernels.Kernel):

        def eval_kdiag(x: TensorType) -> tf.Tensor:
            return obj(x, full_cov=False)

        return eval_kdiag

    raise NotImplementedError()
@dispatch.quadrature_expectation.register(
    (Gaussian, DiagonalGaussian),
    object,
    (InducingVariables, NoneType),
    object,
    (InducingVariables, NoneType),
)
def _quadrature_expectation_gaussian(
    p: Union[Gaussian, DiagonalGaussian],
    obj1: ExpectationObject,
    inducing_variable1: Optional[InducingVariables],
    obj2: ExpectationObject,
    inducing_variable2: Optional[InducingVariables],
    nghp: Optional[int] = None,
) -> tf.Tensor:
    """
    General handling of quadrature expectations for Gaussians and DiagonalGaussians.

    Fallback method for missing analytic expectations: evaluates
    <obj1 * obj2>_p (or just <obj1>_p when ``obj2`` is None) by Gauss-Hermite
    quadrature with ``nghp`` points per dimension (default 100).
    """
    nghp = 100 if nghp is None else nghp
    # logger.warning(
    #     "Quadrature is used to calculate the expectation. This means that "
    #     "an analytical implementations is not available for the given combination."
    # )
    if obj1 is None:
        raise NotImplementedError("First object cannot be None.")
    if not isinstance(p, DiagonalGaussian):
        # Full-covariance Gaussian: quadrature uses p.cov directly.
        cov = p.cov
    else:
        if (
            isinstance(obj1, kernels.Kernel)
            and isinstance(obj2, kernels.Kernel)
            and obj1.on_separate_dims(obj2)
        ): # no joint expectations required
            # Kernels on disjoint input dimensions factorise under a diagonal
            # Gaussian, so the joint expectation is the outer product of the
            # two marginal expectations.
            eKxz1 = quadrature_expectation(
                p, cast(PackedExpectationObject, (obj1, inducing_variable1)), nghp=nghp
            )
            eKxz2 = quadrature_expectation(
                p, cast(PackedExpectationObject, (obj2, inducing_variable2)), nghp=nghp
            )
            return eKxz1[:, :, None] * eKxz2[:, None, :]
        # Expand the stored diagonal into a dense covariance for mvnquad.
        cov = tf.linalg.diag(p.cov)
    if obj2 is None:
        def eval_func(x: TensorType) -> tf.Tensor:
            fn = get_eval_func(obj1, inducing_variable1)
            return fn(x)
    else:
        def eval_func(x: TensorType) -> tf.Tensor:
            # Slices broadcast the two evaluations against each other so the
            # quadrature integrates their elementwise (outer) product.
            fn1 = get_eval_func(obj1, inducing_variable1, np.s_[:, :, None])
            fn2 = get_eval_func(obj2, inducing_variable2, np.s_[:, None, :])
            return fn1(x) * fn2(x)
    return mvnquad(eval_func, p.mu, cov, nghp)
@dispatch.quadrature_expectation.register(
    MarkovGaussian, object, (InducingVariables, NoneType), object, (InducingVariables, NoneType)
)
def _quadrature_expectation_markov(
    p: MarkovGaussian,
    obj1: ExpectationObject,
    inducing_variable1: Optional[InducingVariables],
    obj2: ExpectationObject,
    inducing_variable2: Optional[InducingVariables],
    nghp: Optional[int] = None,
) -> tf.Tensor:
    """
    Handling of quadrature expectations for Markov Gaussians (useful for time series).

    Fallback method for missing analytic expectations wrt Markov Gaussians,
    using Gauss-Hermite quadrature with ``nghp`` points (default 40).

    Nota Bene: obj1 is always associated with x_n, whereas obj2 always with x_{n+1}
    if one requires e.g. <x_{n+1} K_{x_n, Z}>_p(x_{n:n+1}), compute the
    transpose and then transpose the result of the expectation
    """
    nghp = 40 if nghp is None else nghp
    # logger.warning(
    #     "Quadrature is used to calculate the expectation. This means that "
    #     "an analytical implementations is not available for the given combination."
    # )
    if obj2 is None:
        # Expectation over x_n only.
        def eval_func(x: TensorType) -> tf.Tensor:
            return get_eval_func(obj1, inducing_variable1)(x)
        mu, cov = p.mu[:-1], p.cov[0, :-1] # cross covariances are not needed
    elif obj1 is None:
        # Expectation over x_{n+1} only.
        def eval_func(x: TensorType) -> tf.Tensor:
            return get_eval_func(obj2, inducing_variable2)(x)
        mu, cov = p.mu[1:], p.cov[0, 1:] # cross covariances are not needed
    else:
        # Joint expectation over the pair (x_n, x_{n+1}): stack the two time
        # slices side by side and quadrature over the 2D-dimensional joint.
        def eval_func(x: TensorType) -> tf.Tensor:
            x1 = tf.split(x, 2, 1)[0]
            x2 = tf.split(x, 2, 1)[1]
            res1 = get_eval_func(obj1, inducing_variable1, np.s_[:, :, None])(x1)
            res2 = get_eval_func(obj2, inducing_variable2, np.s_[:, None, :])(x2)
            return res1 * res2
        mu = tf.concat((p.mu[:-1, :], p.mu[1:, :]), 1) # Nx2D
        # Block 2Dx2D joint covariance: marginals on the diagonal, the
        # cross-covariance (and its adjoint) off the diagonal.
        cov_top = tf.concat((p.cov[0, :-1, :, :], p.cov[1, :-1, :, :]), 2) # NxDx2D
        cov_bottom = tf.concat((tf.linalg.adjoint(p.cov[1, :-1, :, :]), p.cov[0, 1:, :, :]), 2)
        cov = tf.concat((cov_top, cov_bottom), 1) # Nx2Dx2D
    return mvnquad(eval_func, mu, cov, nghp)
| GPflow/GPflow | gpflow/expectations/quadratures.py | Python | apache-2.0 | 6,486 | [
"Gaussian"
] | 5e96ff4d968c232885add520bc234ba4adf29f0fc08dc469533ae2f0a0107f2b |
"""
Acceptance tests for course in studio
"""
from nose.plugins.attrib import attr
from .base_studio_test import StudioCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.users import CourseTeamPage
from ...pages.studio.index import DashboardPage
@attr('shard_2')
class CourseTeamPageTest(StudioCourseTest):
""" As a course author, I want to be able to add others to my team """
def _make_user(self, username):
""" Registers user and returns user representation dictionary as expected by `log_in` function """
user = {
'username': username,
'email': username + "@example.com",
'password': username + '123'
}
AutoAuthPage(
self.browser, no_login=True,
username=user.get('username'), email=user.get('email'), password=user.get('password')
).visit()
return user
    def setUp(self, is_staff=False):
        """
        Install a course with no content using a fixture.
        """
        super(CourseTeamPageTest, self).setUp(is_staff)
        # A second registered user to add to / remove from the course team.
        self.other_user = self._make_user('other') # pylint:disable=attribute-defined-outside-init
        self.dashboard_page = DashboardPage(self.browser) # pylint:disable=attribute-defined-outside-init
        self.page = CourseTeamPage( # pylint:disable=attribute-defined-outside-init
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        # Start every test on the Course Team page.
        self._go_to_course_team_page()
    def _go_to_course_team_page(self):
        """ Opens Course Team page """
        self.page.visit()
        # Wait for the ajax spinner to clear so the user list is fully rendered.
        self.page.wait_until_no_loading_indicator()
    def _refresh_page(self):
        """
        Reload the page.
        """
        # Build a fresh page object rather than re-visiting the existing one,
        # so previously fetched element wrappers are not reused after reload.
        self.page = CourseTeamPage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self._go_to_course_team_page()
def _assert_current_course(self, visible=True):
""" Checks if current course is accessible to current user """
self.dashboard_page.visit()
courses = self.dashboard_page.list_courses()
def check_course_equality(course1, course2):
""" Compares to course dictionaries using org, number and run as keys"""
return (
course1['org'] == course2['org'] and
course1['number'] == course2['number'] and
course1['run'] == course2['run']
)
actual_visible = any((check_course_equality(course, self.course_info) for course in courses))
self.assertEqual(actual_visible, visible)
def _assert_user_present(self, user, present=True):
""" Checks if specified user present on Course Team page """
if present:
self.assertIn(user.get('username'), self.page.usernames)
else:
self.assertNotIn(user.get('username'), self.page.usernames)
def _should_see_dialog(self, dialog_type, dialog_message):
""" Asserts dialog with specified message is shown """
self.page.modal_dialog_visible(dialog_type)
self.assertIn(dialog_message, self.page.modal_dialog_text(dialog_type))
    def _assert_is_staff(self, user, can_manage=True):
        """ Checks if user have staff permissions, can be promoted and can't be demoted """
        self.assertIn("staff", user.role_label.lower())
        # Promote/demote controls are only rendered when the current user can
        # manage the team, so those checks are skipped otherwise.
        if can_manage:
            self.assertTrue(user.can_promote)
            self.assertFalse(user.can_demote)
            self.assertIn("Add Admin Access", user.promote_button_text)
    def _assert_is_admin(self, user):
        """ Checks if user have admin permissions, can't be promoted and can be demoted """
        self.assertIn("admin", user.role_label.lower())
        # An admin row shows only a demote ("Remove Admin Access") control.
        self.assertFalse(user.can_promote)
        self.assertTrue(user.can_demote)
        self.assertIn("Remove Admin Access", user.demote_button_text)
    def _assert_can_manage_users(self):
        """ Checks if current user can manage course team """
        self.assertTrue(self.page.has_add_button)
        for user in self.page.users:
            # Each row shows exactly one of promote/demote, by that row's role.
            self.assertTrue(user.can_promote or user.can_demote) # depending on actual user role
            self.assertTrue(user.can_delete)
    def _assert_can_not_manage_users(self):
        """ Checks if current user can't manage course team """
        self.assertFalse(self.page.has_add_button)
        # No management controls at all should be rendered for any row.
        for user in self.page.users:
            self.assertFalse(user.can_promote)
            self.assertFalse(user.can_demote)
            self.assertFalse(user.can_delete)
    def test_admins_can_add_other_users(self):
        """
        Scenario: Admins can add other users
            Given I have opened a new course in Studio
            And I am viewing the course team settings
            When I add other user to the course team
            And other user logs in
            Then he does see the course on her page
        """
        self.page.add_user_to_course(self.other_user.get('email'))
        self._assert_user_present(self.other_user, present=True)
        # Switching login ends the admin session; the added user should now
        # see the course on their dashboard.
        self.log_in(self.other_user)
        self._assert_current_course(visible=True)
    def test_added_users_cannot_add_or_delete_other_users(self):
        """
        Scenario: Added users cannot delete or add other users
            Given I have opened a new course in Studio
            And I am viewing the course team settings
            When I add other user to the course team
            And other user logs in
            And he selects the new course
            And he views the course team settings
            Then he cannot manage users
        """
        self.page.add_user_to_course(self.other_user.get('email'))
        self._assert_user_present(self.other_user, present=True)
        self.log_in(self.other_user)
        self._assert_current_course(visible=True)
        self._go_to_course_team_page()
        bob = self.page.get_user(self.other_user.get('email'))
        self.assertTrue(bob.is_current_user)
        # The explicit add-button check duplicates the first assertion inside
        # _assert_can_not_manage_users below; kept for scenario readability.
        self.assertFalse(self.page.has_add_button)
        self._assert_can_not_manage_users()
    def test_admins_can_delete_other_users(self):
        """
        Scenario: Admins can delete other users
            Given I have opened a new course in Studio
            And I am viewing the course team settings
            When I add other user to the course team
            And I delete other user from the course team
            And other user logs in
            Then he does not see the course on her page
        """
        self.page.add_user_to_course(self.other_user.get('email'))
        self._assert_user_present(self.other_user, present=True)
        self.page.delete_user_from_course(self.other_user.get('email'))
        self._assert_user_present(self.other_user, present=False)
        # After removal the course must no longer appear on their dashboard.
        self.log_in(self.other_user)
        self._assert_current_course(visible=False)
    def test_admins_cannot_add_users_that_do_not_exist(self):
        """
        Scenario: Admins cannot add users that do not exist
            Given I have opened a new course in Studio
            And I am viewing the course team settings
            When I add "dennis" to the course team
            Then I should see "Could not find user by email address" somewhere on the page
        """
        # No user with this address was registered in setUp, so the add must
        # fail with an error dialog.
        self.page.add_user_to_course("dennis@example.com")
        self._should_see_dialog('error', "Could not find user by email address")
def test_admins_should_be_able_to_make_other_people_into_admins(self):
    """
    Scenario: Admins should be able to make other people into admins
        Given I have opened a new course in Studio
        And I am viewing the course team settings
        And I add other user to the course team
        When I make other user a course team admin
        And other user logs in
        And he selects the new course
        And he views the course team settings
        Then other user should be marked as an admin
        And he can manage users
    """
    # Add the user; they start with the plain "staff" role.
    self.page.add_user_to_course(self.other_user.get('email'))
    self._assert_user_present(self.other_user, present=True)
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_staff(other)
    # Promote to admin and reload to observe the new role.
    other.click_promote()
    self._refresh_page()
    self._assert_is_admin(other)
    # The promoted user must now have full user-management rights.
    self.log_in(self.other_user)
    self._go_to_course_team_page()
    other = self.page.get_user(self.other_user.get('email'))
    self.assertTrue(other.is_current_user)
    self._assert_can_manage_users()
def test_admins_should_be_able_to_remove_other_admins(self):
    """
    Scenario: Admins should be able to remove other admins
        Given I have opened a new course in Studio
        And I grant admin rights to other user
        Then he can add, delete, promote and demote users
        And I am viewing the course team settings
        When I remove admin rights from other user
        And other user logs in
        And he selects the new course
        And he views the course team settings
        Then other user should not be marked as an admin
        And he cannot manage users
    """
    # Add the other user and promote them to admin.
    self.page.add_user_to_course(self.other_user.get('email'))
    self._assert_user_present(self.other_user, present=True)
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_staff(other)
    other.click_promote()
    self._refresh_page()
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_admin(other)
    # precondition check - frank is an admin and can add/delete/promote/demote users
    self.log_in(self.other_user)
    self._go_to_course_team_page()
    other = self.page.get_user(self.other_user.get('email'))
    self.assertTrue(other.is_current_user)
    self._assert_can_manage_users()
    # Back as the original admin: demote the other user to staff.
    self.log_in(self.user)
    self._go_to_course_team_page()
    other = self.page.get_user(self.other_user.get('email'))
    other.click_demote()
    self._refresh_page()
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_staff(other)
    # The demoted user must no longer be able to manage the team.
    self.log_in(self.other_user)
    self._go_to_course_team_page()
    other = self.page.get_user(self.other_user.get('email'))
    self.assertTrue(other.is_current_user)
    self._assert_can_not_manage_users()
def test_admins_should_be_able_to_remove_themself_if_other_admin_exists(self):
    """
    Scenario: Admins should be able to remove themselves if another admin exists
        Given I have opened a new course in Studio
        And I am viewing the course team settings
        And I'm the only course admin
        Then I cannot delete or demote myself
        When I add other user to the course team
        And I make other user a course team admin
        Then I can delete or demote myself
        When I delete myself from the course team
        And I am logged into studio
        Then I do not see the course on my page
    """
    self.page.add_user_to_course(self.other_user.get('email'))
    self._assert_user_present(self.other_user, present=True)
    # While sole admin, self-demote/delete must be blocked with a warning.
    current = self.page.get_user(self.user.get('email'))
    self.assertFalse(current.can_demote)
    self.assertFalse(current.can_delete)
    self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
    # Promote the other user so the course keeps at least one admin.
    other = self.page.get_user(self.other_user.get('email'))
    other.click_promote()
    self._refresh_page()
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_admin(other)
    # Now self-removal is allowed.
    current = self.page.get_user(self.user.get('email'))
    self.assertTrue(current.can_demote)
    self.assertTrue(current.can_delete)
    current.click_delete()
    # After deleting self, the course disappears from this user's dashboard.
    self.log_in(self.user)
    self._assert_current_course(visible=False)
def test_admins_should_be_able_to_give_course_ownership_to_someone_else(self):
    """
    Scenario: Admins should be able to give course ownership to someone else
        Given I have opened a new course in Studio
        And I am viewing the course team settings
        When I add other user to the course team
        And I make other user a course team admin
        When I remove admin rights from myself
        Then I should not be marked as an admin
        And I cannot manage users
        And I cannot make myself a course team admin
        When other user logs in
        And he selects the new course
        And he views the course team settings
        And he deletes me from the course team
        And I am logged into studio
        Then I do not see the course on my page
    """
    self.page.add_user_to_course(self.other_user.get('email'))
    self._assert_user_present(self.other_user, present=True)
    # While sole admin, self-demote/delete must be blocked with a warning.
    current = self.page.get_user(self.user.get('email'))
    self.assertFalse(current.can_demote)
    self.assertFalse(current.can_delete)
    self.assertIn("Promote another member to Admin to remove your admin rights", current.no_change_warning_text)
    # Hand admin rights to the other user.
    other = self.page.get_user(self.other_user.get('email'))
    other.click_promote()
    self._refresh_page()
    other = self.page.get_user(self.other_user.get('email'))
    self._assert_is_admin(other)
    # With a second admin present, demote self to plain staff.
    current = self.page.get_user(self.user.get('email'))
    self.assertTrue(current.can_demote)
    self.assertTrue(current.can_delete)
    current.click_demote()
    self._refresh_page()
    current = self.page.get_user(self.user.get('email'))
    self._assert_is_staff(current, can_manage=False)
    self._assert_can_not_manage_users()
    self.assertFalse(current.can_promote)
    # The new admin removes the original user entirely.
    self.log_in(self.other_user)
    self._go_to_course_team_page()
    current = self.page.get_user(self.user.get('email'))
    current.click_delete()
    self._refresh_page()
    self._assert_user_present(self.user, present=False)
    # The removed original admin no longer sees the course.
    self.log_in(self.user)
    self._assert_current_course(visible=False)
| GbalsaC/bitnamiP | common/test/acceptance/tests/studio/test_studio_course_team.py | Python | agpl-3.0 | 14,133 | [
"VisIt"
] | a0fdef8105f121fdb8a6f2f2d5c64ae59217b6d62612c07b29283dd35b3b7845 |
# -*- coding: utf-8 -*-
'''
This module contains the necessary tools to crawl the sitemeter.com site and
extract the blog stats for the blogmeter project.
'''
# Imports
import bs4
import logging
import os.path
import random
import socket
import sys
import time
import traceback
import unicodedata
import urllib
from datetime import date, datetime, timedelta
# Django general
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
# Append the current project path
sys.path.append(os.path.abspath('../lib/'))
sys.path.append(os.path.abspath('../lib/bmdjango/'))
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Socket timeout in seconds
socket.setdefaulttimeout(60)
# Import the django models
from meter.models import Blog, Stats
##
## Utils
##
def get_int(st):
    """Parse a sitemeter integer stat such as '1,234'; '-' means no data (0)."""
    if st.strip() == '-':
        return 0
    return int(st.replace(',', ''))
def get_float(st):
    """Parse a sitemeter float stat such as '1,234.5'; '-' means no data.

    Returns 0.0 for the '-' placeholder so the return type is consistently
    float (previously the int 0 was returned, breaking type consistency
    with the non-placeholder path).
    """
    if st.strip() == '-':
        return 0.0
    return float(st.replace(',', ''))
def get_time(st):
    """Convert an 'mm:ss' duration string to seconds; '-' means no data (0)."""
    if st.strip() == '-':
        return 0
    minutes, seconds = (int(part) for part in st.split(':'))
    return minutes * 60 + seconds
def debug_unicode( st ):
    """Best-effort ASCII rendering of *st* for debug/log output (Python 2).

    Accepts either a `unicode` object or an ASCII-decodable byte string;
    characters are NFKD-decomposed and any remaining non-ASCII ones dropped.
    """
    if isinstance( st, unicode):
        return unicodedata.normalize('NFKD', st).encode('ascii','ignore')
    else:
        return unicodedata.normalize('NFKD', unicode( st, 'ascii', 'ignore')).encode('ascii')

# Short alias used by the logging calls below.
du = debug_unicode
class UpdateStats(object):
    """Scrape one blog's sitemeter summary page, parse the twelve summary
    numbers (six visit stats, six page-view stats) and save them as a new
    Stats row linked to the blog."""

    def __init__(self, blog):
        # meter.models.Blog instance whose stats page will be fetched.
        self.blog = blog

    def get_raw(self, soup):
        """Return the 12 raw stat strings extracted from the parsed page.

        Primary strategy: walk a hard-coded index path through sitemeter's
        nested layout tables.  If the markup has a different shape
        (IndexError), fall back to scanning the visible page text from the
        'VISITS' heading onwards and discarding the known labels.
        """
        try:
            # Fixed path through the nested layout tables of the stats page.
            stats_table = soup.find('table', { 'id':'Table_02' } ).contents[7
                ].contents[3].contents[3].contents[1
                ].contents[3].contents[1].contents
            # Unparsed list of visit stats
            visits_stats = [ stats_table[i].contents[2].font.renderContents()
                for i in range(4,10) ]
            # Unparsed list of page views stats
            stats_pages = [ stats_table[i].contents[2].font.renderContents()
                for i in range(13,19) ]
            return visits_stats + stats_pages
        except IndexError:
            # Fallback: keep the first 26 meaningful text nodes starting at
            # the 'VISITS' heading, skipping '\xa0\xa0' spacer nodes ...
            txt = list(soup.findAll(text=True))
            txt = [ xi for xi in txt[txt.index(u'VISITS'):] if xi != u'\xa0\xa0' ][:26]
            # ... then drop the section/row labels, leaving only the values.
            remove = [u'VISITS', u'Total', u'Average Per Day',u'Average Visit Length',u'Last Hour',u'Today',u'This Week',u'PAGE VIEWS', u'Total',u'Average Per Day',u'Average Per Visit',u'Last Hour',u'Today',u'This Week']
            txt = [ str(xi) for xi in txt if xi not in remove ]
            return txt

    def parse(self, raw_result):
        """Convert the 12 raw strings into a dict of numeric stats."""
        result = {}
        # Visit stats (first six values).
        result['visits_total'] = get_int(raw_result[0])
        result['visits_daily_average'] = get_int(raw_result[1])
        result['visits_lenght_average'] = get_time(raw_result[2])  # seconds
        result['visits_last_hour'] = get_int(raw_result[3])
        result['visits_today'] = get_int(raw_result[4])
        result['visits_this_week'] = get_int(raw_result[5])
        # Page-view stats (last six values).
        result['pages_total'] = get_int(raw_result[6])
        result['pages_daily_average'] = get_int(raw_result[7])
        result['pages_visit_average'] = get_float(raw_result[8])
        result['pages_last_hour'] = get_int(raw_result[9])
        result['pages_today'] = get_int(raw_result[10])
        result['pages_this_week'] = get_int(raw_result[11])
        return result

    def save_reading(self, result):
        """Persist one parsed reading as a new Stats row for this blog."""
        stats = Stats()
        stats.blog = self.blog
        stats.visits_total = result['visits_total']
        stats.visits_daily_average = result['visits_daily_average']
        stats.visits_lenght_average = result['visits_lenght_average']
        stats.visits_last_hour = result['visits_last_hour']
        stats.visits_today = result['visits_today']
        stats.visits_this_week = result['visits_this_week']
        stats.pages_total = result['pages_total']
        stats.pages_daily_average = result['pages_daily_average']
        stats.pages_visit_average = result['pages_visit_average']
        stats.pages_last_hour = result['pages_last_hour']
        stats.pages_today = result['pages_today']
        stats.pages_this_week = result['pages_this_week']
        stats.save()

    def run(self):
        """Fetch, parse and store the blog's current stats (returns None)."""
        logger.info( '* Getting stats for: %s' % du(self.blog.name) )
        logger.info( ' %s' % self.blog.sitemeter_url() )
        # Blocking fetch; module-level socket timeout (60s) bounds it.
        html = urllib.urlopen(self.blog.sitemeter_url()).read()
        soup = bs4.BeautifulSoup(html)
        raw_result = self.get_raw(soup)
        parsed_result = self.parse(raw_result)
        self.save_reading(parsed_result)
# Give up on a blog after this many consecutive scrape errors.
MAXBLOGERROR = 10
STOPHOUR = 8 # Stop the scraper after STOPHOUR
class SitemeterScraper(object):
    """Walk all tracked blogs and record today's sitemeter stats for each.

    Blogs whose error_count has reached MAXBLOGERROR are excluded from the
    queue; a run stops once the STOPHOUR deadline passes.
    """

    def __init__(self):
        # Only blogs still within the error budget are queued.
        self.blog_list = Blog.objects.filter(error_count__lt = MAXBLOGERROR )

    def check_stat(self, blog):
        '''Returns True if we have read the blog stats today
        '''
        try:
            Stats.objects.get( blog = blog, date = date.today() )
            logger.info( " Already got today's stats." )
            return True
        except ObjectDoesNotExist:
            return False

    def read_blog(self, sitemeter_key ):
        """Scrape a single blog, looked up by its sitemeter key.

        Returns True on success, None when skipped (unknown key, already
        read today, or timeout); unexpected errors are logged and re-raised.
        """
        try:
            blog = Blog.objects.get( sitemeter_key = sitemeter_key )
            if self.check_stat( blog ):
                logger.info( "* ERROR: We have read this blog's stats today, bailing out." )
                return
        except ObjectDoesNotExist:
            logger.info( "* ERROR: Sorry, we don't have %s key in our db." % sitemeter_key )
            return
        try:
            stats = UpdateStats(blog).run()
            # Reset the read error count
            blog.error_count = 0
            blog.save()
            logger.info( "* Success!" )
            return True
        except socket.timeout:
            # There was a timeout
            logger.info( "* ERROR: There was a time out maybe the server is busy, try again later" )
        except Exception, msg:
            # Uncaught error
            logger.info( "* ERROR: %s" % msg )
            raise

    def calc_stop_hour(self):
        """Return the datetime of the STOPHOUR o'clock deadline.

        NOTE(review): the comparison uses `>` -- if a run starts during the
        STOPHOUR hour itself, the computed deadline is already in the past
        and run() will bail after its first blog; confirm this is intended.
        """
        now = datetime.now()
        if now.hour > STOPHOUR:
            # Will only stop tomorrow
            tomorrow = now + timedelta(1)
            year, month, day = tomorrow.year, tomorrow.month, tomorrow.day
        else:
            year, month, day = now.year, now.month, now.day
        return datetime(year, month, day, STOPHOUR)

    def run(self):
        """Main loop: drain the blog queue until empty or out of time."""
        stop_hour = self.calc_stop_hour()
        blog_list = list(self.blog_list)
        while blog_list:
            blog = blog_list.pop()
            # Take the stat once per day
            if self.check_stat(blog):
                continue
            # Sleep for a little bit if the last stat read was less than
            # 5 minutes ago.
            if (datetime.now() - blog.last_try) < timedelta(0, 5 * 60):
                logger.info( ' Read this stat less than 5 minutes ago. Sleeping a bit' )
                time.sleep(60)
            # Read the blog stats
            try:
                stats = UpdateStats(blog).run()
                # Reset the read error count
                blog.error_count = 0
            except (socket.timeout, IndexError) as e:
                # Recoverable error, going to try again
                blog_list.insert(0, blog)
                logger.info( ' Returned the blog to the queue.' )
            except Exception, msg:
                # Increase the blog's error count
                blog.error_count = blog.error_count + 1
                logger.info( msg )
                # Print the error traceback (for debugging):
                exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
                tb = traceback.format_exc()
                logger.info( tb )
            finally:
                # Record the attempt time regardless of outcome.
                blog.last_try = datetime.now()
                blog.save()
            if datetime.now() > stop_hour:
                logger.info( '* Exceeded the alloted time, bailing out.' )
                break
            # Small politeness delay between blogs.
            t = 0.5
            logger.info( ' Sleeping %4.2f seconds' % t )
            time.sleep(t)
| heldergg/blogmeter | lib/webscraper/scrapstats.py | Python | gpl-3.0 | 8,225 | [
"VisIt"
] | 75cbfbc3f3262f0070cea94fdf82fc69bb30df5b773c711079a3c72e0dba8a1d |
#!/usr/bin/env python
from setuptools import setup
from CLAM.config import __version__
def main():
    """Install the CLAM package via setuptools."""
    metadata = dict(
        name='CLAM',
        version=__version__,
        description='CLIP-seq Analysis of Multi-mapped reads',
        author='Zijun Zhang',
        author_email='zj.z@ucla.edu',
        url='https://github.com/Xinglab/CLAM',
        packages=['CLAM', 'CLAM.stats'],
        scripts=['bin/CLAM'],
        install_requires=[
            'scipy',
            'pysam',
            'numpy',
            'statsmodels',
            'tqdm',
            'pybedtools',
            'mpmath',
        ],
    )
    setup(**metadata)
    return


if __name__ == '__main__':
    main()
| Xinglab/CLAM | setup.py | Python | gpl-3.0 | 549 | [
"pysam"
] | 85a84cd2d5f09b70dee41c94855f17c709a6844e5cf4b9193a292e6545dd8fc1 |
from views import *
from lookups import *
import rest as annotation
import requests
import primer3
import myvariant
import re
from utils import *
import itertools
import pysam
import csv
#hpo lookup
import phizz
import random
import orm
@app.route('/variant3/<variant_str>')
def variant_page3(variant_str):
    """Experimental variant page: looks a variant up by VARIANT_ID and
    renders the test.html template."""
    db = get_db()
    variant = db.variants.find_one({'VARIANT_ID': variant_str})
    # Patients carrying the variant, either heterozygous or homozygous.
    patients = [p for p in db.patients.find({'external_id': {'$in': variant['HET'] + variant['HOM']}})]
    # HPO feature lists are only printed for debugging; the template is
    # rendered with the variant alone.
    hpo_terms = [p['features'] for p in patients]
    print(hpo_terms)
    print 'Rendering variant: %s' % variant_str
    return render_template( 'test.html', variant=variant)
@app.route('/variant/<variant_str>')
def variant_page(variant_str):
db = get_db()
variant=orm.Variant(db=db,variant_id=variant_str)
# pos, ref, alt = get_minimal_representation(pos, ref, alt)
#v=load_variant(db,variant_id)
#xpos = get_xpos(chrom, pos)
if variant is None:
variant = {
'chrom': chrom,
'pos': pos,
'xpos': xpos,
'ref': ref,
'alt': alt
}
consequences = []
ordered_csqs = []
# Adds major_consequence
#base_coverage = lookups.get_coverage_for_bases(db, xpos, xpos + len(ref) - 1)
base_coverage = []
#any_covered = any([x['has_coverage'] for x in base_coverage])
any_covered = any([x['has_coverage'] for x in base_coverage])
# check the appropriate sqlite db to get the *expected* number of
# available bams and *actual* number of available bams for this variant
print 'Rendering variant: %s' % variant_str
return render_template(
'variant.html',
variant=variant,
base_coverage=base_coverage,
consequences=consequences,
any_covered=any_covered,
ordered_csqs=ordered_csqs,
metrics=[]
)
@app.route('/variant_json/<variant_str>')
def variant_json(variant_str):
variant_str=str(variant_str).strip().replace('_','-')
chrom, pos, ref, alt = variant_str.split('-')
tb=pysam.TabixFile('/slms/UGI/vm_exports/vyp/phenotips/uclex_files/current/chr%s.vcf.gz' % chrom,)
#mainset_February2016_chrX_filtered.vcf.gz
region=str('%s:%s-%s'%(chrom, pos, int(pos),))
headers=[h for h in tb.header]
headers=(headers[len(headers)-1]).strip().split('\t')
records=tb.fetch(region=region)
records=[r.split('\t') for r in records]
for r in records:
geno=dict(zip(headers, r))
POS=geno['POS']
REF=geno['REF']
print 'POS', POS
print 'REF', REF
for i, ALT, in enumerate(geno['ALT'].split(',')):
print 'ALT', ALT
# insertion
if ref=='-' and REF+alt==ALT:
return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# deletion
# replace leftmost
elif alt=='-' and ALT==REF.replace(ref,''):
return reponse(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
# replace rightmost
elif alt=='-' and ALT==REF[::-1].replace(ref[::-1], "", 1)[::-1]:
return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
#
elif alt=='-' and ref==REF and ALT=='*':
return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
elif alt==ALT and ref==REF:
return response(POS=int(POS), REF=REF, ALT=ALT, index=i+1, geno=geno, chrom=chrom, pos=pos)
continue
@app.route('/set_variant_causal/<individual>/<variant_str>')
def set_variant_causal(individual, variant_str):
    """Mark a variant as causal for a patient, push the variant's gene to
    the patient's PhenoTips record, then redirect to the individual page."""
    print individual, variant_str
    db = get_db()
    #get_db().patients.update({'patient_id':individual},{'$addToSet':{'causal_variants':variant_str}})
    # Resolve the variant's first gene to its display name.
    var = db.variants.find_one({'variant_id': variant_str})
    gene_id = var['genes'][0]
    gene_name = db.genes.find_one({'gene_id': gene_id})['gene_name_upper']
    print 'GENE_NAME', gene_name
    # update Gene in phenotips
    conn = PhenotipsClient()
    auth = '%s:%s' % (session['user'], session['password2'],)
    p = conn.get_patient(eid=individual, auth=auth)
    p['genes'] = p.get('genes', []) + [{'gene': gene_name}]
    print conn.update_patient( eid=p['external_id'], auth=auth, patient=p )
    # Mirror the PhenoTips record into the local patients collection.
    print get_db('patients').patients.update({'external_id': individual}, {'$set': p}, w=0)
    # Record the causal variant (de-duplicated) on the local patient doc.
    p = db.patients.find_one({'external_id': individual})
    p['causal_variants'] = list(frozenset(p.get('causal_variants', []) + [variant_str]))
    db.patients.update({'external_id': individual}, {'$set': {'causal_variants': p['causal_variants']}}, w=0)
    # Rebuild the redirect base from the referring host.
    # NOTE(review): `referrer` is unbound when there is no Referer header,
    # which would raise NameError below -- confirm requests always carry one.
    if request.referrer:
        referrer = request.referrer
        u = urlparse(referrer)
        referrer = '%s://%s' % (u.scheme, u.hostname,)
        if u.port: referrer = '%s:%s' % (referrer, u.port,)
    return redirect(referrer + '/individual/' + individual)
@app.route('/unset_variant_causal/<individual>/<variant_str>')
def unset_variant_causal(individual, variant_str):
print individual, variant_str
db=get_db()
p=db.patients.find_one({'external_id':individual})
if 'causal_variants' in p and not p['causal_variants']: p['causal_variants']=[]
if variant_str in p.get('causal_variants',[]):
p['causal_variants']=p['causal_variants'].remove(variant_str)
db.patients.update({'external_id':individual},{'$set':{'causal_variants':p['causal_variants']}},w=0)
conn=PhenotipsClient()
auth='%s:%s' % (session['user'],session['password2'],)
p2=conn.get_patient(eid=individual,auth=auth)
p2['genes']=[]
for var in p['causal_variants']:
var=db.variants.find_one({'variant_id':var})
gene_id=var['genes'][0]
gene_name=db.genes.find_one({'gene_id':gene_id})['gene_name_upper']
print 'GENE_NAME', gene_name
p2['genes']=list(frozenset(p2.get('genes',[])+[{'gene':gene_name}]))
# update Gene in phenotips
print conn.update_patient( eid=p2['external_id'], auth=auth, patient=p2 )
print get_db('patients').patients.update({'external_id':individual},{'$set':p2},w=0)
if request.referrer:
referrer=request.referrer
u = urlparse(referrer)
referrer='%s://%s' % (u.scheme,u.hostname,)
if u.port: referrer='%s:%s' % (referrer,u.port,)
return redirect(referrer+'/individual/'+individual)
@app.route('/set_variant_status/<individual>/<variant_str>/<status>')
def set_variant_status(individual, variant_str, status):
    """Set the review status on one of a patient's rare variants and echo
    the status string back to the caller."""
    print individual, variant_str, status
    db = get_db()
    #print get_db().patients.update({'patient_id':individual},{'$addToSet':{'variant_status':{variant_str:status}}})
    # Load the patient's rare-variant list, patch the matching entry ...
    rare_variants = db.patients.find_one({'external_id': individual}, {'rare_variants': 1})['rare_variants']
    for rv in rare_variants:
        if rv['variant_id'] == variant_str:
            rv['status'] = status
    # ... and write the whole list back in one update.
    print db.patients.update({'external_id': individual}, {'$set': {'rare_variants': rare_variants}})
    return status
| pontikos/uclex_browser | views/variant.py | Python | mit | 7,029 | [
"pysam"
] | e0cca600c462942e3cb3a3b69874a435a88295c0611339b248a8bfce61e2e6e7 |
import predict as pd
import copy
import os
import numpy as np
import util
import shutil
import pickle
import pylab as plt
import pandas
import local_multiprocessing
import load_data
import features.featurization as feat
def check_feature_set_dims(feature_sets):
    """Assert that feature_sets is non-empty and that every feature matrix
    has the same number of rows (individuals).

    BUG FIX: the original wrote `if F2 is None: F = F2`, clobbering F with
    None instead of recording the first set's row count in F2 -- so the
    dimension assert fired on any input with two or more (consistent) sets.
    Also renamed the loop variable, which shadowed the builtin `set`.
    """
    assert feature_sets != {}, "features are empty, check learn_options"
    F2 = None
    for set_name in feature_sets.keys():
        F = feature_sets[set_name].shape[0]
        if F2 is None:
            F2 = F  # remember the first set's row count as the reference
        assert F == F2, "not same # individuals for feature %s" % set_name
def set_target(learn_options, classification):
    """Pick the prediction target, training metric and ground-truth label.

    Regression uses the rank-transformed target with Spearman r;
    classification uses the binary target with AUC.  With merged data
    (V == 3) only rank/threshold targets are allowed.
    """
    assert 'target_name' not in learn_options.keys() or learn_options['target_name'] is not None, "changed it to be automatically set here"
    if classification:
        target = learn_options['binary target name']
        metric = 'AUC'
    else:
        target = learn_options['rank-transformed target name']
        metric = 'spearmanr'
    learn_options["target_name"] = target
    learn_options["training_metric"] = metric
    learn_options['ground_truth_label'] = target
    if learn_options["V"] == 3:
        merged_ok = ('score_drug_gene_rank', 'score_drug_gene_threshold')
        assert learn_options['target_name'] in merged_ok, "cannot use raw scores when mergind data"
        assert learn_options["ground_truth_label"] in merged_ok, "cannot use raw scores when mergind data"
    return learn_options
def GP_setup(learn_options, likelihood='gaussian', degree=3, set_target_fn=set_target):
    """Configure learn_options for a Gaussian-process regressor (GPy);
    likelihood='warped' selects a warped GP."""
    learn_options["method"] = "GPy"
    learn_options['kernel degree'] = degree
    learn_options['warpedGP'] = (likelihood == 'warped')
    return set_target_fn(learn_options, classification=False)
def SVC_setup(learn_options, likelihood='gaussian', degree=3, set_target_fn=set_target):
    """Configure learn_options for a support-vector classifier."""
    learn_options["method"] = "SVC"
    return set_target_fn(learn_options, classification=True)
def L1_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for L1-penalised (lasso) linear regression."""
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options.update({"method": "linreg", "penalty": "L1", "feature_select": False})
    if "alpha" not in learn_options:
        # Geometric grid of 100 regularisation strengths starting at 1e-6.
        learn_options["alpha"] = np.array([1e-6 * pow(1.3, i) for i in range(100)])
    learn_options["loss"] = "squared"
    return learn_options
def L2_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for L2-penalised (ridge) linear regression."""
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options.update({"method": "linreg", "penalty": "L2", "feature_select": False})
    if "alpha" not in learn_options:
        # Geometric grid of 100 regularisation strengths starting at 1e-6.
        learn_options["alpha"] = np.array([1e-6 * pow(1.3, i) for i in range(100)])
    learn_options["loss"] = "squared"
    return learn_options
def mean_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the mean-prediction baseline."""
    opts = set_target_fn(learn_options, classification=False)
    opts['method'] = 'mean'
    return opts
def random_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the random-prediction baseline."""
    opts = set_target_fn(learn_options, classification=False)
    opts['method'] = 'random'
    return opts
def elasticnet_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for elastic-net linear regression."""
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options.update({"method": "linreg", "penalty": "EN", "feature_select": False, "loss": "squared"})
    if "alpha" not in learn_options:
        # Doubling grid of 30 regularisation strengths starting at 1e-5.
        learn_options["alpha"] = np.array([1e-5 * pow(2, i) for i in range(30)])
    return learn_options
def DNN_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the deep-neural-network model."""
    opts = set_target_fn(learn_options, classification=False)
    opts['method'] = 'DNN'
    # Raw score target (alternative: 'score_drug_gene_quantized').
    opts['DNN target variable'] = 'score'
    # opts['DNN architecture'] = (119, 10, 10, 10, 2)
    return opts
def RF_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for a random-forest regressor."""
    opts = set_target_fn(learn_options, classification=False)
    opts['method'] = 'RandomForestRegressor'
    return opts
def doench_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the Doench et al. baseline classifier."""
    opts = set_target_fn(learn_options, classification=True)
    opts['method'] = 'doench'
    return opts
def sgrna_from_doench_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the sgRNA-from-Doench baseline."""
    opts = set_target_fn(learn_options, classification=False)
    opts['method'] = 'sgrna_from_doench'
    return opts
def linreg_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for plain (unregularised) linear regression."""
    learn_options.update({"method": "linreg", "penalty": "L1", "feature_select": False})
    if "alpha" not in learn_options:
        # alpha of 0.0 => effectively no regularisation.
        learn_options["alpha"] = np.array([0.0])
    learn_options["loss"] = "squared"
    return set_target_fn(learn_options, classification=False)
def logregL1_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for L1-penalised logistic regression."""
    learn_options = set_target_fn(learn_options, classification=True)
    learn_options.update({"method": "logregL1", "penalty": "L1", "feature_select": False})
    if "alpha" not in learn_options:
        # Geometric grid of 100 regularisation strengths starting at 1e-6.
        learn_options["alpha"] = np.array([1e-6 * pow(1.3, i) for i in range(100)])
    return learn_options
def LASSOs_ensemble_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for an ensemble of lasso regressors."""
    learn_options = set_target_fn(learn_options, classification=False)
    learn_options.update({"method": "lasso_ensemble", "penalty": "L1", "feature_select": False})
    if "alpha" not in learn_options:
        # Geometric grid of 100 regularisation strengths starting at 1e-6.
        learn_options["alpha"] = np.array([1e-6 * pow(1.3, i) for i in range(100)])
    learn_options["loss"] = "squared"
    return learn_options
def xu_et_al_setup(learn_options, set_target_fn=set_target):
    """Configure learn_options for the Xu et al. baseline classifier."""
    opts = set_target_fn(learn_options, classification=True)
    opts["method"] = "xu_et_al"
    return opts
def adaboost_setup(learn_options, num_estimators=100, max_depth=3, learning_rate=0.1, set_target_fn=set_target, model="AdaBoost"):
    """Configure learn_options for boosted trees.

    model is "AdaBoost" (regression, method AdaBoostRegressor) or
    "AdaBoostClassifier".  When learn_options['adaboost_CV'] is falsy the
    three hyperparameters are fixed from the arguments; otherwise only
    n_estimators is pinned and the rest are left to the CV search.
    """
    learn_options = set_target_fn(learn_options, classification=False)
    if model == "AdaBoost":
        learn_options['method'] = "AdaBoostRegressor"
    elif model == "AdaBoostClassifier":
        learn_options['method'] = "AdaBoostClassifier"
    else:
        raise Exception("model must be either AdaBoost or AdaBoostClassifier")
    learn_options['adaboost_version'] = 'python' # "R" or "python"
    # BUG FIX: the loss default applies to the regressor, but the old check
    # compared `model` against "AdaBoostRegressor" -- a value the `model`
    # argument can never take -- so 'adaboost_loss' was never defaulted.
    if 'adaboost_loss' not in learn_options.keys() and model == "AdaBoost":
        learn_options['adaboost_loss'] = 'ls' # alternatives: "lad", "huber", "quantile", see scikit docs for details
    if 'adaboost_alpha' not in learn_options.keys():
        learn_options['adaboost_alpha'] = 0.5 # this parameter is only used by the huber and quantile loss functions.
    if not learn_options['adaboost_CV']:
        learn_options['adaboost_learning_rate'] = learning_rate
        learn_options['adaboost_n_estimators'] = num_estimators
        learn_options['adaboost_max_depth'] = max_depth
    else:
        learn_options['adaboost_n_estimators'] = num_estimators
    return learn_options
def shared_setup(learn_options, order, test):
    """Fill in defaults shared by every model setup and configure local
    multiprocessing.  Mutates learn_options in place; returns num_proc."""
    if 'num_proc' not in learn_options.keys():
        learn_options['num_proc'] = None
    if 'num_thread_per_proc' not in learn_options.keys():
        learn_options['num_thread_per_proc'] = None
    # Resolve the actual worker count from the (possibly None) preferences.
    num_proc = local_multiprocessing.configure(TEST=test, num_proc=learn_options["num_proc"],
                                               num_thread_per_proc=learn_options["num_thread_per_proc"])
    learn_options["num_proc"] = num_proc
    learn_options["order"] = order # gets used many places in code, not just here
    if "cv" not in learn_options.keys():
        # if no CV preference is specified, use leave-one-gene-out
        learn_options["cv"] = "gene"
    if "normalize_features" not in learn_options.keys():
        # default to normalizing features
        learn_options["normalize_features"] = True
    if "weighted" not in learn_options.keys():
        learn_options['weighted'] = None
    if "all pairs" not in learn_options.keys():
        learn_options["all pairs"] = False
    if "include_known_pairs" not in learn_options.keys():
        learn_options["include_known_pairs"] = False
    if "include_gene_guide_feature" not in learn_options.keys():
        learn_options["include_gene_guide_feature"] = 0 #used as window size, so 0 is none
    #these should default to true to match experiments before they were options:
    if "gc_features" not in learn_options.keys():
        learn_options["gc_features"] = True
    if "nuc_features" not in learn_options.keys():
        learn_options["nuc_features"] = True
    if 'train_genes' not in learn_options.keys():
        learn_options["train_genes"] = None
    if 'test_genes' not in learn_options.keys():
        learn_options["test_genes"] = None
    if "num_proc" not in learn_options:
        learn_options["num_proc"] = None
    if "num_thread_per_proc" not in learn_options:
        learn_options["num_thread_per_proc"] = None
    if 'seed' not in learn_options:
        learn_options['seed'] = 1
    if "flipV1target" not in learn_options:
        learn_options["flipV1target"] = False
    if 'num_genes_remove_train' not in learn_options:
        learn_options['num_genes_remove_train'] = None
    if "include_microhomology" not in learn_options:
        learn_options["include_microhomology"] = False
    if "algorithm_hyperparam_search" not in learn_options:
        learn_options["algorithm_hyperparam_search"] = "grid" # other options is bo for bayesian optimization
    return num_proc
def setup(test=False, order=1, learn_options=None, data_file=None, pam_audit=True, length_audit=True):
    """Load the data file, featurize it, and finalize learn_options.

    Returns (Y, feature_sets, target_genes, learn_options, num_proc).
    """
    num_proc = shared_setup(learn_options, order, test)
    assert "testing_non_binary_target_name" in learn_options.keys(), "need this in order to get metrics, though used to be not needed, so you may newly see this error"
    if learn_options["testing_non_binary_target_name"] not in ['ranks', 'raw', 'thrs']:
        raise Exception('learn_otions["testing_non_binary_target_name"] must be in ["ranks", "raw", "thrs"]')
    Xdf, Y, gene_position, target_genes = load_data.from_file(data_file, learn_options)
    learn_options['all_genes'] = target_genes
    if test:
        learn_options["order"] = 1
    # Optional re-anchoring: extend 30mers to 31mers, then drop the first
    # nucleotide to get a right-shifted 30mer.
    if 'convert_30mer_to_31mer' in learn_options and learn_options['convert_30mer_to_31mer'] is True:
        print "WARNING!!! converting 30 mer to 31 mer (and then cutting off first nucleotide to go back to 30mer with a right shift)"
        for i in range(Xdf.shape[0]):
            Xdf['30mer'].iloc[i] = util.convert_to_thirty_one(Xdf.iloc[i]["30mer"], Xdf.index.values[i][1], Xdf.iloc[i]["Strand"])
        # to_keep = Xdf['30mer'].isnull() == False
        # Xdf = Xdf[to_keep]
        # gene_position = gene_position[to_keep]
        # Y = Y[to_keep]
        Xdf["30mer"] = Xdf["30mer"].apply(lambda x: x[1:]) # chop the first nucleotide
    # Optionally trim each guide to a fixed [seq_start:seq_end) sub-window.
    if learn_options.has_key('left_right_guide_ind') and learn_options['left_right_guide_ind'] is not None:
        seq_start, seq_end, expected_length = learn_options['left_right_guide_ind']
        Xdf['30mer'] = Xdf['30mer'].apply(lambda seq: seq[seq_start:seq_end])
    feature_sets = feat.featurize_data(Xdf, learn_options, Y, gene_position, pam_audit=pam_audit, length_audit=length_audit)
    # Seed numpy so downstream model fitting is reproducible.
    np.random.seed(learn_options['seed'])
    return Y, feature_sets, target_genes, learn_options, num_proc
def run_models(models, orders, GP_likelihoods=['gaussian', 'warped'], WD_kernel_degrees=[3],
               adaboost_learning_rates=[0.1], adaboost_num_estimators=[100], adaboost_max_depths=[3],
               learn_options_set=None, test=False, CV=True, setup_function=setup, set_target_fn=set_target, pam_audit=True, length_audit=True):
    '''
    Train and (optionally) cross-validate every requested model for every
    entry in learn_options_set.

    CV is set to False if one wants to train a final model and not
    cross-validate, but it still goes through what looks like CV code.

    :returns: (results, all_learn_options) -- pd.cross_validate output and the
        learn options actually used, both keyed by a descriptive model string
    '''
    results = {}
    assert learn_options_set is not None, "need to specify learn_options_set"
    all_learn_options = {}

    # shorten so easier to display on graphs
    feat_models_short = {'L1': "L1", 'L2': "L2", 'elasticnet': "EN", 'linreg': "LR",
                         'RandomForest': "RF",
                         'AdaBoost': "AB", 'AdaBoostClassifier': "ABClass", 'doench': 'doench',
                         "logregL1": "logregL1", "sgrna_from_doench": "sgrna_from_doench", 'SVC': 'SVC', 'xu_et_al': 'xu_et_al'}

    if not CV:
        print("Received option CV=False, so I'm training using all of the data")
        assert len(learn_options_set.keys()) == 1, "when CV is False, only 1 set of learn options is allowed"
        assert len(models) == 1, "when CV is False, only 1 model is allowed"

    for learn_options_str in learn_options_set.keys():
        # these options get augmented in setup
        partial_learn_opt = learn_options_set[learn_options_str]
        for model in models:
            # models requiring explicit featurization
            if model in feat_models_short:
                for order in orders:
                    print("running %s, order %d for %s" % (model, order, learn_options_str))
                    # TODO precompute features for all orders, as this is repeated for each model
                    Y, feature_sets, target_genes, learn_options, num_proc = setup_function(test=test, order=order, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit)

                    if model == 'L1':
                        learn_options_model = L1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'L2':
                        learn_options_model = L2_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'elasticnet':
                        learn_options_model = elasticnet_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'linreg':
                        learn_options_model = linreg_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == "logregL1":
                        learn_options_model = logregL1_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'RandomForest':
                        learn_options_model = RF_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'SVC':
                        learn_options_model = SVC_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'doench':
                        learn_options_model = doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'sgrna_from_doench':
                        learn_options_model = sgrna_from_doench_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'xu_et_al':
                        learn_options_model = xu_et_al_setup(copy.deepcopy(learn_options), set_target_fn=set_target_fn)
                    elif model == 'AdaBoost' or model == 'AdaBoostClassifier':
                        # BUG FIX: this condition used to read
                        # `model == 'AdaBoost' or 'AdaBoostClassifier'`, which is
                        # always truthy; the comparison is now explicit.
                        for learning_rate in adaboost_learning_rates:
                            for num_estimators in adaboost_num_estimators:
                                for max_depth in adaboost_max_depths:
                                    learn_options_model = adaboost_setup(copy.deepcopy(learn_options), learning_rate=learning_rate, num_estimators=num_estimators, max_depth=max_depth, set_target_fn=set_target_fn, model=model)
                                    model_string = feat_models_short[model] + '_or%d_md%d_lr%.2f_n%d_%s' % (learn_options_set[learn_options_str]["order"], max_depth, learning_rate, num_estimators, learn_options_str)
                        # NOTE(review): only the learn options from the LAST
                        # hyper-parameter combination survive this loop and get
                        # cross-validated below -- confirm multi-valued adaboost_*
                        # grids are really meant to be evaluated only once.
                    if model != 'AdaBoost':
                        model_string = feat_models_short[model] + '_ord%d_%s' % (learn_options_set[learn_options_str]["order"], learn_options_str)

                    results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
                    all_learn_options[model_string] = learn_options_model
            # if the model doesn't require explicit featurization
            else:
                # BUG FIX: `setup_fn` was an undefined name (NameError at runtime);
                # the parameter is called `setup_function`.
                assert setup_function == setup, "not yet modified to handle this"
                print("running %s for %s" % (model, learn_options_str))
                Y, feature_sets, target_genes, learn_options, num_proc = setup(test=test, order=1, learn_options=partial_learn_opt, pam_audit=pam_audit, length_audit=length_audit)
                if model == 'mean':
                    learn_options_model = mean_setup(copy.deepcopy(learn_options))
                elif model == 'random':
                    learn_options_model = random_setup(copy.deepcopy(learn_options))
                elif model == 'DNN':
                    learn_options_model = DNN_setup(copy.deepcopy(learn_options))
                elif model == 'GP':
                    for likelihood in GP_likelihoods:
                        for degree in WD_kernel_degrees:
                            learn_options_model = GP_setup(copy.deepcopy(learn_options), likelihood=likelihood, degree=degree)
                            model_string = '%s_%s_degree%d_%s' % (model, likelihood, degree, learn_options_str)
                            results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
                else:
                    raise NotImplementedError("model %s not supported" % model)

                # "GP" already calls pd.cross_validate() and has its own model_string, so skip this.
                if model != "GP":
                    model_string = model + '_%s' % learn_options_str
                    results[model_string] = pd.cross_validate(Y, feature_sets, learn_options=learn_options_model, TEST=test, CV=CV)
                all_learn_options[model_string] = learn_options_model
    return results, all_learn_options
def pickle_runner_results(exp_name, results, all_learn_options, relpath="/../" + "results"):
    """Pickle (results, all_learn_options) to <module_dir><relpath>/<exp_name>.pickle.

    When *exp_name* is None the first key of *results* is used as the file name.
    The target directory is created on demand.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    dname = here + relpath
    if not os.path.exists(dname):
        os.makedirs(dname)
        print("Created directory: %s" % str(dname))
    if exp_name is None:
        exp_name = results.keys()[0]
    myfile = dname + '/' + exp_name + '.pickle'
    with open(myfile, 'wb') as f:
        print("writing results to %s" % myfile)
        # -1 selects the highest available pickle protocol.
        pickle.dump((results, all_learn_options), f, -1)
def runner(models, learn_options, GP_likelihoods=None, orders=None, WD_kernel_degrees=None, where='local', cluster_user='fusi', cluster='RR1-N13-09-H44', test=False, exp_name=None, **kwargs):
    """Dispatch a model-comparison run either locally or to the cluster.

    Locally: run the models, compute and plot metrics, pickle everything and
    return (results, all_learn_options, all_metrics, gene_names).
    On the cluster: only create the job files and return (tempdir,
    clust_filename, user); the comparable execution code is in cli_run_model.py.
    """
    if where == 'local':
        results, all_learn_options = run_models(models, orders=orders, GP_likelihoods=GP_likelihoods, learn_options_set=learn_options, WD_kernel_degrees=WD_kernel_degrees, test=test, **kwargs)
        all_metrics, gene_names = util.get_all_metrics(results, learn_options)
        util.plot_all_metrics(all_metrics, gene_names, all_learn_options, save=True)
        pickle_runner_results(exp_name, results, all_learn_options)
        return results, all_learn_options, all_metrics, gene_names
    if where == 'cluster':
        import cluster_job
        # Create a random cluster directory, dump learn options, and write the job file.
        tempdir, user, clust_filename = cluster_job.create(cluster_user, models, orders, WD_kernel_degrees, GP_likelihoods, exp_name=exp_name, learn_options=learn_options, **kwargs)
        return tempdir, clust_filename, user
def save_final_model_V3(filename=None, include_position=True, learn_options=None, short_name='final', pam_audit=True, length_audit=True):
    '''
    Train the final AdaBoost model on all V3 data (CV=False) and pickle
    (model, learn_options) to *filename*.

    :param filename: required output path for the pickled model
    :param include_position: whether gene-position features are included
    :param learn_options: full options dict; when None a default V3 dict is built
    :param short_name: label used for the single learn-options set
    :param pam_audit: forwarded to featurization PAM sanity check
    :param length_audit: forwarded to featurization length sanity check
    run_models(produce_final_model=True) is what saves the model
    '''
    test = False
    assert filename is not None, "need to provide filename to save final model"

    if learn_options is None:
        # The two historical variants differed only in include_gene_position,
        # so build one dict and set that flag from include_position.
        learn_options = {"V": 3,
                         'train_genes': load_data.get_V3_genes(),
                         'test_genes': load_data.get_V3_genes(),
                         "testing_non_binary_target_name": 'ranks',
                         'include_pi_nuc_feat': True,
                         "gc_features": True,
                         "pam_features": True,
                         "repeat_features": None,
                         "nuc_features": True,
                         "include_gene_position": include_position,
                         "include_NGGX_interaction": True,
                         "include_NGGXX_interaction": None,
                         "include_Tm": True,
                         "include_strand": False,
                         "include_gene_feature": False,
                         "include_gene_guide_feature": 0,
                         "extra pairs": False,
                         "weighted": None,
                         "training_metric": 'spearmanr',
                         "NDGC_k": 10,
                         "cv": "gene",
                         "include_gene_effect": False,
                         "include_drug": False,
                         "include_sgRNAscore": False,
                         'adaboost_loss': 'ls',  # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
                         'adaboost_alpha': 0.5,  # this parameter is only used by the huber and quantile loss functions.
                         'normalize_features': False,
                         'adaboost_CV': False
                         }

    learn_options_set = {short_name: learn_options}
    # BUG FIX: pam_audit used to be passed length_audit's value.
    results, all_learn_options = run_models(["AdaBoost"], orders=[2], adaboost_learning_rates=[0.1],
                                            adaboost_max_depths=[3], adaboost_num_estimators=[100],
                                            learn_options_set=learn_options_set,
                                            test=test, CV=False, pam_audit=pam_audit, length_audit=length_audit)
    model = results.values()[0][3][0]

    with open(filename, 'wb') as f:
        pickle.dump((model, learn_options), f, -1)

    return model
def predict(seq, aa_cut=-1, percent_peptide=-1, model=None, model_file=None, pam_audit=True, length_audit=False, learn_options_override=None):
    """
    Predict on-target scores for an array of 30mer guide sequences.

    if pam_audit==False, then it will not check for GG in the expected position;
    this is useful if predicting on PAM mismatches, such as with off-target

    :param seq: numpy array of 30mer strings
    :param aa_cut: numpy array of amino-acid cut positions, or None
    :param percent_peptide: numpy array of percent-peptide values, or None
    :param model: an already-unpickled (model, learn_options) tuple
    :param model_file: pickle to load when *model* is None; when both are None
        a default saved model is chosen based on positional information
    :param learn_options_override: dict whose entries override the loaded options
    :returns: numpy array of predicted scores, one per input sequence
    """
    print("predict function running")
    # assert not (model is None and model_file is None), "you have to specify either a model or a model_file"
    assert isinstance(seq, (np.ndarray)), "Please ensure seq is a numpy array"
    assert len(seq[0]) > 0, "Make sure that seq is not empty"
    assert isinstance(seq[0], str), "Please ensure input sequences are in string format, i.e. 'AGAG' rather than ['A' 'G' 'A' 'G'] or alternate representations"

    if aa_cut is not None:
        assert len(aa_cut) > 0, "Make sure that aa_cut is not empty"
        assert isinstance(aa_cut, (np.ndarray)), "Please ensure aa_cut is a numpy array"
        assert np.all(np.isreal(aa_cut)), "amino-acid cut position needs to be a real number"

    if percent_peptide is not None:
        assert len(percent_peptide) > 0, "Make sure that percent_peptide is not empty"
        assert isinstance(percent_peptide, (np.ndarray)), "Please ensure percent_peptide is a numpy array"
        assert np.all(np.isreal(percent_peptide)), "percent_peptide needs to be a real number"

    if model_file is None:
        azimuth_saved_model_dir = os.path.join(os.path.dirname(__file__), 'saved_models')
        # Use the position-free model when no usable positional info was given.
        if np.any(percent_peptide == -1) or (percent_peptide is None and aa_cut is None):
            print("No model file specified, using V3_model_nopos")
            model_name = 'V3_model_nopos.pickle'
        else:
            print("No model file specified, using V3_model_full")
            model_name = 'V3_model_full.pickle'
        model_file = os.path.join(azimuth_saved_model_dir, model_name)

    if model is None:
        with open(model_file, 'rb') as f:
            model, learn_options = pickle.load(f)
        print(model_file)
        print(learn_options)
    else:
        model, learn_options = model
    # NOTE(review): V is forced to 2 regardless of what the model carried --
    # confirm this is intended for every saved model.
    learn_options["V"] = 2

    learn_options = override_learn_options(learn_options_override, learn_options)

    Xdf = pandas.DataFrame(columns=[u'30mer', u'Strand'], data=zip(seq, ['NA' for x in range(len(seq))]))
    if np.all(percent_peptide != -1) and (percent_peptide is not None and aa_cut is not None):
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(percent_peptide, aa_cut))
    else:
        # No positional info: fill both columns with -1 sentinels.
        gene_position = pandas.DataFrame(columns=[u'Percent Peptide', u'Amino Acid Cut position'], data=zip(np.ones(seq.shape[0])*-1, np.ones(seq.shape[0])*-1))

    feature_sets = feat.featurize_data(Xdf, learn_options, pandas.DataFrame(), gene_position, pam_audit=pam_audit, length_audit=length_audit)
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)

    # call to scikit-learn, returns a vector of predicted values
    preds = model.predict(inputs)

    # Check that predictions are not 0/1 from a classifier.predict()
    # (instead of predict_proba() or decision_function()).
    # BUG FIX: unique_preds was computed but never used; the original loop
    # scanned the full preds array -- scan the unique values instead.
    unique_preds = np.unique(preds)
    ok = any(pr not in (0, 1) for pr in unique_preds)
    assert ok, "model returned only 0s and 1s"
    return preds
def override_learn_options(learn_options_override, learn_options):
    """Apply every entry of *learn_options_override* on top of *learn_options*.

    Mutates and returns *learn_options*; a None override is a no-op.
    """
    if learn_options_override is not None:
        learn_options.update(learn_options_override)
    return learn_options
def fill_learn_options(learn_options_fill, learn_options):
    """
    Only fill in keys that are missing from learn_options, taking them
    from learn_options_fill; existing keys are never overwritten.

    Mutates and returns *learn_options*; a None fill dict is a no-op.
    """
    if learn_options_fill is not None:
        # BUG FIX/modernization: dict.has_key() was removed in Python 3;
        # setdefault() expresses "fill only if missing" and works everywhere.
        for k, v in learn_options_fill.items():
            learn_options.setdefault(k, v)
    return learn_options
def write_results(predictions, file_to_predict):
    """Append *predictions* as a 'predictions' column to the input CSV.

    The augmented table is written next to the input as <name>.pred.csv.
    Returns (dataframe, output_path).
    """
    out_path = file_to_predict.replace(".csv", ".pred.csv")
    frame = pandas.read_csv(file_to_predict)
    frame['predictions'] = predictions
    frame.to_csv(out_path)
    print("wrote results to %s" % out_path)
    return frame, out_path
if __name__ == '__main__':
    # Train and save the two shipped model variants: without and with
    # gene-position features.
    #save_final_model_V3(filename='azimuth/azure_models/V3_model_full.pickle', include_position=True)
    save_final_model_V3(filename='saved_models/model_8_nopos.pickle', include_position=False)
    save_final_model_V3(filename='saved_models/model_8.pickle', include_position=True)
    # predict('GGGCCGCTGTTGCAGGTGGCGGGTAGGATC', 'sense', 1200, 30.3, model_file='../saved_models/final_model_nicolo.pickle')

    learn_options = {"V": 3,
                     "train_genes": load_data.get_V3_genes(),
                     "test_genes": load_data.get_V3_genes(),
                     "target_name": 'score_drug_gene_rank',
                     "testing_non_binary_target_name": 'ranks',
                     'include_pi_nuc_feat': True,
                     "gc_features": True,
                     "pam_features": True,
                     "repeat_features": True,
                     "nuc_features": True,
                     "include_gene_position": True,
                     "include_NGGX_interaction": None,
                     "include_NGGXX_interaction": True,
                     "include_Tm": True,
                     "include_strand": False,
                     "include_gene_feature": False,
                     "include_gene_guide_feature": 0,
                     "extra pairs": False,
                     "weighted": None,
                     "training_metric": 'spearmanr',
                     "NDGC_k": 10,
                     "cv": "gene",
                     "include_gene_effect": False,
                     "include_drug": False,
                     "include_sgRNAscore": False,
                     # BUG FIX: 'adaboost_loss' used to appear twice in this
                     # literal; the second occurrence silently won.
                     'adaboost_loss': 'ls',  # main "ls", alternatives: "lad", "huber", "quantile", see scikit docs for details
                     'adaboost_alpha': 0.5,  # this parameter is only used by the huber and quantile loss functions.
                     'adaboost_CV': False
                     }
    learn_options_set = {"post bug fix": learn_options}
    #runner(['AdaBoost'], learn_options_set, orders=[2], where='local', adaboost_learning_rates=[0.1], adaboost_max_depths=[3], adaboost_num_estimators=[100], exp_name='post-index-fix')
    # #util.feature_importances(results)
| mayavanand/RMMAFinalProject | azimuth/model_comparison.py | Python | bsd-3-clause | 31,399 | [
"Gaussian"
] | 461880d9a7452159f76a10f2e2cb3cb9cf0f5185dc096ef81be3e53f8bdf38c6 |
import os
import glob
import math
import zipfile
from shapely.geometry import MultiPoint, Point, mapping
from datetime import datetime
# NetCDF
import netCDF4
import pytz
from fiona import collection
from collections import OrderedDict
from paegan.logger import logger
import geojson
import tables
from tables import *
# Pytables representation of a model run
class ModelResultsTable(IsDescription):
    """PyTables row schema for a model run: one record per particle per timestep."""
    particle = UInt8Col()      # particle id; NOTE(review): UInt8 caps ids at 255 -- confirm runs stay below 256 particles
    time = Time32Col()         # epoch timestamp of the record
    latitude = Float32Col()
    longitude = Float32Col()
    depth = Float32Col()
    u_vector = Float32Col()    # eastward velocity component (see NetCDF exporter's standard_name)
    v_vector = Float32Col()    # northward velocity component
    w_vector = Float32Col()    # upward velocity component
    temperature = Float32Col()
    salinity = Float32Col()
    age = Float32Col()
    lifestage = UInt8Col()
    progress = Float32Col()
    settled = BoolCol()        # particle has settled
    halted = BoolCol()         # particle no longer forced by currents (see NetCDF exporter's description)
    dead = BoolCol()
class ResultsPyTable(object):
    """Thin wrapper around an HDF5 file holding model-run trajectory rows."""

    def __init__(self, output_file):
        """Create *output_file* with a /trajectories group and its results table."""
        self._file = open_file(output_file, mode="w", title="Model run output")
        self._root = self._file.create_group("/", "trajectories", "Trajectory Data")
        self._table = self._file.create_table(self._root, "model_results", ModelResultsTable, "Model Results")
        # Defer index maintenance to close() so bulk writes stay fast.
        self._table.autoindex = False
        cols = self._table.cols
        for column in (cols.particle, cols.time, cols.latitude, cols.longitude):
            column.create_index()

    def write(self, data):
        """Append one row; keys without a matching column are silently skipped."""
        row = self._table.row
        for key, value in data.items():
            try:
                row[key] = value
            except Exception:
                # No column named `key`, so don't add the data.
                pass
        row.append()

    def trackline(self):
        """Placeholder for trackline computation (currently a no-op)."""
        pass

    def metadata(self):
        """Placeholder for metadata computation (currently a no-op)."""
        pass

    def compute(self):
        """Run all derived-product computations."""
        self.trackline()
        self.metadata()

    def close(self):
        """Flush pending rows, rebuild the deferred indexes and close the file."""
        self._table.flush()
        self._table.reindex()
        self._file.close()
class Export(object):
    """Abstract base for all exporters; subclasses implement :meth:`export`."""
    @classmethod
    def export(cls, **kwargs):
        # BUG FIX: the original `raise("...")` raised a TypeError because
        # strings are not exceptions; NotImplementedError is the intended
        # signal for an unimplemented abstract method.
        raise NotImplementedError("Please implement the export method of your Export class.")
class H5TracklineWithPoints(Export):
    """Export the centroid track of all particles as GeoJSON: one Point
    feature per timestep plus a LineString feature joining them."""
    @classmethod
    def export(cls, folder, h5_file):
        """Write ``full_trackline.geojson`` into *folder* from the pytables file *h5_file*."""
        with tables.open_file(h5_file, mode="r") as h5:
            table = h5.root.trajectories.model_results
            # Distinct timesteps present in the run, in chronological order.
            timestamps = sorted(list(set([ x["time"] for x in table.iterrows() ])))
            pts = []
            features = []
            for i, ts in enumerate(timestamps):
                # All particle positions at this timestep.
                # NOTE(review): the falsy check also drops exact-0.0 lat/lon -- confirm intended.
                points = MultiPoint([ Point(x['longitude'], x['latitude']) for x in table.where("""time == %s""" % ts) if x["latitude"] and x["longitude"] ])
                cp = points.centroid.coords[0]
                geo_pt = geojson.Point(cp)
                pts.append(cp)
                # Each centroid becomes a Point feature tagged with its ISO 8601 UTC time.
                feat = geojson.Feature(id=i, geometry=geo_pt, properties={ "time" : datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc).isoformat() })
                features.append(feat)
            # Join the centroids into the overall track.
            geo_ls = geojson.LineString(pts)
            features.append(geojson.Feature(geometry=geo_ls, id='path'))
            fc = geojson.FeatureCollection(features)
            if not os.path.exists(folder):
                os.makedirs(folder)
            filepath = os.path.join(folder, "full_trackline.geojson")
            with open(filepath, "wb") as r:
                r.write(geojson.dumps(fc).encode('utf-8'))
class H5Trackline(Export):
    """Export the per-timestep centroid of all particles as a single
    GeoJSON LineString feature."""
    @classmethod
    def export(cls, folder, h5_file):
        """Write ``simple_trackline.geojson`` into *folder* from *h5_file*."""
        with tables.open_file(h5_file, mode="r") as h5:
            table = h5.root.trajectories.model_results
            unique_times = sorted(set(row["time"] for row in table.iterrows()))
            centroids = []
            for ts in unique_times:
                cloud = MultiPoint([Point(row['longitude'], row['latitude'])
                                    for row in table.where("""time == %s""" % ts)
                                    if row["latitude"] and row["longitude"]])
                centroids.append(cloud.centroid.coords[0])
            track = geojson.Feature(geometry=geojson.LineString(centroids), id='path')
            if not os.path.exists(folder):
                os.makedirs(folder)
            target = os.path.join(folder, "simple_trackline.geojson")
            with open(target, "wb") as out:
                out.write(geojson.dumps(track).encode('utf-8'))
class H5ParticleTracklines(Export):
    """Export one GeoJSON LineString feature per particle, with the
    per-vertex times (ISO 8601, UTC) stored in the feature properties."""
    @classmethod
    def export(cls, folder, h5_file):
        """Write ``particle_tracklines.geojson`` into *folder* from the pytables file *h5_file*."""
        with tables.open_file(h5_file, mode="r") as h5:
            table = h5.root.trajectories.model_results
            # Distinct particle ids present in the run.
            particles = sorted(list(set([ x["particle"] for x in table.iterrows() ])))
            features = []
            for puid in particles:
                # (time, (lon, lat)) pairs for this particle; falsy lat/lon rows are skipped.
                points = [ (x["time"], (x['longitude'], x['latitude'])) for x in table.where("""particle == %s""" % puid) if x["latitude"] and x["longitude"] ]
                geo_ls = geojson.LineString( [ x[1] for x in points ] )
                times = [ datetime.utcfromtimestamp(x[0]).replace(tzinfo=pytz.utc).isoformat() for x in points ]
                feat = geojson.Feature(geometry=geo_ls, id=puid, properties={ "particle" : puid, "times" : times })
                features.append(feat)
            fc = geojson.FeatureCollection(features)
            if not os.path.exists(folder):
                os.makedirs(folder)
            filepath = os.path.join(folder, "particle_tracklines.geojson")
            with open(filepath, "wb") as r:
                r.write(geojson.dumps(fc).encode('utf-8'))
class H5ParticleMultiPoint(Export):
    """Export one GeoJSON MultiPoint feature per particle.

    Unlike H5ParticleTracklines, the per-point times are stored as raw
    epoch timestamps under the feature's "time" property.
    """
    @classmethod
    def export(cls, folder, h5_file):
        """Write ``particle_multipoint.geojson`` into *folder* from *h5_file*."""
        with tables.open_file(h5_file, mode="r") as h5:
            table = h5.root.trajectories.model_results
            particle_ids = sorted(set(row["particle"] for row in table.iterrows()))
            features = []
            for puid in particle_ids:
                samples = [(row["time"], (row['longitude'], row['latitude']))
                           for row in table.where("""particle == %s""" % puid)
                           if row["latitude"] and row["longitude"]]
                geometry = geojson.MultiPoint([coord for _, coord in samples])
                stamps = [stamp for stamp, _ in samples]
                features.append(geojson.Feature(geometry=geometry, id=puid,
                                                properties={"particle": puid, "time": stamps}))
            collection = geojson.FeatureCollection(features)
            if not os.path.exists(folder):
                os.makedirs(folder)
            out_path = os.path.join(folder, "particle_multipoint.geojson")
            with open(out_path, "wb") as out:
                out.write(geojson.dumps(collection).encode('utf-8'))
class H5GDALShapefile(Export):
    """Export every model-result row as a point in an ESRI shapefile, then
    bundle the shapefile parts into ``h5shape.shp.zip`` and delete them."""
    @classmethod
    def export(cls, folder, h5_file):
        """Write and zip ``particles.shp`` into *folder* from the pytables file *h5_file*."""
        # One DBF attribute per exported table column.
        shape_schema = {'geometry': 'Point',
                        'properties': OrderedDict([('particle', 'int'),
                                                   ('date', 'str'),
                                                   ('latitude', 'float'),
                                                   ('longitude', 'float'),
                                                   ('depth', 'float'),
                                                   ('u_vector', 'float'),
                                                   ('v_vector', 'float'),
                                                   ('w_vector', 'float'),
                                                   ('temp', 'float'),
                                                   ('salinity', 'float'),
                                                   ('age', 'float'),
                                                   ('settled', 'str'),
                                                   ('dead', 'str'),
                                                   ('halted', 'str'),
                                                   ('lifestage', 'int'),
                                                   ('progress', 'float')])}
        # Plain WGS84 lat/lon coordinate reference system.
        shape_crs = {'no_defs': True, 'ellps': 'WGS84', 'datum': 'WGS84', 'proj': 'longlat'}
        if not os.path.exists(folder):
            os.makedirs(folder)
        filepath = os.path.join(folder, "particles.shp")
        with tables.open_file(h5_file, mode="r") as h5:
            table = h5.root.trajectories.model_results
            with collection(filepath, "w", driver='ESRI Shapefile', schema=shape_schema, crs=shape_crs) as shape:
                # One point record per table row.
                for r in table.iterrows():
                    shape.write({'geometry': mapping(Point(r["longitude"], r["latitude"])),
                                 'properties': OrderedDict([('particle', r["particle"]),
                                                            ('date', str(datetime.utcfromtimestamp(r["time"]).isoformat())),
                                                            ('latitude', float(r["latitude"])),
                                                            ('longitude', float(r["longitude"])),
                                                            ('depth', float(r["depth"])),
                                                            ('temp', float(r["temperature"])),
                                                            ('salinity', float(r["salinity"])),
                                                            ('u_vector', float(r["u_vector"])),
                                                            ('v_vector', float(r["v_vector"])),
                                                            ('w_vector', float(r["w_vector"])),
                                                            ('settled', str(r["settled"])),
                                                            ('dead', str(r["dead"])),
                                                            ('halted', str(r["halted"])),
                                                            ('age', float(r["age"])),
                                                            ('lifestage' , int(r["lifestage"])),
                                                            ('progress' , float(r["progress"]))])})
        # Zip the output, then remove the loose shapefile parts.
        shpzip = zipfile.ZipFile(os.path.join(folder, "h5shape.shp.zip"), mode='w')
        for f in glob.glob(os.path.join(folder, "particles*")):
            shpzip.write(f, os.path.basename(f))
            os.remove(f)
        shpzip.close()
class NetCDF(Export):
    @classmethod
    def export(cls, folder, particles, datetimes, summary, **kwargs):
        """
        Export particle data to a CF "trajectory" convention netCDF file
        (rows indexed by time, columns by particle).

        :param folder: output directory (created if missing)
        :param particles: particle objects exposing uid, normalized_locations(),
            temps, salts, u/v/w_vectors, settles, deads and halts
        :param datetimes: model timesteps used to normalize locations
        :param summary: stored in the dataset's global ``summary`` attribute
        :param kwargs: extra global attributes to set on the dataset
        """
        time_units = 'seconds since 1990-01-01 00:00:00'

        # Create netcdf file, overwrite existing
        if not os.path.exists(folder):
            os.makedirs(folder)
        filepath = os.path.join(folder, 'trajectories.nc')
        nc = netCDF4.Dataset(filepath, 'w')

        # Both dimensions unlimited so rows/columns can be appended freely.
        nc.createDimension('time', None)
        nc.createDimension('particle', None)
        fillvalue = -9999.9

        # Create netcdf variables (depth intentionally carries no fill value,
        # preserving the original file layout).
        time = nc.createVariable('time', 'i', ('time',))
        part = nc.createVariable('particle', 'i', ('particle',))
        depth = nc.createVariable('depth', 'f', ('time', 'particle'))
        lat = nc.createVariable('lat', 'f', ('time', 'particle'), fill_value=fillvalue)
        lon = nc.createVariable('lon', 'f', ('time', 'particle'), fill_value=fillvalue)
        salt = nc.createVariable('salt', 'f', ('time', 'particle'), fill_value=fillvalue)
        temp = nc.createVariable('temp', 'f', ('time', 'particle'), fill_value=fillvalue)
        u = nc.createVariable('u', 'f', ('time', 'particle'), fill_value=fillvalue)
        v = nc.createVariable('v', 'f', ('time', 'particle'), fill_value=fillvalue)
        w = nc.createVariable('w', 'f', ('time', 'particle'), fill_value=fillvalue)
        settled = nc.createVariable('settled', 'f', ('time', 'particle'), fill_value=fillvalue)
        dead = nc.createVariable('dead', 'f', ('time', 'particle'), fill_value=fillvalue)
        halted = nc.createVariable('halted', 'f', ('time', 'particle'), fill_value=fillvalue)

        def _clean(values):
            # Swap None/NaN samples for the fill value so netCDF can store them.
            return [x if x is not None and not math.isnan(x) else fillvalue for x in values]

        def _match_locations(values, expected, label):
            # A property that was never recorded has the wrong length; log it
            # and pad the whole column with fill values.
            # BUG FIX: the Settled/Dead/Halted messages used to say "shapefile"
            # (copy-paste from the shapefile exporter); all now say "netcdf".
            if len(values) != expected:
                logger.info("No %s being added to netcdf." % label)
                return [fillvalue] * expected
            return values

        # Loop through locations in each particle, add to netcdf file
        for j, particle in enumerate(particles):
            part[j] = particle.uid
            normalized_locations = particle.normalized_locations(datetimes)
            n_locs = len(normalized_locations)
            normalized_temps = _match_locations(_clean(particle.temps), n_locs, "temperature")
            normalized_salts = _match_locations(_clean(particle.salts), n_locs, "salinity")
            normalized_u = _match_locations(_clean(particle.u_vectors), n_locs, "U")
            normalized_v = _match_locations(_clean(particle.v_vectors), n_locs, "V")
            normalized_w = _match_locations(_clean(particle.w_vectors), n_locs, "W")
            normalized_settled = _match_locations(_clean(particle.settles), n_locs, "Settled")
            normalized_dead = _match_locations(_clean(particle.deads), n_locs, "Dead")
            normalized_halted = _match_locations(_clean(particle.halts), n_locs, "Halted")

            rows = zip(normalized_locations, normalized_temps, normalized_salts,
                       normalized_u, normalized_v, normalized_w,
                       normalized_settled, normalized_dead, normalized_halted)
            for i, (loc, _temp, _salt, _u, _v, _w, _settled, _dead, _halted) in enumerate(rows):
                if j == 0:
                    # The time axis is shared by all particles, so it is only
                    # written while processing the first one.
                    time[i] = int(round(netCDF4.date2num(loc.time, time_units)))
                depth[i, j] = loc.depth
                lat[i, j] = loc.latitude
                lon[i, j] = loc.longitude
                salt[i, j] = _salt
                temp[i, j] = _temp
                u[i, j] = _u
                v[i, j] = _v
                w[i, j] = _w
                settled[i, j] = _settled
                dead[i, j] = _dead
                halted[i, j] = _halted

        # Variable attributes (CF standard names and units)
        depth.coordinates = "time particle lat lon"
        depth.standard_name = "depth_below_sea_surface"
        depth.units = "m"
        depth.POSITIVE = "up"
        depth.positive = "up"
        salt.coordinates = "time particle lat lon"
        salt.standard_name = "sea_water_salinity"
        salt.units = "psu"
        temp.coordinates = "time particle lat lon"
        temp.standard_name = "sea_water_temperature"
        temp.units = "degrees_C"
        u.coordinates = "time particle lat lon"
        u.standard_name = "eastward_sea_water_velocity"
        u.units = "m/s"
        v.coordinates = "time particle lat lon"
        v.standard_name = "northward_sea_water_velocity"
        v.units = "m/s"
        w.coordinates = "time particle lat lon"
        w.standard_name = "upward_sea_water_velocity"
        w.units = "m/s"
        settled.coordinates = "time particle lat lon"
        settled.description = "Is the particle settled"
        settled.standard_name = "particle_settled"
        dead.coordinates = "time particle lat lon"
        dead.description = "Is the particle dead"
        dead.standard_name = "particle_dead"
        halted.coordinates = "time particle lat lon"
        halted.description = "Is the particle prevented from being forced by currents"
        halted.standard_name = "particle_halted"
        time.units = time_units
        time.standard_name = "time"
        lat.units = "degrees_north"
        lon.units = "degrees_east"
        part.cf_role = "trajectory_id"

        # Global attributes
        nc.featureType = "trajectory"
        nc.summary = str(summary)
        for key in kwargs:
            nc.__setattr__(key, kwargs.get(key))
        nc.sync()
        nc.close()
| axiom-data-science/paegan-transport | paegan/transport/export.py | Python | gpl-3.0 | 18,114 | [
"NetCDF"
] | fcc9da5cded49261d958d5ff31c410a6de582c1c7e533281a3927b3c29f02f3e |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test neurom.fst._core module'''
from copy import deepcopy
import numpy as np
from nose import tools as nt
import os
from neurom.fst import _core
from neurom import io as _io
# Directory containing this test module; all fixture paths hang off it.
_path = os.path.dirname(os.path.abspath(__file__))
DATA_ROOT = os.path.join(_path, '../../../test_data')
DATA_PATH = os.path.join(_path, '../../../test_data/valid_set')
# One morphology per supported format: SWC, HDF5 v1, HDF5 v2.
FILENAMES = [os.path.join(DATA_PATH, name)
             for name in ('Neuron.swc', 'Neuron_h5v1.h5', 'Neuron_h5v2.h5')]
def test_neuron_name():
    """FstNeuron must report the name it was constructed with."""
    data = _io.load_data(FILENAMES[0])
    neuron = _core.FstNeuron(data, '12af3rg')
    nt.eq_(neuron.name, '12af3rg')
def test_section_str():
    """str() of a Section must produce a plain string."""
    section = _core.Section('foo')
    nt.assert_true(isinstance(str(section), str))
def _check_cloned_neurites(a, b):
    """Assert *b* is a deep clone of neurite *a*: distinct objects, equal data."""
    nt.assert_true(a is not b)
    nt.assert_true(a.root_node is not b.root_node)
    nt.assert_equal(a.type, b.type)
    for sec_a, sec_b in zip(a.iter_sections(), b.iter_sections()):
        nt.assert_true(np.all(sec_a.points == sec_b.points))
def test_neuron_deepcopy():
    """deepcopy of a neuron clones the soma and every neurite."""
    neuron = _io.load_neuron(FILENAMES[0])
    clone = deepcopy(neuron)
    nt.assert_true(neuron is not clone)
    nt.assert_true(neuron.soma is not clone.soma)
    nt.assert_true(np.all(neuron.soma.points == clone.soma.points))
    nt.assert_true(np.all(neuron.soma.center == clone.soma.center))
    nt.assert_equal(neuron.soma.radius, clone.soma.radius)
    for original, copied in zip(neuron.neurites, clone.neurites):
        _check_cloned_neurites(original, copied)
def test_neurite_deepcopy():
    """deepcopy of a single neurite clones it and all its sections."""
    neurite = _io.load_neuron(FILENAMES[0]).neurites[0]
    clone = deepcopy(neurite)
    nt.assert_true(neurite is not clone)
    _check_cloned_neurites(neurite, clone)
| juanchopanza/NeuroM | neurom/fst/tests/test_core.py | Python | bsd-3-clause | 3,278 | [
"NEURON"
] | 7c3847ebb9051f7345290a6a2bf810f992d5585c3ad9f15cef9efbff8ff2bf6a |
from django.conf import settings
from django.db.models import F
from django.utils.module_loading import import_string
from wagtail_personalisation.models import Segment
from wagtail_personalisation.rules import AbstractBaseRule
from wagtail_personalisation.utils import create_segment_dictionary
class BaseSegmentsAdapter:
    """Common interface for segment storage backends.
    Subclasses persist which personalisation segments apply to a visitor
    and decide when those segments must be re-evaluated.
    """
    def __init__(self, request):
        """Keep a reference to the request so subclasses can use its
        session and user.
        :param request: The http request
        :type request: django.http.HttpRequest
        """
        self.request = request
    def setup(self):
        """Prepare the adapter for segment storage."""
    def get_segments(self):
        """Return the segments stored in the adapter storage."""
    def get_segment_by_id(self):
        """Return a single segment stored in the adapter storage."""
    def add(self):
        """Add a new segment to the adapter storage."""
    def refresh(self):
        """Refresh the segments stored in the adapter storage."""
    def _test_rules(self, rules, request, match_any=False):
        """Evaluate segment rules against the current request.
        :param rules: The rules to test for
        :type rules: list of wagtail_personalisation.rules
        :param request: The http request
        :type request: django.http.HttpRequest
        :param match_any: Whether matching a single rule is sufficient
        :type match_any: bool
        :returns: Whether the request satisfies the rules
        :rtype: bool
        """
        # An empty rule set never matches.
        if not rules:
            return False
        outcomes = (rule.test_user(request) for rule in rules)
        if match_any:
            return any(outcomes)
        return all(outcomes)
    class Meta:
        abstract = True
class SessionSegmentsAdapter(BaseSegmentsAdapter):
    """Segment adapter that uses Django's session backend."""
    def __init__(self, request):
        super(SessionSegmentsAdapter, self).__init__(request)
        # Ensure the session always carries a 'segments' list.
        self.request.session.setdefault('segments', [])
        # Per-request cache of Segment model instances under the default
        # "segments" key, to avoid re-querying the database.
        self._segment_cache = None
    def _segments(self, ids=None):
        # Queryset of enabled, persistent segments restricted to the
        # given primary keys (empty ids -> empty result set).
        if not ids:
            ids = []
        segments = (
            Segment.objects
            .enabled()
            .filter(persistent=True)
            .filter(pk__in=ids)
        )
        return segments
    def get_segments(self, key="segments"):
        """Return the persistent segments stored in the request session.
        :param key: The key under which the segments are stored
        :type key: String
        :returns: The segments in the request session
        :rtype: list of wagtail_personalisation.models.Segment or empty list
        """
        # Only the default key is backed by the in-memory cache.
        if key == "segments" and self._segment_cache is not None:
            return self._segment_cache
        if key not in self.request.session:
            return []
        # Session stores serialized dicts; rehydrate to model instances.
        raw_segments = self.request.session[key]
        segment_ids = [segment['id'] for segment in raw_segments]
        segments = self._segments(ids=segment_ids)
        result = list(segments)
        if key == "segments":
            self._segment_cache = result
        return result
    def set_segments(self, segments, key="segments"):
        """Set the currently active segments
        :param segments: The segments to set for the current request
        :type segments: list of wagtail_personalisation.models.Segment
        :param key: The key under which to store the segments. Optional
        :type key: String
        """
        cache_segments = []
        serialized_segments = []
        segment_ids = set()
        # De-duplicate by id while serializing for session storage.
        for segment in segments:
            serialized = create_segment_dictionary(segment)
            if serialized['id'] in segment_ids:
                continue
            cache_segments.append(segment)
            serialized_segments.append(serialized)
            segment_ids.add(segment.pk)
        self.request.session[key] = serialized_segments
        if key == "segments":
            self._segment_cache = cache_segments
    def get_segment_by_id(self, segment_id):
        """Find and return a single segment from the request session.
        :param segment_id: The primary key of the segment
        :type segment_id: int
        :returns: The matching segment
        :rtype: wagtail_personalisation.models.Segment or None
        """
        segments = self._segments(ids=[segment_id])
        if segments.exists():
            return segments.get()
    def add_page_visit(self, page):
        """Mark the page as visited by the user"""
        visit_count = self.request.session.setdefault('visit_count', [])
        page_visits = [visit for visit in visit_count if visit['id'] == page.pk]
        if page_visits:
            for page_visit in page_visits:
                page_visit['count'] += 1
                page_visit['path'] = page.url_path if page else self.request.path
            # Mutating dicts inside the session list is invisible to
            # Django's change tracking, so flag the session explicitly.
            self.request.session.modified = True
        else:
            # NOTE(review): this branch appends to the session-held list but
            # does not set session.modified; if 'visit_count' already existed
            # in the session, the new entry may never be saved — verify.
            visit_count.append({
                'slug': page.slug,
                'id': page.pk,
                'path': page.url_path if page else self.request.path,
                'count': 1,
            })
    def get_visit_count(self, page=None):
        """Return the number of visits on the current request or given page"""
        path = page.url_path if page else self.request.path
        visit_count = self.request.session.setdefault('visit_count', [])
        for visit in visit_count:
            if visit['path'] == path:
                return visit['count']
        return 0
    def update_visit_count(self):
        """Update the visit count for all segments in the request session."""
        segments = self.request.session['segments']
        segment_pks = [s['id'] for s in segments]
        # Update counts
        (Segment.objects
            .enabled()
            .filter(pk__in=segment_pks)
            .update(visit_count=F('visit_count') + 1))
    def refresh(self):
        """Retrieve the request session segments and verify whether or not they
        still apply to the requesting visitor.
        """
        enabled_segments = Segment.objects.enabled()
        rule_models = AbstractBaseRule.get_descendant_models()
        current_segments = self.get_segments()
        excluded_segments = self.get_segments("excluded_segments")
        # Drop any currently-held segment the visitor has been excluded from.
        current_segments = list(
            set(current_segments) - set(excluded_segments)
        )
        # Run tests on all remaining enabled segments to verify applicability.
        additional_segments = []
        for segment in enabled_segments:
            # Static segments the user is already a member of always apply.
            if segment.is_static and segment.static_users.filter(id=self.request.user.id).exists():
                additional_segments.append(segment)
            elif any((
                segment.excluded_users.filter(id=self.request.user.id).exists(),
                segment in excluded_segments
            )):
                continue
            elif not segment.is_static or not segment.is_full:
                # Gather this segment's rules across all rule subclasses.
                segment_rules = []
                for rule_model in rule_models:
                    segment_rules.extend(rule_model.objects.filter(segment=segment))
                result = self._test_rules(segment_rules, self.request,
                                          match_any=segment.match_any)
                # Matching users are randomised into (or excluded from) the
                # segment according to its configured probability.
                if result and segment.randomise_into_segment():
                    if segment.is_static and not segment.is_full:
                        if self.request.user.is_authenticated:
                            segment.static_users.add(self.request.user)
                    additional_segments.append(segment)
                elif result:
                    if segment.is_static and self.request.user.is_authenticated:
                        segment.excluded_users.add(self.request.user)
                    else:
                        excluded_segments += [segment]
        self.set_segments(current_segments + additional_segments)
        self.set_segments(excluded_segments, "excluded_segments")
        self.update_visit_count()
# Resolve the adapter class once at import time; projects can override the
# default with the PERSONALISATION_SEGMENTS_ADAPTER setting.
_ADAPTER_DOTTED_PATH = getattr(
    settings,
    'PERSONALISATION_SEGMENTS_ADAPTER',
    'wagtail_personalisation.adapters.SessionSegmentsAdapter')
SEGMENT_ADAPTER_CLASS = import_string(_ADAPTER_DOTTED_PATH)
def get_segment_adapter(request):
    """Return the Segment Adapter for the given request.
    The adapter is created lazily and memoized on the request object so
    every caller within one request shares the same instance.
    """
    try:
        return request.segment_adapter
    except AttributeError:
        request.segment_adapter = SEGMENT_ADAPTER_CLASS(request)
        return request.segment_adapter
| LabD/wagtail-personalisation | src/wagtail_personalisation/adapters.py | Python | mit | 8,495 | [
"VisIt"
] | 70f8a1fd91192913b73051cce17120fbfd3b351a711bbd09edc8397297cc4bad |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
# Public API of this module: estimator/model pairs plus their summary classes.
__all__ = ['AFTSurvivalRegression', 'AFTSurvivalRegressionModel',
           'DecisionTreeRegressor', 'DecisionTreeRegressionModel',
           'GBTRegressor', 'GBTRegressionModel',
           'GeneralizedLinearRegression', 'GeneralizedLinearRegressionModel',
           'GeneralizedLinearRegressionSummary', 'GeneralizedLinearRegressionTrainingSummary',
           'IsotonicRegression', 'IsotonicRegressionModel',
           'LinearRegression', 'LinearRegressionModel',
           'LinearRegressionSummary', 'LinearRegressionTrainingSummary',
           'RandomForestRegressor', 'RandomForestRegressionModel']
@inherit_doc
class LinearRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                       HasRegParam, HasTol, HasElasticNetParam, HasFitIntercept,
                       HasStandardization, HasSolver, HasWeightCol, HasAggregationDepth,
                       JavaMLWritable, JavaMLReadable):
    """
    Linear regression.
    The learning objective is to minimize the squared error, with regularization.
    The specific squared error loss function used is: L = 1/2n ||A coefficients - y||^2^
    This supports multiple types of regularization:
    * none (a.k.a. ordinary least squares)
    * L2 (ridge regression)
    * L1 (Lasso)
    * L2 + L1 (elastic net)
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, 2.0, Vectors.dense(1.0)),
    ...     (0.0, 2.0, Vectors.sparse(1, [], []))], ["label", "weight", "features"])
    >>> lr = LinearRegression(maxIter=5, regParam=0.0, solver="normal", weightCol="weight")
    >>> model = lr.fit(df)
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> abs(model.transform(test0).head().prediction - (-1.0)) < 0.001
    True
    >>> abs(model.coefficients[0] - 1.0) < 0.001
    True
    >>> abs(model.intercept - 0.0) < 0.001
    True
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> abs(model.transform(test1).head().prediction - 1.0) < 0.001
    True
    >>> lr.setParams("vector")
    Traceback (most recent call last):
        ...
    TypeError: Method setParams forces keyword arguments.
    >>> lr_path = temp_path + "/lr"
    >>> lr.save(lr_path)
    >>> lr2 = LinearRegression.load(lr_path)
    >>> lr2.getMaxIter()
    5
    >>> model_path = temp_path + "/lr_model"
    >>> model.save(model_path)
    >>> model2 = LinearRegressionModel.load(model_path)
    >>> model.coefficients[0] == model2.coefficients[0]
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.numFeatures
    1
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                 standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
        """
        super(LinearRegression, self).__init__()
        # Instantiate the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.LinearRegression", self.uid)
        self._setDefault(maxIter=100, regParam=0.0, tol=1e-6)
        # @keyword_only stored the caller's explicit kwargs in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
                  standardization=True, solver="auto", weightCol=None, aggregationDepth=2)
        Sets params for linear regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return LinearRegressionModel(java_model)
class LinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`LinearRegression`.
    .. versionadded:: 1.4.0
    """
    # All properties below delegate to the fitted JVM model via Py4J.
    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")
    @property
    @since("1.4.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")
    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, mse, r-squared ) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        java_lrt_summary = self._call_java("summary")
        return LinearRegressionTrainingSummary(java_lrt_summary)
    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")
    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.
        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        # Evaluation happens JVM-side; wrap the returned summary object.
        java_lr_summary = self._call_java("evaluate", dataset)
        return LinearRegressionSummary(java_lr_summary)
class LinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental
    Linear regression results evaluated on a dataset.
    .. versionadded:: 2.0.0
    """
    # Every property is a thin accessor onto the JVM-side summary object.
    @property
    @since("2.0.0")
    def predictions(self):
        """
        Dataframe outputted by the model's `transform` method.
        """
        return self._call_java("predictions")
    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in "predictions" which gives the predicted value of
        the label at each instance.
        """
        return self._call_java("predictionCol")
    @property
    @since("2.0.0")
    def labelCol(self):
        """
        Field in "predictions" which gives the true label of each
        instance.
        """
        return self._call_java("labelCol")
    @property
    @since("2.0.0")
    def featuresCol(self):
        """
        Field in "predictions" which gives the features of each instance
        as a vector.
        """
        return self._call_java("featuresCol")
    @property
    @since("2.0.0")
    def explainedVariance(self):
        """
        Returns the explained variance regression score.
        explainedVariance = 1 - variance(y - \\hat{y}) / variance(y)
        .. seealso:: `Wikipedia explain variation \
        <http://en.wikipedia.org/wiki/Explained_variation>`_
        Note: This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("explainedVariance")
    @property
    @since("2.0.0")
    def meanAbsoluteError(self):
        """
        Returns the mean absolute error, which is a risk function
        corresponding to the expected value of the absolute error
        loss or l1-norm loss.
        Note: This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("meanAbsoluteError")
    @property
    @since("2.0.0")
    def meanSquaredError(self):
        """
        Returns the mean squared error, which is a risk function
        corresponding to the expected value of the squared error
        loss or quadratic loss.
        Note: This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("meanSquaredError")
    @property
    @since("2.0.0")
    def rootMeanSquaredError(self):
        """
        Returns the root mean squared error, which is defined as the
        square root of the mean squared error.
        Note: This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("rootMeanSquaredError")
    @property
    @since("2.0.0")
    def r2(self):
        """
        Returns R^2^, the coefficient of determination.
        .. seealso:: `Wikipedia coefficient of determination \
        <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
        Note: This ignores instance weights (setting all to 1.0) from
        `LinearRegression.weightCol`. This will change in later Spark
        versions.
        """
        return self._call_java("r2")
    @property
    @since("2.0.0")
    def residuals(self):
        """
        Residuals (label - predicted value)
        """
        return self._call_java("residuals")
    @property
    @since("2.0.0")
    def numInstances(self):
        """
        Number of instances in DataFrame predictions
        """
        return self._call_java("numInstances")
    @property
    @since("2.0.0")
    def devianceResiduals(self):
        """
        The weighted residuals, the usual residuals rescaled by the
        square root of the instance weights.
        """
        return self._call_java("devianceResiduals")
    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("coefficientStandardErrors")
    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("tValues")
    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.
        This value is only available when using the "normal" solver.
        If :py:attr:`LinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("pValues")
@inherit_doc
class LinearRegressionTrainingSummary(LinearRegressionSummary):
    """
    .. note:: Experimental
    Linear regression training results. Currently, the training summary ignores the
    training weights except for the objective trace.
    .. versionadded:: 2.0.0
    """
    @property
    @since("2.0.0")
    def objectiveHistory(self):
        """
        Objective function (scaled loss + regularization) at each
        iteration.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("objectiveHistory")
    @property
    @since("2.0.0")
    def totalIterations(self):
        """
        Number of training iterations until termination.
        This value is only available when using the "l-bfgs" solver.
        .. seealso:: :py:attr:`LinearRegression.solver`
        """
        return self._call_java("totalIterations")
@inherit_doc
class IsotonicRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                         HasWeightCol, JavaMLWritable, JavaMLReadable):
    """
    Currently implemented using parallelized pool adjacent violators algorithm.
    Only univariate (single feature) algorithm supported.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> ir = IsotonicRegression()
    >>> model = ir.fit(df)
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> model.boundaries
    DenseVector([0.0, 1.0])
    >>> ir_path = temp_path + "/ir"
    >>> ir.save(ir_path)
    >>> ir2 = IsotonicRegression.load(ir_path)
    >>> ir2.getIsotonic()
    True
    >>> model_path = temp_path + "/ir_model"
    >>> model.save(model_path)
    >>> model2 = IsotonicRegressionModel.load(model_path)
    >>> model.boundaries == model2.boundaries
    True
    >>> model.predictions == model2.predictions
    True
    .. versionadded:: 1.6.0
    """
    # Algorithm-specific Params (shared Params come from the mixins above).
    isotonic = \
        Param(Params._dummy(), "isotonic",
              "whether the output sequence should be isotonic/increasing (true) or" +
              "antitonic/decreasing (false).", typeConverter=TypeConverters.toBoolean)
    featureIndex = \
        Param(Params._dummy(), "featureIndex",
              "The index of the feature if featuresCol is a vector column, no effect otherwise.",
              typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 weightCol=None, isotonic=True, featureIndex=0):
        """
        super(IsotonicRegression, self).__init__()
        # Instantiate the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.IsotonicRegression", self.uid)
        self._setDefault(isotonic=True, featureIndex=0)
        # @keyword_only stored the caller's explicit kwargs in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  weightCol=None, isotonic=True, featureIndex=0):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  weightCol=None, isotonic=True, featureIndex=0):
        Set the params for IsotonicRegression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return IsotonicRegressionModel(java_model)
    def setIsotonic(self, value):
        """
        Sets the value of :py:attr:`isotonic`.
        """
        return self._set(isotonic=value)
    def getIsotonic(self):
        """
        Gets the value of isotonic or its default value.
        """
        return self.getOrDefault(self.isotonic)
    def setFeatureIndex(self, value):
        """
        Sets the value of :py:attr:`featureIndex`.
        """
        return self._set(featureIndex=value)
    def getFeatureIndex(self):
        """
        Gets the value of featureIndex or its default value.
        """
        return self.getOrDefault(self.featureIndex)
class IsotonicRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`IsotonicRegression`.
    .. versionadded:: 1.6.0
    """
    # Both properties delegate to the fitted JVM model via Py4J.
    @property
    @since("1.6.0")
    def boundaries(self):
        """
        Boundaries in increasing order for which predictions are known.
        """
        return self._call_java("boundaries")
    @property
    @since("1.6.0")
    def predictions(self):
        """
        Predictions associated with the boundaries at the same index, monotone because of isotonic
        regression.
        """
        return self._call_java("predictions")
class TreeEnsembleParams(DecisionTreeParams):
    """
    Mixin for Decision Tree-based ensemble algorithms parameters.
    """
    subsamplingRate = Param(Params._dummy(), "subsamplingRate", "Fraction of the training data " +
                            "used for learning each decision tree, in range (0, 1].",
                            typeConverter=TypeConverters.toFloat)
    def __init__(self):
        super(TreeEnsembleParams, self).__init__()
    @since("1.4.0")
    def setSubsamplingRate(self, value):
        """
        Sets the value of :py:attr:`subsamplingRate`.
        """
        return self._set(subsamplingRate=value)
    @since("1.4.0")
    def getSubsamplingRate(self):
        """
        Gets the value of subsamplingRate or its default value.
        """
        return self.getOrDefault(self.subsamplingRate)
class TreeRegressorParams(Params):
    """
    Private class to track supported impurity measures.
    """
    # Only "variance" is valid for regression trees.
    supportedImpurities = ["variance"]
    impurity = Param(Params._dummy(), "impurity",
                     "Criterion used for information gain calculation (case-insensitive). " +
                     "Supported options: " +
                     ", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
    def __init__(self):
        super(TreeRegressorParams, self).__init__()
    @since("1.4.0")
    def setImpurity(self, value):
        """
        Sets the value of :py:attr:`impurity`.
        """
        return self._set(impurity=value)
    @since("1.4.0")
    def getImpurity(self):
        """
        Gets the value of impurity or its default value.
        """
        return self.getOrDefault(self.impurity)
class RandomForestParams(TreeEnsembleParams):
    """
    Private class to track supported random forest parameters.
    """
    supportedFeatureSubsetStrategies = ["auto", "all", "onethird", "sqrt", "log2"]
    numTrees = Param(Params._dummy(), "numTrees", "Number of trees to train (>= 1).",
                     typeConverter=TypeConverters.toInt)
    featureSubsetStrategy = \
        Param(Params._dummy(), "featureSubsetStrategy",
              "The number of features to consider for splits at each tree node. Supported " +
              "options: " + ", ".join(supportedFeatureSubsetStrategies) + " (0.0-1.0], [1-n].",
              typeConverter=TypeConverters.toString)
    def __init__(self):
        super(RandomForestParams, self).__init__()
    @since("1.4.0")
    def setNumTrees(self, value):
        """
        Sets the value of :py:attr:`numTrees`.
        """
        return self._set(numTrees=value)
    @since("1.4.0")
    def getNumTrees(self):
        """
        Gets the value of numTrees or its default value.
        """
        return self.getOrDefault(self.numTrees)
    @since("1.4.0")
    def setFeatureSubsetStrategy(self, value):
        """
        Sets the value of :py:attr:`featureSubsetStrategy`.
        """
        return self._set(featureSubsetStrategy=value)
    @since("1.4.0")
    def getFeatureSubsetStrategy(self):
        """
        Gets the value of featureSubsetStrategy or its default value.
        """
        return self.getOrDefault(self.featureSubsetStrategy)
class GBTParams(TreeEnsembleParams):
    """
    Private class to track supported GBT params.
    """
    # Loss functions supported by gradient-boosted tree regression.
    supportedLossTypes = ["squared", "absolute"]
@inherit_doc
class DecisionTreeRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                            DecisionTreeParams, TreeRegressorParams, HasCheckpointInterval,
                            HasSeed, JavaMLWritable, JavaMLReadable, HasVarianceCol):
    """
    `Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> dt = DecisionTreeRegressor(maxDepth=2, varianceCol="variance")
    >>> model = dt.fit(df)
    >>> model.depth
    1
    >>> model.numNodes
    3
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> dtr_path = temp_path + "/dtr"
    >>> dt.save(dtr_path)
    >>> dt2 = DecisionTreeRegressor.load(dtr_path)
    >>> dt2.getMaxDepth()
    2
    >>> model_path = temp_path + "/dtr_model"
    >>> model.save(model_path)
    >>> model2 = DecisionTreeRegressionModel.load(model_path)
    >>> model.numNodes == model2.numNodes
    True
    >>> model.depth == model2.depth
    True
    >>> model.transform(test1).head().variance
    0.0
    .. versionadded:: 1.4.0
    """
    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="variance",
                 seed=None, varianceCol=None):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                 impurity="variance", seed=None, varianceCol=None)
        """
        super(DecisionTreeRegressor, self).__init__()
        # Instantiate the JVM-side estimator that backs this Python wrapper.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.DecisionTreeRegressor", self.uid)
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                         impurity="variance")
        # @keyword_only stored the caller's explicit kwargs in _input_kwargs.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", seed=None, varianceCol=None):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", seed=None, varianceCol=None)
        Sets params for the DecisionTreeRegressor.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        # Wrap the fitted JVM model in its Python counterpart.
        return DecisionTreeRegressionModel(java_model)
@inherit_doc
class DecisionTreeModel(JavaModel, JavaPredictionModel):
    """
    Abstraction for Decision Tree models.
    .. versionadded:: 1.5.0
    """
    # All accessors delegate to the underlying JVM model via Py4J.
    @property
    @since("1.5.0")
    def numNodes(self):
        """Return number of nodes of the decision tree."""
        return self._call_java("numNodes")
    @property
    @since("1.5.0")
    def depth(self):
        """Return depth of the decision tree."""
        return self._call_java("depth")
    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")
    def __repr__(self):
        return self._call_java("toString")
@inherit_doc
class TreeEnsembleModel(JavaModel):
    """
    (private abstraction)
    Represents a tree ensemble model.
    """
    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Wrap each JVM tree in the generic Python DecisionTreeModel.
        return [DecisionTreeModel(m) for m in list(self._call_java("trees"))]
    @property
    @since("2.0.0")
    def getNumTrees(self):
        """Number of trees in ensemble."""
        return self._call_java("getNumTrees")
    @property
    @since("1.5.0")
    def treeWeights(self):
        """Return the weights for each tree"""
        return list(self._call_java("javaTreeWeights"))
    @property
    @since("2.0.0")
    def totalNumNodes(self):
        """Total number of nodes, summed over all trees in the ensemble."""
        return self._call_java("totalNumNodes")
    @property
    @since("2.0.0")
    def toDebugString(self):
        """Full description of model."""
        return self._call_java("toDebugString")
    def __repr__(self):
        return self._call_java("toString")
@inherit_doc
class DecisionTreeRegressionModel(DecisionTreeModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`DecisionTreeRegressor`.
    .. versionadded:: 1.4.0
    """
    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.
        This generalizes the idea of "Gini" importance to other losses,
        following the explanation of Gini importance from "Random Forests" documentation
        by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
        This feature importance is calculated as follows:
          - importance(feature j) = sum (over nodes which split on feature j) of the gain,
            where gain is scaled by the number of instances passing through node
          - Normalize importances for tree to sum to 1.
        Note: Feature importance for single decision trees can have high variance due to
        correlated predictor variables. Consider using a :py:class:`RandomForestRegressor`
        to determine feature importance instead.
        """
        return self._call_java("featureImportances")
@inherit_doc
class RandomForestRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
RandomForestParams, TreeRegressorParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for regression.
It supports both continuous and categorical features.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> rf = RandomForestRegressor(numTrees=2, maxDepth=2, seed=42)
>>> model = rf.fit(df)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> model.numFeatures
1
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
>>> model.getNumTrees
2
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
0.5
>>> rfr_path = temp_path + "/rfr"
>>> rf.save(rfr_path)
>>> rf2 = RandomForestRegressor.load(rfr_path)
>>> rf2.getNumTrees()
2
>>> model_path = temp_path + "/rfr_model"
>>> model.save(model_path)
>>> model2 = RandomForestRegressionModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
featureSubsetStrategy="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
featureSubsetStrategy="auto")
"""
super(RandomForestRegressor, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.regression.RandomForestRegressor", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="variance", subsamplingRate=1.0, numTrees=20,
featureSubsetStrategy="auto")
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20,
                  featureSubsetStrategy="auto"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
                  impurity="variance", subsamplingRate=1.0, seed=None, numTrees=20, \
                  featureSubsetStrategy="auto")
        Sets params for random forest regression.
        """
        # Only kwargs that the caller passed explicitly are applied; the
        # signature defaults above are documentation, not applied values.
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
    def _create_model(self, java_model):
        """Wrap the fitted JVM model in its Python counterpart."""
        return RandomForestRegressionModel(java_model)
class RandomForestRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
                                  JavaMLReadable):
    """
    Model fitted by :class:`RandomForestRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Each JVM tree model is wrapped in a Python DecisionTreeRegressionModel.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        # Delegates entirely to the JVM model.
        return self._call_java("featureImportances")
@inherit_doc
class GBTRegressor(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
                   GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
                   JavaMLReadable, TreeRegressorParams):
    """
    `Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
    learning algorithm for regression.
    It supports both continuous and categorical features.

    >>> from numpy import allclose
    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0)),
    ...     (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
    >>> gbt = GBTRegressor(maxIter=5, maxDepth=2, seed=42)
    >>> print(gbt.getImpurity())
    variance
    >>> model = gbt.fit(df)
    >>> model.featureImportances
    SparseVector(1, {0: 1.0})
    >>> model.numFeatures
    1
    >>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
    True
    >>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
    >>> model.transform(test0).head().prediction
    0.0
    >>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
    >>> model.transform(test1).head().prediction
    1.0
    >>> gbtr_path = temp_path + "gbtr"
    >>> gbt.save(gbtr_path)
    >>> gbt2 = GBTRegressor.load(gbtr_path)
    >>> gbt2.getMaxDepth()
    2
    >>> model_path = temp_path + "gbtr_model"
    >>> model.save(model_path)
    >>> model2 = GBTRegressionModel.load(model_path)
    >>> model.featureImportances == model2.featureImportances
    True
    >>> model.treeWeights == model2.treeWeights
    True
    >>> model.trees
    [DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]

    .. versionadded:: 1.4.0
    """

    lossType = Param(Params._dummy(), "lossType",
                     "Loss function which GBT tries to minimize (case-insensitive). " +
                     "Supported options: " + ", ".join(GBTParams.supportedLossTypes),
                     typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                 impurity="variance"):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                 maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                 checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                 impurity="variance")
        """
        super(GBTRegressor, self).__init__()
        # Create the JVM-side estimator that this Python object wraps.
        self._java_obj = self._new_java_obj("org.apache.spark.ml.regression.GBTRegressor", self.uid)
        # Defaults mirror the signature; 'seed' is resolved on the JVM side.
        self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                         maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                         checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1,
                         impurity="variance")
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.4.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0,
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None,
                  impurity="variance"):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
                  maxMemoryInMB=256, cacheNodeIds=False, subsamplingRate=1.0, \
                  checkpointInterval=10, lossType="squared", maxIter=20, stepSize=0.1, seed=None, \
                  impurity="variance")
        Sets params for Gradient Boosted Tree Regression.
        """
        # NOTE: this parameter was previously misspelled 'impuriy', which made
        # GBTRegressor(impurity=...) and setParams(impurity=...) raise
        # TypeError (unexpected keyword argument).
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in its Python counterpart."""
        return GBTRegressionModel(java_model)

    @since("1.4.0")
    def setLossType(self, value):
        """
        Sets the value of :py:attr:`lossType`.
        """
        return self._set(lossType=value)

    @since("1.4.0")
    def getLossType(self):
        """
        Gets the value of lossType or its default value.
        """
        return self.getOrDefault(self.lossType)
class GBTRegressionModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable, JavaMLReadable):
    """
    Model fitted by :class:`GBTRegressor`.

    .. versionadded:: 1.4.0
    """

    @property
    @since("2.0.0")
    def featureImportances(self):
        """
        Estimate of the importance of each feature.

        Each feature's importance is the average of its importance across all trees in the ensemble
        The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
        (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
        and follows the implementation from scikit-learn.

        .. seealso:: :py:attr:`DecisionTreeRegressionModel.featureImportances`
        """
        # Delegates entirely to the JVM model.
        return self._call_java("featureImportances")

    @property
    @since("2.0.0")
    def trees(self):
        """Trees in this ensemble. Warning: These have null parent Estimators."""
        # Each JVM tree model is wrapped in a Python DecisionTreeRegressionModel.
        return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class AFTSurvivalRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
                            HasFitIntercept, HasMaxIter, HasTol, HasAggregationDepth,
                            JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Accelerated Failure Time (AFT) Model Survival Regression

    Fit a parametric AFT survival regression model based on the Weibull distribution
    of the survival time.

    .. seealso:: `AFT Model <https://en.wikipedia.org/wiki/Accelerated_failure_time_model>`_

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(1.0), 1.0),
    ...     (0.0, Vectors.sparse(1, [], []), 0.0)], ["label", "features", "censor"])
    >>> aftsr = AFTSurvivalRegression()
    >>> model = aftsr.fit(df)
    >>> model.predict(Vectors.dense(6.3))
    1.0
    >>> model.predictQuantiles(Vectors.dense(6.3))
    DenseVector([0.0101, 0.0513, 0.1054, 0.2877, 0.6931, 1.3863, 2.3026, 2.9957, 4.6052])
    >>> model.transform(df).show()
    +-----+---------+------+----------+
    |label| features|censor|prediction|
    +-----+---------+------+----------+
    |  1.0|    [1.0]|   1.0|       1.0|
    |  0.0|(1,[],[])|   0.0|       1.0|
    +-----+---------+------+----------+
    ...
    >>> aftsr_path = temp_path + "/aftsr"
    >>> aftsr.save(aftsr_path)
    >>> aftsr2 = AFTSurvivalRegression.load(aftsr_path)
    >>> aftsr2.getMaxIter()
    100
    >>> model_path = temp_path + "/aftsr_model"
    >>> model.save(model_path)
    >>> model2 = AFTSurvivalRegressionModel.load(model_path)
    >>> model.coefficients == model2.coefficients
    True
    >>> model.intercept == model2.intercept
    True
    >>> model.scale == model2.scale
    True

    .. versionadded:: 1.6.0
    """

    censorCol = Param(Params._dummy(), "censorCol",
                      "censor column name. The value of this column could be 0 or 1. " +
                      "If the value is 1, it means the event has occurred i.e. " +
                      "uncensored; otherwise censored.", typeConverter=TypeConverters.toString)
    quantileProbabilities = \
        Param(Params._dummy(), "quantileProbabilities",
              "quantile probabilities array. Values of the quantile probabilities array " +
              "should be in the range (0, 1) and the array should be non-empty.",
              typeConverter=TypeConverters.toListFloat)
    quantilesCol = Param(Params._dummy(), "quantilesCol",
                         "quantiles column name. This column will output quantiles of " +
                         "corresponding quantileProbabilities if it is set.",
                         typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                 quantilesCol=None, aggregationDepth=2):
        """
        __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                 fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                 quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                 quantilesCol=None, aggregationDepth=2)
        """
        # The list default above is never mutated; with @keyword_only only
        # explicitly-passed kwargs are applied, so it is purely documentation.
        super(AFTSurvivalRegression, self).__init__()
        # Create the JVM-side estimator that this Python object wraps.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.AFTSurvivalRegression", self.uid)
        self._setDefault(censorCol="censor",
                         quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                         maxIter=100, tol=1E-6)
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("1.6.0")
    def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor",
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99],
                  quantilesCol=None, aggregationDepth=2):
        """
        setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
                  fitIntercept=True, maxIter=100, tol=1E-6, censorCol="censor", \
                  quantileProbabilities=[0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99], \
                  quantilesCol=None, aggregationDepth=2)
        Sets params for accelerated failure time (AFT) survival regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in its Python counterpart."""
        return AFTSurvivalRegressionModel(java_model)

    @since("1.6.0")
    def setCensorCol(self, value):
        """
        Sets the value of :py:attr:`censorCol`.
        """
        return self._set(censorCol=value)

    @since("1.6.0")
    def getCensorCol(self):
        """
        Gets the value of censorCol or its default value.
        """
        return self.getOrDefault(self.censorCol)

    @since("1.6.0")
    def setQuantileProbabilities(self, value):
        """
        Sets the value of :py:attr:`quantileProbabilities`.
        """
        return self._set(quantileProbabilities=value)

    @since("1.6.0")
    def getQuantileProbabilities(self):
        """
        Gets the value of quantileProbabilities or its default value.
        """
        return self.getOrDefault(self.quantileProbabilities)

    @since("1.6.0")
    def setQuantilesCol(self, value):
        """
        Sets the value of :py:attr:`quantilesCol`.
        """
        return self._set(quantilesCol=value)

    @since("1.6.0")
    def getQuantilesCol(self):
        """
        Gets the value of quantilesCol or its default value.
        """
        return self.getOrDefault(self.quantilesCol)
class AFTSurvivalRegressionModel(JavaModel, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`AFTSurvivalRegression`.

    .. versionadded:: 1.6.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("1.6.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("1.6.0")
    def scale(self):
        """
        Model scale parameter.
        """
        return self._call_java("scale")

    @since("2.0.0")
    def predictQuantiles(self, features):
        """
        Predicted Quantiles
        """
        # Quantiles of the predicted survival-time distribution for one
        # feature vector, delegated to the JVM model.
        return self._call_java("predictQuantiles", features)

    @since("2.0.0")
    def predict(self, features):
        """
        Predicted value
        """
        return self._call_java("predict", features)
@inherit_doc
class GeneralizedLinearRegression(JavaEstimator, HasLabelCol, HasFeaturesCol, HasPredictionCol,
                                  HasFitIntercept, HasMaxIter, HasTol, HasRegParam, HasWeightCol,
                                  HasSolver, JavaMLWritable, JavaMLReadable):
    """
    .. note:: Experimental

    Generalized Linear Regression.

    Fit a Generalized Linear Model specified by giving a symbolic description of the linear
    predictor (link function) and a description of the error distribution (family). It supports
    "gaussian", "binomial", "poisson" and "gamma" as family. Valid link functions for each family
    is listed below. The first link function of each family is the default one.

    * "gaussian" -> "identity", "log", "inverse"

    * "binomial" -> "logit", "probit", "cloglog"

    * "poisson"  -> "log", "identity", "sqrt"

    * "gamma"    -> "inverse", "identity", "log"

    .. seealso:: `GLM <https://en.wikipedia.org/wiki/Generalized_linear_model>`_

    >>> from pyspark.ml.linalg import Vectors
    >>> df = spark.createDataFrame([
    ...     (1.0, Vectors.dense(0.0, 0.0)),
    ...     (1.0, Vectors.dense(1.0, 2.0)),
    ...     (2.0, Vectors.dense(0.0, 0.0)),
    ...     (2.0, Vectors.dense(1.0, 1.0)),], ["label", "features"])
    >>> glr = GeneralizedLinearRegression(family="gaussian", link="identity", linkPredictionCol="p")
    >>> model = glr.fit(df)
    >>> transformed = model.transform(df)
    >>> abs(transformed.head().prediction - 1.5) < 0.001
    True
    >>> abs(transformed.head().p - 1.5) < 0.001
    True
    >>> model.coefficients
    DenseVector([1.5..., -1.0...])
    >>> model.numFeatures
    2
    >>> abs(model.intercept - 1.5) < 0.001
    True
    >>> glr_path = temp_path + "/glr"
    >>> glr.save(glr_path)
    >>> glr2 = GeneralizedLinearRegression.load(glr_path)
    >>> glr.getFamily() == glr2.getFamily()
    True
    >>> model_path = temp_path + "/glr_model"
    >>> model.save(model_path)
    >>> model2 = GeneralizedLinearRegressionModel.load(model_path)
    >>> model.intercept == model2.intercept
    True
    >>> model.coefficients[0] == model2.coefficients[0]
    True

    .. versionadded:: 2.0.0
    """

    family = Param(Params._dummy(), "family", "The name of family which is a description of " +
                   "the error distribution to be used in the model. Supported options: " +
                   "gaussian (default), binomial, poisson and gamma.",
                   typeConverter=TypeConverters.toString)
    link = Param(Params._dummy(), "link", "The name of link function which provides the " +
                 "relationship between the linear predictor and the mean of the distribution " +
                 "function. Supported options: identity, log, inverse, logit, probit, cloglog " +
                 "and sqrt.", typeConverter=TypeConverters.toString)
    linkPredictionCol = Param(Params._dummy(), "linkPredictionCol", "link prediction (linear " +
                              "predictor) column name", typeConverter=TypeConverters.toString)

    @keyword_only
    def __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None):
        """
        __init__(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                 family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                 regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None)
        """
        super(GeneralizedLinearRegression, self).__init__()
        # Create the JVM-side estimator that this Python object wraps.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.regression.GeneralizedLinearRegression", self.uid)
        # 'link' has no default because its valid values depend on 'family'.
        self._setDefault(family="gaussian", maxIter=25, tol=1e-6, regParam=0.0, solver="irls")
        # @keyword_only stored only the explicitly-passed kwargs here.
        kwargs = self.__init__._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    @since("2.0.0")
    def setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction",
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6,
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None):
        """
        setParams(self, labelCol="label", featuresCol="features", predictionCol="prediction", \
                  family="gaussian", link=None, fitIntercept=True, maxIter=25, tol=1e-6, \
                  regParam=0.0, weightCol=None, solver="irls", linkPredictionCol=None)
        Sets params for generalized linear regression.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)

    def _create_model(self, java_model):
        """Wrap the fitted JVM model in its Python counterpart."""
        return GeneralizedLinearRegressionModel(java_model)

    @since("2.0.0")
    def setFamily(self, value):
        """
        Sets the value of :py:attr:`family`.
        """
        return self._set(family=value)

    @since("2.0.0")
    def getFamily(self):
        """
        Gets the value of family or its default value.
        """
        return self.getOrDefault(self.family)

    @since("2.0.0")
    def setLinkPredictionCol(self, value):
        """
        Sets the value of :py:attr:`linkPredictionCol`.
        """
        return self._set(linkPredictionCol=value)

    @since("2.0.0")
    def getLinkPredictionCol(self):
        """
        Gets the value of linkPredictionCol or its default value.
        """
        return self.getOrDefault(self.linkPredictionCol)

    @since("2.0.0")
    def setLink(self, value):
        """
        Sets the value of :py:attr:`link`.
        """
        return self._set(link=value)

    @since("2.0.0")
    def getLink(self):
        """
        Gets the value of link or its default value.
        """
        return self.getOrDefault(self.link)
class GeneralizedLinearRegressionModel(JavaModel, JavaPredictionModel, JavaMLWritable,
                                       JavaMLReadable):
    """
    .. note:: Experimental

    Model fitted by :class:`GeneralizedLinearRegression`.

    .. versionadded:: 2.0.0
    """

    @property
    @since("2.0.0")
    def coefficients(self):
        """
        Model coefficients.
        """
        return self._call_java("coefficients")

    @property
    @since("2.0.0")
    def intercept(self):
        """
        Model intercept.
        """
        return self._call_java("intercept")

    @property
    @since("2.0.0")
    def summary(self):
        """
        Gets summary (e.g. residuals, deviance, pValues) of model on
        training set. An exception is thrown if
        `trainingSummary is None`.
        """
        # The JVM-side summary object is wrapped in the Python training-summary class.
        java_glrt_summary = self._call_java("summary")
        return GeneralizedLinearRegressionTrainingSummary(java_glrt_summary)

    @property
    @since("2.0.0")
    def hasSummary(self):
        """
        Indicates whether a training summary exists for this model
        instance.
        """
        return self._call_java("hasSummary")

    @since("2.0.0")
    def evaluate(self, dataset):
        """
        Evaluates the model on a test dataset.

        :param dataset:
          Test dataset to evaluate model on, where dataset is an
          instance of :py:class:`pyspark.sql.DataFrame`
        :raises ValueError: if ``dataset`` is not a DataFrame.
        """
        if not isinstance(dataset, DataFrame):
            raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
        java_glr_summary = self._call_java("evaluate", dataset)
        return GeneralizedLinearRegressionSummary(java_glr_summary)
class GeneralizedLinearRegressionSummary(JavaWrapper):
    """
    .. note:: Experimental

    Generalized linear regression results evaluated on a dataset.

    .. versionadded:: 2.0.0
    """

    # All properties below are thin read-only delegates to the JVM summary.

    @property
    @since("2.0.0")
    def predictions(self):
        """
        Predictions output by the model's `transform` method.
        """
        return self._call_java("predictions")

    @property
    @since("2.0.0")
    def predictionCol(self):
        """
        Field in :py:attr:`predictions` which gives the predicted value of each instance.
        This is set to a new column name if the original model's `predictionCol` is not set.
        """
        return self._call_java("predictionCol")

    @property
    @since("2.0.0")
    def rank(self):
        """
        The numeric rank of the fitted linear model.
        """
        return self._call_java("rank")

    @property
    @since("2.0.0")
    def degreesOfFreedom(self):
        """
        Degrees of freedom.
        """
        return self._call_java("degreesOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedom(self):
        """
        The residual degrees of freedom.
        """
        return self._call_java("residualDegreeOfFreedom")

    @property
    @since("2.0.0")
    def residualDegreeOfFreedomNull(self):
        """
        The residual degrees of freedom for the null model.
        """
        return self._call_java("residualDegreeOfFreedomNull")

    @since("2.0.0")
    def residuals(self, residualsType="deviance"):
        """
        Get the residuals of the fitted model by type.

        :param residualsType: The type of residuals which should be returned.
                              Supported options: deviance (default), pearson, working, and response.
        """
        return self._call_java("residuals", residualsType)

    @property
    @since("2.0.0")
    def nullDeviance(self):
        """
        The deviance for the null model.
        """
        return self._call_java("nullDeviance")

    @property
    @since("2.0.0")
    def deviance(self):
        """
        The deviance for the fitted model.
        """
        return self._call_java("deviance")

    @property
    @since("2.0.0")
    def dispersion(self):
        """
        The dispersion of the fitted model.
        It is taken as 1.0 for the "binomial" and "poisson" families, and otherwise
        estimated by the residual Pearson's Chi-Squared statistic (which is defined as
        sum of the squares of the Pearson residuals) divided by the residual degrees of freedom.
        """
        return self._call_java("dispersion")

    @property
    @since("2.0.0")
    def aic(self):
        """
        Akaike's "An Information Criterion"(AIC) for the fitted model.
        """
        return self._call_java("aic")
@inherit_doc
class GeneralizedLinearRegressionTrainingSummary(GeneralizedLinearRegressionSummary):
    """
    .. note:: Experimental

    Generalized linear regression training results.

    .. versionadded:: 2.0.0
    """

    # Extends the base summary with training-only statistics; all properties
    # are read-only delegates to the JVM training summary.

    @property
    @since("2.0.0")
    def numIterations(self):
        """
        Number of training iterations.
        """
        return self._call_java("numIterations")

    @property
    @since("2.0.0")
    def solver(self):
        """
        The numeric solver used for training.
        """
        return self._call_java("solver")

    @property
    @since("2.0.0")
    def coefficientStandardErrors(self):
        """
        Standard error of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("coefficientStandardErrors")

    @property
    @since("2.0.0")
    def tValues(self):
        """
        T-statistic of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("tValues")

    @property
    @since("2.0.0")
    def pValues(self):
        """
        Two-sided p-value of estimated coefficients and intercept.

        If :py:attr:`GeneralizedLinearRegression.fitIntercept` is set to True,
        then the last element returned corresponds to the intercept.
        """
        return self._call_java("pValues")
if __name__ == "__main__":
    # Run this module's doctests against a throwaway local SparkSession.
    import doctest
    import pyspark.ml.regression
    from pyspark.sql import SparkSession
    globs = pyspark.ml.regression.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.regression tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    import tempfile
    # Doctests save and reload estimators/models below this directory.
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always clean up the temporary directory, even if doctests fail.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        exit(-1)
| likithkailas/StreamingSystems | python/pyspark/ml/regression.py | Python | apache-2.0 | 58,760 | [
"Gaussian"
] | 5178fd0b3f8ae3102682bf7bab8c1eb881672d1cf3bb0427cc64b6296ce1e9ae |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)

import itertools

import numpy as np
from scipy.spatial import ConvexHull, Voronoi

from .volume import Volume
__author__ = 'Yuji Ikeda'
__version__ = '0.1.0'
class VolumeVoronoi(Volume):
    """Per-atom volumes from a Voronoi tessellation of a periodic structure."""

    def generate_atomic_volume(self, prec=1e-6):
        """Compute the Voronoi volume of each atom into ``self._data['volume']``.

        The cell is expanded with all 26 neighboring periodic images so that
        every *original* atom ends up with a closed (finite) Voronoi region.

        :param prec: currently unused.  NOTE(review): either apply it as a
            numerical tolerance or remove it -- TODO confirm intent.
        :raises ValueError: if an original atom's Voronoi region is still
            open (touches the point at infinity).
        """
        atoms = self._atoms
        cell = atoms.get_cell()
        natoms = atoms.get_number_of_atoms()
        scaled_positions = atoms.get_scaled_positions()
        # All 27 image shifts.  product((0, 1, -1), repeat=3) yields (0, 0, 0)
        # first, so after the reshape below the first `natoms` points are the
        # original atoms; this ordering reproduces the previous hand-written
        # 27-entry table exactly.
        expansion = np.array(list(itertools.product((0, 1, -1), repeat=3)))
        scaled_positions_expanded = np.reshape(
            scaled_positions[None, None, :, :] + expansion[None, :, None, :],
            (-1, 3))
        positions_expanded = np.dot(scaled_positions_expanded, cell)
        voronoi = Voronoi(positions_expanded)
        volumes_atom = np.full(natoms, np.nan)
        for i in range(natoms):
            j = voronoi.point_region[i]
            region = voronoi.regions[j]
            if np.all(np.asarray(region) >= 0):
                vertices = voronoi.vertices[region]
                # Since a Voronoi cell is always a convex polyhedron,
                # we can obtain its volume using ConvexHull.
                # https://stackoverflow.com/questions/17129115
                volume = ConvexHull(vertices).volume
                volumes_atom[i] = volume
            else:
                raise ValueError('Region includes infinite point')
        self._data['volume'] = volumes_atom

    def _create_header(self):
        """The output file has no header line."""
        return ''

    def _create_filename(self):
        """Name of the data file written by the base class."""
        return 'atomic_volume.dat'
def main():
    """Command-line entry point: Voronoi atomic volumes for a POSCAR file."""
    import argparse
    from phonopy.interface.vasp import read_vasp

    parser = argparse.ArgumentParser()
    parser.add_argument('atoms',
                        type=str,
                        help="POSCAR")
    arguments = parser.parse_args()
    structure = read_vasp(arguments.atoms)
    VolumeVoronoi(structure).run()
if __name__ == '__main__':
    # Allow running this module directly as a script.
    main()
| yuzie007/ph_analysis | ph_analysis/structure/volume_voronoi.py | Python | mit | 2,740 | [
"VASP",
"phonopy"
] | c32db69598fa14774e5d4b55c1ada5a161ff1914b9993b150d0b617461f28c9f |
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import print_function, division
import os
import pkg_resources
import numpy as np
import unittest
from molmod import centimeter, amu, kjmol, joule, mol, kelvin, lightspeed, boltzmann
from molmod.periodic import periodic
from molmod.io import XYZFile
from molmod.test.common import tmpdir
from tamkin import *
__all__ = ["RotorTestCase"]
class RotorTestCase(unittest.TestCase):
def assertArraysAlmostEqual(self, a, b, eps=1e-5, relative=False):
assert isinstance(b, np.ndarray)
self.assertEqual(a.shape, b.shape)
if relative:
assert abs(2*(a-b)/(a+b)).max() <= eps
else:
assert abs(a-b).max() <= eps
def test_potential_op(self):
a = 10.0
hb = HarmonicBasis(3, a)
coeffs = np.arange(6, dtype=float)*2
op = hb.get_empty_op()
hb._add_potential_op(op, coeffs)
self.assertAlmostEqual(op[0,0], coeffs[2]/np.sqrt(2*a))
self.assertAlmostEqual(op[0,1], coeffs[3]/np.sqrt(2*a))
self.assertAlmostEqual(op[1,0], coeffs[3]/np.sqrt(2*a))
self.assertAlmostEqual(op[1,1], -coeffs[2]/np.sqrt(2*a))
def test_eval_fn(self):
a = 10.0
hb = HarmonicBasis(3, a)
grid = np.arange(0.0, 10.01, 0.1)
fn = hb.eval_fn(grid, [0,0,0,0,0,3,0])
expected = 3*np.cos(grid*6.0*np.pi/a)/np.sqrt(a/2)
self.assertArraysAlmostEqual(fn, expected)
fn = hb.eval_fn(grid, [0,0,0,0,2,0,0])
expected = 2*np.sin(grid*4.0*np.pi/a)/np.sqrt(a/2)
self.assertArraysAlmostEqual(fn, expected)
def test_eval_deriv(self):
a = 10.0
hb = HarmonicBasis(3, a)
grid = np.arange(0.0, 10.01, 1.0)
coeffs = [-0.5,1.2,2.3,-0.7,0.1,0.3,-1.0]
eps = 1e-6
aderiv = hb.eval_deriv(grid, coeffs)
nderiv = (hb.eval_fn(grid+eps, coeffs) - hb.eval_fn(grid-eps, coeffs))/(2*eps)
self.assertArraysAlmostEqual(aderiv, nderiv)
def test_eval_deriv2(self):
a = 10.0
hb = HarmonicBasis(3, a)
grid = np.arange(0.0, 10.01, 1.0)
coeffs = [-0.5,1.2,2.3,-0.7,0.1,0.3,-1.0]
eps = 1e-6
aderiv2 = hb.eval_deriv2(grid, coeffs)
nderiv2 = (hb.eval_deriv(grid+eps, coeffs) - hb.eval_deriv(grid-eps, coeffs))/(2*eps)
self.assertArraysAlmostEqual(aderiv2, nderiv2)
def test_fit_fn(self):
a = 10.0
hb = HarmonicBasis(10, a)
grid = np.arange(0.0, 10.01, 1.0)
f = np.exp(-((grid-5)/2)**2)
coeffs = hb.fit_fn(grid, f, 10)
g = hb.eval_fn(grid, coeffs)
self.assertArraysAlmostEqual(f, g)
    def test_fit_fn_sym(self):
        """Fit with rotsym=3 and even=True; the fitted function must repeat
        with period a/3 = 3.0 and be mirror-symmetric within each period."""
        a = 9.0
        hb = HarmonicBasis(90, a)
        # The reference data only covers half a period: [0, 1.5] in 16 points.
        grid = np.arange(0.0, 1.501, 0.1)
        f = np.exp(-(grid/2)**2)
        f -= f.mean()
        coeffs = hb.fit_fn(grid, f, 30, rotsym=3, even=True)
        g = hb.eval_fn(grid, coeffs)
        self.assertArraysAlmostEqual(f, g)
        # Re-evaluate over the full period [0, 9] and check the three-fold
        # repetition (slices of 16 points) and the mirror images in between.
        grid = np.arange(0.0, 9.001, 0.1)
        g = hb.eval_fn(grid, coeffs)
        self.assertArraysAlmostEqual(f, g[0:16])
        self.assertArraysAlmostEqual(f, g[30:46])
        self.assertArraysAlmostEqual(f, g[60:76])
        self.assertArraysAlmostEqual(f[::-1], g[15:31])
        self.assertArraysAlmostEqual(f[::-1], g[45:61])
        self.assertArraysAlmostEqual(f[::-1], g[75:91])
        # Visual aid for debugging; written to a temporary directory.
        import matplotlib.pyplot as pt
        pt.clf()
        pt.plot(grid, g, "k-", lw=2)
        pt.plot(grid[:16], f, "rx", mew=2)
        with tmpdir(__name__, 'test_fit_fn_sym') as dn:
            pt.savefig(os.path.join(dn, "test_fit_fn_sym.png"))
    def test_potential_op(self):
        """Compare the potential operator built in the real cosine/sine basis
        with the same operator constructed directly in the complex
        exponential basis, via the unitary change of basis between them."""
        a = 10.0
        mass = 1.0
        nmax = 10
        # Random Fourier coefficients of a real-valued potential:
        # reality implies v(-k) = conj(v(k)).
        v_exp = np.zeros(nmax+1, complex)
        v_exp[0] = np.random.normal(0,1)
        v_exp[1:] += np.random.normal(0,1,nmax)
        v_exp[1:] += 1j*np.random.normal(0,1,nmax)
        #v_exp[3] = 1.0
        def get_v(index):
            # Fourier coefficient v(k) for any integer k, zero beyond nmax.
            if index>nmax or -index>nmax:
                return 0
            elif index>=0:
                return v_exp[index]
            else:
                return np.conjugate(v_exp[-index])
        # Operator in the exponential basis: <k0|V|k1> = v(k0-k1)/sqrt(a).
        v_op_exp = np.zeros((2*nmax+1,2*nmax+1), complex)
        for i0 in range(2*nmax+1):
            k0 = ((i0-1)//2+1)*(2*(i0%2)-1)
            for i1 in range(2*nmax+1):
                k1 = ((i1-1)//2+1)*(2*(i1%2)-1)
                #print (i0,i1), (k0,k1), k0-k1
                v_op_exp[i0,i1] = get_v(k0-k1)/np.sqrt(a)
        #for row in v_op_exp:
        #    print "".join({True: " ", False: "X"}[v==0] for v in row)
        hb = HarmonicBasis(nmax, a)
        # Translate the complex coefficients into the real cos/sin coefficient
        # layout used by HarmonicBasis (constant, then alternating sin/cos).
        v_cs = np.zeros(2*nmax+1, float)
        v_cs[0] = v_exp.real[0]
        v_cs[1::2] = np.sqrt(2.0)*v_exp.real[1:]
        v_cs[2::2] = -np.sqrt(2.0)*v_exp.imag[1:]
        v_op_cs = hb.get_empty_op()
        hb._add_potential_op(v_op_cs, v_cs)
        # Unitary 2x2 transform between (e^{ikx}, e^{-ikx}) and (sin, cos).
        lc = np.array([
            [1.0, -1.0j],
            [1.0, 1.0j],
        ])/np.sqrt(2)
        lc_dagger = lc.transpose().conjugate()
        # Compare the operator 2x2 sub-blocks after the change of basis.
        for i0 in range(nmax):
            for i1 in range(nmax):
                check = np.dot(lc_dagger, np.dot(v_op_exp[2*i0+1:2*i0+3,2*i1+1:2*i1+3], lc))
                assert abs(check.imag).max() < 1e-3
                check = check.real
                self.assertArraysAlmostEqual(
                    v_op_cs[2*i0+1:2*i0+3,2*i1+1:2*i1+3],
                    check,
                    1e-3
                )
    def test_flat(self):
        """A zero potential must give free-rotor levels: E = (2*pi*n/a)^2/(2m),
        doubly degenerate for n > 0 (sin and cos solutions)."""
        a = 10.0
        mass = 1.0
        hb = HarmonicBasis(10, a)
        energies, orbitals = hb.solve(mass, np.zeros(hb.size), evecs=True)
        # Visual aid for debugging; written to a temporary directory.
        import matplotlib.pyplot as pt
        x = np.arange(0.0, a, 0.001)
        pt.clf()
        for i in range(10):
            f = hb.eval_fn(x, orbitals[:,i])
            pt.plot(x, f+i)
        with tmpdir(__name__, 'test_flat') as dn:
            pt.savefig(os.path.join(dn, "flat_wavefunctions.png"))
        # n = 0 once, n = 1..10 twice each (degenerate pairs).
        indexes = np.array([0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10])
        expected = 0.5/mass*(2*indexes*np.pi/a)**2
        self.assertArraysAlmostEqual(energies, expected, 1e-4)
def test_flat2(self):
molecule = load_molecule_g03fchk(
pkg_resources.resource_filename("tamkin", "data/test/ethane/gaussian.fchk"))
nma = NMA(molecule)
rotscan1 = load_rotscan_g03log(
pkg_resources.resource_filename("tamkin", "data/test/rotor/gaussian.log"))
my_potential = rotscan1.potential.copy()
my_potential[1][:] = nma.energy
rotscan1 = rotscan1.copy_with(potential=my_potential)
rotor1 = Rotor(rotscan1, molecule, rotsym=3, even=True)
self.assertAlmostEqual(rotor1.cancel_freq/lightspeed*centimeter, 314, 0)
pf1 = PartFun(nma, [ExtTrans(), ExtRot(6), rotor1])
rotscan2 = RotScan(rotscan1.dihedral, top_indexes=rotscan1.top_indexes)
rotor2 = Rotor(rotscan2, molecule, rotsym=3, even=True)
pf2 = PartFun(nma, [ExtTrans(), ExtRot(6), rotor2])
self.assertArraysAlmostEqual(rotor1.energy_levels, rotor2.energy_levels)
def test_harmonic(self):
a = 20.0
hb = HarmonicBasis(20, a)
x = np.arange(0.0, a, 0.1)
v = 0.5*(x-a/2)**2
#v = 5*(1+np.cos(2*np.pi*x/a))**2
v_coeffs = hb.fit_fn(x, v, 20, even=True, v_threshold=0.1)
energies, orbitals = hb.solve(1, v_coeffs, evecs=True)
expected = np.arange(10) + 0.5
self.assertAlmostEqual(energies[0], 0.5, 1)
self.assertAlmostEqual(energies[1], 1.5, 1)
self.assertAlmostEqual(energies[2], 2.5, 1)
self.assertAlmostEqual(energies[3], 3.5, 1)
self.assertAlmostEqual(energies[4], 4.5, 1)
self.assertAlmostEqual(energies[5], 5.5, 1)
self.assertAlmostEqual(energies[6], 6.5, 1)
import matplotlib.pyplot as pt
x = np.arange(0.0, a, 0.001)
pt.clf()
for i in range(10):
f = hb.eval_fn(x, orbitals[:,i])
pt.plot(x, f)
with tmpdir(__name__, 'test_harmonic1') as dn:
pt.savefig(os.path.join(dn, "harmonic_wavefunctions.png"))
pt.clf()
v = hb.eval_fn(x, v_coeffs)
pt.plot(x, v)
for energy in energies[:10]:
pt.axhline(energy)
pt.xlim(0,a)
with tmpdir(__name__, 'test_harmonic2') as dn:
pt.savefig(os.path.join(dn, "harmonic_levels.png"))
    def test_ethane_hindered(self):
        # Hindered internal rotation of ethane with a scanned potential.
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/ethane/gaussian.fchk"))
        nma = NMA(molecule)
        rot_scan = load_rotscan_g03log(
            pkg_resources.resource_filename("tamkin", "data/test/rotor/gaussian.log"))
        # Cancel frequency derived from the scan potential ...
        rotor = Rotor(rot_scan, molecule, rotsym=3, even=True, cancel_freq='scan')
        pf = PartFun(nma, [ExtTrans(), ExtRot(6), rotor])
        self.assertAlmostEqual(rotor.cancel_freq/lightspeed*centimeter, 298, 0)
        # ... versus the default cancel-frequency recipe.
        rotor = Rotor(rot_scan, molecule, rotsym=3, even=True)
        self.assertAlmostEqual(rotor.cancel_freq/lightspeed*centimeter, 314, 0)
        pf = PartFun(nma, [ExtTrans(), ExtRot(6), rotor])
        # The fitted potential must reproduce the scan energies, shifted so
        # that the minimum is zero.
        self.assertArraysAlmostEqual(
            rotor.hb.eval_fn(rot_scan.potential[0], rotor.v_coeffs),
            rot_scan.potential[1] - rot_scan.potential[1].min(), 1e-4
        )
        # reference data from legacy code (Veronique & co)
        self.assertAlmostEqual(rotor.moment/amu, 11.092362911176032, 2)
        self.assertAlmostEqual(rotor.reduced_moment/amu, 5.5461814555880098, 2)
        self.assertAlmostEqual(np.exp(rotor.log_terms(100.0)[1]), 0.12208E+00, 1)
        self.assertAlmostEqual(rotor.heat_capacity_terms(100.0)[1]/(joule/mol/kelvin), 2.567, 0)
        self.assertAlmostEqual(rotor.entropy_terms(100.0)[1]/(joule/mol), 0.766, 0)
        self.assertAlmostEqual(np.exp(rotor.log_terms(800.0)[1]), 0.21108E+01, 1)
        self.assertAlmostEqual(rotor.heat_capacity_terms(800.0)[1]/(joule/mol/kelvin), 6.346, 1)
        self.assertAlmostEqual(rotor.entropy_terms(800.0)[1]/(joule/mol), 14.824, 1)
        # Exercise the plotting and reporting code paths.
        with tmpdir(__name__, 'test_ethane_hindered') as dn:
            rotor.plot_levels(os.path.join(dn, "ethane_hindered_levels.png"), 300)
            pf.write_to_file(os.path.join(dn, "ethane_hindered.txt"))
            ta = ThermoAnalysis(pf, [200,300,400,500,600,700,800,900])
            ta.write_to_file(os.path.join(dn, "ethane_hindered_thermo.csv"))
    def test_ethyl_free(self):
        # Free rotor (RotScan built without scan data) for the methyl top of
        # the ethyl radical.
        molecule = load_molecule_g03fchk(
            pkg_resources.resource_filename("tamkin", "data/test/ethyl/gaussian.fchk"))
        nma = NMA(molecule)
        # Dihedral atoms defining the internal rotation axis.
        dihedral = [5, 1, 0, 2]
        rot_scan = RotScan(dihedral, molecule)
        rotor = Rotor(rot_scan, molecule, rotsym=6, even=True)
        self.assertAlmostEqual(rotor.cancel_freq/lightspeed*centimeter, 141.2, 0)
        pf = PartFun(nma, [ExtTrans(), ExtRot(1), rotor])
        # reference data from legacy code (Veronique & co)
        self.assertAlmostEqual(rotor.reduced_moment/amu, 4.007, 1)
        self.assertAlmostEqual(np.exp(rotor.log_terms(100.0)[1]), 0.6386, 1)
        self.assertAlmostEqual(np.exp(-rotor.log_terms(100.0)[0]), 0.4168, 1)
        self.assertAlmostEqual(np.exp(rotor.log_terms(800.0)[1]), 1.8062, 1)
        self.assertAlmostEqual(np.exp(-rotor.log_terms(800.0)[0]), 3.9273, 1)
        # Exercise the plotting and reporting code paths.
        with tmpdir(__name__, 'test_ethyl_free') as dn:
            rotor.plot_levels(os.path.join(dn, "ethyl_free_levels.png"), 300)
            pf.write_to_file(os.path.join(dn, "ethyl_free.txt"))
            ta = ThermoAnalysis(pf, [200,300,400,500,600,700,800,900])
            ta.write_to_file(os.path.join(dn, "ethyl_free_thermo.csv"))
    def test_imoms(self):
        # Absolute and reduced moments of inertia of internal rotors.
        # Each case: (xyz file, two atom indexes defining the rotation axis,
        # atom indexes of the rotating top, expected (absolute, reduced)
        # moments — compared after dividing by amu).
        cases = [
            ("caffeine.xyz", 2, 11, [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 18, 19, 20, 21, 22, 23], (2598.9923066760343, 11.373318286792710)),
            ("caffeine.xyz", 2, 11, [16, 17, 15], (11.427609412414192, 11.373318286796099)),
            ("caffeine.xyz", 3, 12, [0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 21, 22, 23], (3874.9262249281255, 11.489403874005802)),
            ("caffeine.xyz", 3, 12, [20, 18, 19], (11.554047706686680, 11.489402424437612)),
            ("caffeine.xyz", 4, 13, [0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19, 20], (2298.5324430380965, 11.334929532798469)),
            ("caffeine.xyz", 4, 13, [23, 22, 21], (11.394908129049933, 11.334928102722181)),
            ("ethane.xyz", 0, 1, [2, 3, 4], (11.330123438245337, 5.6648361869614678)),
            ("ethane.xyz", 0, 1, [5, 6, 7], (11.330123438245337, 5.6648361869614661)),
            ("glycerol.xyz", 0, 3, [11], (3.0794510843017311, 3.0113070937447430)),
            ("glycerol.xyz", 0, 3, [1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13], (951.85671473731713, 3.0113074736677845)),
            ("glycerol.xyz", 1, 4, [0, 2, 3, 5, 6, 7, 8, 9, 10, 11, 13], (1072.5177006846639, 2.9828627310014326)),
            ("glycerol.xyz", 1, 4, [12], (3.0988467514954592, 2.9828627310015023)),
            ("glycerol.xyz", 2, 5, [0, 1, 3, 4, 6, 7, 8, 9, 10, 11, 12], (1071.1143603815583, 2.9517497493009159)),
            ("glycerol.xyz", 2, 5, [13], (3.1115917762553726, 2.9517493768918146)),
            ("glycerol.xyz", 3, 4, [0, 2, 5, 6, 9, 10, 11, 13], (370.75539459124985, 61.588976994367783)),
            ("glycerol.xyz", 3, 4, [8, 1, 12, 7], (124.71612061985820, 61.588969223953136)),
            ("glycerol.xyz", 3, 5, [0, 1, 4, 6, 7, 8, 11, 12], (352.35483251604194, 55.690249341790206)),
            ("glycerol.xyz", 3, 5, [9, 2, 10, 13], (116.03080804859955, 55.690242315592506)),
            ("nicotine.xyz", 0, 7, [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 22, 23, 24, 25], (4653.1199884792477, 11.230510638276883)),
            ("nicotine.xyz", 0, 7, [19, 20, 21], (11.272810801992463, 11.230510638275103)),
            ("nicotine.xyz", 2, 6, [0, 3, 4, 5, 7, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], (846.76088427036848, 208.26235559926022)),
            ("nicotine.xyz", 2, 6, [1, 8, 9, 10, 11, 22, 23, 24, 25], (307.24370887361914, 208.26235559925976)),
            ("peroxide.xyz", 0, 1, [2], (3.3679370303334704, 1.5747785415198767)),
            ("peroxide.xyz", 0, 1, [3], (3.3679004879866139, 1.5747785415198774)),
        ]
        from molmod.io.xyz import XYZFile
        for fn_xyz, i0, i1, top, expected in cases:
            # preparation
            mol = XYZFile(pkg_resources.resource_filename(
                "tamkin", os.path.join("data/test/imom", fn_xyz))).get_molecule()
            masses = np.array([periodic[n].mass for n in mol.numbers])
            # Masses repeated per Cartesian component (x, y, z for each atom).
            masses3 = np.array([masses, masses, masses]).transpose().ravel()
            center = mol.coordinates[i0]
            # Unit vector along the rotation axis.
            axis = mol.coordinates[i1] - mol.coordinates[i0]
            axis /= np.linalg.norm(axis)
            # trivial computation of absolute moment: sum of m*r_perp**2 over
            # the top atoms, r_perp being the distance to the rotation axis.
            mom = 0.0
            for i in top:
                delta = mol.coordinates[i] - center
                delta -= axis*np.dot(axis, delta)
                mom += masses[i]*np.linalg.norm(delta)**2
            self.assertAlmostEqual(mom/amu, expected[0], 2)
            # check tamkin routine against both reference values
            mom, redmom = compute_moments(mol.coordinates, masses3, center, axis, top)
            self.assertAlmostEqual(mom/amu, expected[0], 2)
            self.assertAlmostEqual(redmom/amu, expected[1], 2)
    def test_legacy1(self):
        # Hindered rotor with an 11.5 kJ/mol cosine barrier on a 2*pi domain;
        # reference levels from the legacy code.
        a = 2*np.pi
        mass = 5.5*amu
        hb = HarmonicBasis(100, a)
        v_coeffs = np.zeros(hb.size, float)
        # Constant plus one cosine term: V(x) = 5.75*(1+cos(3x)) kJ/mol
        # (index 0 is the constant, index 5 the third cosine basis function).
        v_coeffs[0] = 0.5*11.5*kjmol*np.sqrt(a)
        v_coeffs[5] = 0.5*11.5*kjmol*np.sqrt(a/2)
        # Sanity check: barrier top at x=0, minimum at x=a/6.
        self.assertArraysAlmostEqual(
            hb.eval_fn(np.array([0, a/6]), v_coeffs),
            np.array([11.5*kjmol, 0.0]),
        )
        energies, orbitals = hb.solve(mass, v_coeffs, evecs=True)
        # Visual-inspection aid: plot the lowest ten wavefunctions.
        import matplotlib.pyplot as pt
        x = np.arange(0.0, a, 0.001)
        pt.clf()
        for i in range(10):
            f = hb.eval_fn(x, orbitals[:,i])
            pt.plot(x, f+i)
        with tmpdir(__name__, 'test_legacy1') as dn:
            pt.savefig(os.path.join(dn, "legacy_wavefunctions.png"))
        # check energy levels (values in kJ/mol from the legacy reference;
        # the two runs correspond to two symmetry blocks).
        expected = np.array([
            1.7635118, 1.76361979, 5.11795465, 8.04553104, 8.1095722,
            10.3876796, 11.8999683, 12.9078395, 14.6739639, 16.7836847,
            19.1722507,
            1.76361979, 5.11795465, 5.12218335, 8.1095722, 10.3876796,
            10.8661504, 12.9078395, 14.6739639, 16.7544718, 19.1722507,
        ])
        expected.sort()
        self.assertArraysAlmostEqual(energies[:10]/kjmol, expected[:10], 1e-3)
        # Partition sum at 100 K, divided by the 3-fold symmetry number.
        self.assertAlmostEqual(np.exp(-energies/(100*boltzmann)).sum()/3.0, 0.12208E+00, 5)
def test_load_rotor_margot(self):
rot_scan = load_rotscan_g03log(
pkg_resources.resource_filename("tamkin", "data/test/rotor/margot.log"))
assert rot_scan.potential.shape == (2, 1)
assert (rot_scan.dihedral == [2, 3, 4, 5]).all()
| molmod/tamkin | tamkin/test/test_rotor.py | Python | gpl-3.0 | 18,551 | [
"Gaussian"
] | 483932d9570b5478ffbf70ed99175706d15b3c32ac464ce6d08fcea8f85cb952 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# BEGIN PYTHON 2/3 COMPATIBILITY BOILERPLATE
from __future__ import absolute_import
from __future__ import with_statement
from __future__ import division
from __future__ import nested_scopes
from __future__ import generators
from __future__ import unicode_literals
from __future__ import print_function
from neurotools.system import *
"""
Commonly used functions
"""
import numpy as np
# Constants: ensure compatibility with float32
# while using highest available accuracy (longdouble)
# NOTE(review): 7e-45 is close to the smallest float32 *denormal*
# (~1.4e-45), not the float32 machine epsilon (~1.19e-7) — confirm intent.
F32EPS = np.longdouble('7e-45')
F32SAFE = np.sqrt(F32EPS)
# NOTE(review): 1.4012985e-45 is the float32 denormal minimum; the F64
# name looks misleading — confirm intent.
F64EPS = np.longdouble('1.4012985e-45')
F64SAFE = np.sqrt(F64EPS)
# Extended-precision zero; used as the constant argument to np.logaddexp.
ZERO128 = np.longdouble('0')
# Clipping bound for exp() arguments; sqrt(log(float64 max)) ~ 26.6 —
# presumably a deliberately conservative bound, TODO confirm.
EMAX = np.longdouble(np.sqrt(np.log(np.finfo(np.float64).max)))
F128EMAX = np.sqrt(np.longdouble('11355.52340629414395'))
# High-precision mathematical constants.
lgE = np.longdouble('1.442695040888963407359924681001892137426645954152985934135')
pi = np.longdouble('3.141592653589793238462643383279502884197169399375105820974')
tau = np.longdouble('6.283185307179586476925286766559005768394338798750211641949')
e = np.longdouble('2.718281828459045235360287471352662497757247093699959574966')
sqrt2 = np.longdouble('1.414213562373095048801688724209698078569671875376948073176')
sqrttau = np.longdouble('2.506628274631000502415765284811045253006986740609938316629')
invsqrttau = np.longdouble('0.398942280401432677939946059934381868475858631164934657666')
# largest floating point accuracy that scipy.linalg
# can support
LINALGMAXFLOAT = np.float64
def slog(x,eps=F64SAFE,returntype=LINALGMAXFLOAT):
    '''
    "safe" natural logarithm: the argument is clipped to
    [eps, float64 max] before taking the log, so the result is
    never NaN or -inf.
    '''
    clipped = np.clip(x, eps, 1.7976931348623157e+308)
    return returntype(np.log(clipped))
def sexp(x,limit=EMAX,returntype=LINALGMAXFLOAT):
    '''
    "safe" exponential: the argument is clipped to [-limit, limit] in
    extended precision before exponentiation, avoiding NaN and inf.
    '''
    bound = np.longdouble(limit)
    arg = np.clip(np.longdouble(x), -bound, bound)
    return returntype(np.exp(arg))
def sigmoid(x,limit=EMAX,returntype=LINALGMAXFLOAT):
    '''
    Logistic sigmoid 1/(1+exp(-x)), computed stably in log space.
    '''
    # log(sigmoid(x)) = -log(1+exp(-x)) = -logaddexp(0, -x)
    bound = np.longdouble(limit)
    arg = np.clip(np.longdouble(x), -bound, bound)
    log_sig = -np.logaddexp(ZERO128, -np.longdouble(arg))
    return returntype(sexp(log_sig))
def inversesigmoid(x,returntype=LINALGMAXFLOAT):
    '''
    Logit function, the inverse of 1/(1+exp(-x)):
    log(x) - log(1-x), using the safe logarithm.
    '''
    log_p = slog(x)
    log_q = slog(1-x)
    return returntype(log_p - log_q)
def dsigmoid(x,returntype=LINALGMAXFLOAT):
    '''
    First derivative of the logistic sigmoid: sigma(x)*(1-sigma(x)),
    computed in log space for numerical stability.
    '''
    xl = np.longdouble(x)
    log_p = -np.logaddexp(ZERO128, -xl)  # log sigma(x)
    log_q = -np.logaddexp(ZERO128, xl)   # log (1 - sigma(x))
    return sexp(log_p + log_q, returntype=returntype)
# Sigmoid and derivatives
def g(x,returntype=LINALGMAXFLOAT):
    '''
    Softplus g(x) = log(1+exp(x)), evaluated stably via logaddexp
    in extended precision.
    '''
    xl = np.longdouble(x)
    return returntype(np.logaddexp(ZERO128, xl))
def f(x,returntype=LINALGMAXFLOAT):
    '''
    Logistic sigmoid f(x) = 1/(1+exp(-x)), evaluated as accurately
    as possible via log-space computation and the safe exponential.
    '''
    log_val = -np.logaddexp(ZERO128, -np.longdouble(x))
    return returntype(sexp(log_val))
def f1(x,returntype=LINALGMAXFLOAT):
    '''
    First derivative of the logistic sigmoid: p*q with p = sigma(x)
    and q = 1 - sigma(x), computed in log space (same as dsigmoid).
    '''
    xl = np.longdouble(x)
    log_pq = -np.logaddexp(ZERO128, -xl) - np.logaddexp(ZERO128, xl)
    return sexp(log_pq, returntype=returntype)
def f2(x,returntype=LINALGMAXFLOAT):
    '''
    Second derivative of the logistic sigmoid: (q - p)*p*q where
    p = sigma(x) and q = 1 - sigma(x).
    '''
    xl = np.longdouble(x)
    logp = -np.logaddexp(ZERO128, -xl)
    logq = -np.logaddexp(ZERO128, xl)
    # Clip the exponents before exponentiating, as in sexp.
    p = np.exp(np.minimum(F128EMAX, logp))
    q = np.exp(np.minimum(F128EMAX, logq))
    return returntype((q-p)*q*p)
def npdf(mu,sigma,x):
    '''
    Univariate Gaussian probability density.

    Parameters
    ----------
    mu : float, scalar or array-like
        Mean(s) of distribution(s)
    sigma : float, scalar or array-like
        Standard deviation(s) of distribution(s)
    x : float, scalar or array-like
        Points at which to evaluate distribution(s)
    '''
    # All inputs are flattened to 1D before evaluation.
    mu = np.array(mu).ravel()
    sigma = np.array(sigma).ravel()
    pts = np.array(x).ravel()
    inv_sd = 1.0/sigma
    z = (pts-mu)*inv_sd
    return (invsqrttau*inv_sd) * sexp(-0.5*z**2)
def log_factorial(k):
    '''
    Return log(k!) for a non-negative integer k.

    Computed as the sum of log(i) for i in 1..k, which is slow but
    numerically more accurate than taking the logarithm of the factorial
    or using approximations.
    '''
    # log(0!) = log(1!) = log(1) = 0. The previous implementation
    # incorrectly returned 1 for k < 2.
    if k < 2:
        return 0.0
    return np.sum([np.log(i) for i in range(1, k + 1)])
| michaelerule/neurotools | functions.py | Python | gpl-3.0 | 4,609 | [
"Gaussian"
] | 548d0c5d9b8cc2d1e9974926368cd636191dfb2608d1376e756ffb34bcf530b8 |
import unittest
import requests_mock
from frontstage import app
from tests.integration.mocked_services import url_banner_api
class TestCookiesContact(unittest.TestCase):
    """Smoke tests for the static cookies, privacy and contact-us pages."""

    def setUp(self):
        # Flask test client; no real server is started.
        self.app = app.test_client()

    @requests_mock.mock()
    def test_cookies_success(self, mock_request):
        # A 404 from the banner API means "no banner to display".
        mock_request.get(url_banner_api, status_code=404)
        response = self.app.get("/cookies")
        self.assertEqual(response.status_code, 200)
        expected_phrases = (
            "Cookies on surveys.ons.gov.uk",
            "Cookies are small files saved on your phone, tablet or computer when you visit a website",
        )
        for phrase in expected_phrases:
            self.assertTrue(phrase.encode() in response.data)

    @requests_mock.mock()
    def test_privacy_success(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        response = self.app.get("/privacy-and-data-protection")
        self.assertEqual(response.status_code, 200)
        expected_phrases = (
            "We will keep your information secure and confidential",
            "Where can I find out more about how my information will be treated?",
        )
        for phrase in expected_phrases:
            self.assertTrue(phrase.encode() in response.data)

    @requests_mock.mock()
    def test_contact_success(self, mock_request):
        mock_request.get(url_banner_api, status_code=404)
        response = self.app.get("/contact-us")
        self.assertEqual(response.status_code, 200)
        expected_phrases = ("Contact us", "Opening hours:")
        for phrase in expected_phrases:
            self.assertTrue(phrase.encode() in response.data)
| ONSdigital/ras-frontstage | tests/unit/views/test_cookies_and_contact.py | Python | mit | 1,570 | [
"VisIt"
] | aaf509532ac3672dc0430fb7decf98df5e43dfbc63588c4d9f08ca1c1e9437c7 |
#!/usr/bin/env python
"""
TurnkeyLaserExporter
-----------------------------------
Maintained by Turnkey Tyranny (https://github.com/TurnkeyTyranny/laser-gcode-exporter-inkscape-plugin)
Designed to run on Ramps 1.4 + Marlin firmware on a K40 CO2 Laser Cutter.
Based on think|haus gcode inkscape extension
Based on a script by Nick Drobchenko from the CNC club
***
Copyright (C) 2009 Nick Drobchenko, nick@cnc-club.ru
based on gcode.py (C) 2007 hugomatic...
based on addnodes.py (C) 2005,2007 Aaron Spike, aaron@ekips.org
based on dots.py (C) 2005 Aaron Spike, aaron@ekips.org
based on interp.py (C) 2005 Aaron Spike, aaron@ekips.org
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
"""
Changelog 2015-02-01:
* Beginning of the project. Based on a fork from ShinyLaser(https://github.com/ajfoul/thlaser-inkscape-plugin)
Changelog 2015-02-16:
Added an option to export as Marlin or Smoothie Power levels
Changelog 2015-03-07:
Added capability to pick out power, ppm, feedrate etc from the layer names
Added code to support Pulse Per Minute burning or continuous burning. Will default to continuous.
M649 S100 L300 P10 - Set Laser settings to 100 percent power, pulses are each 300ms, and 10 pulses per mm.
G0 : Move to a new location with the laser off.
G1 : Move to a new location with the laser on.
G2 : Move in a Clockwise Arc
G3 : Move in a Counter Clockwise Arc
Name your layer like 10 [feed=600,ppm=40] for 10% power, 600mm per minute cut and 40 pulse per millimetre at 60ms duration
Changelog 2015-03-27
Changelog 2015-03-28
Fixed many many bugs, completed the work on exporting objects and images as rasters.
Fixed up as many situations I could find that threw python error messages and replaced them with meaningful notices for the user.
Changelog 2015-03-30
Accounts for strokes on objects. Conditional raster export as some items in inkscape are positioned strangely.
Changelog 2015-04-1
Need to get the 'positioning for all' functionality working as exporting many raster objects is painfully slow.
Updated script to export rasters with top left as the origin or bottom left.
Changelog 2015-04-10
Fixed a bug with exporting paths when the origin was the top left.
Disabled raster horizintal movement optimisation as it has a bug. Rasters will be a little slower but will come out oriented correctly. Search for line : row2 = rowData
Changelog 2015-04-11
Added back in raster optimising, it's not perfect but it's mostly there. Only a little slow parsing white vertical space now.
Found that raster optimisation code seems to be changing the pixel data at the end of the line somewhere. I'm not sure how since it's meant to just be cutting part of the data line out not changing it. will need to investigate further.
Added option to the menu for users to disable raster optimisations.
Changelog 2015-05-09
Spent a day stuffing around with the exporter and marlin firmware to figure out why pronterface was throwing checksum errors when
sending lots of G02 and G03 arc vector cuts. It turns out that with too much precision in the cuts marlin's buffer fills up and it's
unable to receive any more serial data. I resolved this by reducing the float point precision down to 3 decimal places and shifting
power and pulse settings to the G00 move command that comes before a set of G01, G02 and G03 commands to limit data that's needed to
be sent over the wire.
Changelog 2015-05-25
Updated GCodes to optimise when it sends PPM and laser power info.
Added a Pronterface option which is enabled by default to allow rasters to be printed with pronterface.
Added M80 command for Tim from LMN
I also fixed up the part of the exporter to allow the offset and scaling functions to work. Though I found that looking at the scaling
code it will only scale from the original 0,0 coordinate, it doesn't scale based on a centre point.
"""
###
### Gcode tools
###
import inkex, simplestyle, simplepath
import cubicsuperpath, simpletransform, bezmisc
import os
import math
import bezmisc
import re
import copy
import sys
import time
#Image processing for rastering
import base64
from PIL import Image
from PIL import ImageOps
import subprocess
import simplestyle
import getopt
from io import BytesIO
#_ = inkex._
################################################################################
###
### Constants
###
################################################################################
VERSION = "1.0.1"
# Tolerances used by the biarc fitting code for "straight enough" decisions.
STRAIGHT_TOLERANCE = 0.0001
STRAIGHT_DISTANCE_TOLERANCE = 0.0001
LASER_ON = "M3 ;turn the laser on" # LASER ON MCODE
LASER_OFF = "M5 ;turn the laser off\n" # LASER OFF MCODE
HEADER_TEXT = ""
FOOTER_TEXT = ""
# Debug styles for drawing the processed geometry back into the SVG.
# NOTE(review): SVG/CSS uses 'stroke-width'; 'strokeWidth' here is likely
# ignored by renderers — confirm whether these debug styles ever mattered.
BIARC_STYLE = {
        'biarc0': simplestyle.formatStyle({ 'stroke': '#88f', 'fill': 'none', 'strokeWidth':'1' }),
        'biarc1': simplestyle.formatStyle({ 'stroke': '#8f8', 'fill': 'none', 'strokeWidth':'1' }),
        'line': simplestyle.formatStyle({ 'stroke': '#f88', 'fill': 'none', 'strokeWidth':'1' }),
        'area': simplestyle.formatStyle({ 'stroke': '#777', 'fill': 'none', 'strokeWidth':'0.1' }),
    }
# Inkscape group tag
SVG_GROUP_TAG = inkex.addNS("g", "svg")
SVG_PATH_TAG = inkex.addNS('path','svg')
SVG_IMAGE_TAG = inkex.addNS('image', 'svg')
SVG_TEXT_TAG = inkex.addNS('text', 'svg')
SVG_LABEL_TAG = inkex.addNS("label", "inkscape")
GCODE_EXTENSION = ".g" # changed to be Marlin friendly (ajf)
# Filled in by the option parser at run time.
options = {}
################################################################################
###
### Common functions
###
################################################################################
###
### Just simple output function for better debugging
###
class Logger(object):
    """Minimal append-only file logger used for plugin debugging.

    The log file lives in the user's home directory; the previous run's
    log is removed on the first write of the current run.
    """
    first = True     # becomes False after the first write of this run
    enabled = True   # set to False to silence all logging

    def __init__(self):
        home = os.getenv("HOME") or os.getenv("USERPROFILE")
        self.logpath = os.path.join(home, "thlaser.log")

    def write(self, s):
        if (not self.enabled):
            return
        # Start each run with a fresh log file.
        if self.first and os.path.isfile(self.logpath):
            os.remove(self.logpath)
        self.first = False
        handle = open(self.logpath, "a")
        handle.write(str(s) + "\n")
        handle.close()

# The global logger object
logger = Logger()
###
### Point (x,y) operations
###
## Pretty much what it sounds like: defines some arithmetic functions that can be applied to points.
class P:
    """A 2D point/vector with basic arithmetic.

    Accepts either two scalars, P(x, y), or a single (x, y) sequence.
    P * P is the dot product; P * scalar scales the vector.
    """
    def __init__(self, x, y=None):
        if y is None:
            # Single argument: treat it as an (x, y) pair.
            self.x, self.y = float(x[0]), float(x[1])
        else:
            self.x, self.y = float(x), float(y)
    def __add__(self, other):
        return P(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        return P(self.x - other.x, self.y - other.y)
    def __neg__(self):
        return P(-self.x, -self.y)
    def __mul__(self, other):
        if isinstance(other, P):
            return self.x * other.x + self.y * other.y
        return P(self.x * other, self.y * other)
    __rmul__ = __mul__
    def __div__(self, other):
        # Python 2 division operator (this plugin targets Inkscape's py2).
        return P(self.x / other, self.y / other)
    def mag(self):
        """Euclidean length."""
        return math.hypot(self.x, self.y)
    def unit(self):
        """Unit vector in the same direction; the zero vector maps to (0, 0)."""
        length = self.mag()
        if length:
            return self / length
        return P(0, 0)
    def dot(self, other):
        return self.x * other.x + self.y * other.y
    def rot(self, theta):
        """Rotate counter-clockwise by theta radians."""
        c = math.cos(theta)
        s = math.sin(theta)
        return P(self.x * c - self.y * s, self.x * s + self.y * c)
    def angle(self):
        return math.atan2(self.y, self.x)
    def __repr__(self):
        return '%f,%f' % (self.x, self.y)
    def pr(self):
        return "%.2f,%.2f" % (self.x, self.y)
    def to_list(self):
        return [self.x, self.y]
###
### Functions to operate with CubicSuperPath
###
def csp_at_t(sp1,sp2,t):
    """Evaluate the cubic bezier between super-path points sp1 and sp2 at parameter t."""
    control = (sp1[1][:], sp1[2][:], sp2[0][:], sp2[1][:])
    return bezmisc.bezierpointatt(control, t)
def cspbezsplit(sp1, sp2, t = 0.5):
    # Split the bezier segment between super-path points sp1 and sp2 at
    # parameter t; returns three super-path points [start, split, end] with
    # their control handles updated from the two half-beziers.
    s1,s2 = bezmisc.beziersplitatt((sp1[1],sp1[2],sp2[0],sp2[1]),t)
    return [ [sp1[0][:], sp1[1][:], list(s1[1])], [list(s1[2]), list(s1[3]), list(s2[1])], [list(s2[2]), sp2[1][:], sp2[2][:]] ]
def cspbezsplitatlength(sp1, sp2, l = 0.5, tolerance = 0.01):
    """Split the segment at the parameter where the fractional arc length equals l."""
    control = (sp1[1][:], sp1[2][:], sp2[0][:], sp2[1][:])
    t_split = bezmisc.beziertatlength(control, l, tolerance)
    return cspbezsplit(sp1, sp2, t_split)
def cspseglength(sp1,sp2, tolerance = 0.001):
    """Arc length of the bezier segment between super-path points sp1 and sp2."""
    control = (sp1[1][:], sp1[2][:], sp2[0][:], sp2[1][:])
    return bezmisc.bezierlength(control, tolerance)
def csplength(csp):
    # Total length of a cubic super path, together with the individual
    # segment lengths flattened over all sub-paths, in traversal order.
    total = 0
    lengths = []
    for sp in csp:
        # py2 xrange; segment i spans points i-1 .. i of the sub-path.
        for i in xrange(1,len(sp)):
            l = cspseglength(sp[i-1],sp[i])
            lengths.append(l)
            total += l
    return lengths, total
###
### Distance calculattion from point to arc
###
def between(c,x,y):
    """True if c lies between x and y (in either order), within STRAIGHT_TOLERANCE."""
    # The union of [x-T, y+T] and [y-T, x+T] is exactly [min-T, max+T].
    lo, hi = min(x, y), max(x, y)
    return lo - STRAIGHT_TOLERANCE <= c <= hi + STRAIGHT_TOLERANCE
def distance_from_point_to_arc(p, arc):
    # Distance from point p to the arc [start, end, center, signed_sweep];
    # returns (distance, nearest_point_on_arc).
    P0,P2,c,a = arc
    dist = None
    p = P(p)
    r = (P0-c).mag()
    if r>0 :
        # Radial projection of p onto the arc's circle.
        i = c + (p-c).unit()*r
        alpha = ((i-c).angle() - (P0-c).angle())
        # Bring alpha to the same sign as the sweep a.
        if a*alpha<0:
            if alpha>0: alpha = alpha-2*math.pi
            else: alpha = 2*math.pi+alpha
        if between(alpha,0,a) or min(abs(alpha),abs(alpha-a))<STRAIGHT_TOLERANCE :
            # Projection lands on the arc: distance to the circle.
            return (p-i).mag(), [i.x, i.y]
        else :
            # Otherwise the nearest point is one of the arc's end points.
            d1, d2 = (p-P0).mag(), (p-P2).mag()
            if d1<d2 :
                return (d1, [P0.x,P0.y])
            else :
                return (d2, [P2.x,P2.y])
    # NOTE(review): when r == 0 (degenerate arc) the function implicitly
    # returns None, which callers do not appear to handle — confirm.
def get_distance_from_csp_to_arc(sp1,sp2, arc1, arc2, tolerance = 0.001 ): # arc = [start,end,center,alpha]
    # Maximum distance between the bezier segment sp1->sp2 and the biarc
    # (arc1, arc2): the bezier is sampled at n+1 points and n is doubled
    # until the estimate converges within tolerance (at most two passes).
    n, i = 10, 0
    d, d1, dl = (0,(0,0)), (0,(0,0)), 0
    while i<1 or (abs(d1[0]-dl[0])>tolerance and i<2):
        i += 1
        dl = d1*1  # tuple copy of the previous pass' estimate
        for j in range(n+1):
            t = float(j)/n
            p = csp_at_t(sp1,sp2,t)
            # Distance to the nearer of the two arcs at this sample point.
            d = min(distance_from_point_to_arc(p,arc1), distance_from_point_to_arc(p,arc2))
            d1 = max(d1,d)  # track the worst (largest) sample distance
        n=n*2
    return d1[0]
################################################################################
###
### Biarc function
###
### Calculates biarc approximation of cubic super path segment
### splits segment if needed or approximates it with straight line
###
################################################################################
def biarc(sp1, sp2, z1, z2, depth=0,):
def biarc_split(sp1,sp2, z1, z2, depth):
if depth<options.biarc_max_split_depth:
sp1,sp2,sp3 = cspbezsplit(sp1,sp2)
l1, l2 = cspseglength(sp1,sp2), cspseglength(sp2,sp3)
if l1+l2 == 0 : zm = z1
else : zm = z1+(z2-z1)*l1/(l1+l2)
return biarc(sp1,sp2,depth+1,z1,zm)+biarc(sp2,sp3,depth+1,z1,zm)
else: return [ [sp1[1],'line', 0, 0, sp2[1], [z1,z2]] ]
P0, P4 = P(sp1[1]), P(sp2[1])
TS, TE, v = (P(sp1[2])-P0), -(P(sp2[0])-P4), P0 - P4
tsa, tea, va = TS.angle(), TE.angle(), v.angle()
if TE.mag()<STRAIGHT_DISTANCE_TOLERANCE and TS.mag()<STRAIGHT_DISTANCE_TOLERANCE:
# Both tangents are zerro - line straight
return [ [sp1[1],'line', 0, 0, sp2[1], [z1,z2]] ]
if TE.mag() < STRAIGHT_DISTANCE_TOLERANCE:
TE = -(TS+v).unit()
r = TS.mag()/v.mag()*2
elif TS.mag() < STRAIGHT_DISTANCE_TOLERANCE:
TS = -(TE+v).unit()
r = 1/( TE.mag()/v.mag()*2 )
else:
r=TS.mag()/TE.mag()
TS, TE = TS.unit(), TE.unit()
tang_are_parallel = ((tsa-tea)%math.pi<STRAIGHT_TOLERANCE or math.pi-(tsa-tea)%math.pi<STRAIGHT_TOLERANCE )
if ( tang_are_parallel and
((v.mag()<STRAIGHT_DISTANCE_TOLERANCE or TE.mag()<STRAIGHT_DISTANCE_TOLERANCE or TS.mag()<STRAIGHT_DISTANCE_TOLERANCE) or
1-abs(TS*v/(TS.mag()*v.mag()))<STRAIGHT_TOLERANCE) ):
# Both tangents are parallel and start and end are the same - line straight
# or one of tangents still smaller then tollerance
# Both tangents and v are parallel - line straight
return [ [sp1[1],'line', 0, 0, sp2[1], [z1,z2]] ]
c,b,a = v*v, 2*v*(r*TS+TE), 2*r*(TS*TE-1)
if v.mag()==0:
return biarc_split(sp1, sp2, z1, z2, depth)
asmall, bsmall, csmall = abs(a)<10**-10,abs(b)<10**-10,abs(c)<10**-10
if asmall and b!=0: beta = -c/b
elif csmall and a!=0: beta = -b/a
elif not asmall:
discr = b*b-4*a*c
if discr < 0: raise ValueError, (a,b,c,discr)
disq = discr**.5
beta1 = (-b - disq) / 2 / a
beta2 = (-b + disq) / 2 / a
if beta1*beta2 > 0 : raise ValueError, (a,b,c,disq,beta1,beta2)
beta = max(beta1, beta2)
elif asmall and bsmall:
return biarc_split(sp1, sp2, z1, z2, depth)
alpha = beta * r
ab = alpha + beta
P1 = P0 + alpha * TS
P3 = P4 - beta * TE
P2 = (beta / ab) * P1 + (alpha / ab) * P3
def calculate_arc_params(P0,P1,P2):
D = (P0+P2)/2
if (D-P1).mag()==0: return None, None
R = D - ( (D-P0).mag()**2/(D-P1).mag() )*(P1-D).unit()
p0a, p1a, p2a = (P0-R).angle()%(2*math.pi), (P1-R).angle()%(2*math.pi), (P2-R).angle()%(2*math.pi)
alpha = (p2a - p0a) % (2*math.pi)
if (p0a<p2a and (p1a<p0a or p2a<p1a)) or (p2a<p1a<p0a) :
alpha = -2*math.pi+alpha
if abs(R.x)>1000000 or abs(R.y)>1000000 or (R-P0).mag<options.min_arc_radius :
return None, None
else :
return R, alpha
R1,a1 = calculate_arc_params(P0,P1,P2)
R2,a2 = calculate_arc_params(P2,P3,P4)
if R1==None or R2==None or (R1-P0).mag()<STRAIGHT_TOLERANCE or (R2-P2).mag()<STRAIGHT_TOLERANCE : return [ [sp1[1],'line', 0, 0, sp2[1], [z1,z2]] ]
d = get_distance_from_csp_to_arc(sp1,sp2, [P0,P2,R1,a1],[P2,P4,R2,a2])
if d > options.biarc_tolerance and depth<options.biarc_max_split_depth : return biarc_split(sp1, sp2, z1, z2, depth)
else:
if R2.mag()*a2 == 0 : zm = z2
else : zm = z1 + (z2-z1)*(R1.mag()*a1)/(R2.mag()*a2+R1.mag()*a1)
return [ [ sp1[1], 'arc', [R1.x,R1.y], a1, [P2.x,P2.y], [z1,zm] ], [ [P2.x,P2.y], 'arc', [R2.x,R2.y], a2, [P4.x,P4.y], [zm,z2] ] ]
################################################################################
###
### Inkscape helper functions
###
################################################################################
# Returns true if the given node is a layer
def is_layer(node):
    """True if the SVG node is an Inkscape layer group."""
    if node.tag != SVG_GROUP_TAG:
        return False
    return node.get(inkex.addNS("groupmode", "inkscape")) == "layer"
def get_layers(document):
    """Return the top-level Inkscape layers of the SVG document, in order."""
    root = document.getroot()
    return [node for node in root.iterchildren() if is_layer(node)]
def parse_layer_name(txt):
    """Parse a layer label like '10 [feed=600,ppm=40]'.

    Returns (layerName, params) where params maps each bracketed argument
    name to its value; 'feed' and 'ppm' values are converted to float,
    everything else is kept as a string.

    Raises ValueError for a malformed 'key=value' pair or a non-numeric
    feed/ppm value.
    """
    params = {}
    try:
        n = txt.index("[")
    except ValueError:
        # No bracketed argument list: the whole label is the layer name.
        layerName = txt.strip()
    else:
        layerName = txt[0:n].strip()
        args = txt[n+1:].strip()
        if (args.endswith("]")):
            args = args[0:-1]
        for arg in args.split(","):
            try:
                (field, value) = arg.split("=")
            except ValueError:
                # Narrowed from a bare 'except:': only a malformed pair
                # (zero or multiple '=') raises here.
                raise ValueError("Invalid argument in layer '%s'" % layerName)
            if (field == "feed" or field == "ppm"):
                try:
                    value = float(value)
                except ValueError:
                    # Narrowed from a bare 'except:'.
                    raise ValueError("Invalid layer name '%s'" % value)
            params[field] = value
            logger.write("%s == %s" % (field, value))
    return (layerName, params)
################################################################################
###
### Gcode tools class
###
################################################################################
class Gcode_tools(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
outdir = os.getenv("HOME") or os.getenv("USERPROFILE")
if (outdir):
outdir = os.path.join(outdir, "Desktop")
else:
outdir = os.getcwd()
self.OptionParser.add_option("-d", "--directory", action="store", type="string", dest="directory", default=outdir, help="Directory for gcode file")
self.OptionParser.add_option("-f", "--filename", action="store", type="string", dest="file", default="-1.0", help="File name")
self.OptionParser.add_option("-u", "--Xscale", action="store", type="float", dest="Xscale", default="1.0", help="Scale factor X")
self.OptionParser.add_option("-v", "--Yscale", action="store", type="float", dest="Yscale", default="1.0", help="Scale factor Y")
self.OptionParser.add_option("-x", "--Xoffset", action="store", type="float", dest="Xoffset", default="0.0", help="Offset along X")
self.OptionParser.add_option("-y", "--Yoffset", action="store", type="float", dest="Yoffset", default="0.0", help="Offset along Y")
# added move (laser off) feedrate and laser intensity; made all int rather than float - (ajf)
self.OptionParser.add_option("-m", "--Mfeed", action="store", type="int", dest="Mfeed", default="2000", help="Default Move Feed rate in unit/min")
self.OptionParser.add_option("-p", "--feed", action="store", type="int", dest="feed", default="300", help="Default Cut Feed rate in unit/min")
self.OptionParser.add_option("-l", "--laser", action="store", type="int", dest="laser", default="10", help="Default Laser intensity (0-100 %)")
self.OptionParser.add_option("-b", "--homebefore", action="store", type="inkbool", dest="homebefore", default=True, help="Home all beofre starting (G28 XY)")
self.OptionParser.add_option("-a", "--homeafter", action="store", type="inkbool", dest="homeafter", default=False, help="Home X Y at end of job")
self.OptionParser.add_option("", "--biarc-tolerance", action="store", type="float", dest="biarc_tolerance", default="1", help="Tolerance used when calculating biarc interpolation.")
self.OptionParser.add_option("", "--biarc-max-split-depth", action="store", type="int", dest="biarc_max_split_depth", default="4", help="Defines maximum depth of splitting while approximating using biarcs.")
self.OptionParser.add_option("", "--unit", action="store", type="string", dest="unit", default="G21 (All units in mm)\n", help="Units")
self.OptionParser.add_option("", "--function", action="store", type="string", dest="function", default="Curve", help="What to do: Curve|Area|Area inkscape")
self.OptionParser.add_option("", "--tab", action="store", type="string", dest="tab", default="", help="Means nothing right now. Notebooks Tab.")
#self.OptionParser.add_option("", "--generate_not_parametric_code",action="store", type="inkbool", dest="generate_not_parametric_code", default=False,help="Generated code will be not parametric.")
self.OptionParser.add_option("", "--double_sided_cutting",action="store", type="inkbool", dest="double_sided_cutting", default=False,help="Generate code for double-sided cutting.")
self.OptionParser.add_option("", "--draw-curves", action="store", type="inkbool", dest="drawCurves", default=False,help="Draws curves to show what geometry was processed")
self.OptionParser.add_option("", "--logging", action="store", type="inkbool", dest="logging", default=False, help="Enable output logging from the plugin")
self.OptionParser.add_option("", "--loft-distances", action="store", type="string", dest="loft_distances", default="10", help="Distances between paths.")
self.OptionParser.add_option("", "--loft-direction", action="store", type="string", dest="loft_direction", default="crosswise", help="Direction of loft's interpolation.")
self.OptionParser.add_option("", "--loft-interpolation-degree",action="store", type="float", dest="loft_interpolation_degree", default="2", help="Which interpolation use to loft the paths smooth interpolation or staright.")
self.OptionParser.add_option("", "--min-arc-radius", action="store", type="float", dest="min_arc_radius", default="0.0005", help="All arc having radius less than minimum will be considered as straight line")
self.OptionParser.add_option("", "--mainboard", action="store", type="string", dest="mainboard", default="ramps", help="Mainboard")
self.OptionParser.add_option("", "--pronterface", action="store", type="inkbool", dest="pronterface", default=True, help="Are you using Pronterface? If so we need to change some characters in the GCode raster data to keep pronterface happy. Slight loss of intensity on pure blacks but nothing major.")
self.OptionParser.add_option("", "--origin", action="store", type="string", dest="origin", default="topleft", help="Origin of the Y Axis")
self.OptionParser.add_option("", "--optimiseraster", action="store", type="inkbool", dest="optimiseraster", default=True, help="Optimise raster horizontal scanning speed")
def parse_curve(self, path):
    """Convert a compiled path dictionary into a cutting curve.

    Vector paths are approximated as a sequence of move/line/arc/end
    segments using biarc interpolation; raster paths need no conversion
    and are returned untouched.
    """
    if path['type'] != "vector":
        # Raster image data: pass straight through.
        return path
    # Scale factors are applied per-point below (historically configurable,
    # now fixed at 1:1).
    scale_x, scale_y = 1.0, 1.0
    curve = {'type': "vector", 'data': []}
    for subpath in path['data']:
        start = subpath[0][1]
        # Rapid move to the start of this subpath.
        curve['data'].append(
            [[start[0] * scale_x, start[1] * scale_y], 'move', 0, 0]
        )
        for idx in range(1, len(subpath)):
            prev_triple = [[subpath[idx - 1][k][0] * scale_x,
                            subpath[idx - 1][k][1] * scale_y] for k in range(3)]
            cur_triple = [[subpath[idx][k][0] * scale_x,
                           subpath[idx][k][1] * scale_y] for k in range(3)]
            # Approximate the Bezier span between the two control triples
            # with biarc segments.
            curve['data'] += biarc(prev_triple, cur_triple, 0, 0)
        end = subpath[-1][1]
        curve['data'].append(
            [[end[0] * scale_x, end[1] * scale_y], 'end', 0, 0]
        )
    return curve
def draw_curve(self, curve, group=None, style=BIARC_STYLE):
    """Render computed biarc segments back into the SVG for inspection.

    Each 'line' segment becomes an SVG <path>, each 'arc' segment becomes a
    sodipodi arc element, so the user can see what geometry will be cut.

    curve -- sequence of segments of the form [point, kind, center, sign].
             NOTE(review): effect_curve passes the whole curve *dict* here;
             iterating a dict yields its keys, so this probably expects
             curve['data'] -- verify against callers.
    group -- SVG group to draw into; created under self.biarcGroup if None.
    style -- stroke styles keyed by 'line' / 'biarc0' / 'biarc1'.
    """
    if group==None:
        group = inkex.etree.SubElement( self.biarcGroup, SVG_GROUP_TAG )
    s, arcn = '', 0
    # Walk consecutive segment pairs: s is the previous segment, si the
    # current one; the element drawn spans from s's point to si's point.
    for si in curve:
        if s!='':
            if s[1] == 'line':
                inkex.etree.SubElement( group, SVG_PATH_TAG,
                    {
                        'style': style['line'],
                        'd':'M %s,%s L %s,%s' % (s[0][0], s[0][1], si[0][0], si[0][1]),
                        'comment': str(s)
                    }
                )
            elif s[1] == 'arc':
                # Alternate biarc styles so adjacent arcs are distinguishable.
                arcn += 1
                sp = s[0]   # arc start point
                c = s[2]    # arc centre
                # Signed sweep angle from s's point to si's point about c.
                a = ( (P(si[0])-P(c)).angle() - (P(s[0])-P(c)).angle() )%(2*math.pi) #s[3]
                # Force the sweep direction to agree with the stored sign s[3].
                if s[3]*a<0:
                    if a>0: a = a-2*math.pi
                    else: a = 2*math.pi+a
                r = math.sqrt( (sp[0]-c[0])**2 + (sp[1]-c[1])**2 )
                # Start angle expressed in sodipodi's arc convention.
                a_st = ( math.atan2(sp[0]-c[0],- (sp[1]-c[1])) - math.pi/2 ) % (math.pi*2)
                if a>0:
                    a_end = a_st+a
                else:
                    # Negative sweep: swap start/end so sodipodi draws the
                    # same arc in its positive direction.
                    a_end = a_st*1
                    a_st = a_st+a
                inkex.etree.SubElement( group, inkex.addNS('path','svg'),
                    {
                        'style': style['biarc%s' % (arcn%2)],
                        inkex.addNS('cx','sodipodi'): str(c[0]),
                        inkex.addNS('cy','sodipodi'): str(c[1]),
                        inkex.addNS('rx','sodipodi'): str(r),
                        inkex.addNS('ry','sodipodi'): str(r),
                        inkex.addNS('start','sodipodi'): str(a_st),
                        inkex.addNS('end','sodipodi'): str(a_end),
                        inkex.addNS('open','sodipodi'): 'true',
                        inkex.addNS('type','sodipodi'): 'arc',
                        'comment': str(s)
                    })
        s = si
def check_dir(self):
    """Validate the output directory and load the gcode header/footer.

    Reads '<directory>/header' and '<directory>/footer' if those files
    exist, otherwise falls back to the built-in HEADER_TEXT / FOOTER_TEXT
    templates.  Results are stored on self.header / self.footer.

    Returns:
        bool: True if the directory exists; False otherwise (an error
        message is reported to the user in that case).
    """
    if not os.path.isdir(self.options.directory):
        inkex.errormsg(("Directory specified for output gcode does not exist! Please create it."))
        return False
    # Use context managers so the file handles are closed even if read fails.
    header_path = os.path.join(self.options.directory, 'header')
    if os.path.isfile(header_path):
        with open(header_path, 'r') as f:
            self.header = f.read()
    else:
        self.header = HEADER_TEXT
    footer_path = os.path.join(self.options.directory, 'footer')
    if os.path.isfile(footer_path):
        with open(footer_path, 'r') as f:
            self.footer = f.read()
    else:
        self.footer = FOOTER_TEXT
    return True
def make_args(self, c):
    """Format up to six coordinate values as gcode words.

    Turns a list of arguments into gcode-style parameters
    (eg (1, 2, 3) -> "X1.000 Y-3.000 Z3.000"), applying the configured
    scale and offsets.  Y components are negated (SVG's Y axis points
    down), the origin setting shifts Y by the page height, and a zero K
    component is suppressed.
    """
    # Pad (or truncate) the input out to exactly six entries: X Y Z I J K.
    coords = list(c[:6]) + [None] * (6 - min(len(c), 6))
    if coords[5] == 0:
        coords[5] = None
    axis_names = ["X", "Y", "Z", "I", "J", "K"]
    suffixes = ["", "", "", "", "", ""]
    scales = [self.options.Xscale, -self.options.Yscale, 1,
              self.options.Xscale, -self.options.Yscale, 1]
    offsets = [self.options.Xoffset, self.options.Yoffset, 0, 0, 0, 0]
    # When the origin sits at the bottom left, shift Y by the page height.
    if self.options.origin != 'topleft':
        offsets[1] += self.pageHeight
    words = []
    for name, suffix, scale, offset, value in zip(axis_names, suffixes,
                                                  scales, offsets, coords):
        if value is not None:
            scaled = self.unitScale * (value * scale + offset)
            words.append("%s%.3f%s" % (name, scaled, suffix))
    return " ".join(words)
def generate_raster_gcode(self, curve, laserPower, altfeed=None):
    """Generate gcode for a raster (engraving) image.

    curve      -- dict with 'id', 'width', 'height', 'x', 'y' and 'data'
                  (rows of 0-255 pixel intensities, top row first).
    laserPower -- laser intensity (percent, or 0.0-1.0 for smoothie).
    altfeed    -- optional feed rate override from the layer parameters.

    Emits an M649 to configure raster mode, then G7 lines carrying
    base64-encoded pixel data, scanning alternately left-to-right ($1)
    and right-to-left ($0).
    """
    gcode = ''
    #Setup our feed rate, either from the layer name or from the default value.
    if (altfeed):
        # Use the "alternative" feed rate specified
        feedRate = altfeed
    else:
        feedRate = self.options.feed
    cutFeed = "F%i" % feedRate
    #This extension assumes that your copy of Inkscape is running at 90dpi (it is by default)
    #R = mm per pixel
    #R = 1 / dots per mm
    #90dpi = 1 / (90 / 25.4)
    #Rasters are exported internally at 270dpi.
    #So R = 1 / (270 / 25.4)
    # = 0.09406
    gcode += '\n\n;Beginning of Raster Image '+str(curve['id'])+' pixel size: '+str(curve['width'])+'x'+str(curve['height'])+'\n'
    gcode += 'M649 S'+str(laserPower)+' B2 D0 R0.09406\n'
    #Do not remove these two lines, they're important. Will not raster correctly if feedrate is not set prior.
    #Move fast to point, cut at correct speed.
    #BUGFIX: the original compared the cutFeed *string* ("F...") against the
    #numeric Mfeed, which never matched in Python 2 (str < int is always
    #False); compare the numeric feed rates instead.
    if(feedRate < self.options.Mfeed):
        gcode += 'G0 X'+str(curve['x'])+' Y'+str(curve['y'])+' F'+str(self.options.Mfeed)+'\n'
    gcode += 'G0 X'+str(curve['x'])+' Y'+str(curve['y'])+' '+cutFeed+'\n'
    #Split a row of pixels into chunks small enough for one G7 line.
    def get_chunks(arr, chunk_size = 51):
        chunks = [ arr[start:start+chunk_size] for start in range(0, len(arr), chunk_size)]
        return chunks
    #return the first pixel that holds data.
    def first_in_list(arr):
        end = 0
        for i in range(len(arr)):
            if (arr[i] == 0):
                end = i
            if (arr[i] > 0):
                break
        return end
    #does this line have any data?
    def is_blank_line(arr):
        for i in range(len(arr)):
            if (arr[i] > 0):
                return False
        return True
    #return the last pixel that holds data.
    def last_in_list(arr):
        end = len(arr)
        for i in range(len(arr)):
            if (arr[i] > 0):
                end = i
        return end
    #Flip the image top to bottom.
    row = curve['data'][::-1]
    previousRight = 99999999999
    previousLeft = 0
    firstRow = True
    first = True
    forward = True
    for index, rowData in enumerate(row):
        splitRight = 0
        splitLeft = 0
        #Turnkey - 11-04-15
        #The below allows iteration over blank lines, while still being 'mostly' optimised for path. could still do with a little improvement for optimising horizontal movement and extrenuous for loops.
        sub_index = index+1
        if(sub_index < len(row)):
            # Skip ahead over blank lines so the split points are computed
            # against the next row that actually contains data.
            while is_blank_line(row[sub_index-1]):
                if(sub_index < len(row)):
                    sub_index += 1
                else:
                    break
        #are we processing data before the last line?
        if(sub_index < len(row)):
            # Determine where to split the lines.
            ##################################################
            #If the left most pixel of the next row is earlier than the current row, then extend.
            if(first_in_list(row[sub_index]) > first_in_list(rowData)):
                splitLeft = first_in_list(rowData)
            else:
                splitLeft = first_in_list(row[sub_index])
            #If the end pixel of the next line is later than the current line, extend.
            if(last_in_list(row[sub_index]) > last_in_list(rowData)):
                splitRight = last_in_list(row[sub_index])
            else:
                splitRight = last_in_list(rowData)
        else:
            splitLeft = first_in_list(rowData)
            splitRight = last_in_list(rowData)
        #Positive direction
        if forward:
            #Split the right side.
            ###########################################
            #Don't split more than the start of the last row as we print in reverse for alternate lines
            splitLeft = previousLeft
            previousRight = splitRight
        #Negative direction
        else:
            #Split the left side.
            ###########################################
            #Don't split more than the end of the last row as we print in reverse for alternate lines
            splitRight = previousRight
            previousLeft = splitLeft
        #Exception to the rule : Don't split the left of the first row.
        if(firstRow):
            splitLeft = (previousLeft)
            firstRow = False
        row2 = rowData[(splitLeft+1):(splitRight+1)]
        #The optimisation has a bug which can produce hot spots at the edge of
        #rasters, so users can disable it through the options.
        if( not self.options.optimiseraster ):
            row2 = rowData
        #Heading right to left: invert the data so it streams in cut order.
        if not forward:
            result_row = row2[::-1]
        #Heading left to right: data is already in cut order.
        else:
            result_row = row2
        first = True
        for chunk in get_chunks(result_row,51):
            if first:
                # First chunk of a row carries the direction flag.
                if forward:
                    gcode += ("\nG7 $1 ")
                else:
                    gcode += ("\nG7 $0 ")
                first = not first
            else:
                gcode += ("G7 ")
            b64 = base64.b64encode("".join(chr(y) for y in chunk))
            #If we're using pronterface, we need to change raster data / and + in the base64 alphabet to letter 9. This loses a little intensity in pure blacks but keeps pronterface happy.
            if( self.options.pronterface ):
                b64 = b64.replace("+", "9").replace("/", "9")
            gcode += ("L"+str(len(b64))+" ")
            gcode += ("D"+b64+ "\n")
        forward = not forward
    gcode += ("M5 \n")
    gcode += ';End of Raster Image '+str(curve['id'])+'\n\n'
    return gcode
def generate_gcode(self, curve, depth, laserPower, altfeed=None, altppm=None):
    """Generate gcode for a vector curve.

    curve      -- dict whose 'data' is a list of [point, kind, center, sign]
                  segments produced by parse_curve; kind is one of
                  'move', 'line', 'arc', 'end'.
    depth      -- unused here (kept for interface compatibility).
    laserPower -- S-value for the laser (percent, or 0.0-1.0 for smoothie).
    altfeed    -- optional feed rate override from the layer parameters.
    altppm     -- optional pulses-per-millimetre override; enables pulsed
                  firing mode instead of continuous wave.
    """
    gcode = ''
    #Setup our feed rate, either from the layer name or from the default value.
    if (altfeed):
        # Use the "alternative" feed rate specified
        cutFeed = "F%i" % altfeed
    else:
        cutFeed = "F%i" % self.options.feed
    #Setup our pulse per millimetre option, if applicable
    #B: laser firing mode (0 = continuous, 1 = pulsed, 2 = raster)
    if (altppm):
        # Use the "alternative" ppm - L60000 is 60us
        ppmValue = "L60000 P%.2f B1 D0" % altppm
    else:
        #Set the laser firing mode to continuous.
        ppmValue = "B0 D0"
    cwArc = "G02"
    ccwArc = "G03"
    # The geometry is reflected, so invert the orientation of the arcs to match
    if (self.flipArcs):
        (cwArc, ccwArc) = (ccwArc, cwArc)
    # The 'laser on' and 'laser off' m-codes get appended to the GCODE generation
    lg = 'G00'
    # The power/feed/ppm words are emitted only on the first cutting command;
    # subsequent commands inherit them (modal gcode).
    firstGCode = False
    # Walk consecutive segment pairs; s's kind describes the move that ends
    # at si's point.
    for i in range(1,len(curve['data'])):
        s, si = curve['data'][i-1], curve['data'][i]
        #G00 : Move with the laser off to a new point
        if s[1] == 'move':
            #Turn off the laser if it was on previously.
            #if lg != "G00":
            #    gcode += LASER_OFF + "\n"
            gcode += "G00 " + self.make_args(si[0]) + " F%i " % self.options.Mfeed + "\n"
            lg = 'G00'
        elif s[1] == 'end':
            lg = 'G00'
        #G01 : Move with the laser turned on to a new point
        elif s[1] == 'line':
            if not firstGCode: #Include the ppm values for the first G01 command in the set.
                gcode += "G01 " + self.make_args(si[0]) + "S%.2f " % laserPower + "%s " % cutFeed + "%s" % ppmValue + "\n"
                firstGCode = True
            else:
                gcode += "G01 " + self.make_args(si[0]) + "\n"
            lg = 'G01'
        #G02 and G03 : Move in an arc with the laser turned on.
        elif s[1] == 'arc':
            # Offset from the arc start point to its centre (I/J words).
            dx = s[2][0]-s[0][0]
            dy = s[2][1]-s[0][1]
            if abs((dx**2 + dy**2)*self.options.Xscale) > self.options.min_arc_radius:
                r1 = P(s[0])-P(s[2])
                r2 = P(si[0])-P(s[2])
                # If start/end radii agree, emit a centre-format (I/J) arc;
                # otherwise fall back to a radius-format (R) arc.
                if abs(r1.mag() - r2.mag()) < 0.001:
                    # s[3] is the arc direction sign: positive -> clockwise.
                    if (s[3] > 0):
                        gcode += cwArc
                    else:
                        gcode += ccwArc
                    if not firstGCode: #Include the ppm values for the first G01 command in the set.
                        gcode += " " + self.make_args(si[0] + [None, dx, dy, None]) + "S%.2f " % laserPower + "%s " % cutFeed + "%s" % ppmValue + "\n"
                        firstGCode = True
                    else:
                        gcode += " " + self.make_args(si[0] + [None, dx, dy, None]) + "\n"
                else:
                    # Radii disagree slightly: use the average radius with R.
                    r = (r1.mag()+r2.mag())/2
                    if (s[3] > 0):
                        gcode += cwArc
                    else:
                        gcode += ccwArc
                    if not firstGCode: #Include the ppm values for the first G01 command in the set.
                        gcode += " " + self.make_args(si[0]) + " R%f" % (r*self.options.Xscale) + "S%.2f " % laserPower + "%s " % cutFeed + "%s" % ppmValue + "\n"
                        firstGCode = True
                    else:
                        gcode += " " + self.make_args(si[0]) + " R%f" % (r*self.options.Xscale) + "\n"
                lg = cwArc
            #The arc is less than the minimum arc radius, draw it as a straight line.
            else:
                if not firstGCode: #Include the ppm values for the first G01 command in the set.
                    gcode += "G01 " + self.make_args(si[0]) + "S%.2f " % laserPower + "%s " % cutFeed + "%s" % ppmValue + "\n"
                    firstGCode = True
                else:
                    gcode += "G01 " + self.make_args(si[0]) + "\n"
                lg = 'G01'
        #The end of the layer.
        if si[1] == 'end':
            gcode += LASER_OFF
    return gcode
def tool_change(self):
    """Return gcode for a tool change and rotate to the next tool slot."""
    next_tool = self.currentTool + 1
    gcode = TOOL_CHANGE % next_tool
    # Cycle through the 32 available tool slots.
    self.currentTool = next_tool % 32
    return gcode
#Determine the tmp directory for the user's operating system.
def getTmpPath(self):
    """Define the temporary folder path depending on the operating system"""
    return 'C:\\WINDOWS\\Temp\\' if os.name == 'nt' else '/tmp/'
################################################################################
###
### Curve to Gcode
###
################################################################################
def effect_curve(self, selected):
    """Compile the selected SVG objects into gcode.

    Walks the document's layers, converts each selected object into either
    vector (biarc) or raster pixel data via the nested compile_paths
    helper, and generates the corresponding gcode.  Layer labels of the
    form '20 [ppm=40,feed=300]' override power/feed/ppm per layer.

    Returns the complete gcode string; raster sections always precede
    vector sections.
    """
    selected = list(selected)
    # Set group
    if self.options.drawCurves and len(selected)>0:
        self.biarcGroup = inkex.etree.SubElement( selected[0].getparent(), SVG_GROUP_TAG )
        options.Group = self.biarcGroup
    # Recursively compiles a list of paths that are decendant from the given node
    self.skipped = 0
    def compile_paths(parent, node, trans):
        # Returns a single path dict for a leaf object, or a list of path
        # dicts for a group (callers distinguish the two by type).
        # Apply the object transform, along with the parent transformation
        mat = node.get('transform', None)
        path = {}
        if mat:
            mat = simpletransform.parseTransform(mat)
            trans = simpletransform.composeTransform(trans, mat)
        if node.tag == SVG_PATH_TAG:
            # This is a path object
            if (not node.get("d")): return []
            csp = cubicsuperpath.parsePath(node.get("d"))
            path['type'] = "vector"
            path['id'] = node.get("id")
            path['data'] = []
            if (trans):
                simpletransform.applyTransformToPath(trans, csp)
                path['data'] = csp
            #Apply a transform in the Y plan to flip the path vertically
            #If we want our origin to the the top left.
            if (self.options.origin == 'topleft'):
                csp = path['data']
                simpletransform.applyTransformToPath(([1.0, 0.0, 0], [0.0, -1.0, 0]), csp)
                path['data'] = csp
            return path
        elif node.tag == SVG_GROUP_TAG:
            # This node is a group of other nodes
            pathsGroup = []
            for child in node.iterchildren():
                data = compile_paths(parent, child, trans)
                #inkex.errormsg(str(data))
                if type(data) is not list:
                    pathsGroup.append(data.copy())
                else:
                    pathsGroup += data
            return pathsGroup
        else :
            #Raster the results.
            if(node.get("x") > 0):
                tmp = self.getTmpPath() #OS tmp directory
                bgcol = "#ffffff" #White
                # NOTE(review): duplicated assignment below looks accidental.
                curfile = curfile = self.args[-1] #The current inkscape project we're exporting from.
                # Export just this object from the project as a 270dpi PNG
                # on a white background, via a child inkscape process.
                command="inkscape --export-dpi 270 -i %s --export-id-only -e \"%stmpinkscapeexport.png\" -b \"%s\" %s" % (node.get("id"),tmp,bgcol,curfile)
                p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                return_code = p.wait()
                f = p.stdout
                err = p.stderr
                #Fetch the image Data
                filename = "%stmpinkscapeexport.png" % (tmp)
                if (self.options.origin == 'topleft'):
                    im = Image.open(filename).transpose(Image.FLIP_TOP_BOTTOM).convert('L')
                else:
                    im = Image.open(filename).convert('L')
                # Invert so 0 = blank and 255 = full burn.
                img = ImageOps.invert(im)
                #Get the image size
                imageDataWidth, imageDataheight = img.size
                #Compile the pixels.
                pixels = list(img.getdata())
                pixels = [pixels[i * (imageDataWidth):(i + 1) * (imageDataWidth)] for i in xrange(imageDataheight)]
                path['type'] = "raster"
                path['width'] = imageDataWidth
                path['height'] = imageDataheight
                #A slow, but reliable way of getting correct coordinates since working with inkscape transpositions and transforms is a major pain in the ass.
                #command="inkscape -X --query-id=%s %s" % (node.get("id"),curfile)
                #p2 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                #return_code = p2.wait()
                #text = p2.communicate()[0]
                #x_position = float(text)
                #command="inkscape -Y --query-id=%s %s" % (node.get("id"),curfile)
                #p3 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                #return_code = p3.wait()
                #text = p3.communicate()[0]
                #y_position = float(text)*-1+self.pageHeight
                # Query all element positions once and cache them on the
                # parent extension object for subsequent raster nodes.
                if not hasattr(parent, 'glob_nodePositions'):
                    #Get the XY position of all elements in the inkscape job.
                    command="inkscape -S %s" % (curfile)
                    p5 = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    dataString = str(p5.communicate()[0]).replace("\r", "").split('\n')
                    #Remove the final array element since the last item has a \r\n which creates a blank array element otherwise.
                    del dataString[-1]
                    elementList = dict((item.split(",",1)[0],item.split(",",1)[1]) for item in dataString)
                    parent.glob_nodePositions = elementList
                #Lookup the xy coords for this node.
                elementData = parent.glob_nodePositions[node.get("id")].split(',')
                x_position = float(elementData[0])
                y_position = float(elementData[1])*-1+self.pageHeight
                #Text is y positioned from the top left.
                if (self.options.origin == 'topleft'):
                    #Don't flip the y position. Since we're moving the origin from bottom left to top left.
                    y_position = float(elementData[1])
                else:
                    #Very small loss of positioning due to conversion of the dpi in the exported image.
                    y_position -= imageDataheight/3
                #Convert from pixels to mm
                path['x'] = float(str("%.3f") %(self.unitScale * x_position))
                path['y'] = float(str("%.3f") %(self.unitScale * y_position))
                #Do not permit being < 0
                if(path['y'] < 0):
                    path['y'] = 0
                if(path['x'] < 0):
                    path['x'] = 0
                path['id'] = node.get("id")
                path['data'] = pixels
                return path
            else:
                inkex.errormsg("Unable to generate raster for object " + str(node.get("id"))+" as it does not have an x-y coordinate associated.")
            """
            elif node.tag == SVG_IMAGE_TAG:
                #inkex.errormsg( )
                #Work together to destroy
                #visit https://www.python.org/downloads/ and download python 2.7.9
                #Install it
                #In the folder : C:\Program Files\Inkscape you will need to rename the folder "Python" to "Python-old" so it uses the new system install instead.
                #pip install wheel
                #From http://www.lfd.uci.edu/~gohlke/pythonlibs/#pil , download "Pillow-2.7.0-cp27-none-win32.whl"
                #pip install Pillow-2.7.0-cp27-none-win32.whl
                #You're good to go!
                #Fetch the image Data
                inkscapeWidth = int(float(node.get("width")))
                inkscapeHeight = int(float(node.get("height")))
                data = str((node.get(inkex.addNS('href','xlink')) )).replace("data:image/png;base64,","").replace("data:image/jpeg;base64,","")
                im = Image.open(BytesIO(base64.b64decode(data))).convert('L')
                img = ImageOps.invert(im)
                imageDataWidth, imageDataheight = img.size
                #Resize to match the dimensions in Inkscape
                im_resized = img.resize((inkscapeWidth*3, inkscapeHeight*3), Image.ANTIALIAS)
                #Resize the image here for highter DPI - say 300dpi
                #Compile the pixels.
                pixels = list(im_resized.getdata())
                pixels = [pixels[i * (inkscapeWidth*3):(i + 1) * (inkscapeWidth * 3)] for i in xrange(inkscapeHeight*3)]
                path['type'] = "raster"
                path['width'] = inkscapeWidth
                path['height'] = inkscapeHeight
                path['x'] = self.unitScale*(float(node.get("x")) * 1)
                #Add the height in px from inkscape from the image, as its top is measured from the origin top left, though in inkscape the origin is bottom left so we need to begin scanning the px at the bottom of the image for our laser bed.
                path['y'] = self.unitScale * ((float(node.get("y"))+float(node.get("height")))*-1+self.pageHeight)
                path['id'] = node.get("id")
                path['data'] = pixels
                #inkex.errormsg(str(path))
                return path
            #The object isn't a path, and it's not an image. Convert it to an image to be rastered.
            """
            inkex.errormsg("skipping node " + str(node.get("id")))
            self.skipped += 1
            return []
    # Compile a list of layers in this document. We compile a list of only the layers
    # we need to use, so we can know ahead of time whether to put tool change
    # operations between them.
    layers = []
    for layer in reversed(get_layers(self.document)):
        for node in layer.iterchildren():
            if (node in selected):
                layers.append(layer)
                break
    # NOTE(review): the filtered list built above is discarded by the next
    # line, which processes every layer in the document -- confirm intent.
    layers = list(reversed(get_layers(self.document)))
    # Loop over the layers and objects
    gcode = ""
    gcode_raster = ""
    for layer in layers:
        label = layer.get(SVG_LABEL_TAG).strip()
        if (label.startswith("#")):
            # Ignore everything selected in this layer
            for node in layer.iterchildren():
                if (node in selected):
                    selected.remove(node)
            continue
        # Parse the layer label text, which consists of the layer name followed
        # by an optional number of arguments in square brackets.
        try:
            originalLayerName = label
            (layerName, layerParams) = parse_layer_name(label)
        except ValueError,e:
            inkex.errormsg("Your inkscape layer is named incorrectly. Please use the format '20 [ppm=40,feed=300]' without the quotes. This would set the power at 20%, cutting at 300mm per minute at a pulse rate of 40 pulse per millimetre. The ppm option is optional, leaving it out will set the laser to continuous wave mode.")
            return
        # Check if the layer specifies an alternative (from the default) feed rate
        altfeed = layerParams.get("feed", self.options.feed)
        altppm = layerParams.get("ppm", None)
        logger.write("layer %s" % layerName)
        if (layerParams):
            logger.write("layer params == %s" % layerParams)
        pathList = []
        # Apply the layer transform to all objects within the layer
        trans = layer.get('transform', None)
        trans = simpletransform.parseTransform(trans)
        for node in layer.iterchildren():
            if (node in selected):
                #Vector path data, cut from x to y in a line or curve
                logger.write("node %s" % str(node.tag))
                selected.remove(node)
                # compile_paths returns a dict for single objects (which has
                # .copy()) and a list for groups (Python 2 lists have no
                # .copy(), so the AttributeError routes groups to the
                # except branch below).
                try:
                    newPath = compile_paths(self, node, trans).copy();
                    pathList.append(newPath)
                    inkex.errormsg("Built gcode for "+str(node.get("id"))+" - will be cut as %s." % (newPath['type']) )
                except:
                    messageOnce = True
                    for objectData in compile_paths(self, node, trans):
                        #if (messageOnce):
                        inkex.errormsg("Built gcode for group "+str(node.get("id"))+", item %s - will be cut as %s." % (objectData['id'], objectData['type']) )
                        #messageOnce = False
                        pathList.append(objectData)
            else:
                logger.write("skipping node %s" % node)
        if (not pathList):
            logger.write("no objects in layer")
            continue
        #Determind the power of the laser that this layer should be cut at.
        #If the layer is not named as an integer value then default to the laser intensity set at the export settings.
        #Fetch the laser power from the export dialog box.
        laserPower = self.options.laser
        try:
            if (int(layerName) > 0 and int(layerName) <= 100):
                laserPower = int(layerName)
            else :
                laserPower = self.options.laser
        except ValueError,e:
            laserPower = self.options.laser
            inkex.errormsg("Unable to parse power level for layer name. Using default power level %d percent." % (self.options.laser))
        #Switch between smoothie power levels and ramps+marlin power levels
        #ramps and marlin expect 0 to 100 while smoothie wants 0.0 to 1.0
        if (self.options.mainboard == 'smoothie'):
            laserPower = float(laserPower) / 100
        #Fetch the vector or raster data and turn it into GCode
        for objectData in pathList:
            curve = self.parse_curve(objectData)
            header_data = ""
            #Turnkey : Always output the layer header for information.
            if (len(layers) > 0):
                header_data += LASER_OFF+"\n"
                size = 60
                header_data += ";(%s)\n" % ("*"*size)
                header_data += (";(***** Layer: %%-%ds *****)\n" % (size-19)) % (originalLayerName)
                header_data += (";(***** Laser Power: %%-%ds *****)\n" % (size-25)) % (laserPower)
                header_data += (";(***** Feed Rate: %%-%ds *****)\n" % (size-23)) % (altfeed)
                if(altppm):
                    header_data += (";(***** Pulse Rate: %%-%ds *****)\n" % (size-24)) % (altppm)
                header_data += ";(%s)\n" % ("*"*size)
                header_data += ";(MSG,Starting layer '%s')\n\n" % originalLayerName
            #Generate the GCode for this layer
            if (curve['type'] == "vector"):
                #Should the curves be drawn in inkscape?
                if (self.options.drawCurves):
                    self.draw_curve(curve)
                gcode += header_data+self.generate_gcode(curve, 0, laserPower, altfeed=altfeed, altppm=altppm)
            elif (curve['type'] == "raster"):
                gcode_raster += header_data+self.generate_raster_gcode(curve, laserPower, altfeed=altfeed)
    #Turnkey - Need to figure out why inkscape sometimes gets to this point and hasn't found the objects above.
    # If there are any objects left over, it's because they don't belong
    # to any inkscape layer (bug in inkscape?). Output those now.
    #Turnkey - This is caused by objects being inside a group.
    if (selected):
        inkex.errormsg("Warning: Your selected object is part of a group. If your group has a transformations/skew/rotation applied to it these will not be exported correctly. Please ungroup your objects first then re-export. Select them and press Shift+Ctrl+G to ungroup.\n")
        pathList = []
        # Use the identity transform (eg no transform) for the root objects
        trans = simpletransform.parseTransform("")
        for node in selected:
            try:
                newPath = compile_paths(self, node, trans).copy();
                pathList.append(newPath)
                inkex.errormsg("Built gcode for "+str(node.get("id"))+" - will be cut as %s." % (newPath['type']) )
            except:
                messageOnce = True
                for objectData in compile_paths(self, node, trans):
                    #if (messageOnce):
                    inkex.errormsg("Built gcode for group "+str(node.get("id"))+", item %s - will be cut as %s." % (objectData['id'], objectData['type']) )
                    #messageOnce = False
                    pathList.append(objectData)
        if (pathList):
            for objectData in pathList:
                curve = self.parse_curve(objectData)
                #Determind the power of the laser that this layer should be cut at.
                #If the layer is not named as an integer value then default to the laser intensity set at the export settings.
                #Fetch the laser power from the export dialog box.
                # NOTE(review): layerName / originalLayerName / altfeed /
                # altppm below are leftovers from the last iteration of the
                # layer loop; if no labelled layer was processed they are
                # unbound (NameError) -- verify.
                laserPower = self.options.laser
                try:
                    if (int(layerName) > 0 and int(layerName) <= 100):
                        laserPower = int(layerName)
                    else :
                        laserPower = self.options.laser
                except ValueError,e:
                    laserPower = self.options.laser
                    inkex.errormsg("Unable to parse power level for layer name. Using default power level %d percent." % (self.options.laser))
                #Switch between smoothie power levels and ramps+marlin power levels
                #ramps and marlin expect 0 to 100 while smoothie wants 0.0 to 1.0
                if (self.options.mainboard == 'smoothie'):
                    laserPower = float(laserPower) / 100
                header_data = ""
                #Turnkey : Always output the layer header for information.
                if (len(layers) > 0):
                    header_data += LASER_OFF+"\n"
                    size = 60
                    header_data += ";(%s)\n" % ("*"*size)
                    header_data += (";(***** Layer: %%-%ds *****)\n" % (size-19)) % (originalLayerName)
                    header_data += (";(***** Laser Power: %%-%ds *****)\n" % (size-25)) % (laserPower)
                    header_data += (";(***** Feed Rate: %%-%ds *****)\n" % (size-23)) % (altfeed)
                    if(altppm):
                        header_data += (";(***** Pulse Rate: %%-%ds *****)\n" % (size-24)) % (altppm)
                    header_data += ";(%s)\n" % ("*"*size)
                    header_data += ";(MSG,Starting layer '%s')\n\n" % originalLayerName
                #Generate the GCode for this layer
                if (curve['type'] == "vector"):
                    #Should the curves be drawn in inkscape?
                    if (self.options.drawCurves):
                        self.draw_curve(curve)
                    gcode += header_data+self.generate_gcode(curve, 0, laserPower, altfeed=altfeed, altppm=altppm)
                elif (curve['type'] == "raster"):
                    gcode_raster += header_data+self.generate_raster_gcode(curve, laserPower, altfeed=altfeed)
    if self.options.homeafter:
        gcode += "\n\nG00 X0 Y0 F4000 ; home"
    #Always raster before vector cutting.
    gcode = gcode_raster+"\n\n"+gcode
    return gcode
def effect(self):
    """Main entry point for the extension.

    Validates the document/options, assembles the gcode (header, unit
    setup, optional pre-homing, curves from effect_curve, optional
    double-sided second pass, footer) and writes it to the output file.
    """
    global options
    options = self.options
    selected = self.selected.values()
    root = self.document.getroot()
    #See if the user has the document setup in mm or pixels.
    try:
        self.pageHeight = float(root.get("height", None))
    except (TypeError, ValueError):
        # Height attribute missing or not a plain number (e.g. "210mm").
        inkex.errormsg(("Please change your inkscape project units to be in pixels, not inches or mm. In Inkscape press ctrl+shift+d and change 'units' on the page tab to px. The option 'default units' can be set to mm or inch, these are the units displayed on your rulers."))
        return
    self.flipArcs = (self.options.Xscale*self.options.Yscale < 0)
    self.currentTool = 0
    self.filename = options.file.strip()
    if (self.filename == "-1.0" or self.filename == ""):
        inkex.errormsg(("Please select an output file name."))
        return
    if (not self.filename.lower().endswith(GCODE_EXTENSION)):
        # Automatically append the correct extension
        self.filename += GCODE_EXTENSION
    logger.enabled = self.options.logging
    logger.write("Laser script started")
    logger.write("output file == %s" % self.options.file)
    if len(selected)<=0:
        inkex.errormsg(("This extension requires at least one selected path."))
        return
    # check_dir also loads self.header / self.footer.
    if not self.check_dir():
        return
    gcode = self.header
    # NOTE(review): the --unit option's declared default is
    # "G21 (All units in mm)\n", which matches neither branch below --
    # verify the .inx file always passes "mm" or "in".
    if (self.options.unit == "mm"):
        self.unitScale = 0.282222222222
        gcode += "G21 ; All units in mm\n"
    elif (self.options.unit == "in"):
        self.unitScale = 0.011111
        gcode += "G20 ; All units in in\n"
    else:
        inkex.errormsg(("You must choose mm or in"))
        return
    gcode += "M80 ; Turn on Optional Peripherals Board at LMN\n"
    #Put the header data in the gcode file
    gcode += """
; Raster data will always precede vector data
; Default Cut Feedrate %i mm per minute
; Default Move Feedrate %i mm per minute
; Default Laser Intensity %i percent\n""" % (self.options.feed, self.options.Mfeed, self.options.laser)
    if self.options.homebefore:
        gcode += "G28 XY; home X and Y\n\n"
    #if self.options.function == 'Curve':
    data = self.effect_curve(selected)
    if data:
        gcode += data
    if (self.options.double_sided_cutting):
        gcode += "\n\n;(MSG,Please flip over material)\n\n"
        # Include a tool change operation
        gcode += self.tool_change()
        logger.write("*** processing mirror image")
        # Mirror the Y axis and regenerate everything for the reverse side.
        self.options.Yscale *= -1
        self.flipArcs = not(self.flipArcs)
        #self.options.generate_not_parametric_code = True
        self.pageHeight = 0
        gcode += self.effect_curve(selected)
    try:
        # Context manager ensures the handle is closed even if write fails.
        with open(self.options.directory+'/'+self.options.file, "w") as f:
            f.write(gcode + self.footer)
    except (IOError, OSError):
        inkex.errormsg(("Can not write to specified file!"))
        return
    if (self.skipped > 0):
        inkex.errormsg(("Warning: skipped %d object(s) because they were not paths (Vectors) or images (Raster). Please convert them to paths using the menu 'Path->Object To Path'" % self.skipped))
# Instantiate the extension and run it.  affect() -- presumably inherited
# from inkex.Effect -- parses the command line / SVG input and invokes
# effect() above, then we report completion to the user.
e = Gcode_tools()
e.affect()
inkex.errormsg("Finished processing.")
"VisIt"
] | 049f459186c8d9409d97976abe6d288f6d65c732bb329d821299517b41079297 |
'''Constant pressure/stress and temperature dynamics.
Combined Nose-Hoover and Parrinello-Rahman dynamics, creating an NPT
(or N,stress,T) ensemble.
The method is the one proposed by Melchionna et al. [1] and later
modified by Melchionna [2]. The differential equations are integrated
using a centered difference method [3].
1. S. Melchionna, G. Ciccotti and B. L. Holian, "Hoover NPT dynamics
for systems varying in shape and size", Molecular Physics 78, p. 533
(1993).
2. S. Melchionna, "Constrained systems and statistical distribution",
Physical Review E, 61, p. 6165 (2000).
3. B. L. Holian, A. J. De Groot, W. G. Hoover, and C. G. Hoover,
"Time-reversible equilibrium and nonequilibrium isothermal-isobaric
simulations with centered-difference Stoermer algorithms.", Physical
Review A, 41, p. 4552 (1990).
'''
__docformat__ = 'reStructuredText'
from numpy import *
import sys
import time
import weakref
from ase.md import MolecularDynamics
#from ASE.Trajectories.NetCDFTrajectory import NetCDFTrajectory
# Delayed imports: If the trajectory object is reading a special ASAP version
# of HooverNPT, that class is imported from Asap.Dynamics.NPTDynamics.
class NPT(MolecularDynamics):
'''Constant pressure/stress and temperature dynamics.
Combined Nose-Hoover and Parrinello-Rahman dynamics, creating an
NPT (or N,stress,T) ensemble.
The method is the one proposed by Melchionna et al. [1] and later
modified by Melchionna [2]. The differential equations are integrated
using a centered difference method [3]. See also NPTdynamics.tex
The dynamics object is called with the following parameters:
atoms
The list of atoms.
dt
The timestep in units matching eV, A, u.
temperature
The desired temperature in eV.
externalstress
The external stress in eV/A^3. Either a symmetric
3x3 tensor, a 6-vector representing the same, or a
scalar representing the pressure. Note that the
stress is positive in tension whereas the pressure is
positive in compression: giving a scalar p is
equivalent to giving the tensor (-p, -p, -p, 0, 0, 0).
ttime
Characteristic timescale of the thermostat.
Set to None to disable the thermostat.
pfactor
A constant in the barostat differential equation. If
a characteristic barostat timescale of ptime is
desired, set pfactor to ptime^2 * B (where B is the
Bulk Modulus). Set to None to disable the barostat.
Typical metallic bulk moduli are of the order of
100 GPa or 0.6 eV/A^3.
mask=None
Optional argument. A tuple of three integers (0 or 1),
indicating if the system can change size along the
three Cartesian axes. Set to (1,1,1) or None to allow
a fully flexible computational box. Set to (1,1,0)
to disallow elongations along the z-axis etc.
Useful parameter values:
* The same timestep can be used as in Verlet dynamics, i.e. 5 fs is fine
for bulk copper.
* The ttime and pfactor are quite critical[4], too small values may
cause instabilites and/or wrong fluctuations in T / p. Too
large values cause an oscillation which is slow to die. Good
values for the characteristic times seem to be 25 fs for ttime,
and 75 fs for ptime (used to calculate pfactor), at least for
bulk copper with 15000-200000 atoms. But this is not well
tested, it is IMPORTANT to monitor the temperature and
stress/pressure fluctuations.
It has the following methods:
__call__(n)
Perform n timesteps.
initialize()
Estimates the dynamic variables for time=-1 to start
the algorithm. This is automatically called before
the first timestep.
set_stress()
Set the external stress. Use with care. It is
preferable to set the right value when creating the
object.
set_mask()
Change the mask. Use with care, as you may "freeze"
a fluctuation in the strain rate.
get_gibbs_free_energy()
Gibbs free energy is supposed to be preserved by this
dynamics. This is mainly intended as a diagnostic
tool.
References:
1) S. Melchionna, G. Ciccotti and B. L. Holian, Molecular
Physics 78, p. 533 (1993).
2) S. Melchionna, Physical
Review E 61, p. 6165 (2000).
3) B. L. Holian, A. J. De Groot, W. G. Hoover, and C. G. Hoover,
Physical Review A 41, p. 4552 (1990).
4) F. D. Di Tolla and M. Ronchetti, Physical
Review E 48, p. 1726 (1993).
'''
classname = "NPT" # Used by the trajectory.
def __init__(self, atoms,
timestep, temperature, externalstress, ttime, pfactor,
mask=None, trajectory=None):
MolecularDynamics.__init__(self, atoms, timestep, trajectory)
#self.atoms = atoms
#self.timestep = timestep
self.zero_center_of_mass_momentum(verbose=1)
self.temperature = temperature
self.set_stress(externalstress)
self.set_mask(mask)
self.eta = zeros((3,3), float)
self.zeta = 0.0
self.zeta_integrated = 0.0
self.initialized = 0
self.ttime = ttime
self.pfactor_given = pfactor
self._calculateconstants()
self.timeelapsed = 0.0
self.frac_traceless = 1
def set_temperature(self, temperature):
self.temperature = temperature
self._calculateconstants()
def set_stress(self, stress):
"""Set the applied stress.
Must be a symmetric 3x3 tensor, a 6-vector representing a symmetric
3x3 tensor, or a number representing the pressure.
"""
if type(stress) == type(1.0) or type(stress) == type(1):
stress = array((-stress, -stress, -stress, 0.0, 0.0, 0.0))
elif stress.shape == (3,3):
if not self._issymmetric(stress):
raise ValueError, "The external stress must be a symmetric tensor."
stress = array((stress[0,0], stress[1,1], stress[2,2], stress[1,2],
stress[0,2], stress[0,1]))
elif stress.shape != (6,):
raise ValueError, "The external stress has the wrong shape."
self.externalstress = stress
def set_mask(self, mask):
"""Set the mask indicating dynamic elements of the computational box.
If set to None, all elements may change. If set to a 3-vector
of ones and zeros, elements which are zero specify directions
along which the size of the computational box cannot change.
For example, if mask = {1,1,0} the length of the system along
the z-axis cannot change, although xz and yz shear is still
possible. To disable shear globally, set the mode to diagonal
(not yet implemented).
"""
if mask is None:
mask = ones((3,))
if not hasattr(mask, "shape"):
mask = array(mask)
if mask.shape != (3,) and mask.shape != (3,3):
raise "The mask has the wrong shape (must be a 3-vector or 3x3 matrix)"
else:
mask = not_equal(mask, 0) # Make sure it is 0/1
if mask.shape == (3,):
self.mask = outer(mask, mask)
else:
self.mask = mask
def set_fraction_traceless(self, fracTraceless):
"""set what fraction of the traceless part of the force
on eta is kept.
By setting this to zero, the volume may change but the shape may not.
"""
self.frac_traceless = fracTraceless
def get_strain_rate(self):
"Get the strain rate as an upper-triangular 3x3 matrix"
return array(self.eta, copy=1)
def set_strain_rate(self, rate):
"Set the strain rate. Must be an upper triangular 3x3 matrix."
if not (rate.shape == (3,3) and self._isuppertriangular(rate)):
raise ValueError, "Strain rate must be an upper triangular matrix."
self.eta = rate
if self.initialized:
# Recalculate h_past and eta_past so they match the current value.
self._initialize_eta_h()
def get_time(self):
"Get the elapsed time."
return self.timeelapsed
def run(self, steps):
"""Perform a number of time steps."""
if not self.initialized:
self.initialize()
else:
if self.have_the_atoms_been_changed():
raise NotImplementedError, "You have modified the atoms since the last timestep."
for i in xrange(steps):
self.step()
self.nsteps += 1
self.call_observers()
def have_the_atoms_been_changed(self):
"Checks if the user has modified the positions or momenta of the atoms"
limit = 1e-10
h = self._getbox()
if max(abs((h - self.h).ravel())) > limit:
self._warning("The computational box has been modified.")
return 1
expected_r = dot(self.q + 0.5, h)
err = max(abs((expected_r - self.atoms.get_positions()).ravel()))
if err > limit:
self._warning("The atomic positions have been modified: "+ str(err))
return 1
return 0
def step(self):
"""Perform a single time step.
Assumes that the forces and stresses are up to date, and that
the positions and momenta have not been changed since last
timestep.
"""
## Assumes the following variables are OK
# q_past, q, q_future, p, eta, eta_past, zeta, zeta_past, h, h_past
#
# q corresponds to the current positions
# p must be equal to self.atoms.GetCartesianMomenta()
# h must be equal to self.atoms.GetUnitCell()
#
#print "Making a timestep"
dt = self.dt
h_future = self.h_past + 2*dt * dot(self.h, self.eta)
deltaeta = -2*dt * (self.pfact * linalg.det(self.h)
* (self.atoms.get_stress() - self.externalstress))
if self.frac_traceless == 1:
eta_future = self.eta_past + self.mask * self._makeuppertriangular(deltaeta)
else:
trace_part, traceless_part = self._separatetrace(self._makeuppertriangular(deltaeta))
eta_future = self.eta_past + trace_part + self.frac_traceless * traceless_part
deltazeta = 2*dt*self.tfact * (self.atoms.get_kinetic_energy()
- self.desiredEkin)
zeta_future = self.zeta_past + deltazeta
# Advance time
#print "Max change in scaled positions:", max(abs(self.q_future.flat - self.q.flat))
#print "Max change in basis set", max(abs((h_future - self.h).flat))
self.timeelapsed += dt
self.h_past = self.h
self.h = h_future
self.inv_h = linalg.inv(self.h)
# Do not throw away the q arrays, they are "magical" on parallel
# simulations (the contents migrate along with the atoms).
(self.q_past, self.q, self.q_future) = (self.q, self.q_future,
self.q_past)
self._setbox_and_positions(self.h,self.q)
self.eta_past = self.eta
self.eta = eta_future
self.zeta_past = self.zeta
self.zeta = zeta_future
self._synchronize() # for parallel simulations.
self.zeta_integrated += dt * self.zeta
#self.forcecalculator()
force = self.atoms.get_forces()
# The periodic boundary conditions may have moved the atoms.
self.post_pbc_fix(fixfuture=0)
self._calculate_q_future(force)
self.atoms.set_momenta(dot(self.q_future-self.q_past, self.h/(2*dt)) *
self._getmasses())
#self.stresscalculator()
def initialize(self):
"""Initialize the dynamics.
The dynamics requires positions etc for the two last times to
do a timestep, so the algorithm is not self-starting. This
method performs a 'backwards' timestep to generate a
configuration before the current.
"""
#print "Initializing the NPT dynamics."
dt = self.dt
atoms = self.atoms
self.h = self._getbox()
if not self._isuppertriangular(self.h):
print "I am", self
print "self.h:"
print self.h
print "Min:", min((self.h[1,0], self.h[2,0], self.h[2,1]))
print "Max:", max((self.h[1,0], self.h[2,0], self.h[2,1]))
raise NotImplementedError, "Can (so far) only operate on lists of atoms where the computational box is an upper triangular matrix."
self.inv_h = linalg.inv(self.h)
# The contents of the q arrays should migrate in parallel simulations.
self._make_special_q_arrays()
self.q[:] = dot(self.atoms.get_positions(),
self.inv_h) - 0.5
# zeta and eta were set in __init__
self._initialize_eta_h()
deltazeta = dt * self.tfact * (atoms.get_kinetic_energy() -
self.desiredEkin)
self.zeta_past = self.zeta - deltazeta
self._calculate_q_past_and_future()
self.initialized = 1
def get_gibbs_free_energy(self):
"""Return the Gibb's free energy, which is supposed to be conserved.
Requires that the energies of the atoms are up to date.
This is mainly intended as a diagnostic tool. If called before the
first timestep, Initialize will be called.
"""
if not self.initialized:
self.Initialize()
n = self._getnatoms()
#tretaTeta = sum(diagonal(matrixmultiply(transpose(self.eta),
# self.eta)))
contractedeta = sum((self.eta*self.eta).ravel())
gibbs = (self.atoms.get_potential_energy() +
self.atoms.get_kinetic_energy()
- sum(self.externalstress[0:3]) * linalg.det(self.h) / 3.0)
if self.ttime is not None:
gibbs += (1.5 * n * self.temperature * (self.ttime * self.zeta)**2
+ 3 * self.temperature * (n-1) * self.zeta_integrated)
else:
assert self.zeta == 0.0
if self.pfactor_given is not None:
gibbs += 0.5 / self.pfact * contractedeta
else:
assert contractedeta == 0.0
return gibbs
def get_center_of_mass_momentum(self):
"Get the center of mass momentum."
return self.atoms.get_momenta().sum(0)
def zero_center_of_mass_momentum(self, verbose=0):
"Set the center of mass momentum to zero."
cm = self.get_center_of_mass_momentum()
abscm = sqrt(sum(cm*cm))
if verbose and abscm > 1e-4:
self._warning(self.classname+": Setting the center-of-mass momentum to zero (was %.6g %.6g %.6g)" % tuple(cm))
self.atoms.set_momenta(self.atoms.get_momenta()
- cm / self._getnatoms())
def post_pbc_fix(self, fixfuture=1):
"""Correct for atoms moved by the boundary conditions.
If the fixfuture argument is 1 (the default), q_future is also
corrected. This is not necessary when post_pbc_fix() is called from
within Timestep(), but must be done when the user calls post_pbc_fix
(for example if a CNA calculation may have triggered a migration).
"""
q = dot(self.atoms.get_positions(),
self.inv_h) - 0.5
delta_q = floor(0.5 + (q - self.q))
self.q += delta_q
self.q_past += delta_q
if fixfuture:
self.q_future += delta_q
def attach_atoms(self, atoms):
"""Assign atoms to a restored dynamics object.
This function must be called to set the atoms immediately after the
dynamics object has been read from a trajectory.
"""
try:
self.atoms
except AttributeError:
pass
else:
raise RuntimeError, "Cannot call attach_atoms on a dynamics which already has atoms."
MolecularDynamics.__init__(self, atoms, self.dt)
####self.atoms = atoms
limit = 1e-6
h = self._getbox()
if max(abs((h - self.h).ravel())) > limit:
raise RuntimeError, "The unit cell of the atoms does not match the unit cell stored in the file."
self.inv_h = linalg.inv(self.h)
self._make_special_q_arrays()
self.q[:] = dot(self.atoms.get_positions(),
self.inv_h) - 0.5
self._calculate_q_past_and_future()
self.initialized = 1
def _getbox(self):
"Get the computational box."
return self.atoms.get_cell()
def _getmasses(self):
"Get the masses as an Nx1 array."
return reshape(self.atoms.get_masses(), (-1,1))
# def _getcartesianpositions(self):
# "Get the cartesian positions of the atoms"
# return self.atoms.get_positions()
# def _getmomenta(self):
# "Get the (cartesian) momenta of the atoms"
# return self.atoms.GetCartesianMomenta()
# def _getforces(self):
# "Get the (cartesian) forces of the atoms"
# return self.atoms.GetCartesianForces()
# def _setmomenta(self, momenta):
# "Set the (cartesian) momenta of the atoms"
# self.atoms.SetCartesianMomenta(momenta)
def _separatetrace(self, mat):
"""return two matrices, one proportional to the identity
the other traceless, which sum to the given matrix
"""
tracePart = ((mat[0][0] + mat[1][1] + mat[2][2]) / 3.) * identity(3)
return tracePart, mat - tracePart
# A number of convenient helper methods
def _warning(self, text):
"Emit a warning."
sys.stderr.write("WARNING: "+text+"\n")
sys.stderr.flush()
def _calculate_q_future(self, force):
"Calculate future q. Needed in Timestep and Initialization."
dt = self.dt
id3 = identity(3)
alpha = (dt * dt) * dot(force / self._getmasses(),
self.inv_h)
beta = dt * dot(self.h, dot(self.eta + 0.5 * self.zeta * id3,
self.inv_h))
inv_b = linalg.inv(beta + id3)
self.q_future[:] = dot(2*self.q + dot(self.q_past, beta - id3) + alpha,
inv_b)
def _calculate_q_past_and_future(self):
def ekin(p, m = self.atoms.get_masses()):
p2 = sum(p*p, -1)
return 0.5 * sum(p2 / m) / len(m)
p0 = self.atoms.get_momenta()
m = self._getmasses()
e0 = ekin(p0)
p = array(p0, copy=1)
dt = self.dt
for i in range(2):
self.q_past[:] = self.q - dt * dot(p / m, self.inv_h)
self._calculate_q_future(self.atoms.get_forces())
p = dot(self.q_future - self.q_past, self.h/(2*dt)) * m
e = ekin(p)
if e < 1e-5:
# The kinetic energy and momenta are virtually zero
return
p = (p0 - p) + p0
def _initialize_eta_h(self):
self.h_past = self.h - self.dt * dot(self.h, self.eta)
deltaeta = (-self.dt * self.pfact * linalg.det(self.h)
* (self.atoms.get_stress() - self.externalstress))
if self.frac_traceless == 1:
self.eta_past = self.eta - self.mask * self._makeuppertriangular(deltaeta)
else:
trace_part, traceless_part = self._separatetrace(self._makeuppertriangular(deltaeta))
self.eta_past = self.eta - trace_part - self.frac_traceless * traceless_part
def _makeuppertriangular(self, sixvector):
"Make an upper triangular matrix from a 6-vector."
return array(((sixvector[0], sixvector[5], sixvector[4]),
(0, sixvector[1], sixvector[3]),
(0, 0, sixvector[2])))
def _isuppertriangular(self, m):
"Check that a matrix is on upper triangular form."
return m[1,0] == m[2,0] == m[2,1] == 0.0
def _calculateconstants(self):
"(Re)calculate some constants when pfactor, ttime or temperature have been changed."
n = self._getnatoms()
if self.ttime is None:
self.tfact = 0.0
else:
self.tfact = 2.0 / (3 * n * self.temperature *
self.ttime * self.ttime)
if self.pfactor_given is None:
self.pfact = 0.0
else:
self.pfact = 1.0 / (self.pfactor_given
* linalg.det(self._getbox()))
#self.pfact = 1.0/(n * self.temperature * self.ptime * self.ptime)
self.desiredEkin = 1.5 * (n - 1) * self.temperature
def _setbox_and_positions(self, h, q):
"""Set the computational box and the positions."""
self.atoms.set_cell(h, scale_atoms=True)
r = dot(q + 0.5, h)
self.atoms.set_positions(r)
# A few helper methods, which have been placed in separate methods
# so they can be replaced in the parallel version.
def _synchronize(self):
"""Synchronizes eta, h and zeta on all processors in a parallel simulation.
In a parallel simulation, eta, h and zeta are communicated
from the master to all slaves, to prevent numerical noise from
causing them to diverge.
In a serial simulation, do nothing.
"""
pass # This is a serial simulation object. Do nothing.
def _getnatoms(self):
"""Get the number of atoms.
In a parallel simulation, this is the total number of atoms on all
processors.
"""
return len(self.atoms)
def _make_special_q_arrays(self):
"""Make the arrays used to store data about the atoms.
In a parallel simulation, these are migrating arrays. In a
serial simulation they are ordinary Numeric arrays.
"""
natoms = len(self.atoms)
self.q = zeros((natoms,3), float)
self.q_past = zeros((natoms,3), float)
self.q_future = zeros((natoms,3), float)
# class _HooverNPTTrajectory:
# """A Trajectory-like object storing data in a HooverNPT object."""
# def InitForWrite(self):
# """Does initialization related to write mode."""
# self.CreateDimension('unlim', None)
# self.nc.history = 'ASE NPT trajectory'
# self.nc.version = '0.1'
# self.nc.classname = self.atoms.classname
# self.unlim = 0
# self.nc.lengthunit = units.GetLengthUnit()
# self.nc.energyunit = units.GetEnergyUnit()
# self.conversion = (1, 1)
# def InitForWriteOrAppend(self):
# """Does initialization related to write and append mode.
# Either InitForWrite or InitForReadOrAppend will have been
# called before calling this method.
# """
# names = copy.copy(self.known_names)
# if self.atoms.ttime is None:
# del names['ttime']
# if self.atoms.pfactor_given is None:
# del names['pfactor_given']
# for d in names.keys():
# def getdata(atoms=self.atoms, name=d):
# return getattr(atoms, name)
# self.Add(d, data = getdata)
# known_names = {
# # name shape typecode once units
# # ----------------------------------------------------------------
# 'dt': ((), Float, True, (1, -0.5)),
# 'temperature': ((), Float, True, (0, 1)),
# 'desiredEkin': ((), Float, True, (0, 1)),
# 'externalstress': ((6,), Float, True, (-3, 1)),
# 'mask': ((3, 3), Float, True, (0, 0)),
# 'ttime': ((), Float, True, (1, -0.5)),
# 'tfact': ((), Float, True, (-2, 0)),
# 'pfactor_given': ((), Float, True, (-1, 0)),
# 'pfact': ((), Float, True, (-2, 0)),
# 'frac_traceless': ((), Float, True, (0, 0)),
# 'eta': ((3, 3), Float, False, (-1, 0.5)),
# 'eta_past': ((3, 3), Float, False, (-1, 0.5)),
# 'zeta': ((), Float, False, (-1, 0.5)),
# 'zeta_past': ((), Float, False, (-1, 0.5)),
# 'zeta_integrated': ((), Float, False, (0, 0)),
# 'h': ((3, 3), Float, False, (1, 0)),
# 'h_past': ((3, 3), Float, False, (1, 0)),
# 'timeelapsed': ((), Float, False, (1, -0.5))
# }
# # This trajectory does not store a list of atoms
# def GetListOfAtoms(self, frame=None):
# raise AttributeError, "GetListOfAtoms makes no sense in a HooverNPTTrajectory"
# # Instead, we store a dynamics
# def GetDynamics(self, frame=None):
# """Get a HooverNPT Dynamics object.
# If a frame number is not given, the current frame is used.
# The variant of the object (ASE HooverNPT, ASAP Serial/Parallel NPT)
# will be the same as the stored object.
# After getting the dynamics, the atoms should be attached with the
# dynamics.attach_atoms(atoms) method.
# """
# # Bypass calling the normal constructor
# class Dummy:
# pass
# dyn = Dummy()
# dyn.__class__ = self.getClass(self.nc.classname)
# vars = self.nc.variables
# for q in self.known_names.keys():
# if vars.has_key(q):
# once = self.known_names[q][2]
# if once:
# setattr(dyn, q, vars[q].getValue())
# else:
# setattr(dyn, q, vars[q][frame])
# return dyn
# def getClass(self, classname):
# "Internal function: turns a class name into a class object."
# if self.nc.classname == "HooverNPT":
# return HooverNPT
# else:
# raise RuntimeError, ("Cannot create a dynamics of type "
# + self.nc.classname)
# class HooverNPTTrajectory(_HooverNPTTrajectory,NetCDFTrajectory):
# """A Trajectory-like object storing data in a HooverNPT object."""
# def __init__(self, filename, dynamics=None, mode=None, interval=1):
# """Open the NetCDF file.
# If there is no ``dynamics`` argument, then the file is opened
# in read mode - otherwise, write or append mode is used. The
# ``interval`` argument determines how often the configurations
# are written to file."""
# # Call the original constructor, but passing the dynamics instead of
# # the atoms.
# if dynamics is not None:
# # Prevents a circular reference when the trajectory is attached
# # to the dynamics it observes.
# dynamics = weakref.proxy(dynamics)
# NetCDFTrajectory.__init__(self, filename,
# atoms=dynamics,
# mode=mode, interval=interval)
| freephys/python_ase | ase/md/npt.py | Python | gpl-3.0 | 27,683 | [
"ASE",
"NetCDF"
] | d743dafe6c23e2b8a311a1b33ae0bc43d014ee5e5006fa63662b6e474d4a483d |
"""
Test helper functions and base classes.
"""
import inspect
import json
import unittest
import functools
import operator
import pprint
import requests
import os
import urlparse
from contextlib import contextmanager
from datetime import datetime
from path import Path as path
from bok_choy.javascript import js_defined
from bok_choy.web_app_test import WebAppTest
from bok_choy.promise import EmptyPromise, Promise
from bok_choy.page_object import XSS_INJECTION
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from common.test.acceptance.pages.studio.auto_auth import AutoAuthPage
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from opaque_keys.edx.locator import CourseLocator
from pymongo import MongoClient, ASCENDING
from openedx.core.lib.tests.assertions.events import assert_event_matches, is_matching_event, EventMatchTolerates
from xmodule.partitions.partitions import UserPartition
from xmodule.partitions.tests.test_partitions import MockUserPartitionScheme
from selenium.common.exceptions import StaleElementReferenceException
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from unittest import TestCase
from common.test.acceptance.pages.common import BASE_URL
# Presumably caps how many events are shown when an event assertion fails,
# to keep failure output readable -- TODO confirm at the call sites.
MAX_EVENTS_IN_FAILURE_OUTPUT = 20
def skip_if_browser(browser):
    """
    Build a method decorator that skips the wrapped test when the active
    browser is `browser`.

    Args:
        browser (str): name of internet browser

    Returns:
        Decorated function
    """
    def _decorate(func):
        @functools.wraps(func)
        def _wrapped(self, *args, **kwargs):
            if self.browser.name == browser:
                raise unittest.SkipTest('Skipping as this test will not work with {}'.format(browser))
            func(self, *args, **kwargs)
        return _wrapped
    return _decorate
def is_youtube_available():
    """
    Check that every YouTube endpoint the video tests rely on is reachable.

    If a URL in `youtube_api_urls` is not reachable then subsequent URLs will not be checked.

    Returns:
        bool:
    """
    youtube_api_urls = {
        'main': 'https://www.youtube.com/',
        'player': 'https://www.youtube.com/iframe_api',
        # For transcripts, you need to check an actual video, so we will
        # just specify our default video and see if that one is available.
        'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk',
    }

    for endpoint in youtube_api_urls.itervalues():
        try:
            reply = requests.get(endpoint, allow_redirects=False)
        except requests.exceptions.ConnectionError:
            return False
        # Treat any redirect or error status as "unavailable".
        if reply.status_code >= 300:
            return False
    return True
def is_focused_on_element(browser, selector):
    """
    Report whether the element matched by `selector` currently has focus.
    """
    script = "return $('{}').is(':focus')".format(selector)
    return browser.execute_script(script)
def load_data_str(rel_path):
    """
    Read a file under this package's "data" directory and return its contents.

    `rel_path` is the path relative to the data directory.
    """
    data_dir = path(__file__).abspath().dirname() / "data"
    with open(data_dir / rel_path) as data_file:
        return data_file.read()
def remove_file(filename):
    """
    Remove `filename` if it exists; do nothing if it is already gone.

    Uses EAFP instead of the exists()/remove() pair so a file deleted by
    another process between the check and the removal does not raise.
    """
    import errno  # local import: keeps the module-level import block untouched

    try:
        os.remove(filename)
    except OSError as err:
        # Only a missing file is acceptable; re-raise anything else
        # (e.g. permission errors) so the failure stays visible.
        if err.errno != errno.ENOENT:
            raise
def disable_animations(page):
    """
    Turn off both CSS3 and jQuery animations on the given page.
    """
    # The two calls are independent of each other.
    disable_css_animations(page)
    disable_jquery_animations(page)
def enable_animations(page):
    """
    Turn CSS3 and jQuery animations back on for the given page.
    """
    # The two calls are independent of each other.
    enable_css_animations(page)
    enable_jquery_animations(page)
@js_defined('window.jQuery')
def disable_jquery_animations(page):
    """
    Disable jQuery animations.
    """
    # Setting jQuery's global fx switch makes all jQuery effects complete
    # immediately instead of animating.
    page.browser.execute_script("jQuery.fx.off = true;")
@js_defined('window.jQuery')
def enable_jquery_animations(page):
    """
    Enable jQuery animations.
    """
    # Restores jQuery's global fx switch so effects animate normally again.
    page.browser.execute_script("jQuery.fx.off = false;")
def disable_css_animations(page):
    """
    Disable CSS3 animations, transitions, transforms.
    """
    # Injects a one-shot <style> element (id "no-transitions") that forcibly
    # disables all transitions/transforms/animations for every element.
    # enable_css_animations() removes that element again.
    page.browser.execute_script("""
        var id = 'no-transitions';
        // if styles were already added, just do nothing.
        if (document.getElementById(id)) {
            return;
        }
        var css = [
                '* {',
                    '-webkit-transition: none !important;',
                    '-moz-transition: none !important;',
                    '-o-transition: none !important;',
                    '-ms-transition: none !important;',
                    'transition: none !important;',
                    '-webkit-transition-property: none !important;',
                    '-moz-transition-property: none !important;',
                    '-o-transition-property: none !important;',
                    '-ms-transition-property: none !important;',
                    'transition-property: none !important;',
                    '-webkit-transform: none !important;',
                    '-moz-transform: none !important;',
                    '-o-transform: none !important;',
                    '-ms-transform: none !important;',
                    'transform: none !important;',
                    '-webkit-animation: none !important;',
                    '-moz-animation: none !important;',
                    '-o-animation: none !important;',
                    '-ms-animation: none !important;',
                    'animation: none !important;',
                '}'
            ].join(''),
            head = document.head || document.getElementsByTagName('head')[0],
            styles = document.createElement('style');
        styles.id = id;
        styles.type = 'text/css';
        if (styles.styleSheet){
            styles.styleSheet.cssText = css;
        } else {
            styles.appendChild(document.createTextNode(css));
        }
        head.appendChild(styles);
    """)
def enable_css_animations(page):
    """
    Enable CSS3 animations, transitions, transforms.
    """
    # Removes the "no-transitions" <style> element injected by
    # disable_css_animations().  NOTE(review): if that element was never
    # injected, removeChild(null) will raise in the browser -- callers are
    # expected to have called disable_css_animations() first.
    page.browser.execute_script("""
        var styles = document.getElementById('no-transitions'),
            head = document.head || document.getElementsByTagName('head')[0];
        head.removeChild(styles)
    """)
def select_option_by_text(select_browser_query, option_text):
    """
    Choose an option within a select by its visible text (wraps Select's
    select_by_visible_text method).

    The attempt is retried via an EmptyPromise so that a
    StaleElementReferenceException raised while the DOM is still being
    rewritten simply causes another try.
    """
    def attempt(query, value):
        """ Try once to select `value` on the first element matching `query`. """
        try:
            Select(query.first.results[0]).select_by_visible_text(value)
        except StaleElementReferenceException:
            return False
        return True

    EmptyPromise(
        lambda: attempt(select_browser_query, option_text),
        'Selected option {}'.format(option_text)
    ).fulfill()
def get_selected_option_text(select_browser_query):
    """
    Return the text of the first selected option within a select.

    Wrapped in a Promise so that a StaleElementReferenceException raised
    while the DOM is still being rewritten simply causes a retry.
    """
    def read_option(query):
        """ Return (success, text) for the first select matching the query. """
        try:
            selected = Select(query.first.results[0]).first_selected_option
        except StaleElementReferenceException:
            return (False, None)
        return (True, selected.text)

    return Promise(
        lambda: read_option(select_browser_query),
        'Retrieved selected option text'
    ).fulfill()
def get_options(select_browser_query):
    """
    Return every option element belonging to the given select.
    """
    select = Select(select_browser_query.first.results[0])
    return select.options
def generate_course_key(org, number, run):
    """
    Build a CourseLocator from org, number and run.

    The locator is flagged deprecated when the default modulestore
    (DEFAULT_STORE environment variable) is the 'draft' store.
    """
    deprecated = os.environ.get('DEFAULT_STORE', 'draft') == 'draft'
    return CourseLocator(org, number, run, deprecated=deprecated)
def select_option_by_value(browser_query, value):
    """
    Selects a html select element by matching value attribute
    """
    select = Select(browser_query.first.results[0])
    select.select_by_value(value)

    def options_selected():
        """
        Returns True if all options in select element where value attribute
        matches `value`. if any option is not selected then returns False
        and select it. if value is not an option choice then it returns False.
        """
        all_options_selected = True
        has_option = False
        for opt in select.options:
            if opt.get_attribute('value') == value:
                has_option = True
                if not opt.is_selected():
                    # Side effect: clicking here performs the selection, so a
                    # later retry of this predicate can observe it as selected.
                    all_options_selected = False
                    opt.click()
        # if value is not an option choice then it should return false
        if all_options_selected and not has_option:
            all_options_selected = False
        return all_options_selected

    # Make sure specified option is actually selected
    # NOTE: the predicate both checks and (via click) repairs the selection;
    # EmptyPromise keeps calling it until it reports success.
    EmptyPromise(options_selected, "Option is selected").fulfill()
def is_option_value_selected(browser_query, value):
    """
    Return True when `value` is the currently selected option of the select
    element matched by `browser_query`, else False.
    """
    selected = Select(browser_query.first.results[0]).first_selected_option
    return selected.get_attribute('value') == value
def element_has_text(page, css_selector, text):
    """
    Return True if `text` appears among the texts of the elements matching
    `css_selector` on the page.
    """
    # `in` on an empty list is already False, so no explicit length check
    # is needed.
    return text in page.q(css=css_selector).text
def get_modal_alert(browser):
    """
    Wait up to 6 seconds for a modal alert box to appear, then return it.
    """
    waiter = WebDriverWait(browser, 6)
    waiter.until(EC.alert_is_present())
    return browser.switch_to.alert
def get_element_padding(page, selector):
    """
    Get Padding of the element with given selector,

    :returns a dict object with the following keys.
            1 - padding-top
            2 - padding-right
            3 - padding-bottom
            4 - padding-left
    Values are strings with the trailing "px" suffix stripped
    (e.g. "10", not "10px" or the integer 10).

    Example Use:
        progress_page.get_element_padding('.wrapper-msg.wrapper-auto-cert')

    """
    # The selector is spliced into a jQuery lookup; the script reads the four
    # computed padding values and strips the "px" unit from each.
    js_script = """
        var $element = $('%(selector)s');

        element_padding = {
            'padding-top': $element.css('padding-top').replace("px", ""),
            'padding-right': $element.css('padding-right').replace("px", ""),
            'padding-bottom': $element.css('padding-bottom').replace("px", ""),
            'padding-left': $element.css('padding-left').replace("px", "")
        };

        return element_padding;
    """ % {'selector': selector}

    return page.browser.execute_script(js_script)
def is_404_page(browser):
    """ Return True when the browser is currently showing the 404 page. """
    heading = browser.find_element_by_tag_name('h1').text
    return 'Page not found (404)' in heading
def create_multiple_choice_xml(correct_choice=2, num_choices=4):
    """
    Return multiple-choice problem XML with `num_choices` options, of which
    only the option at index `correct_choice` is correct.
    """
    # One boolean per option; only the correct index is True.
    choices = [index == correct_choice for index in range(num_choices)]
    choice_names = ['choice_{}'.format(index) for index in range(num_choices)]
    question_text = 'The correct answer is Choice {}'.format(correct_choice)

    return MultipleChoiceResponseXMLFactory().build_xml(
        question_text=question_text,
        choices=choices,
        choice_names=choice_names,
    )
def create_multiple_choice_problem(problem_name):
    """
    Return an XBlockFixtureDesc for a multiple-choice problem named
    `problem_name`, using the default generated problem XML.
    """
    return XBlockFixtureDesc(
        'problem',
        problem_name,
        data=create_multiple_choice_xml(),
        metadata={'rerandomize': 'always'}
    )
def auto_auth(browser, username, email, staff, course_id):
    """
    Log out and log back in with the given credentials.
    """
    page = AutoAuthPage(browser, username=username, email=email, course_id=course_id, staff=staff)
    page.visit()
def assert_link(test, expected_link, actual_link):
    """
    Assert that the 'href' and the text of a help DOM element are correct.

    Arguments:
        test: Test on which links are being tested.
        expected_link (dict): The expected link attributes ('href' and 'text').
        actual_link: The actual link element found on the page.
    """
    actual_values = {
        'href': actual_link.get_attribute('href'),
        'text': actual_link.text,
    }
    for key in ('href', 'text'):
        test.assertEqual(expected_link[key], actual_values[key])
def assert_opened_help_link_is_correct(test, url):
    """
    Assert that the URL of the browser after clicking a help link is correct.

    Arguments:
        test (AcceptanceTest): test calling this method.
        url (str): url to verify.
    """
    browser = test.browser
    # Help links open in a new window; switch to the most recent one.
    browser.switch_to_window(browser.window_handles[-1])
    test.assertEqual(url, browser.current_url)
    # 'Maze Found' is the title of the docs 404 page; it must not appear.
    test.assertNotIn('Maze Found', browser.title)
class EventsTestMixin(TestCase):
    """
    Helpers and setup for running tests that evaluate events emitted
    """
    def setUp(self):
        super(EventsTestMixin, self).setUp()
        self.event_collection = MongoClient()["test"]["events"]
        self.start_time = datetime.now()

    def reset_event_tracking(self):
        """Drop any events that have been collected thus far and start collecting again from scratch."""
        self.event_collection.drop()
        self.start_time = datetime.now()

    @contextmanager
    def capture_events(self, event_filter=None, number_of_matches=1, captured_events=None):
        """
        Context manager that captures all events emitted while executing a particular block.

        All captured events are stored in the list referenced by `captured_events`. Note that this list is appended to
        *in place*. The events will be appended to the list in the order they are emitted.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation. `None` matches all events.

        `number_of_matches` tells this context manager when enough events have been found and it can move on. The
        context manager will not exit until this many events have passed the filter. If not enough events are found
        before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that
        *at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of
        `captured_events`.
        """
        # NOTE(review): setUp stamps `self.start_time` with datetime.now() but this
        # window starts at datetime.utcnow() -- confirm the event collector stores
        # UTC timestamps, otherwise the time filters may be skewed.
        start_time = datetime.utcnow()
        yield
        events = self.wait_for_events(
            start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches)
        # Only copy the events out if the caller supplied a list-like receiver.
        if captured_events is not None and hasattr(captured_events, 'append') and callable(captured_events.append):
            for event in events:
                captured_events.append(event)

    @contextmanager
    def assert_events_match_during(self, event_filter=None, expected_events=None, in_order=True):
        """
        Context manager that ensures that events matching the `event_filter` and `expected_events` are emitted.

        This context manager will filter out the event stream using the `event_filter` and wait for
        `len(expected_events)` to match the filter.

        It will then compare the events in order with their counterpart in `expected_events` to ensure they match the
        more detailed assertion.

        Typically `event_filter` will be an `event_type` filter and the `expected_events` list will contain more
        detailed assertions.
        """
        captured_events = []
        with self.capture_events(event_filter, len(expected_events), captured_events):
            yield
        self.assert_events_match(expected_events, captured_events, in_order=in_order)

    def wait_for_events(self, start_time=None, event_filter=None, number_of_matches=1, timeout=None):
        """
        Wait for `number_of_matches` events to pass the `event_filter`.

        By default, this will look at all events that have been emitted since the beginning of the setup of this mixin.
        A custom `start_time` can be specified which will limit the events searched to only those emitted after that
        time.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation. `None` matches all events.

        `number_of_matches` lets us know when enough events have been found and it can move on. The function will not
        return until this many events have passed the filter. If not enough events are found before a timeout expires,
        then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this many events have
        been emitted, so `number_of_matches` is simply a lower bound for the size of `captured_events`.

        Specifying a custom `timeout` can allow you to extend the default 30 second timeout if necessary.
        """
        if start_time is None:
            start_time = self.start_time
        if timeout is None:
            timeout = 30

        def check_for_matching_events():
            """Gather any events that have been emitted since `start_time`"""
            # Returns a (bool, result) tuple, which is the form Promise expects
            # from its check function.
            return self.matching_events_were_emitted(
                start_time=start_time,
                event_filter=event_filter,
                number_of_matches=number_of_matches
            )

        return Promise(
            check_for_matching_events,
            # This is a bit of a hack, Promise calls str(description), so I set the description to an object with a
            # custom __str__ and have it do some intelligent stuff to generate a helpful error message.
            CollectedEventsDescription(
                'Waiting for {number_of_matches} events to match the filter:\n{event_filter}'.format(
                    number_of_matches=number_of_matches,
                    event_filter=self.event_filter_to_descriptive_string(event_filter),
                ),
                functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
            ),
            timeout=timeout
        ).fulfill()

    def matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
        """
        Return a tuple ``(enough_matches, matching_events)``.

        The tuple form is what `Promise` expects from its check function; callers
        that only need the boolean must unpack the first element (see
        `assert_matching_events_were_emitted`).
        """
        matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
        return len(matching_events) >= number_of_matches, matching_events

    def get_matching_events_from_time(self, start_time=None, event_filter=None):
        """
        Return a list of events that pass the `event_filter` and were emitted after `start_time`.

        This function is used internally by most of the other assertions and convenience methods in this class.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation. `None` matches all events.
        """
        if start_time is None:
            start_time = self.start_time

        if isinstance(event_filter, dict):
            event_filter = functools.partial(is_matching_event, event_filter)
        elif event_filter is not None and not callable(event_filter):
            # Previously a `None` filter (the documented default, meaning "all
            # events") incorrectly raised this ValueError.
            raise ValueError(
                'event_filter must either be a dict or a callable function with as single "event" parameter that '
                'returns a boolean value.'
            )

        matching_events = []
        cursor = self.event_collection.find(
            {
                "time": {
                    "$gte": start_time
                }
            }
        ).sort("time", ASCENDING)
        for event in cursor:
            # With no filter at all, every event since `start_time` matches.
            matches = event_filter is None
            try:
                # Mongo automatically assigns an _id to all events inserted into it. We strip it out here, since
                # we don't care about it.
                del event['_id']
                if event_filter is not None:
                    # Typically we will be grabbing all events of a particular type, however, you can use arbitrary
                    # logic to identify the events that are of interest.
                    matches = event_filter(event)
            except AssertionError:
                # allow the filters to use "assert" to filter out events
                continue
            else:
                # A filter returning None (assert-style filters) counts as a match.
                if matches is None or matches:
                    matching_events.append(event)
        return matching_events

    def assert_matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
        """Assert that at least `number_of_matches` events have passed the filter since `start_time`."""
        description = CollectedEventsDescription(
            'Not enough events match the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
            functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
        )

        # `matching_events_were_emitted` returns a (bool, events) tuple; asserting
        # on the tuple itself would always pass because non-empty tuples are
        # truthy, so unpack the boolean first.
        were_emitted, _matching_events = self.matching_events_were_emitted(
            start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches
        )
        self.assertTrue(were_emitted, description)

    def assert_no_matching_events_were_emitted(self, event_filter, start_time=None):
        """Assert that no events have passed the filter since `start_time`."""
        matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)

        description = CollectedEventsDescription(
            'Events unexpectedly matched the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
            lambda: matching_events
        )

        self.assertEqual(len(matching_events), 0, description)

    def assert_events_match(self, expected_events, actual_events, in_order=True):
        """Assert that each actual event matches one of the expected events.

        Args:
            expected_events (List): a list of dicts representing the expected events.
            actual_events (List): a list of dicts that were actually recorded.
            in_order (bool): if True then the events must be in the same order (defaults to True).
        """
        if in_order:
            for expected_event, actual_event in zip(expected_events, actual_events):
                assert_event_matches(
                    expected_event,
                    actual_event,
                    tolerate=EventMatchTolerates.lenient()
                )
        else:
            for expected_event in expected_events:
                actual_event = next(event for event in actual_events if is_matching_event(expected_event, event))
                assert_event_matches(
                    expected_event,
                    actual_event or {},
                    tolerate=EventMatchTolerates.lenient()
                )

    def relative_path_to_absolute_uri(self, relative_path):
        """Return an absolute URI given a relative path taking into account the test context."""
        return urlparse.urljoin(BASE_URL, relative_path)

    def event_filter_to_descriptive_string(self, event_filter):
        """Find the source code of the callable or pretty-print the dictionary"""
        message = ''
        if callable(event_filter):
            file_name = '(unknown)'
            try:
                file_name = inspect.getsourcefile(event_filter)
            except TypeError:
                pass

            try:
                list_of_source_lines, line_no = inspect.getsourcelines(event_filter)
            except IOError:
                pass
            else:
                message = '{file_name}:{line_no}\n{hr}\n{event_filter}\n{hr}'.format(
                    event_filter=''.join(list_of_source_lines).rstrip(),
                    file_name=file_name,
                    line_no=line_no,
                    hr='-' * 20,
                )

        if not message:
            # No source available (e.g. a dict or builtin): pretty-print instead.
            message = '{hr}\n{event_filter}\n{hr}'.format(
                event_filter=pprint.pformat(event_filter),
                hr='-' * 20,
            )

        return message
class CollectedEventsDescription(object):
    """
    Produce a clear error message when tests fail.

    This class calls the provided `get_events_func` when converted to a string, and pretty prints the returned events.
    """

    def __init__(self, description, get_events_func):
        self.description = description
        self.get_events_func = get_events_func

    def __str__(self):
        lines = [self.description, 'Events:']
        collected = self.get_events_func()
        # Show the most recent events first.
        collected.sort(key=operator.itemgetter('time'), reverse=True)
        for event in collected[:MAX_EVENTS_IN_FAILURE_OUTPUT]:
            lines.append(pprint.pformat(event))
        if len(collected) > MAX_EVENTS_IN_FAILURE_OUTPUT:
            lines.append(
                'Too many events to display, the remaining events were omitted. Run locally to diagnose.')
        return '\n\n'.join(lines)
class AcceptanceTest(WebAppTest):
    """
    The base class of all acceptance tests.
    """

    def __init__(self, *args, **kwargs):
        super(AcceptanceTest, self).__init__(*args, **kwargs)

        # Show both the actual and the expected value when an assertion fails.
        self.longMessage = True  # pylint: disable=invalid-name
class UniqueCourseTest(AcceptanceTest):
    """
    Test that provides a unique course ID.
    """

    def __init__(self, *args, **kwargs):
        """
        Create a unique course ID.
        """
        super(UniqueCourseTest, self).__init__(*args, **kwargs)

    def setUp(self):
        super(UniqueCourseTest, self).setUp()
        unique = self.unique_id
        # XSS_INJECTION is embedded in the display name so that every page
        # rendering it is implicitly checked for escaping.
        self.course_info = {
            'org': 'test_org',
            'number': unique,
            'run': 'test_run',
            'display_name': 'Test Course' + XSS_INJECTION + unique
        }

    @property
    def course_id(self):
        """
        Returns the serialized course_key for the test
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        store = os.environ.get('DEFAULT_STORE', 'draft')
        key = CourseLocator(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            deprecated=(store == 'draft')
        )
        return unicode(key)
class YouTubeConfigError(Exception):
    """
    Raised when configuring the YouTube stub server fails.
    """
    pass
class YouTubeStubConfig(object):
    """
    Configure YouTube Stub Server.
    """

    PORT = 9080
    URL = 'http://127.0.0.1:{}/'.format(PORT)

    @classmethod
    def configure(cls, config):
        """
        Push ``config`` to the stub server via its /set_config endpoint.

        Arguments:
            config (dict): Configuration dictionary.

        Raises:
            YouTubeConfigError: if the server rejects the request.
        """
        endpoint = cls.URL + 'set_config'
        # Each value is serialized to JSON before being sent as form data.
        payload = dict((param, json.dumps(value)) for param, value in config.items())
        response = requests.put(endpoint, data=payload)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
                    endpoint, config, response.status_code))

    @classmethod
    def reset(cls):
        """
        Clear the stub server configuration via its /del_config endpoint.

        Raises:
            YouTubeConfigError: if the server rejects the request.
        """
        endpoint = cls.URL + 'del_config'
        response = requests.delete(endpoint)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
                    endpoint, response.status_code))

    @classmethod
    def get_configuration(cls):
        """
        Fetch and return the stub server's current configuration.

        Returns:
            dict: the configuration, or {} if the request failed.
        """
        endpoint = cls.URL + 'get_config'
        response = requests.get(endpoint)
        if response.ok:
            return json.loads(response.content)
        return {}
def create_user_partition_json(partition_id, name, description, groups, scheme="random"):
    """
    Build user partition JSON; the "random" scheme is used unless another is given.
    """
    partition = UserPartition(
        partition_id, name, description, groups, MockUserPartitionScheme(scheme)
    )
    return partition.to_json()
def assert_nav_help_link(test, page, href, signed_in=True):
    """
    Assert that the help link in the navigation bar is correct.

    It first checks the url inside the anchor DOM element and
    then clicks it to ensure that help opens correctly.

    Arguments:
        test (AcceptanceTest): Test object
        page (PageObject): Page object to perform tests on.
        href (str): The help link which we expect to see when it is opened.
        signed_in (bool): Whether the user is logged in (this affects the css).
    """
    expected_link = {'href': href, 'text': 'Help'}
    # Fetch the help anchor from the nav bar and click it.
    nav_help_element = page.get_nav_help_element_and_click_help(signed_in)
    # Verify the anchor's href and text match what we expect.
    assert_link(test, expected_link, nav_help_element)
    # Verify the browser actually landed on the expected help page.
    assert_opened_help_link_is_correct(test, href)
def assert_side_bar_help_link(test, page, href, help_text, as_list_item=False, index=-1):
    """
    Asserts that help link in side bar is correct.

    It first checks the url inside anchor DOM element and
    then clicks to ensure that help opens correctly.

    Arguments:
        test (AcceptanceTest): Test object
        page (PageObject): Page object to perform tests on.
        href (str): The help link which we expect to see when it is opened.
        help_text (str): The visible text expected on the help link anchor.
        as_list_item (bool): Specifies whether help element is in one of the
        'li' inside a sidebar list DOM element.
        index (int): The index of element in case there are more than
        one matching elements.
    """
    expected_link = {
        'href': href,
        'text': help_text
    }
    # Get actual anchor help element from the page.
    actual_link = page.get_side_bar_help_element_and_click_help(as_list_item=as_list_item, index=index)
    # Assert that 'href' and text are the same as expected.
    assert_link(test, expected_link, actual_link)
    # Assert that opened link is correct
    assert_opened_help_link_is_correct(test, href)
class TestWithSearchIndexMixin(object):
    """ Mixin encapsulating search index creation """
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def _create_search_index(self):
        """ Creates search index backing file """
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            index_file.write(json.dumps({}))

    def _cleanup_index_file(self):
        """ Removes search index backing file """
        remove_file(self.TEST_INDEX_FILENAME)
| jzoldak/edx-platform | common/test/acceptance/tests/helpers.py | Python | agpl-3.0 | 32,321 | [
"VisIt"
] | 37484072f146334a0031af8a3b341528418347b7ddb5c5f889e7816f6c66b598 |
#!/usr/bin/env python
# This is a test that:
# - gets the (DIRAC-free) PilotWrapper.py (that should be in input)
# - use its functions to generate a pilot wrapper
# - starts it
#
# It should be executed for different versions of python, e.g.:
# - 2.6.x
# - 2.7.x (x < 9)
# - 2.7.x (x >= 9)
# - 3.6.x
#
#
# Invoke this with:
#
# python Test_GenerateAndExecutePilotWrapper.py url://to_PilotWrapper.py
# (and in this case it will download dirac-install.py from github)
# or
# python Test_GenerateAndExecutePilotWrapper.py url://to_PilotWrapper.py url://to_dirac-install.py
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
import os
import time
import base64
import bz2
# 1) gets the (DIRAC-free) PilotWrapper.py, and dirac-install.py
# urllib is different between python 2 and 3
if sys.version_info < (3,):
    from urllib2 import urlopen as url_library_urlopen  # pylint: disable=import-error
else:
    from urllib.request import urlopen as url_library_urlopen  # pylint: disable=import-error,no-name-in-module

# From 2.7.9 onwards urlopen verifies certificates by default; pass an
# unverified SSL context so the test does not depend on the local CA store.
# Building the keyword dict once removes the duplicated fetch logic that the
# two version branches previously carried.
urlopen_kwargs = {}
if sys.version_info >= (2, 7, 9):
    import ssl  # pylint: disable=import-error
    urlopen_kwargs['context'] = ssl._create_unverified_context()

# PilotWrapper.py location always comes from the first argument
rf = url_library_urlopen(sys.argv[1], **urlopen_kwargs)
try:  # dirac-install.py location from the args, if provided
    di = url_library_urlopen(sys.argv[2], **urlopen_kwargs)
except IndexError:
    di_loc = 'https://raw.githubusercontent.com/DIRACGrid/management/master/dirac-install.py'
    di = url_library_urlopen(di_loc, **urlopen_kwargs)

# the context manager closes the files (supported since Python 2.5), so no
# explicit close() calls are needed
with open('PilotWrapper.py', 'wb') as pj:
    pj.write(rf.read())
with open('dirac-install.py', 'wb') as pj:
    pj.write(di.read())
# 2) use its functions to generate a pilot wrapper
time.sleep(1)
# by now this will be in the local dir
from PilotWrapper import pilotWrapperScript  # pylint: disable=import-error

install_script_path = os.path.join(os.getcwd(), 'dirac-install.py')
with open(install_script_path, "rb") as install_file:
    install_script = install_file.read()
# the wrapper embeds dirac-install.py as a bz2-compressed, base64-encoded blob
encoded_install = base64.b64encode(bz2.compress(install_script, 9)).decode()

wrapper_body = pilotWrapperScript(
    pilotFilesCompressedEncodedDict={'dirac-install.py': encoded_install},
    pilotOptions="--setup=CI -N ce.dirac.org -Q DIRACQUEUE -n DIRAC.CI.ORG --debug",
    location='diracproject.web.cern.ch/diracproject/tars/Pilot/DIRAC/master/,wrong.cern.ch')

with open('pilot-wrapper.sh', 'wb') as wrapper_file:
    wrapper_file.write(wrapper_body.encode())

# 3) now start it
exit_status = os.system("sh pilot-wrapper.sh")
if exit_status:
    sys.exit(1)
| yujikato/DIRAC | tests/Integration/WorkloadManagementSystem/Test_GenerateAndExecutePilotWrapper.py | Python | gpl-3.0 | 2,960 | [
"DIRAC"
] | 100ab93bba0cd3c4afb879ed2f0115449476cfdcdc41d2276fc6dde21f107563 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Helper functions --- :mod:`MDAnalysis.lib.util`
====================================================
Small helper functions that don't fit anywhere else.
Files and directories
---------------------
.. autofunction:: filename
.. function:: openany(directory[,mode='r'])
Context manager to open a compressed (bzip2, gzip) or plain file
(uses :func:`anyopen`).
.. autofunction:: anyopen
.. autofunction:: greedy_splitext
.. autofunction:: which
.. autofunction:: realpath
.. autofunction:: guess_format
Streams
-------
Many of the readers are not restricted to just reading files. They can
also use gzip-compressed or bzip2-compressed files (through the
internal use of :func:`openany`). It is also possible to provide more
general streams as inputs, such as a :func:`cStringIO.StringIO`
instances (essentially, a memory buffer) by wrapping these instances
into a :class:`NamedStream`. This :class:`NamedStream` can then be
used in place of an ordinary file name (typically, with a
class:`~MDAnalysis.core.universe.Universe` but it is also possible to
*write* to such a stream using :func:`MDAnalysis.Writer`).
.. rubric: Examples
In the following example, we use a PDB stored as a string ``pdb_s``::
import MDAnalysis
from MDAnalysis.lib.util import NamedStream
import cStringIO
pdb_s = "TITLE Lonely Ion\\nATOM 1 NA NA+ 1 81.260 64.982 10.926 1.00 0.00\\n"
u = MDAnalysis.Universe(NamedStream(cStringIO.StringIO(pdb_s), "ion.pdb"))
print(u)
# <Universe with 1 atoms>
print(u.atoms.positions)
# [[ 81.26000214 64.98200226 10.92599964]]
It is important to provide a proper pseudo file name with the correct extension
(".pdb") to :class:`NamedStream` because the file type recognition uses the
extension of the file name to determine the file format or alternatively
provide the ``format="pdb"`` keyword argument to the
:class:`~MDAnalysis.core.universe.Universe`.
The use of streams becomes more interesting when MDAnalysis is used as glue
between different analysis packages and when one can arrange things so that
intermediate frames (typically in the PDB format) are not written to disk but
remain in memory via e.g. :mod:`cStringIO` buffers.
.. The following does *not* work because most readers need to
.. reopen files, which is not possible with http streams. Might
.. need to implement a buffer.
..
.. Read a test LAMMPS data file from the MDAnalysis repository::
..
.. import MDAnalysis
.. from MDAnalysis.lib.util import NamedStream
.. import urllib2
.. URI = "https://mdanalysis.googlecode.com/git-history/develop/testsuite/MDAnalysisTests/data/mini.data"
.. urldata = NamedStream(urllib2.urlopen(URI), "mini.data")
.. u = MDAnalysis.Universe(urldata)
.. Note:: A remote connection created by :func:`urllib2.urlopen` is not seekable
and therefore will often not work as an input. But try it...
.. autoclass:: NamedStream
:members:
.. autofunction:: isstream
Containers and lists
--------------------
.. autofunction:: iterable
.. autofunction:: asiterable
.. autofunction:: hasmethod
.. autoclass:: Namespace
File parsing
------------
.. autoclass:: FORTRANReader
:members:
.. autodata:: FORTRAN_format_regex
Data manipulation and handling
------------------------------
.. autofunction:: fixedwidth_bins
Strings
-------
.. autofunction:: convert_aa_code
.. autofunction:: parse_residue
.. autofunction:: conv_float
Class decorators
----------------
.. autofunction:: cached
.. Rubric:: Footnotes
.. [#NamedStreamClose] The reason why :meth:`NamedStream.close` does
not close a stream by default (but just rewinds it to the
beginning) is so that one can use the class :class:`NamedStream` as
a drop-in replacement for file names, which are often re-opened
(e.g. when the same file is used as a topology and coordinate file
or when repeatedly iterating through a trajectory in some
implementations). The ``close=True`` keyword can be supplied in
order to make :meth:`NamedStream.close` actually close the
underlying stream and ``NamedStream.close(force=True)`` will also
close it.
.. versionchanged:: 0.11.0
Moved mathematical functions into lib.mdamath
"""
from __future__ import division, absolute_import
import six
from six.moves import range, map
import sys
__docformat__ = "restructuredtext en"
import os
import os.path
import errno
from contextlib import contextmanager
import bz2
import gzip
import re
import io
import warnings
from functools import wraps
import mmtf
import numpy as np
import functools
from numpy.testing import assert_equal
from ..exceptions import StreamWarning
# Python 3.0, 3.1 do not have the builtin callable()
try:
callable(list)
except NameError:
# http://bugs.python.org/issue10518
import collections
def callable(obj):
return isinstance(obj, collections.Callable)
def filename(name, ext=None, keep=False):
    """Return *name* with suffix *ext* attached, replacing other extensions.

    :Arguments:
       *name*
           filename; the extension is replaced unless ``keep=True``;
           *name* can also be a :class:`NamedStream` (and its
           :attr:`NamedStream.name` will be changed accordingly)
       *ext*
           new extension (with or without a leading ``os.extsep``)
       *keep*
           - ``False``: replace existing extension with *ext*;
           - ``True``: keep old extension if one existed

    .. versionchanged:: 0.9.0
       Also permits :class:`NamedStream` to pass through.
    """
    if ext is not None:
        if not ext.startswith(os.path.extsep):
            ext = os.path.extsep + ext
        root, old_ext = os.path.splitext(name)
        if not keep or len(old_ext) == 0:
            candidate = root + ext
            if isstream(name):
                # rename the stream in place instead of replacing the object
                name.name = candidate
            else:
                name = candidate
    return name if isstream(name) else str(name)
@contextmanager
def openany(datasource, mode='r', reset=True):
    """Context manager around :func:`anyopen`.

    Opens *datasource* (a filename or a stream, see :func:`isstream`) and
    closes it again when the context of the :keyword:`with` statement exits.
    A stream is reset to its start if possible (via :meth:`~io.IOBase.seek`
    or :meth:`~cString.StringIO.reset`).

    :Arguments:
       *datasource*
           a file or a stream
       *mode*
           'r' or 'w'
       *reset*
           try to read (*mode* 'r') the stream from the start [``True``]

    .. rubric:: Examples

    Open a gzipped file and process it line by line::

       with openany("input.pdb.gz") as pdb:
           for line in pdb:
               if line.startswith('ATOM'): print(line)

    Open a URL and read it::

       import urllib2
       with openany(urllib2.urlopen("http://www.MDAnalysis.org/")) as html:
           print(html.read())

    .. SeeAlso:: :func:`anyopen`
    """
    opened = anyopen(datasource, mode=mode, reset=reset)
    try:
        yield opened
    finally:
        opened.close()
# On python 3, we want to use bz2.open to open and uncompress bz2 files. That
# function allows us to specify the type of the uncompressed file (bytes or text).
# The function does not exist in python 2, so we must use bz2.BZ2File, which
# cannot be told whether the uncompressed file contains bytes or text.
# Therefore, on python 2 we use a proxy function that removes the type of the
# uncompressed file from the `mode` argument.
if hasattr(bz2, 'open'):
    # Python 3: bz2.open natively understands text/binary mode qualifiers.
    bz2_open = bz2.open
else:
    # Python 2: emulate bz2.open with BZ2File, which only accepts plain
    # read/write modes, so strip the 't'/'b' qualifiers before delegating.
    def bz2_open(filename, mode):
        """Open and uncompress a BZ2 file"""
        mode = mode.replace('t', '').replace('b', '')
        return bz2.BZ2File(filename, mode)
def anyopen(datasource, mode='r', reset=True):
    """Open datasource (gzipped, bzipped, uncompressed) and return a stream.

    *datasource* can be a filename or a stream (see :func:`isstream`). By
    default, a stream is reset to its start if possible (via
    :meth:`~io.IOBase.seek` or :meth:`~cString.StringIO.reset`).

    If possible, the attribute ``stream.name`` is set to the filename or
    "<stream>" if no filename could be associated with the *datasource*.

    :Arguments:
     *datasource*
        a file (from :class:`file` or :func:`open`) or a stream (e.g. from
        :func:`urllib2.urlopen` or :class:`cStringIO.StringIO`)
     *mode*
        'r' or 'w' or 'a', more complicated modes ('r+', 'w+' are not supported because
        only the first letter is looked at) [``'r'``]
     *reset*
        try to read (*mode* 'r') the stream from the start [``True``]

    :Returns: the ``stream``, a file-like object

    .. SeeAlso:: :func:`openany` to be used with the :keyword:`with` statement.

    .. versionchanged:: 0.9.0
       Only returns the ``stream`` and tries to set ``stream.name = filename`` instead of the previous
       behavior to return a tuple ``(stream, filename)``.
    """
    # open function per compression format; '' (plain open) handles
    # uncompressed files and must be tried last when reading
    handlers = {'bz2': bz2_open, 'gz': gzip.open, '': open}

    if mode.startswith('r'):
        if isstream(datasource):
            stream = datasource
            try:
                filename = str(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = "<stream>"
            if reset:
                # best effort rewind: try reset(), then seek(0), else only warn
                try:
                    stream.reset()
                except (AttributeError, IOError):
                    try:
                        stream.seek(0)
                    except (AttributeError, IOError):
                        warnings.warn("Stream {0}: not guaranteed to be at the beginning."
                                      "".format(filename),
                                      category=StreamWarning)
        else:
            stream = None
            filename = datasource
            # for reading, the compression format is found by trial and error
            for ext in ('bz2', 'gz', ''):  # file == '' should be last
                openfunc = handlers[ext]
                stream = _get_stream(datasource, openfunc, mode=mode)
                if stream is not None:
                    break
            if stream is None:
                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
    elif mode.startswith('w') or mode.startswith('a'):  # append 'a' not tested...
        if isstream(datasource):
            stream = datasource
            try:
                filename = str(stream.name)  # maybe that does not always work?
            except AttributeError:
                filename = "<stream>"
        else:
            stream = None
            filename = datasource
            # for writing, the compression is chosen from the file extension
            name, ext = os.path.splitext(filename)
            if ext.startswith('.'):
                ext = ext[1:]
            if not ext in ('bz2', 'gz'):
                ext = ''  # anything else but bz2 or gz is just a normal file
            openfunc = handlers[ext]
            stream = openfunc(datasource, mode=mode)
            if stream is None:
                raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename))
    else:
        raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars()))

    try:
        stream.name = filename
    except (AttributeError, TypeError):
        pass  # can't set name (e.g. cStringIO.StringIO)
    return stream
def _get_stream(filename, openfunction=open, mode='r'):
    """Return open stream if *filename* can be opened with *openfunction* or else ``None``."""
    try:
        stream = openfunction(filename, mode=mode)
    except (IOError, OSError) as err:
        # An exception might be raised due to two reasons, first the openfunction is unable to open the file, in this
        # case we have to ignore the error and return None. Second is when openfunction can't open the file because
        # either the file isn't there or the permissions don't allow access.
        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
            # genuine problem (missing file / no permission): re-raise for the caller
            six.reraise(*sys.exc_info())
        return None
    if mode.startswith('r'):
        # additional check for reading (eg can we uncompress) --- is this needed?
        try:
            stream.readline()
        except IOError:
            # e.g. the decompressor rejected the data: signal "wrong handler"
            # to the caller by returning None
            stream.close()
            stream = None
        except:
            stream.close()
            raise
        else:
            # the trial read consumed data, so reopen to hand back a fresh stream
            stream.close()
            stream = openfunction(filename, mode=mode)
    return stream
def greedy_splitext(p):
    """Split extension in path *p* at the left-most separator.

    Extensions are taken to be separated from the filename with the
    separator :data:`os.extsep` (as used by :func:`os.path.splitext`).

    Arguments
    ---------
    p : path, string

    Returns
    -------
    Tuple ``(root, extension)`` where ``root`` is the full path and
    filename with all extensions removed whereas ``extension`` is the
    string of all extensions.

    Example
    -------
    >>> greedy_splitext("/home/joe/protein.pdb.bz2")
    ('/home/joe/protein', '.pdb.bz2')
    """
    directory, filename = os.path.split(p)
    # peel extensions off the right end one at a time until none is left
    peeled = []
    while True:
        filename, ext = os.path.splitext(filename)
        if not ext:
            break
        peeled.append(ext)
    # peeled holds the extensions right-to-left; restore original order
    return os.path.join(directory, filename), ''.join(reversed(peeled))
def hasmethod(obj, m):
    """Return ``True`` if object *obj* contains the method *m*."""
    attr = getattr(obj, m, None)
    return attr is not None and callable(attr)
def isstream(obj):
    """Detect if *obj* is a stream.

    We consider anything a stream that has the methods

    - ``close()``

    and either set of the following

    - ``read()``, ``readline()``, ``readlines()``
    - ``write()``, ``writeline()``, ``writelines()``

    .. SeeAlso:: :mod:`io`

    :Arguments:
      *obj*
          stream or string

    :Returns: ``True`` if *obj* is a stream, ``False`` otherwise

    .. versionadded:: 0.9.0
    """
    def _callable(name):
        # local equivalent of the module-level hasmethod(); keeps this
        # predicate self-contained
        return callable(getattr(obj, name, None))

    # must always be closeable
    if not _callable("close"):
        return False
    # ...and provide a complete read or write interface.  Plain builtin
    # all()/any() suffice here -- the previous np.all()/np.any() pulled in
    # numpy for pure-bool logic and returned np.bool_ instead of bool.
    readable = all(_callable(m) for m in ("read", "readline", "readlines"))
    writable = all(_callable(m) for m in ("write", "writeline", "writelines"))
    return readable or writable
def which(program):
    """Determine full path of executable *program* on :envvar:`PATH`.

    (Jay at http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python)
    """
    def _executable(candidate):
        # a regular file with the executable bit set for us
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # explicit path given: resolve it and check only that location
        resolved = realpath(program)
        if _executable(resolved):
            return resolved
        return None
    # bare program name: walk PATH entries in order
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _executable(candidate):
            return candidate
    return None
@functools.total_ordering
class NamedStream(io.IOBase):
    """Stream that also provides a (fake) name.

    By wrapping a stream *stream* in this class, it can be passed to
    code that uses inspection of the filename to make decisions. For
    instance, :func:`os.path.split` will work correctly on a
    :class:`NamedStream`.

    The class can be used as a context manager.

    :class:`NamedStream` is derived from :class:`io.IOBase` (to indicate that
    it is a stream). Many operations that normally expect a string will also
    work with a :class:`NamedStream`; for instance, most of the functions in
    :mod:`os.path` will work with the exception of :func:`os.path.expandvars`
    and :func:`os.path.expanduser`, which will return the :class:`NamedStream`
    itself instead of a string if no substitutions were made.

    .. rubric:: Example

    Wrap a :func:`cStringIO.StringIO` instance to write to::

      import cStringIO
      import os.path
      stream = cStringIO.StringIO()
      f = NamedStream(stream, "output.pdb")
      print(os.path.splitext(f))

    Wrap a :class:`file` instance to read from::

      stream = open("input.pdb")
      f = NamedStream(stream, stream.name)

    Use as a context manager (closes stream automatically when the
    :keyword:`with` block is left)::

      with NamedStream(open("input.pdb"), "input.pdb") as f:
         # use f
         print(f.closed)  # --> False
         # ...
      print(f.closed)     # --> True

    .. Note::

       This class uses its own :meth:`__getitem__` method so if *stream*
       implements :meth:`stream.__getitem__` then that will be masked and this
       class should not be used.

    .. Warning::

       By default, :meth:`NamedStream.close` will **not close the
       stream** but instead :meth:`~NamedStream.reset` it to the
       beginning. [#NamedStreamClose]_ Provide the ``force=True`` keyword
       to :meth:`NamedStream.close` to always close the stream.
    """

    def __init__(self, stream, filename, reset=True, close=False):
        """Initialize the :class:`NamedStream` from a *stream* and give it a *name*.

        The constructor attempts to rewind the stream to the beginning unless
        the keyword *reset* is set to ``False``. If rewinding fails, a
        :class:`MDAnalysis.StreamWarning` is issued.

        .. Note::

           By default, this stream will *not* be closed by :keyword:`with` and
           :meth:`close` (see there) unless the *close* keyword is set to
           ``True``.

        Arguments
        ---------
        stream : stream
            an open stream (e.g. :class:`file` or :func:`cStringIO.StringIO`)
        filename : str
            the filename that should be associated with the stream

        Keywords
        --------
        reset : boolean, default ``True``
            start the stream from the beginning (either :meth:`reset` or
            :meth:`seek`) when the class instance is constructed
        close : boolean, default ``False``
            close the stream when a :keyword:`with` block exits or when
            :meth:`close` is called; note that the default is **not to close
            the stream**

        .. versionadded:: 0.9.0
        """
        self.stream = stream
        self.name = filename
        self.close_stream = close
        if reset:
            self.reset()

    def reset(self):
        """Move to the beginning of the stream"""
        # try to rewind
        try:
            self.stream.reset()  # e.g. StreamIO
        except (AttributeError, IOError):
            try:
                self.stream.seek(0)  # typical file objects
            except (AttributeError, IOError):
                warnings.warn("NamedStream {0}: not guaranteed to be at the beginning."
                              "".format(self.name),
                              category=StreamWarning)

    # access the stream
    def __getattr__(self, x):
        # delegate to the wrapped stream first, then fall back to the
        # name string (this is what lets e.g. rfind() work)
        try:
            return getattr(self.stream, x)
        except AttributeError:
            return getattr(self.name, x)

    def __iter__(self):
        return iter(self.stream)

    def __enter__(self):
        # do not call the stream's __enter__ because the stream is already open
        return self

    def __exit__(self, *args):
        # NOTE: By default (close=False) we only reset the stream and NOT close it;
        # this makes it easier to use it as a drop-in replacement for a filename
        # that might be opened repeatedly (at least in MDAnalysis)
        self.close()

    # override more IOBase methods, as these are provided by IOBase and are not
    # caught with __getattr__ (ugly...)
    def close(self, force=False):
        """Reset or close the stream.

        If :attr:`NamedStream.close_stream` is set to ``False`` (the default)
        then this method will *not close the stream* and only :meth:`reset` it.

        If the *force* = ``True`` keyword is provided, the stream will be
        closed.

        .. Note:: This ``close()`` method is non-standard. ``del NamedStream``
                  always closes the underlying stream.
        """
        if self.close_stream or force:
            try:
                return self.stream.close()
            except AttributeError:
                return super(NamedStream, self).close()
        else:
            self.flush()
            self.reset()

    def __del__(self):
        """Always closes the stream."""
        self.close(force=True)

    @property
    def closed(self):
        """``True`` if stream is closed."""
        try:
            return self.stream.closed
        except AttributeError:
            return super(NamedStream, self).closed

    def seek(self, offset, whence=os.SEEK_SET):
        """Change the stream position to the given byte *offset*.

        *offset* is interpreted relative to the position indicated by
        *whence*: :data:`io.SEEK_SET`/0 (start of stream, the default),
        :data:`io.SEEK_CUR`/1 (current position), :data:`io.SEEK_END`/2
        (end of stream, *offset* usually negative).

        :Returns: the new absolute position.
        """
        try:
            return self.stream.seek(offset, whence)  # file.seek: no kw
        except AttributeError:
            return super(NamedStream, self).seek(offset, whence)

    def tell(self):
        """Return the current stream position."""
        try:
            return self.stream.tell()
        except AttributeError:
            return super(NamedStream, self).tell()

    def truncate(self, *size):
        """Truncate the stream's size to *size*.

        The size defaults to the current position (if no *size* argument is
        supplied). The current file position is not changed.
        """
        try:
            return self.stream.truncate(*size)
        except AttributeError:
            return super(NamedStream, self).truncate(*size)

    def seekable(self):
        """Return ``True`` if the stream supports random access.

        If ``False``, :meth:`seek`, :meth:`tell` and :meth:`truncate` will
        raise :exc:`IOError`.
        """
        try:
            return self.stream.seekable()
        except AttributeError:
            return super(NamedStream, self).seekable()

    def readable(self):
        """Return ``True`` if the stream can be read from.

        If ``False``, :meth:`read` will raise :exc:`IOError`.
        """
        try:
            return self.stream.readable()
        except AttributeError:
            return super(NamedStream, self).readable()

    def writable(self):
        """Return ``True`` if the stream can be written to.

        If ``False``, :meth:`write` will raise :exc:`IOError`.
        """
        try:
            return self.stream.writable()
        except AttributeError:
            return super(NamedStream, self).writable()

    def flush(self):
        """Flush the write buffers of the stream if applicable.

        This does nothing for read-only and non-blocking streams. For file
        objects one also needs to call :func:`os.fsync` to write contents to
        disk.
        """
        try:
            return self.stream.flush()
        except AttributeError:
            return super(NamedStream, self).flush()

    def fileno(self):
        """Return the underlying file descriptor (an integer) of the stream if it exists.

        An :exc:`IOError` is raised if the IO object does not use a file
        descriptor.
        """
        try:
            return self.stream.fileno()
        except AttributeError:
            # IOBase.fileno does not raise IOError as advertised so we do this here
            raise IOError("This NamedStream does not use a file descriptor.")

    # fake the important parts of the string API
    # (other methods such as rfind() are automatically dealt with via __getattr__)
    def __getitem__(self, x):
        return self.name[x]

    def __eq__(self, x):
        return self.name == x

    def __ne__(self, x):
        return not self == x

    def __lt__(self, x):
        return self.name < x

    def __len__(self):
        return len(self.name)

    def __add__(self, x):
        return self.name + x

    def __radd__(self, x):
        return x + self.name

    def __mul__(self, x):
        return self.name * x

    __rmul__ = __mul__

    def __format__(self, format_spec):
        # Fixed: apply the format specification to the name, so that e.g.
        # "{:>20}".format(namedstream) pads like a plain string.  Previously
        # this called self.name.format(format_spec), which treated the spec
        # as a *substitution argument* and ignored it.
        return format(self.name, format_spec)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<NamedStream({0}, {1})>".format(self.stream, self.name)
def realpath(*args):
    """Join all args and return the real path, rooted at /.

    Expands '~', '~user', and environment variables such as :envvar:`$HOME`.
    Returns ``None`` if any of the args is ``None``.
    """
    if any(arg is None for arg in args):
        return None
    joined = os.path.join(*args)
    expanded = os.path.expanduser(os.path.expandvars(joined))
    return os.path.realpath(expanded)
def get_ext(filename):
    """Return the lower-cased extension of *filename* without a leading dot.

    :Returns: root, ext
    """
    root, ext = os.path.splitext(filename)
    # drop the leading extension separator (usually '.') if present
    if ext.startswith(os.extsep):
        ext = ext[len(os.extsep):]
    return root, ext.lower()
def check_compressed_format(root, ext):
    """Check if this is a supported gzipped/bzip2ed file format and return UPPERCASE format.

    :Arguments:
       *root*
           filename without the (last) extension
       *ext*
           the (last) extension; if it indicates compression (``bz2``/``gz``)
           the extension to the left of it determines the format
    :Returns: upper-cased format string
    :Raises: :exc:`TypeError` if the underlying extension cannot be determined
    """
    # XYZReader&others are setup to handle both plain and compressed (bzip2, gz) files
    # ..so if the first file extension is bzip2 or gz, look at the one to the left of it
    if ext.lower() in ("bz2", "gz"):
        try:
            root, ext = get_ext(root)
        except Exception:
            # narrowed from a bare ``except`` so that KeyboardInterrupt and
            # SystemExit are no longer swallowed and re-labelled as TypeError
            raise TypeError("Cannot determine coordinate format for '{0}.{1}'"
                            "".format(root, ext))
    return ext.upper()
def format_from_filename_extension(filename):
    """Guess file format from the file extension.

    :Arguments:
       *filename*
           path-like string; compression suffixes (``.bz2``/``.gz``) are
           handled by :func:`check_compressed_format`
    :Returns: upper-cased format string
    :Raises: :exc:`TypeError` when the extension cannot be determined
    """
    try:
        root, ext = get_ext(filename)
    except Exception:
        # narrowed from a bare ``except`` so that KeyboardInterrupt and
        # SystemExit are no longer swallowed and re-labelled as TypeError
        raise TypeError(
            "Cannot determine file format for file '{0}'.\n"
            "           You can set the format explicitly with "
            "'Universe(..., format=FORMAT)'.".format(filename))
    format = check_compressed_format(root, ext)
    return format
def guess_format(filename):
    """Return the format of *filename*

    The current heuristic simply looks at the filename extension
    and can work around compressed format extensions.

    *filename* can also be a stream, in which case
    *filename.name* is looked at for a hint to the format.

    :Raises:
       *ValueError*
          if the format cannot be deduced

    .. versionadded:: 0.11.0
       Moved into lib.util
    """
    if isstream(filename):
        # perhaps StringIO or open stream; only the .name attribute can
        # carry format information
        try:
            fmt = format_from_filename_extension(filename.name)
        except AttributeError:
            # no .name attribute, so we need to complain:
            raise ValueError("guess_format requires an explicit format specifier "
                             "for stream {0}".format(filename))
    elif iterable(filename):
        # list of filenames is handled by ChainReader
        fmt = 'CHAIN'
    else:
        # plain filename: simple extension checking... something more
        # complicated is left for the ambitious.
        # Note: at the moment the upper-case extension *is* the format specifier
        fmt = format_from_filename_extension(filename)
    return fmt.upper()
def iterable(obj):
    """Returns ``True`` if *obj* can be iterated over and is *not* a string
    nor a :class:`NamedStream`"""
    if isinstance(obj, (six.string_types, NamedStream)):
        return False  # avoid iterating over characters of a string
    if hasattr(obj, 'next') or hasattr(obj, '__next__'):
        # 'next' is the Python 2 iterator protocol; '__next__' is required
        # for Python 3 iterators/generators, which the old check missed
        # (they then fell through to len() and were reported non-iterable)
        return True  # any iterator will do
    try:
        len(obj)  # anything else that might work
    except (TypeError, AttributeError):
        return False
    return True
def asiterable(obj):
    """Returns obj so that it can be iterated over; a string is *not* treated as iterable"""
    return obj if iterable(obj) else [obj]
#: Regular expression (see :mod:`re`) to parse a simple `FORTRAN edit descriptor`_.
#: ``(?P<repeat>\d+?)(?P<format>[IFEAX])(?P<numfmt>(?P<length>\d+)(\.(?P<decimals>\d+))?)?``
#: (the comment previously showed ``\d?`` and ``[IFELAX]``, which did not match
#: the actual pattern below)
#:
#: .. _FORTRAN edit descriptor: http://www.cs.mtu.edu/~shene/COURSES/cs201/NOTES/chap05/format.html
FORTRAN_format_regex = r"(?P<repeat>\d+?)(?P<format>[IFEAX])(?P<numfmt>(?P<length>\d+)(\.(?P<decimals>\d+))?)?"
# raw string: the old non-raw literal relied on Python passing '\d' and '\.'
# through unchanged, which raises DeprecationWarning/SyntaxWarning on
# modern Python
_FORTRAN_format_pattern = re.compile(FORTRAN_format_regex)
def strip(s):
    """Convert *s* to a string and return it white-space stripped."""
    text = str(s)
    return text.strip()
class FixedcolumnEntry(object):
    """Represent an entry at specific fixed columns.

    Reads from ``line[start:stop]`` and converts the text according to
    *typespecifier*.
    """
    #: conversion callables keyed by FORTRAN type specifier; 'A' yields a
    #: white-space stripped string via the module-level :func:`strip`
    convertors = {'I': int, 'F': float, 'E': float, 'A': strip}

    def __init__(self, start, stop, typespecifier):
        """
        :Arguments:
         *start*
             first column
         *stop*
             last column + 1
         *typespecifier*
             'I': int, 'F': float, 'E': float, 'A': stripped string

        The start/stop arguments follow standard Python convention in that
        they are 0-based and that the *stop* argument is not included.
        """
        self.start = start
        self.stop = stop
        self.typespecifier = typespecifier
        self.convertor = self.convertors[typespecifier]

    def read(self, line):
        """Read the entry from *line* and convert to appropriate type.

        :Raises: :exc:`ValueError` when the column text cannot be converted.
        """
        text = line[self.start:self.stop]
        try:
            return self.convertor(text)
        except ValueError:
            raise ValueError("{0!r}: Failed to read&convert {1!r}".format(self, text))

    def __len__(self):
        """Length of the field in columns (stop - start)"""
        return self.stop - self.start

    def __repr__(self):
        return "FixedcolumnEntry({0:d},{1:d},{2!r})".format(self.start, self.stop, self.typespecifier)
class FORTRANReader(object):
    """FORTRANReader provides a method to parse FORTRAN formatted lines in a file.

    Usage::

       atomformat = FORTRANReader('2I10,2X,A8,2X,A8,3F20.10,2X,A8,2X,A8,F20.10')
       for line in open('coordinates.crd'):
           serial,TotRes,resName,name,x,y,z,chainID,resSeq,tempFactor = atomformat.read(line)

    Fortran format edit descriptors; see `Fortran Formats`_ for the syntax.
    Only simple one-character specifiers supported here: *I F E A X* (see
    :data:`FORTRAN_format_regex`).
    Strings are stripped of leading and trailing white space.

    .. _`Fortran Formats`: http://www.webcitation.org/5xbaWMV2x
    .. _`Fortran Formats (URL)`:
       http://www.cs.mtu.edu/~shene/COURSES/cs201/NOTES/chap05/format.html
    """
    def __init__(self, fmt):
        """Set up the reader with the FORTRAN format string.
        The string *fmt* should look like '2I10,2X,A8,2X,A8,3F20.10,2X,A8,2X,A8,F20.10'.
        """
        # one descriptor dict per comma-separated edit descriptor
        self.fmt = fmt.split(',')
        descriptors = [self.parse_FORTRAN_format(descriptor) for descriptor in self.fmt]
        start = 0
        self.entries = []
        for d in descriptors:
            if d['format'] != 'X':
                # data-carrying field: one FixedcolumnEntry per repeat
                for x in range(d['repeat']):
                    stop = start + d['length']
                    self.entries.append(FixedcolumnEntry(start, stop, d['format']))
                    start = stop
            else:
                # 'X' fields only skip columns; they produce no entry
                start += d['totallength']
    def read(self, line):
        """Parse *line* according to the format string and return list of values.
        Values are converted to Python types according to the format specifier.
        :Returns: list of entries with appropriate types
        :Raises: :exc:`ValueError` if any of the conversions cannot be made
                 (e.g. space for an int)
        .. SeeAlso:: :meth:`FORTRANReader.number_of_matches`
        """
        return [e.read(line) for e in self.entries]
    def number_of_matches(self, line):
        """Return how many format entries could be populated with legal values."""
        # not optimal, I suppose... (tries each entry and counts successes)
        matches = 0
        for e in self.entries:
            try:
                e.read(line)
                matches += 1
            except ValueError:
                pass
        return matches
    def parse_FORTRAN_format(self, edit_descriptor):
        """Parse the descriptor.
        parse_FORTRAN_format(edit_descriptor) --> dict
        :Returns: dict with totallength (in chars), repeat, length,
                  format, decimals
        :Raises: :exc:`ValueError` if the *edit_descriptor* is not recognized
                 and cannot be parsed
        .. Note::
           Specifiers: *L ES EN T TL TR / r S SP SS BN BZ* are *not*
           supported, and neither are the scientific notation *Ew.dEe*
           forms.
        """
        m = _FORTRAN_format_pattern.match(edit_descriptor.upper())
        if m is None:
            # maybe the repeat count was omitted (e.g. 'I10'); retry as '1I10'
            try:
                m = _FORTRAN_format_pattern.match("1" + edit_descriptor.upper())
                if m is None:
                    raise ValueError  # really no idea what the descriptor is supposed to mean
            except:
                raise ValueError("unrecognized FORTRAN format {0!r}".format(edit_descriptor))
        d = m.groupdict()
        if d['repeat'] == '':
            d['repeat'] = 1
        if d['format'] == 'X':
            # 'X' carries no explicit length; a single skipped column
            d['length'] = 1
        # normalise the string groups to ints; None (group absent) is kept
        for k in ('repeat', 'length', 'decimals'):
            try:
                d[k] = int(d[k])
            except ValueError:  # catches ''
                d[k] = 0
            except TypeError:  # keep None
                pass
        d['totallength'] = d['repeat'] * d['length']
        return d
    def __len__(self):
        """Returns number of entries."""
        return len(self.entries)
    def __repr__(self):
        return self.__class__.__name__ + "(" + ",".join(self.fmt) + ")"
def fixedwidth_bins(delta, xmin, xmax):
    """Return bins of width *delta* that cover *xmin*, *xmax* (or a larger range).

    An integer number of bins of width *delta* is chosen so that it covers at
    least ``[xmin, xmax]``; any excess length is split evenly between the two
    ends.  Scalar or array-like arguments are accepted (element-wise).

    :Returns: dict with keys ``'Nbins'``, ``'delta'``, ``'min'``, and ``'max'``
    :Raises: :exc:`ValueError` if not ``xmin < xmax``
    """
    if not np.all(xmin < xmax):
        raise ValueError('Boundaries are not sane: should be xmin < xmax.')
    # np.float64 replaces np.float_, which was removed in NumPy 2.0
    # (np.float_ was an alias for float64, so results are unchanged)
    _delta = np.asarray(delta, dtype=np.float64)
    _xmin = np.asarray(xmin, dtype=np.float64)
    _xmax = np.asarray(xmax, dtype=np.float64)
    _length = _xmax - _xmin
    N = np.ceil(_length / _delta).astype(np.int_)  # number of bins
    dx = 0.5 * (N * _delta - _length)  # add half of the excess to each end
    return {'Nbins': N, 'delta': _delta, 'min': _xmin - dx, 'max': _xmax + dx}
# String functions
# ----------------
#: translation table for 3-letter codes --> 1-letter codes
#: .. SeeAlso:: :data:`alternative_inverse_aa_codes`
canonical_inverse_aa_codes = {
    'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E',
    'PHE': 'F', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I',
    'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N',
    'PRO': 'P', 'GLN': 'Q', 'ARG': 'R', 'SER': 'S',
    'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y'}
#: translation table for 1-letter codes --> *canonical* 3-letter codes.
#: The table is used for :func:`convert_aa_code`.
#: (Built by inverting :data:`canonical_inverse_aa_codes`; that mapping is
#: one-to-one, so the inversion is lossless.)
amino_acid_codes = {one: three for three, one in canonical_inverse_aa_codes.items()}
#: non-default charge state amino acids or special charge state descriptions
#: (Not fully synchronized with :class:`MDAnalysis.core.selection.ProteinSelection`.)
#: Keys are alternative residue spellings (e.g. HSD/HSE, presumably
#: force-field specific tautomer/protonation names -- verify against the
#: supported topology formats); all map to the canonical 1-letter code.
alternative_inverse_aa_codes = {
    'HISA': 'H', 'HISB': 'H', 'HSE': 'H', 'HSD': 'H', 'HID': 'H', 'HIE': 'H', 'HIS1': 'H',
    'HIS2': 'H',
    'ASPH': 'D', 'ASH': 'D',
    'GLUH': 'E', 'GLH': 'E',
    'LYSH': 'K', 'LYN': 'K',
    'ARGN': 'R',
    'CYSH': 'C', 'CYS1': 'C', 'CYS2': 'C'}
#: lookup table from 3/4 letter resnames to 1-letter codes. Note that non-standard residue names
#: for tautomers or different protonation states such as HSE are converted to canonical 1-letter codes ("H").
#: The table is used for :func:`convert_aa_code`.
#: .. SeeAlso:: :data:`canonical_inverse_aa_codes` and :data:`alternative_inverse_aa_codes`
inverse_aa_codes = {}
# merge order: canonical names first, then alternative spellings; the two
# key sets are disjoint, so neither update overwrites the other
inverse_aa_codes.update(canonical_inverse_aa_codes)
inverse_aa_codes.update(alternative_inverse_aa_codes)
def convert_aa_code(x):
    """Converts between 3-letter and 1-letter amino acid codes.

    .. SeeAlso:: Data are defined in :data:`amino_acid_codes` and :data:`inverse_aa_codes`.
    """
    # pick the lookup direction from the length of the input code
    lookup = amino_acid_codes if len(x) == 1 else inverse_aa_codes
    try:
        return lookup[x.upper()]
    except KeyError:
        raise ValueError("No conversion for {0} found (1 letter -> 3 letter or 3/4 letter -> 1 letter)".format(x))
#: Regular expression to match and parse a residue-atom selection; will match
#: "LYS300:HZ1" or "K300:HZ1" or "K300" or "4GB300:H6O" or "4GB300" or "YaA300".
#: (raw string: the previous non-raw literal relied on '\s', '\d', '\w' passing
#: through unchanged, which raises DeprecationWarning/SyntaxWarning on modern
#: Python)
RESIDUE = re.compile(r"""
                 (?P<aa>([ACDEFGHIKLMNPQRSTVWY])   # 1-letter amino acid
                        |                          #   or
                        ([0-9A-Z][a-zA-Z][A-Z][A-Z]?)    # 3-letter or 4-letter residue name
                 )
                 \s*                               # white space allowed
                 (?P<resid>\d+)                    # resid
                 \s*
                 (:                                # separator ':'
                   \s*
                   (?P<atom>\w+)                   # atom name
                 )?                                # possibly one
            """, re.VERBOSE | re.IGNORECASE)
# from GromacsWrapper cbook.IndexBuilder
def parse_residue(residue):
    """Process residue string.

    Examples:
     - "LYS300:HZ1" --> ("LYS", 300, "HZ1")
     - "K300:HZ1" --> ("LYS", 300, "HZ1")
     - "K300" --> ("LYS", 300, None)
     - "4GB300:H6O" --> ("4GB", 300, "H6O")
     - "4GB300" --> ("4GB", 300, None)

    :Argument: The *residue* must contain a 1-letter or 3-letter or
       4-letter residue string, a number (the resid) and
       optionally an atom identifier, which must be separate
       from the residue with a colon (":"). White space is
       allowed in between.

    :Returns: `(3-letter aa string, resid, atomname)`; known 1-letter
       aa codes are converted to 3-letter codes
    """
    # XXX: use _translate_residue() ....
    m = RESIDUE.match(residue)
    if not m:
        raise ValueError("Selection {residue!r} is not valid (only 1/3/4 letter resnames, resid required).".format(**vars()))
    resid = int(m.group('resid'))
    aa = m.group('aa')
    # 1-letter codes are canonicalised to 3-letter; longer names pass through
    resname = convert_aa_code(aa) if len(aa) == 1 else aa
    return (resname, resid, m.group('atom'))
def conv_float(s):
    """Convert an object *s* to float if possible.

    Function to be passed into :func:`map` or a list comprehension. If
    the argument can be interpreted as a float it is converted,
    otherwise the original object is passed back.
    """
    try:
        converted = float(s)
    except ValueError:
        return s
    return converted
def cached(key):
    """Cache a property within a class.

    Requires the class to have a cache dict called ``_cache``.

    Usage::

       class A(object):
           def __init__(self):
               self._cache = dict()

           @property
           @cached('keyname')
           def size(self):
               # This code gets run only if the lookup of keyname fails.
               # After this code has been run once, the result is stored in
               # _cache with the key 'keyname'.
               size = 10.0

    .. versionadded:: 0.9.0
    """
    def cached_lookup(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return self._cache[key]
            except KeyError:
                value = func(self, *args, **kwargs)
                self._cache[key] = value
                return value
        return wrapper
    return cached_lookup
def unique_rows(arr, return_index=False):
    """Return the unique rows from an array.

    Arguments
    ---------
    arr : np.array of shape (n1, m)
    return_index : bool, optional
        If True, returns indices of arr that formed answer (see np.unique)

    Returns
    -------
    unique_rows (n2, m)

    Examples
    --------
    Remove duplicate rows from an array:

    >>> a = np.array([[0, 1], [1, 2], [1, 2], [0, 1], [2, 3]])
    >>> b = unique_rows(a)
    >>> b
    array([[0, 1], [1, 2], [2, 3]])
    """
    # From here, but adapted to handle any size rows
    # https://mail.scipy.org/pipermail/scipy-user/2011-December/031200.html
    #
    # Viewing each row as one structured element requires the array to own a
    # C-contiguous buffer.  Copy when either condition fails:
    #  - OWNDATA is False e.g. for broadcast views (np.array([1, 2])[None, :])
    #  - C_CONTIGUOUS is False for Fortran-ordered arrays, which previously
    #    made the .view() call below raise ValueError even though the array
    #    owned its data
    if not (arr.flags['OWNDATA'] and arr.flags['C_CONTIGUOUS']):
        arr = arr.copy()  # np.copy defaults to C order
    m = arr.shape[1]
    # structured dtype packing one whole row into a single comparable element
    rowtype = np.dtype([(str(i), arr.dtype) for i in range(m)])
    if return_index:
        u, r_idx = np.unique(arr.view(rowtype), return_index=True)
        return u.view(arr.dtype).reshape(-1, m), r_idx
    else:
        u = np.unique(arr.view(rowtype))
        return u.view(arr.dtype).reshape(-1, m)
def blocks_of(a, n, m):
    """Extract a view of (n, m) blocks along the diagonal of the array `a`

    Parameters
    ----------
    a : array_like
        starting array
    n : int
        size of block in first dimension
    m : int
        size of block in second dimension

    Returns
    -------
    (nblocks, n, m) view of the original array.
    Where nblocks is the number of times the miniblock fits in the original.

    Raises
    ------
    ValueError
        If `n` and `m` yield a *different number* of blocks along the two
        dimensions (``a.shape[0] // n != a.shape[1] // m``).  Note that
        trailing rows/columns that do not fill a complete block are *not*
        detected: the floor divisions below silently ignore them, so e.g. a
        5x5 array with 2x2 blocks returns 2 blocks without error.

    Examples
    --------
    >>> arr = np.arange(16).reshape(4, 4)
    >>> view = blocks_of(arr, 2, 2)
    >>> view[:] = 100
    >>> arr
    array([[100, 100,   2,   3],
           [100, 100,   6,   7],
           [  8,   9, 100, 100],
           [ 12,  13, 100, 100]])

    Notes
    -----
    n, m must divide a into an identical integer number of blocks (partial
    blocks are silently dropped, see above).
    Uses strides so probably requires that the array is C contiguous.
    Returns a view, so editing this modifies the original array.

    .. versionadded:: 0.12.0
    """
    # based on:
    # http://stackoverflow.com/a/10862636
    # but generalised to handle non square blocks.
    # floor division: any leftover rows/columns are silently excluded
    nblocks = a.shape[0] // n
    nblocks2 = a.shape[1] // m
    if not nblocks == nblocks2:
        raise ValueError("Must divide into same number of blocks in both"
                         " directions. Got {} by {}"
                         "".format(nblocks, nblocks2))
    new_shape = (nblocks, n, m)
    # stride from one diagonal block to the next: n rows down plus m columns
    # across; within a block the original row/column strides apply
    new_strides = (n * a.strides[0] + m * a.strides[1],
                   a.strides[0], a.strides[1])
    return np.lib.stride_tricks.as_strided(a, new_shape, new_strides)
class Namespace(dict):
    """Class to allow storing attributes in new namespace.

    Attribute access (``ns.key``) and item access (``ns['key']``) address the
    same underlying dict; unknown attributes raise :exc:`AttributeError`
    (instead of the :exc:`KeyError` raised by item access).
    """
    def __getattr__(self, key):
        # a.this causes a __getattr__ call for key = 'this' only when normal
        # attribute lookup fails
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            raise AttributeError('"{}" is not known in the namespace.'
                                 .format(key))

    def __setattr__(self, key, value):
        dict.__setitem__(self, key, value)

    def __delattr__(self, key):
        try:
            dict.__delitem__(self, key)
        except KeyError:
            raise AttributeError('"{}" is not known in the namespace.'
                                 .format(key))

    def __eq__(self, other):
        try:
            # this'll allow us to compare if we're storing arrays
            assert_equal(self, other)
        except AssertionError:
            return False
        return True

    def __ne__(self, other):
        # defined explicitly because Python 2 does not derive __ne__ from
        # __eq__ (a != b would otherwise fall back to dict.__ne__);
        # on Python 3 this matches the default derivation
        return not self == other
| kain88-de/mdanalysis | package/MDAnalysis/lib/util.py | Python | gpl-2.0 | 45,707 | [
"LAMMPS",
"MDAnalysis"
] | 3015c7d882038185d3cec84b869aadea596776a87ec06d71d3025dd9d2ee59ca |
# -*- test-case-name: twisted.python.test.test_deprecate -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Deprecation framework for Twisted.
To mark a method, function, or class as being deprecated do this::
from incremental import Version
from twisted.python.deprecate import deprecated
@deprecated(Version("Twisted", 8, 0, 0))
def badAPI(self, first, second):
'''
Docstring for badAPI.
'''
...
@deprecated(Version("Twisted", 16, 0, 0))
class BadClass(object):
'''
Docstring for BadClass.
'''
The newly-decorated badAPI will issue a warning when called, and BadClass will
issue a warning when instantiated. Both will also have a deprecation notice
appended to their docstring.
To deprecate properties you can use::
from incremental import Version
from twisted.python.deprecate import deprecatedProperty
class OtherwiseUndeprecatedClass(object):
@deprecatedProperty(Version('Twisted', 16, 0, 0))
def badProperty(self):
'''
Docstring for badProperty.
'''
@badProperty.setter
def badProperty(self, value):
'''
Setter will also raise the deprecation warning.
'''
To mark module-level attributes as being deprecated you can use::
badAttribute = "someValue"
...
deprecatedModuleAttribute(
Version("Twisted", 8, 0, 0),
"Use goodAttribute instead.",
"your.full.module.name",
"badAttribute")
The deprecated attributes will issue a warning whenever they are accessed. If
the attributes being deprecated are in the same module as the
L{deprecatedModuleAttribute} call is being made from, the C{__name__} global
can be used as the C{moduleName} parameter.
See also L{incremental.Version}.
@type DEPRECATION_WARNING_FORMAT: C{str}
@var DEPRECATION_WARNING_FORMAT: The default deprecation warning string format
to use when one is not provided by the user.
"""
from __future__ import division, absolute_import
__all__ = [
'deprecated',
'deprecatedProperty',
'getDeprecationWarningString',
'getWarningMethod',
'setWarningMethod',
'deprecatedModuleAttribute',
]
import sys, inspect
from warnings import warn, warn_explicit
from dis import findlinestarts
from functools import wraps
from incremental import getVersionString
from twisted.python.compat import _PY3
DEPRECATION_WARNING_FORMAT = '%(fqpn)s was deprecated in %(version)s'
# Notionally, part of twisted.python.reflect, but defining it there causes a
# cyclic dependency between this module and that module. Define it here,
# instead, and let reflect import it to re-expose to the public.
def _fullyQualifiedName(obj):
"""
Return the fully qualified name of a module, class, method or function.
Classes and functions need to be module level ones to be correctly
qualified.
@rtype: C{str}.
"""
try:
name = obj.__qualname__
except AttributeError:
name = obj.__name__
if inspect.isclass(obj) or inspect.isfunction(obj):
moduleName = obj.__module__
return "%s.%s" % (moduleName, name)
elif inspect.ismethod(obj):
try:
cls = obj.im_class
except AttributeError:
# Python 3 eliminates im_class, substitutes __module__ and
# __qualname__ to provide similar information.
return "%s.%s" % (obj.__module__, obj.__qualname__)
else:
className = _fullyQualifiedName(cls)
return "%s.%s" % (className, name)
return name
# Try to keep it looking like something in twisted.python.reflect.
# (The helper is re-exposed there as the public ``fullyQualifiedName``; these
# assignments make introspection and tracebacks point at that public name.)
_fullyQualifiedName.__module__ = 'twisted.python.reflect'
_fullyQualifiedName.__name__ = 'fullyQualifiedName'
_fullyQualifiedName.__qualname__ = 'fullyQualifiedName'
def _getReplacementString(replacement):
"""
Surround a replacement for a deprecated API with some polite text exhorting
the user to consider it as an alternative.
@type replacement: C{str} or callable
@return: a string like "please use twisted.python.modules.getModule
instead".
"""
if callable(replacement):
replacement = _fullyQualifiedName(replacement)
return "please use %s instead" % (replacement,)
def _getDeprecationDocstring(version, replacement=None):
    """
    Generate an addition to a deprecated object's docstring that explains its
    deprecation.

    @param version: the version it was deprecated.
    @type version: L{incremental.Version}

    @param replacement: The replacement, if specified.
    @type replacement: C{str} or callable

    @return: a string like "Deprecated in Twisted 27.2.0; please use
        twisted.timestream.tachyon.flux instead."
    """
    pieces = ["Deprecated in %s" % (getVersionString(version),)]
    if replacement:
        pieces.append("; ")
        pieces.append(_getReplacementString(replacement))
    pieces.append(".")
    return "".join(pieces)
def _getDeprecationWarningString(fqpn, version, format=None, replacement=None):
    """
    Return a string indicating that the Python name was deprecated in the given
    version.

    @param fqpn: Fully qualified Python name of the thing being deprecated
    @type fqpn: C{str}

    @param version: Version that C{fqpn} was deprecated in.
    @type version: L{incremental.Version}

    @param format: A user-provided format to interpolate warning values into, or
        L{DEPRECATION_WARNING_FORMAT
        <twisted.python.deprecate.DEPRECATION_WARNING_FORMAT>} if L{None} is
        given.
    @type format: C{str}

    @param replacement: what should be used in place of C{fqpn}. Either pass in
        a string, which will be inserted into the warning message, or a
        callable, which will be expanded to its full import path.
    @type replacement: C{str} or callable

    @return: A textual description of the deprecation
    @rtype: C{str}
    """
    template = DEPRECATION_WARNING_FORMAT if format is None else format
    message = template % {
        'fqpn': fqpn,
        'version': getVersionString(version)}
    if replacement:
        message = "%s; %s" % (message, _getReplacementString(replacement))
    return message
def getDeprecationWarningString(callableThing, version, format=None,
                                replacement=None):
    """
    Return a string indicating that the callable was deprecated in the given
    version.

    @type callableThing: C{callable}
    @param callableThing: Callable object to be deprecated.

    @type version: L{incremental.Version}
    @param version: Version that C{callableThing} was deprecated in.

    @type format: C{str}
    @param format: A user-provided format to interpolate warning values into,
        or L{DEPRECATION_WARNING_FORMAT
        <twisted.python.deprecate.DEPRECATION_WARNING_FORMAT>} if L{None} is
        given.

    @param replacement: what should be used in place of the callable. Either
        pass in a string, which will be inserted into the warning message,
        or a callable, which will be expanded to its full import path.
    @type replacement: C{str} or callable

    @return: A string describing the deprecation.
    @rtype: C{str}
    """
    return _getDeprecationWarningString(
        _fullyQualifiedName(callableThing), version, format, replacement)
def _appendToDocstring(thingWithDoc, textToAppend):
    """
    Append the given text to the docstring of C{thingWithDoc}.
    If C{thingWithDoc} has no docstring, then the text just becomes the
    docstring. If it has a single-line docstring, then a blank line and the
    message text are appended. If it has a multi-line docstring, then a blank
    line and the message text are appended, indented to match the existing
    body (the final line of a multi-line docstring is assumed to contain the
    closing indentation).
    @param thingWithDoc: Any object with a writable C{__doc__} attribute.
    @param textToAppend: Text to add at the end of the docstring.
    @type textToAppend: C{str}
    """
    if thingWithDoc.__doc__:
        docstringLines = thingWithDoc.__doc__.splitlines()
    else:
        docstringLines = []
    if len(docstringLines) == 0:
        docstringLines.append(textToAppend)
    elif len(docstringLines) == 1:
        docstringLines.extend(['', textToAppend, ''])
    else:
        # The trailing splitlines() entry holds the whitespace preceding the
        # closing quotes; reuse it so the appended text lines up correctly.
        spaces = docstringLines.pop()
        docstringLines.extend(['',
                               spaces + textToAppend,
                               spaces])
    thingWithDoc.__doc__ = '\n'.join(docstringLines)
def deprecated(version, replacement=None):
    """
    Return a decorator that marks callables as deprecated. To deprecate a
    property, see L{deprecatedProperty}.
    @type version: L{incremental.Version}
    @param version: The version in which the callable will be marked as
        having been deprecated. The decorated function will be annotated
        with this version, having it set as its C{deprecatedVersion}
        attribute.
    @param replacement: what should be used in place of the callable. Either
        pass in a string, which will be inserted into the warning message,
        or a callable, which will be expanded to its full import path.
    @type replacement: C{str} or callable
    """
    def deprecationDecorator(function):
        """
        Decorator that marks C{function} as deprecated.
        """
        # Compute the warning text once, at decoration time.
        warningString = getDeprecationWarningString(
            function, version, None, replacement)
        @wraps(function)
        def deprecatedFunction(*args, **kwargs):
            warn(
                warningString,
                DeprecationWarning,
                stacklevel=2)
            return function(*args, **kwargs)
        # Record the deprecation in the wrapper's docstring and annotate it
        # so tools can discover the version programmatically.
        _appendToDocstring(deprecatedFunction,
                           _getDeprecationDocstring(version, replacement))
        deprecatedFunction.deprecatedVersion = version
        return deprecatedFunction
    return deprecationDecorator
def deprecatedProperty(version, replacement=None):
    """
    Return a decorator that marks a property as deprecated. To deprecate a
    regular callable or class, see L{deprecated}.
    @type version: L{incremental.Version}
    @param version: The version in which the property will be marked as
        having been deprecated. The decorated getter will be annotated
        with this version, having it set as its C{deprecatedVersion}
        attribute.
    @param replacement: what should be used in place of the callable.
        Either pass in a string, which will be inserted into the warning
        message, or a callable, which will be expanded to its full import
        path.
    @type replacement: C{str} or callable
    @return: A new property with deprecated setter and getter.
    @rtype: C{property}
    @since: 16.1.0
    """
    class _DeprecatedProperty(property):
        """
        Extension of the built-in property to allow deprecated setters.
        """
        def _deprecatedWrapper(self, function):
            # Wrap a setter so that assigning to the property emits the same
            # warning as reading it (self.warningString is attached to the
            # property object by deprecationDecorator below).
            @wraps(function)
            def deprecatedFunction(*args, **kwargs):
                warn(
                    self.warningString,
                    DeprecationWarning,
                    stacklevel=2)
                return function(*args, **kwargs)
            return deprecatedFunction
        def setter(self, function):
            # Override property.setter so the user-supplied setter is wrapped
            # with the deprecation warning too.
            return property.setter(self, self._deprecatedWrapper(function))
    def deprecationDecorator(function):
        if _PY3:
            warningString = getDeprecationWarningString(
                function, version, None, replacement)
        else:
            # Because Python 2 sucks, we need to implement our own here -- lack
            # of __qualname__ means that we kinda have to stack walk. It maybe
            # probably works. Probably. -Amber
            # NOTE(review): inspect.stack()[1][3] relies on this decorator
            # being applied directly inside a class body to recover the class
            # name -- verify for any other usage.
            functionName = function.__name__
            className = inspect.stack()[1][3] # wow hax
            moduleName = function.__module__
            fqdn = "%s.%s.%s" % (moduleName, className, functionName)
            warningString = _getDeprecationWarningString(
                fqdn, version, None, replacement)
        @wraps(function)
        def deprecatedFunction(*args, **kwargs):
            warn(
                warningString,
                DeprecationWarning,
                stacklevel=2)
            return function(*args, **kwargs)
        _appendToDocstring(deprecatedFunction,
                           _getDeprecationDocstring(version, replacement))
        deprecatedFunction.deprecatedVersion = version
        # Build the property from the warning-emitting getter, and stash the
        # warning text so the setter wrapper can reuse it.
        result = _DeprecatedProperty(deprecatedFunction)
        result.warningString = warningString
        return result
    return deprecationDecorator
def getWarningMethod():
    """
    Return the warning method currently used to record deprecation warnings.
    @return: The current warning callable; initially L{warnings.warn}, unless
        changed via L{setWarningMethod}.
    """
    return warn
def setWarningMethod(newMethod):
    """
    Set the warning method to use to record deprecation warnings.
    The callable should take message, category and stacklevel. The return
    value is ignored.
    @param newMethod: The callable to invoke for future deprecation warnings.
    """
    # Rebind the module-level 'warn' name, which all the deprecation helpers
    # in this module call through.
    global warn
    warn = newMethod
class _InternalState(object):
    """
    An L{_InternalState} is a helper object for a L{_ModuleProxy}, so that it
    can easily access its own attributes, bypassing its logic for delegating to
    another object that it's proxying for.
    @ivar proxy: a L{_ModuleProxy}
    """
    def __init__(self, proxy):
        # Use object.__setattr__ so our own __setattr__ override (which
        # delegates to the proxy) is not invoked for 'proxy' itself.
        object.__setattr__(self, 'proxy', proxy)
    def __getattribute__(self, name):
        # Read attributes directly off the proxy object, bypassing the
        # proxy's own __getattribute__ hook.
        return object.__getattribute__(object.__getattribute__(self, 'proxy'),
                                       name)
    def __setattr__(self, name, value):
        # Write attributes directly onto the proxy object, bypassing the
        # proxy's own __setattr__ hook.
        return object.__setattr__(object.__getattribute__(self, 'proxy'),
                                  name, value)
class _ModuleProxy(object):
    """
    Python module wrapper to hook module-level attribute access.
    Access to deprecated attributes first checks
    L{_ModuleProxy._deprecatedAttributes}, if the attribute does not appear
    there then access falls through to L{_ModuleProxy._module}, the wrapped
    module object.
    @ivar _module: Module on which to hook attribute access.
    @type _module: C{module}
    @ivar _deprecatedAttributes: Mapping of attribute names to objects that
        retrieve the module attribute's original value.
    @type _deprecatedAttributes: C{dict} mapping C{str} to
        L{_DeprecatedAttribute}
    @ivar _lastWasPath: Heuristic guess as to whether warnings about this
        package should be ignored for the next call. If the last attribute
        access of this module was a C{getattr} of C{__path__}, we will assume
        that it was the import system doing it and we won't emit a warning for
        the next access, even if it is to a deprecated attribute. The CPython
        import system always tries to access C{__path__}, then the attribute
        itself, then the attribute itself again, in both successful and failed
        cases.
    @type _lastWasPath: C{bool}
    """
    def __init__(self, module):
        # All state is written through _InternalState, which bypasses this
        # class's __setattr__ hook (otherwise we would write onto the wrapped
        # module instead of the proxy).
        state = _InternalState(self)
        state._module = module
        state._deprecatedAttributes = {}
        state._lastWasPath = False
    def __repr__(self):
        """
        Get a string containing the type of the module proxy and a
        representation of the wrapped module object.
        """
        state = _InternalState(self)
        return '<%s module=%r>' % (type(self).__name__, state._module)
    def __setattr__(self, name, value):
        """
        Set an attribute on the wrapped module object.
        """
        state = _InternalState(self)
        # Any explicit set resets the __path__ heuristic.
        state._lastWasPath = False
        setattr(state._module, name, value)
    def __getattribute__(self, name):
        """
        Get an attribute from the module object, possibly emitting a warning.
        If the specified name has been deprecated, then a warning is issued.
        (Unless certain obscure conditions are met; see
        L{_ModuleProxy._lastWasPath} for more information about what might quash
        such a warning.)
        """
        state = _InternalState(self)
        # Skip the deprecation machinery entirely if the previous access
        # looked like the import system probing __path__.
        if state._lastWasPath:
            deprecatedAttribute = None
        else:
            deprecatedAttribute = state._deprecatedAttributes.get(name)
        if deprecatedAttribute is not None:
            # If we have a _DeprecatedAttribute object from the earlier lookup,
            # allow it to issue the warning.
            value = deprecatedAttribute.get()
        else:
            # Otherwise, just retrieve the underlying value directly; it's not
            # deprecated, there's no warning to issue.
            value = getattr(state._module, name)
        if name == '__path__':
            state._lastWasPath = True
        else:
            state._lastWasPath = False
        return value
class _DeprecatedAttribute(object):
    """
    Wrapper for deprecated attributes.
    This is intended to be used by L{_ModuleProxy}. Calling
    L{_DeprecatedAttribute.get} will issue a warning and retrieve the
    underlying attribute's value.
    @type module: C{module}
    @ivar module: The original module instance containing this attribute
    @type fqpn: C{str}
    @ivar fqpn: Fully qualified Python name for the deprecated attribute
    @type version: L{incremental.Version}
    @ivar version: Version that the attribute was deprecated in
    @type message: C{str}
    @ivar message: Deprecation message
    """
    def __init__(self, module, name, version, message):
        """
        Initialise a deprecated name wrapper.
        """
        self.module = module
        self.__name__ = name
        self.fqpn = module.__name__ + '.' + name
        self.version = version
        self.message = message
    def get(self):
        """
        Get the underlying attribute value and issue a deprecation warning.
        @return: The current value of the deprecated module attribute.
        """
        # This might fail if the deprecated thing is a module inside a package.
        # In that case, don't emit the warning this time. The import system
        # will come back again when it's not an AttributeError and we can emit
        # the warning then.
        result = getattr(self.module, self.__name__)
        # Extend the standard warning format with the custom message supplied
        # at deprecation time.
        message = _getDeprecationWarningString(self.fqpn, self.version,
            DEPRECATION_WARNING_FORMAT + ': ' + self.message)
        warn(message, DeprecationWarning, stacklevel=3)
        return result
def _deprecateAttribute(proxy, name, version, message):
    """
    Mark a module-level attribute as being deprecated.
    @type proxy: L{_ModuleProxy}
    @param proxy: The module proxy instance proxying the deprecated attributes
    @type name: C{str}
    @param name: Attribute name
    @type version: L{incremental.Version}
    @param version: Version that the attribute was deprecated in
    @type message: C{str}
    @param message: Deprecation message
    """
    # Reach through the proxy with object.__getattribute__ so we read the
    # proxy's own state instead of delegating to the wrapped module.
    wrappedModule = object.__getattribute__(proxy, '_module')
    deprecatedAttributes = object.__getattribute__(
        proxy, '_deprecatedAttributes')
    # Register a marker for this attribute; _ModuleProxy emits a warning
    # whenever the attribute is accessed through it.
    deprecatedAttributes[name] = _DeprecatedAttribute(
        wrappedModule, name, version, message)
def deprecatedModuleAttribute(version, message, moduleName, name):
    """
    Declare a module-level attribute as being deprecated.
    @type version: L{incremental.Version}
    @param version: Version that the attribute was deprecated in
    @type message: C{str}
    @param message: Deprecation message
    @type moduleName: C{str}
    @param moduleName: Fully-qualified Python name of the module containing
        the deprecated attribute; if called from the same module as the
        attributes are being deprecated in, using the C{__name__} global can
        be helpful
    @type name: C{str}
    @param name: Attribute name to deprecate
    """
    module = sys.modules[moduleName]
    if not isinstance(module, _ModuleProxy):
        # First deprecation for this module: replace the module in
        # sys.modules with a proxy that hooks attribute access. Subsequent
        # deprecations reuse the same proxy.
        module = _ModuleProxy(module)
        sys.modules[moduleName] = module
    _deprecateAttribute(module, name, version, message)
def warnAboutFunction(offender, warningString):
    """
    Issue a warning string, identifying C{offender} as the responsible code.
    This function is used to deprecate some behavior of a function. It differs
    from L{warnings.warn} in that it is not limited to deprecating the behavior
    of a function currently on the call stack.
    @param offender: The function that is being deprecated.
    @param warningString: The string that should be emitted by this warning.
    @type warningString: C{str}
    @since: 11.0
    """
    # inspect.getmodule() is attractive, but somewhat
    # broken in Python < 2.6. See Python bug 4845.
    offenderModule = sys.modules[offender.__module__]
    filename = inspect.getabsfile(offenderModule)
    # Attribute the warning to the last line of the offending function's
    # code object.
    lineStarts = list(findlinestarts(offender.__code__))
    lastLineNo = lineStarts[-1][1]
    globals = offender.__globals__
    kwargs = dict(
        category=DeprecationWarning,
        filename=filename,
        lineno=lastLineNo,
        module=offenderModule.__name__,
        registry=globals.setdefault("__warningregistry__", {}),
        module_globals=None)
    # warn_explicit lets us point the warning at the offender's location
    # rather than a frame on the current call stack.
    warn_explicit(warningString, **kwargs)
def _passedArgSpec(argspec, positional, keyword):
    """
    Take an I{inspect.ArgSpec}, a tuple of positional arguments, and a dict of
    keyword arguments, and return a mapping of arguments that were actually
    passed to their passed values.
    @param argspec: The argument specification for the function to inspect.
    @type argspec: I{inspect.ArgSpec}
    @param positional: The positional arguments that were passed.
    @type positional: L{tuple}
    @param keyword: The keyword arguments that were passed.
    @type keyword: L{dict}
    @return: A dictionary mapping argument names (those declared in C{argspec})
        to values that were passed explicitly by the user.
    @rtype: L{dict} mapping L{str} to L{object}
    """
    passed = {}
    extraKeywords = None
    if argspec.keywords is not None:
        # The function takes **kwargs; collect unknown keywords under it.
        extraKeywords = passed[argspec.keywords] = {}
    leftover = len(positional) - len(argspec.args)
    if leftover > 0:
        if argspec.varargs is None:
            raise TypeError("Too many arguments.")
        # Surplus positionals are swallowed by *varargs.
        passed[argspec.varargs] = positional[len(argspec.args):]
    for argName, argValue in zip(argspec.args, positional):
        passed[argName] = argValue
    for argName, argValue in keyword.items():
        if argName in argspec.args:
            if argName in passed:
                raise TypeError("Already passed.")
            passed[argName] = argValue
        elif extraKeywords is not None:
            extraKeywords[argName] = argValue
        else:
            raise TypeError("no such param")
    return passed
def _passedSignature(signature, positional, keyword):
    """
    Take an L{inspect.Signature}, a tuple of positional arguments, and a dict of
    keyword arguments, and return a mapping of arguments that were actually
    passed to their passed values.
    @param signature: The signature of the function to inspect.
    @type signature: L{inspect.Signature}
    @param positional: The positional arguments that were passed.
    @type positional: L{tuple}
    @param keyword: The keyword arguments that were passed.
    @type keyword: L{dict}
    @return: A dictionary mapping argument names (those declared in
        C{signature}) to values that were passed explicitly by the user.
    @rtype: L{dict} mapping L{str} to L{object}
    """
    result = {}
    kwargs = None
    numPositional = 0
    for (n, (name, param)) in enumerate(signature.parameters.items()):
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            # Varargs, for example: *args
            result[name] = positional[n:]
            # All positionals are now accounted for: the n named parameters
            # preceding the varargs plus everything it swallowed. (This was
            # previously "+ 1", which miscounted whenever *args was not the
            # second parameter and spuriously raised "Too many arguments.")
            numPositional = len(result[name]) + n
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            # Variable keyword args, for example: **my_kwargs
            kwargs = result[name] = {}
        elif param.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                            inspect.Parameter.POSITIONAL_ONLY):
            if n < len(positional):
                result[name] = positional[n]
                numPositional += 1
        elif param.kind == inspect.Parameter.KEYWORD_ONLY:
            # Keyword-only parameters passed by the caller are handled in the
            # keyword loop below; here we only fill in defaults (and reject a
            # missing required keyword-only argument).
            if name not in keyword:
                if param.default == inspect.Parameter.empty:
                    raise TypeError("missing keyword arg {}".format(name))
                else:
                    result[name] = param.default
        else:
            raise TypeError("'{}' parameter is invalid kind: {}".format(
                name, param.kind))
    if len(positional) > numPositional:
        raise TypeError("Too many arguments.")
    for name, value in keyword.items():
        if name in signature.parameters.keys():
            if name in result:
                raise TypeError("Already passed.")
            result[name] = value
        elif kwargs is not None:
            kwargs[name] = value
        else:
            raise TypeError("no such param")
    return result
def _mutuallyExclusiveArguments(argumentPairs):
    """
    Decorator which causes its decoratee to raise a L{TypeError} if two of the
    given arguments are passed at the same time.
    @param argumentPairs: pairs of argument identifiers, each pair indicating
        an argument that may not be passed in conjunction with another.
    @type argumentPairs: sequence of 2-sequences of L{str}
    @return: A decorator, used like so::
        @_mutuallyExclusiveArguments([["tweedledum", "tweedledee"]])
        def function(tweedledum=1, tweedledee=2):
            "Don't pass tweedledum and tweedledee at the same time."
    @rtype: 1-argument callable taking a callable and returning a callable.
    """
    def wrapper(wrappee):
        # Pick the argument-introspection strategy once, at decoration time:
        # inspect.signature on Python 3, getargspec on Python 2.
        if getattr(inspect, "signature", None):
            argumentSpec = inspect.signature(wrappee)
            extractPassed = _passedSignature
        else:
            argumentSpec = inspect.getargspec(wrappee)
            extractPassed = _passedArgSpec
        @wraps(wrappee)
        def wrapped(*args, **kwargs):
            passedArguments = extractPassed(argumentSpec, args, kwargs)
            for first, second in argumentPairs:
                if first in passedArguments and second in passedArguments:
                    raise TypeError("nope")
            return wrappee(*args, **kwargs)
        return wrapped
    return wrapper
| whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/python/deprecate.py | Python | mit | 26,775 | [
"Amber"
] | 9b712669f96e114da3f344554ad8f33607c620f8f63d8e1b5a761557705e083c |
"""
==========================
Gaussian HMM of stock data
==========================
This script shows how to use Gaussian HMM.
It uses stock price data, which can be obtained from yahoo finance.
For more information on how to get stock prices with matplotlib, please refer
to date_demo1.py of matplotlib.
"""
from __future__ import print_function
import datetime
import numpy as np
import pylab as pl
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from hmmlearn.hmm import GaussianHMM
print(__doc__)
###############################################################################
# Downloading the data
# NOTE(review): matplotlib.finance was removed in matplotlib >= 2.0 and the
# Yahoo quotes endpoint it used is defunct -- this example needs an old
# matplotlib (1.x) to run as written.
date1 = datetime.date(1995, 1, 1) # start date
date2 = datetime.date(2012, 1, 6) # end date
# get quotes from yahoo finance
quotes = quotes_historical_yahoo_ochl("INTC", date1, date2)
if len(quotes) == 0:
    raise SystemExit
# unpack quotes: each quote tuple holds (date, open, close, high, low, volume)
# per the ochl ordering -- here we keep date, close and volume only
dates = np.array([q[0] for q in quotes], dtype=int)
close_v = np.array([q[2] for q in quotes])
volume = np.array([q[5] for q in quotes])[1:]
# take diff of close value
# this makes len(diff) = len(close_t) - 1
# therefore the other quantities also need to be shifted by one
diff = close_v[1:] - close_v[:-1]
dates = dates[1:]
close_v = close_v[1:]
# pack diff and volume for training: observations are 2-D (price change, volume)
X = np.column_stack([diff, volume])
###############################################################################
# Run Gaussian HMM
print("fitting to HMM and decoding ...", end='')
n_components = 5
# make an HMM instance and execute fit
model = GaussianHMM(n_components, covariance_type="diag", n_iter=1000)
model.fit(X)
# predict the optimal sequence of internal hidden state (Viterbi decoding)
hidden_states = model.predict(X)
print("done\n")
###############################################################################
# print trained parameters and plot
print("Transition matrix")
print(model.transmat_)
print()
print("means and vars of each hidden state")
for i in range(n_components):
    print("%dth hidden state" % i)
    print("mean = ", model.means_[i])
    print("var = ", np.diag(model.covars_[i]))
    print()
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
fig = pl.figure()
ax = fig.add_subplot(111)
for i in range(n_components):
    # use fancy indexing to plot data in each state
    idx = (hidden_states == i)
    ax.plot_date(dates[idx], close_v[idx], 'o', label="%dth hidden state" % i)
ax.legend()
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = lambda x: '$%1.2f' % x
ax.grid(True)
fig.autofmt_xdate()
pl.show()
"Gaussian"
] | 1bcefdacfa8238e4e6d5a8fa8e6fc480ee093854f059c3a8a87e167611d3af50 |
"""module for producing astronomical plots
(c) 2007-2013 Matt Hilton
U{http://astlib.sourceforge.net}
This module provides the matplotlib powered ImagePlot class, which is designed to be flexible. ImagePlots can have RA, Dec. coordinate axes, contour overlays, and have objects marked in them, using WCS coordinates. RGB plots are supported too.
@var DEC_TICK_STEPS: Defines the possible coordinate label steps on the declination axis in sexagesimal mode. Dictionary format: {'deg', 'unit'}
@type DEC_TICK_STEPS: dictionary list
@var RA_TICK_STEPS: Defines the possible coordinate label steps on the right ascension axis in sexagesimal mode. Dictionary format: {'deg', 'unit'}
@type RA_TICK_STEPS: dictionary list
@var DECIMAL_TICK_STEPS: Defines the possible coordinate label steps on both coordinate axes in decimal degrees mode.
@type DECIMAL_TICK_STEPS: list
@var DEG: Variable to stand in for the degrees symbol.
@type DEG: string
@var PRIME: Variable to stand in for the prime symbol.
@type PRIME: string
@var DOUBLE_PRIME: Variable to stand in for the double prime symbol.
@type DOUBLE_PRIME: string
"""
import math
from . import astImages
#from . import astWCS
from . import astCoords
import numpy
#import pyfits
from scipy import interpolate
import pylab
import matplotlib.patches as patches
import sys
# Handle unicode python 2 and 3
# Compare the major version number instead of the sys.version string:
# lexicographic comparison of version strings is a fragile version test.
if sys.version_info[0] < 3:
    import codecs
    def u(x):
        # Interpret escape sequences (e.g. \N{DEGREE SIGN}) as unicode on
        # Python 2.
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        # Python 3 str is already unicode; pass through unchanged.
        return x
# Candidate tick-step sizes (sexagesimal mode) for the declination axis,
# ordered from finest (1 arcsec) to coarsest (30 degrees).
DEC_TICK_STEPS = [
    {'deg': 1.0 / 60.0 / 60.0, 'unit': "s"},
    {'deg': 2.0 / 60.0 / 60.0, 'unit': "s"},
    {'deg': 5.0 / 60.0 / 60.0, 'unit': "s"},
    {'deg': 10.0 / 60.0 / 60.0, 'unit': "s"},
    {'deg': 30.0 / 60.0 / 60.0, 'unit': "s"},
    {'deg': 1.0 / 60.0, 'unit': "m"},
    {'deg': 2.0 / 60.0, 'unit': "m"},
    {'deg': 5.0 / 60.0, 'unit': "m"},
    {'deg': 15.0 / 60.0, 'unit': "m"},
    {'deg': 30.0 / 60.0, 'unit': "m"},
    {'deg': 1.0, 'unit': "d"},
    {'deg': 2.0, 'unit': "d"},
    {'deg': 4.0, 'unit': "d"},
    {'deg': 5.0, 'unit': "d"},
    {'deg': 10.0, 'unit': "d"},
    {'deg': 20.0, 'unit': "d"},
    {'deg': 30.0, 'unit': "d"},
]
# Candidate tick-step sizes (sexagesimal mode) for the right ascension axis,
# expressed in degrees (24 h of RA == 360 deg).
RA_TICK_STEPS = [
    {'deg': (0.5 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (1.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (2.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (4.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (5.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (10.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (20.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (30.0 / 60.0 / 60.0 / 24.0) * 360.0, 'unit': "s"},
    {'deg': (1.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (2.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (5.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (10.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (20.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (30.0 / 60.0 / 24.0) * 360.0, 'unit': "m"},
    {'deg': (1.0 / 24.0) * 360.0, 'unit': "h"},
    {'deg': (3.0 / 24.0) * 360.0, 'unit': "h"},
    {'deg': (6.0 / 24.0) * 360.0, 'unit': "h"},
    {'deg': (12.0 / 24.0) * 360.0, 'unit': "h"},
]
# Candidate tick-step sizes (decimal degrees mode) for both coordinate axes.
DECIMAL_TICK_STEPS = [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5,
                      1.0, 2.0, 2.5, 5.0, 10.0, 30.0, 90.0]
# Symbols used when rendering sexagesimal/decimal coordinate labels:
# a unicode degree sign and matplotlib mathtext prime/double-prime markers.
DEG = u("\N{DEGREE SIGN}")
PRIME = "$^\prime$"
DOUBLE_PRIME = "$^{\prime\prime}$"
#-----------------------------------------------------------------------------
class ImagePlot:
"""This class describes a matplotlib image plot containing an astronomical
image with an associated WCS.
Objects within the image boundaries can be marked by passing their WCS
coordinates to L{ImagePlot.addPlotObjects}.
Other images can be overlaid using L{ImagePlot.addContourOverlay}.
For images rotated with North at the top, East at the left (as can be done
using L{astImages.clipRotatedImageSectionWCS} or
L{astImages.resampleToTanProjection}, WCS coordinate axes can be plotted,
with tick marks set appropriately for the image size. Otherwise, a compass
can be plotted showing the directions of North and East in the image.
RGB images are also supported.
The plot can of course be tweaked further after creation using
matplotlib/pylab commands.
"""
    def __init__(self, imageData, imageWCS, axe=[0.1, 0.1, 0.8, 0.8],
                 cutLevels=["smart", 99.5], colorMapName="gray", title=None,
                 axesLabels="sexagesimal", axesFontFamily="serif",
                 axesFontSize=12.0, RATickSteps="auto", decTickSteps="auto",
                 colorBar=False, interpolation="bilinear"):
        """Makes an ImagePlot from the given image array and astWCS. For
        coordinate axes to work, the image and WCS should have been rotated
        such that East is at the left, North is at the top (see e.g.
        L{astImages.clipRotatedImageSectionWCS}, or
        L{astImages.resampleToTanProjection}).
        If imageData is given as a list in the format [r, g, b], a color RGB
        plot will be made. However, in this case the cutLevels must be
        specified manually for each component as a list - i.e. cutLevels = [[r
        min, r max], [g min, g max], [b min, b max]]. In this case of course,
        the colorMap will be ignored. All r, g, b image arrays must have the
        same dimensions.
        Set axesLabels = None to make a plot without coordinate axes plotted.
        The axes can be marked in either sexagesimal or decimal celestial
        coordinates. If RATickSteps or decTickSteps are set to "auto", the
        appropriate axis scales will be determined automatically from the size
        of the image array and associated WCS. The tick step sizes can be
        overidden. If the coordinate axes are in sexagesimal format a
        dictionary in the format {'deg', 'unit'} is needed (see
        L{RA_TICK_STEPS} and L{DEC_TICK_STEPS} for examples). If the coordinate
        axes are in decimal format, the tick step size is specified simply in
        RA, dec decimal degrees.
        @type imageData: numpy array or list
        @param imageData: image data array or list of numpy arrays [r, g, b]
        @type imageWCS: astWCS.WCS
        @param imageWCS: astWCS.WCS object
        @type axe: list
        @param axe: specifies where in the current figure to draw the finder
            chart (see pylab.axes). NOTE(review): currently unused because the
            pylab.axes call below is commented out -- confirm whether self.axes
            is set elsewhere before draw() is called. Also note the mutable
            (list) default; it is never mutated here, but a tuple would be
            safer.
        @type cutLevels: list
        @param cutLevels: sets the image scaling - available options:
            - pixel values: cutLevels=[low value, high value].
            - histogram equalisation: cutLevels=["histEq", number of bins (
            e.g. 1024)]
            - relative: cutLevels=["relative", cut per cent level (e.g. 99.5)]
            - smart: cutLevels=["smart", cut per cent level (e.g. 99.5)]
        ["smart", 99.5] seems to provide good scaling over a range of different
        images.
        Note that for RGB images, cut levels must be specified manually i.e. as
        a list: [[r min, rmax], [g min, g max], [b min, b max]]
        @type colorMapName: string
        @param colorMapName: name of a standard matplotlib colormap, e.g.
        "hot", "cool", "gray" etc. (do "help(pylab.colormaps)" in the Python
        interpreter to see available options)
        @type title: string
        @param title: optional title for the plot
        @type axesLabels: string
        @param axesLabels: either "sexagesimal" (for H:M:S, D:M:S), "decimal"
        (for decimal degrees) or None (for no coordinate axes labels)
        @type axesFontFamily: string
        @param axesFontFamily: matplotlib fontfamily, e.g. 'serif', 'sans-serif' etc.
        @type axesFontSize: float
        @param axesFontSize: font size of axes labels and titles (in points)
        @type colorBar: bool
        @param colorBar: if True, plot a vertical color bar at the side of the
        image indicating the intensity scale.
        @type interpolation: string
        @param interpolation: interpolation to apply to the image plot (see the
        documentation for the matplotlib.pylab.imshow command)
        """
        self.RADeg, self.decDeg = imageWCS.getCentreWCSCoords()
        self.wcs = imageWCS
        # Handle case where imageData is [r, g, b]
        if type(imageData) == list:
            if len(imageData) == 3:
                if len(cutLevels) == 3:
                    # Normalise each channel separately with its own cut
                    # levels, then stack into a single (y, x, 3) RGB array.
                    r = astImages.normalise(imageData[0], cutLevels[0])
                    g = astImages.normalise(imageData[1], cutLevels[1])
                    b = astImages.normalise(imageData[2], cutLevels[2])
                    rgb = numpy.array(
                        [r.transpose(), g.transpose(), b.transpose()])
                    rgb = rgb.transpose()
                    self.data = rgb
                    self.rgbImage = True
                else:
                    raise Exception("tried to create a RGB array, but "
                                    "cutLevel is not a list of 3 lists")
            else:
                raise Exception("tried to create a RGB array but "
                                " imageData is not a list of 3 arrays")
        else:
            self.data = imageData
            self.rgbImage = False
        # NOTE(review): this assignment was disabled, so self.axes is never
        # set in this constructor even though draw() reads it -- confirm the
        # intended source of self.axes.
        #self.axes = pylab.axes(axes)
        self.cutLevels = cutLevels
        self.colorMapName = colorMapName
        self.title = title
        self.axesLabels = axesLabels
        self.colorBar = colorBar
        self.axesFontSize = axesFontSize
        self.axesFontFamily = axesFontFamily
        self.flipXAxis = False
        self.flipYAxis = False
        self.interpolation = interpolation
        if self.axesLabels is not None:
            # Allow user to override the automatic coord tick spacing
            if self.axesLabels == "sexagesimal":
                # Sexagesimal overrides must be {'deg': ..., 'unit': ...}
                if RATickSteps != "auto":
                    if type(RATickSteps) != dict or "deg" not in list(RATickSteps.keys()) \
                            or "unit" not in list(RATickSteps.keys()):
                        raise Exception(
                            "RATickSteps needs to be in format {'deg', 'unit'} for sexagesimal axes labels")
                if decTickSteps != "auto":
                    if type(decTickSteps) != dict or "deg" not in list(decTickSteps.keys()) \
                            or "unit" not in list(decTickSteps.keys()):
                        raise Exception(
                            "decTickSteps needs to be in format {'deg', 'unit'} for sexagesimal axes labels")
            elif self.axesLabels == "decimal":
                # Decimal overrides are plain floats in degrees
                if RATickSteps != "auto":
                    if type(RATickSteps) != float:
                        raise Exception(
                            "RATickSteps needs to be a float (if not 'auto') for decimal axes labels")
                if decTickSteps != "auto":
                    if type(decTickSteps) != float:
                        raise Exception(
                            "decTickSteps needs to be a float (if not 'auto') for decimal axes labels")
            self.RATickSteps = RATickSteps
            self.decTickSteps = decTickSteps
            self.calcWCSAxisLabels(axesLabels=self.axesLabels)
        # this list stores objects to overplot, add to it using addPlotObjects()
        self.plotObjects = []
        # this list stores image data to overlay as contours, add to it using
        # addContourOverlay()
        self.contourOverlays = []
        self.draw()
    def draw(self):
        """Redraws the ImagePlot.
        Re-renders the image data, any contour overlays, all plot objects
        (symbols, labels, compass, scale bar), and the coordinate axis
        labels/limits. Call after changing plot state to refresh the figure.
        """
        # NOTE(review): self.axes is expected to be a matplotlib Axes, but the
        # assignment in __init__ is commented out -- confirm it is set before
        # draw() runs.
        pylab.axes(self.axes)
        pylab.cla()
        if self.title is not None:
            pylab.title(self.title)
        try:
            colorMap = pylab.cm.get_cmap(self.colorMapName)
        except AssertionError:
            # NOTE(review): presumably older matplotlib raised AssertionError
            # for unknown colormap names -- confirm for the installed version.
            raise Exception(self.colorMapName +
                            "is not a defined matplotlib colormap.")
        if not self.rgbImage:
            # Greyscale/pseudo-colour path: apply the configured cut levels
            self.cutImage = astImages.intensityCutImage(self.data,
                                                        self.cutLevels)
            if self.cutLevels[0] == "histEq":
                pylab.imshow(self.cutImage['image'],
                             interpolation=self.interpolation,
                             origin='lower',
                             cmap=colorMap)
            else:
                pylab.imshow(self.cutImage['image'],
                             interpolation=self.interpolation,
                             norm=self.cutImage['norm'], origin='lower',
                             cmap=colorMap)
        else:
            # RGB data is already normalised per channel in __init__
            pylab.imshow(self.data, interpolation="bilinear", origin='lower')
        if self.colorBar:
            pylab.colorbar(shrink=0.8)
        for c in self.contourOverlays:
            pylab.contour(c['contourData']['scaledImage'],
                          c['contourData']['contourLevels'], colors=c['color'],
                          linewidths=c['width'])
        # Draw each marked object with its chosen symbol and optional label
        for p in self.plotObjects:
            for x, y, l in zip(p['x'], p['y'], p['objLabels']):
                if p['symbol'] == "circle":
                    c = patches.Circle((x, y),
                                       radius=p['sizePix'] / 2.0,
                                       fill=False,
                                       edgecolor=p['color'],
                                       linewidth=p['width'])
                    self.axes.add_patch(c)
                elif p['symbol'] == "box":
                    c = patches.Rectangle(
                        (x - p['sizePix'] / 2, y - p['sizePix'] / 2),
                        p['sizePix'],
                        p['sizePix'],
                        fill=False,
                        edgecolor=p['color'],
                        linewidth=p['width'])
                    self.axes.add_patch(c)
                elif p['symbol'] == "cross":
                    pylab.plot([x - p['sizePix'] / 2, x + p['sizePix'] / 2],
                               [y, y],
                               linestyle='-',
                               linewidth=p['width'],
                               color=p['color'])
                    pylab.plot([x, x],
                               [y - p['sizePix'] / 2, y + p['sizePix'] / 2],
                               linestyle='-',
                               linewidth=p['width'],
                               color=p['color'])
                elif p['symbol'] == "diamond":
                    c = patches.RegularPolygon([x, y],
                                               4,
                                               radius=p['sizePix'] / 2,
                                               orientation=0,
                                               edgecolor=p['color'],
                                               fill=False,
                                               linewidth=p['width'])
                    self.axes.add_patch(c)
                if l is not None:
                    pylab.text(x, y + p['sizePix'] / 1.5, l,
                               horizontalalignment='center',
                               fontsize=p['objLabelSize'], color=p['color'])
            # The compass and scale bar use only the first coordinate entry
            # of the plot object, and derive on-sky directions from the WCS.
            if p['symbol'] == "compass":
                x = p['x'][0]
                y = p['y'][0]
                ra = p['RA'][0]
                dec = p['dec'][0]
                westPoint, eastPoint, southPoint, northPoint = astCoords.calcRADecSearchBox(
                    ra, dec, p['sizeArcSec'] / 3600.0 / 2.0)
                northPix = self.wcs.wcs2pix(ra, northPoint)
                eastPix = self.wcs.wcs2pix(eastPoint, dec)
                edx = eastPix[0] - x
                edy = eastPix[1] - y
                ndx = northPix[0] - x
                ndy = northPix[1] - y
                nArrow = patches.Arrow(x,
                                       y,
                                       ndx,
                                       ndy,
                                       edgecolor=p['color'],
                                       facecolor=p['color'],
                                       width=p['width'])
                eArrow = patches.Arrow(x,
                                       y,
                                       edx,
                                       edy,
                                       edgecolor=p['color'],
                                       facecolor=p['color'],
                                       width=p['width'])
                self.axes.add_patch(nArrow)
                self.axes.add_patch(eArrow)
                pylab.text(x + ndx + ndx * 0.2,
                           y + ndy + ndy * 0.2,
                           "N",
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontsize=p['objLabelSize'],
                           color=p['color'])
                pylab.text(x + edx + edx * 0.2,
                           y + edy + edy * 0.2,
                           "E",
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontsize=p['objLabelSize'],
                           color=p['color'])
            if p['symbol'] == "scaleBar":
                x = p['x'][0]
                y = p['y'][0]
                ra = p['RA'][0]
                dec = p['dec'][0]
                westPoint, eastPoint, southPoint, northPoint = astCoords.calcRADecSearchBox(
                    ra, dec, p['sizeArcSec'] / 3600.0 / 2.0)
                northPix = self.wcs.wcs2pix(ra, northPoint)
                eastPix = self.wcs.wcs2pix(eastPoint, dec)
                edx = eastPix[0] - x
                edy = eastPix[1] - y
                ndx = northPix[0] - x
                ndy = northPix[1] - y
                eArrow = patches.Arrow(x,
                                       y,
                                       edx,
                                       edy,
                                       edgecolor=p['color'],
                                       facecolor=p['color'],
                                       width=p['width'])
                wArrow = patches.Arrow(x,
                                       y,
                                       -edx,
                                       edy,
                                       edgecolor=p['color'],
                                       facecolor=p['color'],
                                       width=p['width'])
                self.axes.add_patch(eArrow)
                self.axes.add_patch(wArrow)
                # Work out label: arcsec / arcmin / degrees as appropriate
                scaleLabel = None
                if p['sizeArcSec'] < 60.0:
                    scaleLabel = "%.0f %s" % (p['sizeArcSec'], DOUBLE_PRIME)
                elif p['sizeArcSec'] >= 60.0 and p['sizeArcSec'] < 3600.0:
                    scaleLabel = "%.0f %s" % (p['sizeArcSec'] / 60.0, PRIME)
                else:
                    scaleLabel = "%.0f %s" % (p['sizeArcSec'] / 3600.0, DEG)
                pylab.text(x,
                           y + 0.025 * self.data.shape[1],
                           scaleLabel,
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontsize=p['objLabelSize'],
                           color=p['color'])
        if self.axesLabels is not None:
            # Tick positions/labels were computed by calcWCSAxisLabels()
            pylab.xticks(self.ticsRA[0], self.ticsRA[1], weight='normal',
                         family=self.axesFontFamily,
                         fontsize=self.axesFontSize)
            pylab.yticks(self.ticsDec[0], self.ticsDec[1], weight='normal',
                         family=self.axesFontFamily,
                         fontsize=self.axesFontSize)
            pylab.xlabel(self.RAAxisLabel,
                         family=self.axesFontFamily,
                         fontsize=self.axesFontSize)
            pylab.ylabel(self.decAxisLabel,
                         family=self.axesFontFamily,
                         fontsize=self.axesFontSize)
        else:
            pylab.xticks([], [])
            pylab.yticks([], [])
            pylab.xlabel("")
            pylab.ylabel("")
        # Apply axis limits, optionally mirrored for flipped axes
        if not self.flipXAxis:
            pylab.xlim(0, self.data.shape[1] - 1)
        else:
            pylab.xlim(self.data.shape[1] - 1, 0)
        if not self.flipYAxis:
            pylab.ylim(0, self.data.shape[0] - 1)
        else:
            pylab.ylim(self.data.shape[0] - 1, 0)
def addContourOverlay(self,
                      contourImageData,
                      contourWCS,
                      tag,
                      levels=["linear", "min", "max", 5],
                      width=1,
                      color="white",
                      smooth=0,
                      highAccuracy=False):
    """Adds image data to the ImagePlot as a contour overlay. The contours
    can be removed using L{removeContourOverlay}. If a contour overlay
    already exists with this tag, it will be replaced.

    @type contourImageData: numpy array
    @param contourImageData: image data array from which contours are to be
        generated
    @type contourWCS: astWCS.WCS
    @param contourWCS: astWCS.WCS object for the image to be contoured
    @type tag: string
    @param tag: identifying tag for this set of contours
    @type levels: list
    @param levels: sets the contour levels - available options:
        - values: contourLevels=[list of values specifying each level]
        - linear spacing: contourLevels=['linear', min level value, max
          level value, number of levels] - can use "min", "max" to
          automatically set min, max levels from image data
        - log spacing: contourLevels=['log', min level value, max level
          value, number of levels] - can use "min", "max" to automatically
          set min, max levels from image data
    @type width: int
    @param width: width of the overlaid contours
    @type color: string
    @param color: color of the overlaid contours, specified by the name of
        a standard matplotlib color, e.g., "black", "white", "cyan" etc.
    @type smooth: float
    @param smooth: standard deviation (in arcsec) of Gaussian filter for
        pre-smoothing of contour image data (set to 0 for no smoothing)
    @type highAccuracy: bool
    @param highAccuracy: if True, sample every corresponding pixel in each
        image; otherwise, sample every nth pixel, where n = the ratio of the
        image scales.
    """
    # For an RGB image only the first channel is needed to fix the
    # geometry of the background the contours are resampled onto.
    if self.rgbImage:
        backgroundData = self.data[:, :, 0]
    else:
        backgroundData = self.data
    contourData = astImages.generateContourOverlay(backgroundData,
                                                   self.wcs,
                                                   contourImageData,
                                                   contourWCS, levels,
                                                   smooth,
                                                   highAccuracy=highAccuracy)
    # Replace an existing overlay with the same tag, otherwise append.
    replaced = False
    for overlay in self.contourOverlays:
        if overlay['tag'] == tag:
            overlay.update({'contourData': contourData,
                            'tag': tag,
                            'color': color,
                            'width': width})
            replaced = True
    if not replaced:
        self.contourOverlays.append({'contourData': contourData,
                                     'tag': tag,
                                     'color': color,
                                     'width': width})
    self.draw()
def removeContourOverlay(self, tag):
    """Removes the contour overlay from the ImagePlot corresponding to the
    tag.

    @type tag: string
    @param tag: tag for contour overlay in ImagePlot.contourOverlays to be
        removed
    """
    # Bug fix: the previous implementation removed entries from
    # self.plotObjects (the wrong list) and mutated the list while
    # iterating over it with a removal index that fell out of sync.
    # Filtering self.contourOverlays handles duplicates correctly too.
    self.contourOverlays = [c for c in self.contourOverlays
                            if c['tag'] != tag]
    self.draw()
def addPlotObjects(self,
                   objRAs,
                   objDecs,
                   tag,
                   symbol="circle",
                   size=4.0,
                   width=1.0,
                   color="yellow",
                   objLabels=None,
                   objLabelSize=12.0):
    """Add objects with RA, dec coords objRAs, objDecs to the ImagePlot.
    Only objects that fall within the image boundaries will be plotted.

    symbol specifies the type of symbol with which to mark the object in
    the image. The following values are allowed:
        - "circle"
        - "box"
        - "cross"
        - "diamond"

    size specifies the diameter in arcsec of the symbol (if plotSymbol ==
    "circle"), or the width of the box in arcsec (if plotSymbol == "box")

    width specifies the thickness of the symbol lines in pixels

    color can be any valid matplotlib color (e.g. "red", "green", etc.)

    The objects can be removed from the plot by using removePlotObjects(),
    and then calling draw(). If the ImagePlot already has a set of
    plotObjects with the same tag, they will be replaced.

    @type objRAs: numpy array or list
    @param objRAs: object RA coords in decimal degrees
    @type objDecs: numpy array or list
    @param objDecs: corresponding object Dec. coords in decimal degrees
    @type tag: string
    @param tag: identifying tag for this set of objects
    @type symbol: string
    @param symbol: either "circle", "box", "cross", or "diamond"
    @type size: float
    @param size: size of symbols to plot (radius in arcsec, or width of box)
    @type width: float
    @param width: width of symbols in pixels
    @type color: string
    @param color: any valid matplotlib color string, e.g. "red", "green" etc.
    @type objLabels: list
    @param objLabels: text labels to plot next to objects in figure
    @type objLabelSize: float
    @param objLabelSize: size of font used for object labels (in points)
    """
    pixCoords = self.wcs.wcs2pix(objRAs, objDecs)
    xMax = self.data.shape[1]
    yMax = self.data.shape[0]
    if objLabels is None:
        objLabels = [None] * len(objRAs)
    # Keep only the objects that land inside the image boundaries,
    # filtering coords and labels together so they stay aligned.
    xInPlot = []
    yInPlot = []
    RAInPlot = []
    decInPlot = []
    labelInPlot = []
    for p, r, d, l in zip(pixCoords, objRAs, objDecs, objLabels):
        if p[0] >= 0 and p[0] < xMax and p[1] >= 0 and p[1] < yMax:
            xInPlot.append(p[0])
            yInPlot.append(p[1])
            RAInPlot.append(r)
            decInPlot.append(d)
            labelInPlot.append(l)
    xInPlot = numpy.array(xInPlot)
    yInPlot = numpy.array(yInPlot)
    RAInPlot = numpy.array(RAInPlot)
    decInPlot = numpy.array(decInPlot)
    # Size of symbols in pixels in plot - converted from arcsec
    sizePix = (size / 3600.0) / self.wcs.getPixelSizeDeg()
    # Bug fix: the replace branch previously stored the unfiltered
    # objLabels list, which desynced labels from the filtered
    # x/y/RA/dec arrays; both branches now store labelInPlot.
    newProps = {'x': xInPlot,
                'y': yInPlot,
                'RA': RAInPlot,
                'dec': decInPlot,
                'tag': tag,
                'objLabels': labelInPlot,
                'symbol': symbol,
                'sizePix': sizePix,
                'sizeArcSec': size,
                'width': width,
                'color': color,
                'objLabelSize': objLabelSize}
    alreadyGot = False
    for p in self.plotObjects:
        if p['tag'] == tag:
            p.update(newProps)
            alreadyGot = True
    if not alreadyGot:
        self.plotObjects.append(newProps)
    self.draw()
def removePlotObjects(self, tag):
    """Removes the plotObjects from the ImagePlot corresponding to the tag.
    The plot must be redrawn for the change to take effect.

    @type tag: string
    @param tag: tag for set of objects in ImagePlot.plotObjects to be removed
    """
    # Bug fix: the previous implementation removed items from the list
    # while iterating over it, which can skip consecutive matches, and
    # its removal index fell out of sync after the first removal so the
    # wrong entry could be deleted. A filter handles all cases.
    self.plotObjects = [p for p in self.plotObjects if p['tag'] != tag]
    self.draw()
def addCompass(self, location, sizeArcSec, color="white", fontSize=12,
               width=20.0):
    """Adds a compass to the ImagePlot at the given location ('N', 'NE',
    'E', 'SE', 'S', 'SW', 'W', or 'NW'). Note these aren't directions on
    the WCS coordinate grid, they are relative positions on the plot - so N
    is top centre, NE is top right, SW is bottom right etc..
    Alternatively, pixel coordinates (x, y) in the image can be given.

    @type location: string or tuple
    @param location: location in the plot where the compass is drawn:
        - string: N, NE, E, SE, S, SW, W or NW
        - tuple: (x, y)
    @type sizeArcSec: float
    @param sizeArcSec: length of the compass arrows on the plot in arc seconds
    @type color: string
    @param color: any valid matplotlib color string
    @type fontSize: float
    @param fontSize: size of font used to label N and E, in points
    @type width: float
    @param width: width of arrows used to mark compass
    """
    if type(location) == str:
        # Relative location: translate the compass-point string into
        # pixel coords, keeping a buffer so the arrows clear the edge.
        cRADeg, cDecDeg = self.wcs.getCentreWCSCoords()
        # NOTE(review): RAMin/RAMax/decMin/decMax and the search-box
        # corners below are computed but never used in this method —
        # confirm they are safe to drop before removing.
        RAMin, RAMax, decMin, decMax = self.wcs.getImageMinMaxWCSCoords()
        westPoint, eastPoint, southPoint, northPoint = astCoords.calcRADecSearchBox(
            cRADeg, cDecDeg, sizeArcSec / 3600.0 / 2.0)
        # Arrow length converted from arcsec into pixels on each axis.
        xSizePix = (sizeArcSec / 3600.0) / self.wcs.getXPixelSizeDeg()
        ySizePix = (sizeArcSec / 3600.0) / self.wcs.getYPixelSizeDeg()
        X = self.data.shape[1]
        Y = self.data.shape[0]
        xBufferPix = 0.5 * xSizePix
        yBufferPix = 0.5 * ySizePix
        cx, cy = self.wcs.wcs2pix(cRADeg, cDecDeg)
        foundLocation = False
        # NOTE(review): the defaults below swap cx and cy (x = cy,
        # y = cx); they are overwritten whenever the location string
        # contains a compass letter, so this only matters for unmatched
        # strings — confirm the swap is intended.
        x = cy
        y = cx
        if not self.wcs.isFlipped():
            # Unflipped WCS: N maps to the top of the plot, E to the left.
            if location.find("N") != -1:
                y = Y - 2 * yBufferPix
                foundLocation = True
            if location.find("S") != -1:
                y = yBufferPix
                foundLocation = True
            if location.find("E") != -1:
                x = xBufferPix * 2
                foundLocation = True
            if location.find("W") != -1:
                x = X - xBufferPix
                foundLocation = True
        else:
            # Flipped WCS: plot directions are mirrored, so the letters
            # map to the opposite edges.
            if location.find("S") != -1:
                y = Y - 2 * yBufferPix
                foundLocation = True
            if location.find("N") != -1:
                y = yBufferPix
                foundLocation = True
            if location.find("W") != -1:
                x = xBufferPix * 2
                foundLocation = True
            if location.find("E") != -1:
                x = X - xBufferPix
                foundLocation = True
        if not foundLocation:
            raise Exception(
                "didn't understand location string for scale bar (should be e.g. N, S, E, W).")
        RADeg, decDeg = self.wcs.pix2wcs(x, y)
    elif type(location) == tuple or type(location) == list:
        # Explicit pixel coordinates were given.
        x, y = location
        RADeg, decDeg = self.wcs.pix2wcs(x, y)
    else:
        raise Exception(
            "didn't understand location for scale bar - should be string or tuple.")
    # Store (or replace) the compass as a plotObject tagged "compass";
    # the actual arrows/labels are rendered in draw().
    alreadyGot = False
    for p in self.plotObjects:
        if p['tag'] == "compass":
            p['x'] = [x]
            p['y'] = [y]
            p['RA'] = [RADeg]
            p['dec'] = [decDeg]
            p['tag'] = "compass"
            p['objLabels'] = [None]
            p['symbol'] = "compass"
            p['sizeArcSec'] = sizeArcSec
            p['width'] = width
            p['color'] = color
            p['objLabelSize'] = fontSize
            alreadyGot = True
    if not alreadyGot:
        self.plotObjects.append({'x': [x],
                                 'y': [y],
                                 'RA': [RADeg],
                                 'dec': [decDeg],
                                 'tag': "compass",
                                 'objLabels': [None],
                                 'symbol': "compass",
                                 'width': width,
                                 'color': color,
                                 'objLabelSize': fontSize,
                                 'sizeArcSec': sizeArcSec})
    self.draw()
def addScaleBar(self, location, sizeArcSec, color="white", fontSize=12,
                width=20.0):
    """Adds a scale bar to the ImagePlot at the given location ('N', 'NE',
    'E', 'SE', 'S', 'SW', 'W', or 'NW'). Note these aren't directions on
    the WCS coordinate grid, they are relative positions on the plot - so N
    is top centre, NE is top right, SW is bottom right etc..
    Alternatively, pixel coordinates (x, y) in the image can be given.

    @type location: string or tuple
    @param location: location in the plot where the compass is drawn:
        - string: N, NE, E, SE, S, SW, W or NW
        - tuple: (x, y)
    @type sizeArcSec: float
    @param sizeArcSec: scale length to indicate on the plot in arc seconds
    @type color: string
    @param color: any valid matplotlib color string
    @type fontSize: float
    @param fontSize: size of font used to label N and E, in points
    @type width: float
    @param width: width of arrow used to mark scale
    """
    # Resolve the requested relative location into pixel coords first.
    if type(location) == str:
        cRADeg, cDecDeg = self.wcs.getCentreWCSCoords()
        RAMin, RAMax, decMin, decMax = self.wcs.getImageMinMaxWCSCoords()
        westPoint, eastPoint, southPoint, northPoint = astCoords.calcRADecSearchBox(
            cRADeg, cDecDeg, sizeArcSec / 3600.0 / 2.0)
        ySizePix = (sizeArcSec / 3600.0) / self.wcs.getYPixelSizeDeg()
        X = self.data.shape[1]
        Y = self.data.shape[0]
        xBufferPix = 0.6 * ySizePix
        yBufferPix = 0.05 * Y
        cx, cy = self.wcs.wcs2pix(cRADeg, cDecDeg)
        # NOTE(review): default (x, y) swaps cx and cy, mirroring the
        # original behaviour; it is overwritten by any recognised
        # compass letter below.
        x = cy
        y = cx
        flipped = self.wcs.isFlipped()
        matched = False
        # On a flipped WCS the plot directions are mirrored, so each
        # compass letter maps to the opposite edge.
        if ("N" in location and not flipped) or ("S" in location and flipped):
            y = Y - 1.5 * yBufferPix
            matched = True
        if ("S" in location and not flipped) or ("N" in location and flipped):
            y = yBufferPix
            matched = True
        if ("E" in location and not flipped) or ("W" in location and flipped):
            x = xBufferPix
            matched = True
        if ("W" in location and not flipped) or ("E" in location and flipped):
            x = X - xBufferPix
            matched = True
        if not matched:
            raise Exception(
                "didn't understand location string for scale bar (should be e.g. N, S, E, W).")
        RADeg, decDeg = self.wcs.pix2wcs(x, y)
    elif type(location) == tuple or type(location) == list:
        x, y = location
        RADeg, decDeg = self.wcs.pix2wcs(x, y)
    else:
        raise Exception(
            "didn't understand location for scale bar - should be string or tuple.")
    # Store (or replace) the bar as the plotObject tagged "scaleBar";
    # rendering happens in draw().
    barProps = {'x': [x],
                'y': [y],
                'RA': [RADeg],
                'dec': [decDeg],
                'tag': "scaleBar",
                'objLabels': [None],
                'symbol': "scaleBar",
                'sizeArcSec': sizeArcSec,
                'width': width,
                'color': color,
                'objLabelSize': fontSize}
    replaced = False
    for p in self.plotObjects:
        if p['tag'] == "scaleBar":
            p.update(barProps)
            replaced = True
    if not replaced:
        self.plotObjects.append(barProps)
    self.draw()
def calcWCSAxisLabels(self, axesLabels="decimal"):
    """This function calculates the positions of coordinate labels for the
    RA and Dec axes of the ImagePlot. The tick steps are calculated
    automatically unless self.RATickSteps, self.decTickSteps are set to
    values other than "auto" (see L{ImagePlot.__init__}).
    The ImagePlot must be redrawn for changes to be applied.

    @type axesLabels: string
    @param axesLabels: either "sexagesimal" (for H:M:S, D:M:S), "decimal"
        (for decimal degrees), or None for no coordinate axes labels
    """
    # Label equinox on axes
    equinox = self.wcs.getEquinox()
    if equinox < 1984:
        equinoxLabel = "B" + str(int(equinox))
    else:
        equinoxLabel = "J" + str(int(equinox))
    self.axesLabels = axesLabels
    ticsDict = self.getTickSteps()
    # Manual override - note: no minor tick marks anymore, but may want to
    # bring them back
    if self.RATickSteps != "auto":
        ticsDict['major']['RA'] = self.RATickSteps
    if self.decTickSteps != "auto":
        ticsDict['major']['dec'] = self.decTickSteps
    RALocs = []
    decLocs = []
    RALabels = []
    decLabels = []
    # Only major ticks are produced; the loop over major/minor was
    # removed but the key variable kept so the code below is unchanged.
    key = "major"
    #for key in ticsDict.keys(): # key is major or minor
    # Pick the axis titles and tick step sizes for the requested format.
    if self.axesLabels == "sexagesimal":
        self.RAAxisLabel = "R.A. (" + equinoxLabel + ")"
        self.decAxisLabel = "Dec. (" + equinoxLabel + ")"
        RADegStep = ticsDict[key]['RA']['deg']
        decDegStep = ticsDict[key]['dec']['deg']
    elif self.axesLabels == "decimal":
        self.RAAxisLabel = "R.A. Degrees (" + equinoxLabel + ")"
        self.decAxisLabel = "Dec. Degrees (" + equinoxLabel + ")"
        RADegStep = ticsDict[key]['RA']
        decDegStep = ticsDict[key]['dec']
    else:
        raise Exception(
            "axesLabels must be either 'sexagesimal' or 'decimal'")
    # Sample WCS coords along the bottom row (for RA) and left column
    # (for dec) of the image.
    xArray = numpy.arange(0, self.data.shape[1], 1)
    yArray = numpy.arange(0, self.data.shape[0], 1)
    xWCS = self.wcs.pix2wcs(
        xArray, numpy.zeros(xArray.shape[0], dtype=float))
    yWCS = self.wcs.pix2wcs(
        numpy.zeros(yArray.shape[0], dtype=float),
        yArray)
    xWCS = numpy.array(xWCS)
    yWCS = numpy.array(yWCS)
    ras = xWCS[:, 0]
    decs = yWCS[:, 1]
    RAEdges = numpy.array([ras[0], ras[-1]])
    RAMin = RAEdges.min()
    RAMax = RAEdges.max()
    decMin = decs.min()
    decMax = decs.max()
    # Work out if wrapped around
    midRAPix, midDecPix = self.wcs.wcs2pix((RAEdges[1] + RAEdges[0]) / 2.0,
                                           (decMax + decMin) / 2.0)
    if midRAPix < 0 or midRAPix > self.wcs.header['NAXIS1']:
        wrappedRA = True
    else:
        wrappedRA = False
    # Note RA, dec work in opposite sense below because E at left
    # Build interpolators mapping sky coords back to pixel positions
    # (interp1d needs monotonically increasing abscissae, hence the
    # reversal when the sampled coords decrease).
    if ras[1] < ras[0]:
        self.flipXAxis = False
        ra2x = interpolate.interp1d(ras[::-1], xArray[::-1], kind='linear')
    else:
        self.flipXAxis = True
        ra2x = interpolate.interp1d(ras, xArray, kind='linear')
    if decs[1] < decs[0]:
        self.flipYAxis = True
        dec2y = interpolate.interp1d(decs[::-1],
                                     yArray[::-1],
                                     kind='linear')
    else:
        self.flipYAxis = False
        dec2y = interpolate.interp1d(decs, yArray, kind='linear')
    # Choose the RA tick values, snapping the range to whole multiples
    # of the step; the wrapped case walks downwards through RA = 0.
    if not wrappedRA:
        RAPlotMin = RADegStep * math.modf(RAMin / RADegStep)[1]
        RAPlotMax = RADegStep * math.modf(RAMax / RADegStep)[1]
        if RAPlotMin < RAMin:
            RAPlotMin = RAPlotMin + RADegStep
        if RAPlotMax >= RAMax:
            RAPlotMax = RAPlotMax - RADegStep
        RADegs = numpy.arange(RAPlotMin, RAPlotMax + 0.0001, RADegStep)
    else:
        RAPlotMin = RADegStep * math.modf(RAMin / RADegStep)[1]
        RAPlotMax = RADegStep * math.modf(RAMax / RADegStep)[1]
        if RAPlotMin > RAMin:
            RAPlotMin = RAPlotMin - RADegStep
        if RAPlotMax <= RAMax:
            RAPlotMax = RAPlotMax + RADegStep
        # Shift the sampled RAs above RAMax down by 360 so the
        # interpolator sees a continuous axis through the wrap.
        for i in range(ras.shape[0]):
            if ras[i] >= RAMax and ras[i] <= 360.0:
                ras[i] = ras[i] - 360.0
        if ras[1] < ras[0]:
            ra2x = interpolate.interp1d(ras[::-1],
                                        xArray[::-1],
                                        kind='linear')
        else:
            ra2x = interpolate.interp1d(ras, xArray, kind='linear')
        RADegs = numpy.arange(RAPlotMin, RAPlotMax - 360.0 - 0.0001,
                              -RADegStep)
    # Dec tick values, snapped to whole multiples of the step.
    decPlotMin = decDegStep * math.modf(decMin / decDegStep)[1]
    decPlotMax = decDegStep * math.modf(decMax / decDegStep)[1]
    if decPlotMin < decMin:
        decPlotMin = decPlotMin + decDegStep
    if decPlotMax >= decMax:
        decPlotMax = decPlotMax - decDegStep
    decDegs = numpy.arange(decPlotMin, decPlotMax + 0.0001, decDegStep)
    # Render the tick values into label strings.
    if key == "major":
        if axesLabels == "sexagesimal":
            for r in RADegs:
                # Reassignment affects only the label text, not RADegs.
                if r < 0:
                    r = r + 360.0
                h, m, s = astCoords.decimal2hms(r, ":").split(":")
                hInt = int(round(float(h)))
                if ticsDict[key]['RA']['unit'] == 'h' and (
                    60.0 - float(m)
                ) < 0.01:  # Check for rounding error
                    hInt = hInt + 1
                if hInt < 10:
                    hString = "0" + str(hInt)
                else:
                    hString = str(hInt)
                mInt = int(round(float(m)))
                if ticsDict[key]['RA']['unit'] == 'm' and (
                    60.0 - float(s)
                ) < 0.01:  # Check for rounding error
                    mInt = mInt + 1
                if mInt < 10:
                    mString = "0" + str(mInt)
                else:
                    mString = str(mInt)
                sInt = int(round(float(s)))
                if sInt < 10:
                    sString = "0" + str(sInt)
                else:
                    sString = str(sInt)
                # Label precision follows the tick step's unit (h, m or s).
                if ticsDict[key]['RA']['unit'] == 'h':
                    rString = hString + "$^{\sf{h}}$"
                elif ticsDict[key]['RA']['unit'] == 'm':
                    rString = hString + "$^{\sf{h}}$" + mString + "$^{\sf{m}}$"
                else:
                    rString = hString + "$^{\sf{h}}$" + mString + "$^{\sf{m}}$" + sString + "$^{\sf{s}}$"
                RALabels.append(rString)
            for D in decDegs:
                d, m, s = astCoords.decimal2dms(D, ":").split(":")
                dInt = int(round(float(d)))
                if ticsDict[key]['dec']['unit'] == 'd' and (
                    60.0 - float(m)
                ) < 0.01:  # Check for rounding error
                    dInt = dInt + 1
                # Zero-pad and sign the degrees part explicitly.
                if dInt < 10 and dInt >= 0 and D > 0:
                    dString = "+0" + str(dInt)
                elif dInt > -10 and dInt <= 0 and D < 0:
                    dString = "-0" + str(abs(dInt))
                elif dInt >= 10:
                    dString = "+" + str(dInt)
                else:
                    dString = str(dInt)
                mInt = int(round(float(m)))
                if ticsDict[key]['dec']['unit'] == 'm' and (
                    60.0 - float(s)
                ) < 0.01:  # Check for rounding error
                    mInt = mInt + 1
                if mInt < 10:
                    mString = "0" + str(mInt)
                else:
                    mString = str(mInt)
                sInt = int(round(float(s)))
                if sInt < 10:
                    sString = "0" + str(sInt)
                else:
                    sString = str(sInt)
                if ticsDict[key]['dec']['unit'] == 'd':
                    dString = dString + DEG
                elif ticsDict[key]['dec']['unit'] == 'm':
                    dString = dString + DEG + mString + PRIME
                else:
                    dString = dString + DEG + mString + PRIME + sString +\
                        DOUBLE_PRIME
                decLabels.append(dString)
        elif axesLabels == "decimal":
            if not wrappedRA:
                RALabels = RALabels + RADegs.tolist()
            else:
                # Wrapped labels were walked down through 0; report them
                # back in the 0..360 range.
                nonNegativeLabels = []
                for r in RADegs:
                    if r < 0:
                        r = r + 360.0
                    nonNegativeLabels.append(r)
                RALabels = RALabels + nonNegativeLabels
            decLabels = decLabels + decDegs.tolist()
            # Format RALabels, decLabels to same number of d.p.
            dpNumRA = len(str(ticsDict['major']['RA']).split(".")[-1])
            dpNumDec = len(str(ticsDict['major']['dec']).split(".")[-1])
            for i in range(len(RALabels)):
                fString = "%." + str(dpNumRA) + "f"
                RALabels[i] = fString % (RALabels[i])
            for i in range(len(decLabels)):
                fString = "%." + str(dpNumDec) + "f"
                decLabels[i] = fString % (decLabels[i])
    # Dead branch while key is fixed to "major"; kept for the day minor
    # ticks come back.
    if key == 'minor':
        RALabels = RALabels + RADegs.shape[0] * ['']
        decLabels = decLabels + decDegs.shape[0] * ['']
    # Convert tick sky coords into pixel positions for pylab.xticks/yticks.
    RALocs = RALocs + ra2x(RADegs).tolist()
    decLocs = decLocs + dec2y(decDegs).tolist()
    self.ticsRA = [RALocs, RALabels]
    self.ticsDec = [decLocs, decLabels]
def save(self, fileName):
    """Saves the ImagePlot in any format that matplotlib can understand, as
    determined from the fileName extension.

    @type fileName: string
    @param fileName: path where plot will be written
    """
    # Render any pending changes before writing the figure to disk.
    pylab.draw()
    pylab.savefig(fileName)
def getTickSteps(self):
    """Chooses the appropriate WCS coordinate tick steps for the plot based
    on its size. Whether the ticks are decimal or sexagesimal is set by
    self.axesLabels.
    Note: minor ticks not used at the moment.

    @rtype: dictionary
    @return: tick step sizes for major, minor plot ticks, in format
        {'major', 'minor'}
    """
    # Aim for 5 major tick marks on a plot
    # Sample WCS coords along the bottom row and left column to measure
    # the plot's angular extent.
    xIndices = numpy.arange(0, self.data.shape[1], 1)
    yIndices = numpy.arange(0, self.data.shape[0], 1)
    bottomRowWCS = numpy.array(self.wcs.pix2wcs(
        xIndices, numpy.zeros(xIndices.shape[0], dtype=float)))
    leftColWCS = numpy.array(self.wcs.pix2wcs(
        numpy.zeros(yIndices.shape[0], dtype=float), yIndices))
    ras = bottomRowWCS[:, 0]
    decs = leftColWCS[:, 1]
    RAEdges = numpy.array([ras[0], ras[-1]])
    RAMin = RAEdges.min()
    RAMax = RAEdges.max()
    decMin = decs.min()
    decMax = decs.max()
    # Work out if wrapped around: the midpoint of the edge RAs falls
    # outside the pixel range when RA = 0 crosses the image.
    midRAPix, midDecPix = self.wcs.wcs2pix((RAEdges[1] + RAEdges[0]) / 2.0,
                                           (decMax + decMin) / 2.0)
    wrappedRA = midRAPix < 0 or midRAPix > self.wcs.header['NAXIS1']
    if wrappedRA:
        RAWidthDeg = (360.0 - RAMax) + RAMin
    else:
        RAWidthDeg = RAMax - RAMin
    decHeightDeg = decMax - decMin

    def largestStepIndex(steps, extentDeg, sexagesimal):
        # Index of the biggest step smaller than extent / 2.5 (sexagesimal
        # step tables store the size under the 'deg' key).
        best = 0
        for i in range(len(steps)):
            stepDeg = steps[i]['deg'] if sexagesimal else steps[i]
            if extentDeg / 2.5 > stepDeg:
                best = i
        return best

    ticsDict = {'major': {}, 'minor': {}}
    if self.axesLabels == "sexagesimal":
        i = largestStepIndex(RA_TICK_STEPS, RAWidthDeg, True)
        ticsDict['major']['RA'] = RA_TICK_STEPS[i]
        ticsDict['minor']['RA'] = RA_TICK_STEPS[i - 1]
        j = largestStepIndex(DEC_TICK_STEPS, decHeightDeg, True)
        ticsDict['major']['dec'] = DEC_TICK_STEPS[j]
        ticsDict['minor']['dec'] = DEC_TICK_STEPS[j - 1]
        return ticsDict
    elif self.axesLabels == "decimal":
        i = largestStepIndex(DECIMAL_TICK_STEPS, RAWidthDeg, False)
        ticsDict['major']['RA'] = DECIMAL_TICK_STEPS[i]
        ticsDict['minor']['RA'] = DECIMAL_TICK_STEPS[i - 1]
        j = largestStepIndex(DECIMAL_TICK_STEPS, decHeightDeg, False)
        ticsDict['major']['dec'] = DECIMAL_TICK_STEPS[j]
        ticsDict['minor']['dec'] = DECIMAL_TICK_STEPS[j - 1]
        return ticsDict
    else:
        raise Exception(
            "axesLabels must be either 'sexagesimal' or 'decimal'")
| boada/astLib | astLib/astPlots.py | Python | lgpl-2.1 | 52,950 | [
"Gaussian"
] | 58addaea7a6d874a6fa414c3d20945288548621e62577476f46601ab08bf557a |
from numpy import (logical_and, asarray, pi, zeros_like,
piecewise, array, arctan2, tan, zeros, arange, floor)
from numpy.core.umath import (sqrt, exp, greater, less, cos, add, sin,
less_equal, greater_equal)
# From splinemodule.c
from .spline import cspline2d, sepfir2d
from scipy.special import comb, gamma
__all__ = ['spline_filter', 'bspline', 'gauss_spline', 'cubic', 'quadratic',
'cspline1d', 'qspline1d', 'cspline1d_eval', 'qspline1d_eval']
def factorial(n):
    """Return n! computed via the Gamma function, so non-integer n works."""
    return gamma(1.0 + n)
def spline_filter(Iin, lmbda=5.0):
    """Smoothing spline (cubic) filtering of a rank-2 array.

    Filter an input data set, `Iin`, using a (cubic) smoothing spline of
    fall-off `lmbda`.
    """
    typecode = Iin.dtype.char
    # Reconstruction kernel for cubic spline coefficients.
    kernel = array([1.0, 4.0, 1.0], 'f') / 6.0
    if typecode in 'FD':
        # Complex input: filter the real and imaginary parts separately.
        Iin = Iin.astype('F')
        realPart = sepfir2d(cspline2d(Iin.real, lmbda), kernel, kernel)
        imagPart = sepfir2d(cspline2d(Iin.imag, lmbda), kernel, kernel)
        return (realPart + 1j * imagPart).astype(typecode)
    if typecode in 'fd':
        return sepfir2d(cspline2d(Iin, lmbda), kernel, kernel).astype(typecode)
    raise TypeError("Invalid data type for Iin")
# Cache: order -> (piece functions, condition functions), since the
# generated closures are reused for every bspline() call of that order.
_splinefunc_cache = {}


def _bspline_piecefunctions(order):
    """Returns the function defined over the left-side pieces for a bspline of
    a given order.

    The 0th piece is the first one less than 0. The last piece is a function
    identical to 0 (returned as the constant 0). (There are order//2 + 2 total
    pieces).

    Also returns the condition functions that when evaluated return boolean
    arrays for use with `numpy.piecewise`.
    """
    try:
        return _splinefunc_cache[order]
    except KeyError:
        pass

    def condfuncgen(num, val1, val2):
        # num selects the interval shape: 0 -> closed [val2, val1],
        # 2 -> open-ended left tail (x <= val2), else half-open [val2, val1).
        if num == 0:
            return lambda x: logical_and(less_equal(x, val1),
                                         greater_equal(x, val2))
        elif num == 2:
            return lambda x: less_equal(x, val2)
        else:
            return lambda x: logical_and(less(x, val1),
                                         greater_equal(x, val2))

    last = order // 2 + 2
    # Odd orders have knots at the integers, even orders at half-integers.
    if order % 2:
        startbound = -1.0
    else:
        startbound = -0.5
    condfuncs = [condfuncgen(0, 0, startbound)]
    bound = startbound
    for num in range(1, last - 1):
        condfuncs.append(condfuncgen(1, bound, bound - 1))
        bound = bound - 1
    condfuncs.append(condfuncgen(2, 0, -(order + 1) / 2.0))
    # final value of bound is used in piecefuncgen below
    # the functions to evaluate are taken from the left-hand side
    # in the general expression derived from the central difference
    # operator (because they involve fewer terms).
    fval = factorial(order)

    def piecefuncgen(num):
        Mk = order // 2 - num
        if (Mk < 0):
            return 0  # final function is 0
        # Alternating-sign binomial coefficients of the truncated-power
        # expansion, normalised by order!.
        coeffs = [(1 - 2 * (k % 2)) * float(comb(order + 1, k, exact=1)) / fval
                  for k in range(Mk + 1)]
        shifts = [-bound - k for k in range(Mk + 1)]

        def thefunc(x):
            res = 0.0
            for k in range(Mk + 1):
                res += coeffs[k] * (x + shifts[k]) ** order
            return res
        return thefunc

    funclist = [piecefuncgen(k) for k in range(last)]

    _splinefunc_cache[order] = (funclist, condfuncs)

    return funclist, condfuncs
def bspline(x, n):
    """B-spline basis function of order n.

    Notes
    -----
    Evaluated with numpy.piecewise over the mirrored argument, using the
    cached piece/condition functions for this order.
    """
    # The basis is symmetric, so only the left-hand pieces are needed;
    # number of pieces on the left side is (n+1)/2.
    mirrored = -abs(asarray(x))
    pieces, conds = _bspline_piecefunctions(n)
    masks = [cond(mirrored) for cond in conds]
    return piecewise(mirrored, masks, pieces)
def gauss_spline(x, n):
    """Gaussian approximation to B-spline basis function of order n.

    Parameters
    ----------
    n : int
        The order of the spline. Must be nonnegative, i.e., n >= 0

    References
    ----------
    .. [1] Bouma H., Vilanova A., Bescos J.O., ter Haar Romeny B.M., Gerritsen
       F.A. (2007) Fast and Accurate Gaussian Derivatives Based on B-Splines. In:
       Sgallari F., Murli A., Paragios N. (eds) Scale Space and Variational
       Methods in Computer Vision. SSVM 2007. Lecture Notes in Computer
       Science, vol 4485. Springer, Berlin, Heidelberg
    """
    # A B-spline of order n approaches a Gaussian of variance (n+1)/12.
    variance = (n + 1) / 12.0
    norm = 1 / sqrt(2 * pi * variance)
    return norm * exp(-x ** 2 / 2 / variance)
def cubic(x):
    """A cubic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
    """
    ax = abs(asarray(x))
    out = zeros_like(ax)
    # The basis is symmetric with support |x| < 2, split at |x| = 1.
    inner = less(ax, 1)
    outer = ~inner & less(ax, 2)
    if inner.any():
        a = ax[inner]
        out[inner] = 2.0 / 3 - 1.0 / 2 * a ** 2 * (2 - a)
    if outer.any():
        a = ax[outer]
        out[outer] = 1.0 / 6 * (2 - a) ** 3
    return out
def quadratic(x):
    """A quadratic B-spline.

    This is a special case of `bspline`, and equivalent to ``bspline(x, 2)``.
    """
    ax = abs(asarray(x))
    out = zeros_like(ax)
    # Support is |x| < 1.5, split at |x| = 0.5.
    inner = less(ax, 0.5)
    outer = ~inner & less(ax, 1.5)
    if inner.any():
        a = ax[inner]
        out[inner] = 0.75 - a ** 2
    if outer.any():
        a = ax[outer]
        out[outer] = (a - 1.5) ** 2 / 2.0
    return out
def _coeff_smooth(lam):
    """Pole radius and angle of the recursive cubic smoothing-spline filter
    for smoothing parameter `lam`.
    """
    root = sqrt(3 + 144 * lam)
    xi = 1 - 96 * lam + 24 * lam * root
    omeg = arctan2(sqrt(144 * lam - 1), sqrt(xi))
    rho = (24 * lam - 1 - sqrt(xi)) / (24 * lam)
    rho = rho * sqrt((48 * lam + 24 * lam * root) / xi)
    return rho, omeg
def _hc(k, cs, rho, omega):
    """Causal impulse response of the smoothing filter at sample(s) k;
    zero for k < 0 via the greater(k, -1) mask.
    """
    causalMask = greater(k, -1)
    return causalMask * cs * (rho ** k) * sin(omega * (k + 1)) / sin(omega)
def _hs(k, cs, rho, omega):
    """Symmetric (anti-causal + causal) impulse response of the smoothing
    filter at sample(s) k; even in k via abs(k).
    """
    rho2 = rho * rho
    ak = abs(k)
    c0 = (cs * cs * (1 + rho2) / (1 - rho2) /
          (1 - 2 * rho2 * cos(2 * omega) + rho ** 4))
    slope = (1 - rho2) / (1 + rho2) / tan(omega)
    return c0 * rho ** ak * (cos(omega * ak) + slope * sin(omega * ak))
def _cubic_smooth_coeff(signal, lamb):
    """Cubic smoothing-spline coefficients of `signal` for smoothing
    parameter `lamb`, via a causal + anti-causal second-order recursive
    filter with mirror-symmetric boundary handling.
    """
    rho, omega = _coeff_smooth(lamb)
    # Filter gain derived from the pole location.
    cs = 1 - 2 * rho * cos(omega) + rho * rho
    K = len(signal)
    yp = zeros((K,), signal.dtype.char)
    k = arange(K)
    # Initialise the first two outputs of the causal (forward) pass from
    # the whole signal using the filter's causal impulse response _hc.
    yp[0] = (_hc(0, cs, rho, omega) * signal[0] +
             add.reduce(_hc(k + 1, cs, rho, omega) * signal))
    yp[1] = (_hc(0, cs, rho, omega) * signal[0] +
             _hc(1, cs, rho, omega) * signal[1] +
             add.reduce(_hc(k + 2, cs, rho, omega) * signal))
    # Causal second-order recursion over the remaining samples.
    for n in range(2, K):
        yp[n] = (cs * signal[n] + 2 * rho * cos(omega) * yp[n - 1] -
                 rho * rho * yp[n - 2])
    y = zeros((K,), signal.dtype.char)
    # Initialise the last two outputs of the anti-causal (backward) pass
    # from the reversed signal using the symmetric response _hs.
    y[K - 1] = add.reduce((_hs(k, cs, rho, omega) +
                           _hs(k + 1, cs, rho, omega)) * signal[::-1])
    y[K - 2] = add.reduce((_hs(k - 1, cs, rho, omega) +
                           _hs(k + 2, cs, rho, omega)) * signal[::-1])
    # Anti-causal second-order recursion over the causal output yp.
    for n in range(K - 3, -1, -1):
        y[n] = (cs * yp[n] + 2 * rho * cos(omega) * y[n + 1] -
                rho * rho * y[n + 2])
    return y
def _cubic_coeff(signal):
    """Interpolating cubic-spline coefficients of `signal` (no smoothing),
    computed by inverting the [1, 4, 1]/6 filter with a forward and a
    backward first-order recursion (mirror-symmetric boundaries).
    """
    # Pole of the inverse filter.
    zi = -2 + sqrt(3)
    K = len(signal)
    forward = zeros((K,), signal.dtype.char)
    # Mirror-boundary initialisation of the forward pass.
    forward[0] = signal[0] + zi * add.reduce(zi ** arange(K) * signal)
    for k in range(1, K):
        forward[k] = signal[k] + zi * forward[k - 1]
    coeffs = zeros((K,), signal.dtype)
    coeffs[K - 1] = zi / (zi - 1) * forward[K - 1]
    # Backward pass.
    for k in range(K - 2, -1, -1):
        coeffs[k] = zi * (coeffs[k + 1] - forward[k])
    return coeffs * 6.0
def _quadratic_coeff(signal):
    """Interpolating quadratic-spline coefficients of `signal`, computed by
    inverting the [1, 6, 1]/8 filter with a forward and a backward
    first-order recursion (mirror-symmetric boundaries).
    """
    # Pole of the inverse filter.
    zi = -3 + 2 * sqrt(2.0)
    K = len(signal)
    forward = zeros((K,), signal.dtype.char)
    # Mirror-boundary initialisation of the forward pass.
    forward[0] = signal[0] + zi * add.reduce(zi ** arange(K) * signal)
    for k in range(1, K):
        forward[k] = signal[k] + zi * forward[k - 1]
    coeffs = zeros((K,), signal.dtype.char)
    coeffs[K - 1] = zi / (zi - 1) * forward[K - 1]
    # Backward pass.
    for k in range(K - 2, -1, -1):
        coeffs[k] = zi * (coeffs[k + 1] - forward[k])
    return coeffs * 8.0
def cspline1d(signal, lamb=0.0):
    """
    Compute cubic spline coefficients for rank-1 array.

    Find the cubic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 4.0, 1.0]/ 6.0 .

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient, default is 0.0.

    Returns
    -------
    c : ndarray
        Cubic spline coefficients.
    """
    # Zero smoothing selects the exact interpolating-spline filter.
    if lamb == 0.0:
        return _cubic_coeff(signal)
    return _cubic_smooth_coeff(signal, lamb)
def qspline1d(signal, lamb=0.0):
    """Compute quadratic spline coefficients for rank-1 array.

    Parameters
    ----------
    signal : ndarray
        A rank-1 array representing samples of a signal.
    lamb : float, optional
        Smoothing coefficient (must be zero for now).

    Returns
    -------
    c : ndarray
        Quadratic spline coefficients.

    See Also
    --------
    qspline1d_eval : Evaluate a quadratic spline at the new set of points.

    Notes
    -----
    Find the quadratic spline coefficients for a 1-D signal assuming
    mirror-symmetric boundary conditions. To obtain the signal back from the
    spline representation mirror-symmetric-convolve these coefficients with a
    length 3 FIR window [1.0, 6.0, 1.0]/ 8.0 .

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a quadratic spline:

    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import qspline1d, qspline1d_eval
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += np.random.randn(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = qspline1d_eval(qspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()
    """
    # Only the exact (non-smoothing) filter is implemented.
    if lamb == 0.0:
        return _quadratic_coeff(signal)
    raise ValueError("Smoothing quadratic splines not supported yet.")
def cspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a spline at the new set of points.

    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of:

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.
    """
    # Rescale the evaluation points onto the knot index grid.
    newx = (asarray(newx) - x0) / float(dx)
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    below = newx < 0
    above = newx > (N - 1)
    inside = ~(below | above)
    # Mirror-symmetric boundaries: reflect out-of-range points back in
    # by recursing on the reflected coordinates.
    res[below] = cspline1d_eval(cj, -newx[below])
    res[above] = cspline1d_eval(cj, 2 * (N - 1) - newx[above])
    interior = newx[inside]
    if interior.size == 0:
        return res
    vals = zeros_like(interior, dtype=cj.dtype)
    # The cubic B-spline has support over 4 neighbouring knots.
    base = floor(interior - 2).astype(int) + 1
    for offset in range(4):
        knots = base + offset
        clipped = knots.clip(0, N - 1)  # handle edge cases
        vals += cj[clipped] * cubic(interior - knots)
    res[inside] = vals
    return res
def qspline1d_eval(cj, newx, dx=1.0, x0=0):
    """Evaluate a quadratic spline at the new set of points.

    Parameters
    ----------
    cj : ndarray
        Quadratic spline coefficients
    newx : ndarray
        New set of points.
    dx : float, optional
        Old sample-spacing, the default value is 1.0.
    x0 : int, optional
        Old origin, the default value is 0.

    Returns
    -------
    res : ndarray
        Evaluated quadratic spline values.

    See Also
    --------
    qspline1d : Compute quadratic spline coefficients for rank-1 array.

    Notes
    -----
    `dx` is the old sample-spacing while `x0` was the old origin. In
    other-words the old-sample points (knot-points) for which the `cj`
    represent spline coefficients were at equally-spaced points of::

      oldx = x0 + j*dx  j=0...N-1, with N=len(cj)

    Edges are handled using mirror-symmetric boundary conditions.

    Examples
    --------
    We can filter a signal to reduce and smooth out high-frequency noise with
    a quadratic spline:

    >>> import matplotlib.pyplot as plt
    >>> from scipy.signal import qspline1d, qspline1d_eval
    >>> sig = np.repeat([0., 1., 0.], 100)
    >>> sig += np.random.randn(len(sig))*0.05  # add noise
    >>> time = np.linspace(0, len(sig))
    >>> filtered = qspline1d_eval(qspline1d(sig), time)
    >>> plt.plot(sig, label="signal")
    >>> plt.plot(time, filtered, label="filtered")
    >>> plt.legend()
    >>> plt.show()

    """
    # float(dx) guards against integer division with an integer spacing and
    # matches the behavior of cspline1d_eval.
    newx = (asarray(newx) - x0) / float(dx)
    # Allocate with the coefficient dtype so complex-valued cj are not
    # silently truncated to newx's float dtype (cspline1d_eval does the same).
    res = zeros_like(newx, dtype=cj.dtype)
    if res.size == 0:
        return res
    N = len(cj)
    cond1 = newx < 0
    cond2 = newx > (N - 1)
    cond3 = ~(cond1 | cond2)
    # handle general mirror-symmetry
    res[cond1] = qspline1d_eval(cj, -newx[cond1])
    res[cond2] = qspline1d_eval(cj, 2 * (N - 1) - newx[cond2])
    newx = newx[cond3]
    if newx.size == 0:
        return res
    result = zeros_like(newx, dtype=cj.dtype)
    # Each evaluation point is influenced by the three nearest knots.
    jlower = floor(newx - 1.5).astype(int) + 1
    for i in range(3):
        thisj = jlower + i
        indj = thisj.clip(0, N - 1)  # handle edge cases
        result += cj[indj] * quadratic(newx - thisj)
    res[cond3] = result
    return res
| pizzathief/scipy | scipy/signal/bsplines.py | Python | bsd-3-clause | 13,730 | [
"Gaussian"
] | 578d9b7e44818018cc0d045588a9f65e8a1891b99e1ca12b50ebde826a243ade |
import sys
import os
import numpy as np
from scipy.stats import norm
import math
import random
import cv2
import run
def make_sharp(k, sd):
    '''Create a sharpen kernel.

    Input:
      k - the radius of the kernel.
      sd - the standard deviation of the gaussian filter used to make the kernel.

    Output:
      output - a numpy array of shape (2k+1, 2k+1) and dtype float.

    The sharpen filter is constructed by first taking a filter with a 2 in the
    center and 0's everywhere else, and subtracting from that a gaussian filter.
    '''
    # 1-D gaussian profile sampled at the integer offsets -k..k, normalized so
    # that the resulting (separable) 2-D gaussian sums to 1.
    offsets = np.arange(-k, k + 1)
    gauss_1d = norm.pdf(offsets, scale=sd)
    gauss_1d = gauss_1d / gauss_1d.sum()
    # Separable 2-D gaussian: outer product of the normalized 1-D profile.
    gauss_2d = np.outer(gauss_1d, gauss_1d)
    # sharpen = 2*impulse - gaussian
    kernel = -gauss_2d
    kernel[k, k] += 2.
    return kernel
def test():
    '''This script will perform a unit test on your function, and provide useful
    output.
    '''
    np.set_printoptions(precision=3)
    # Paired (k, sd) inputs and, below, their expected kernels.  Expected
    # values are rounded to 3 decimal places; the value comparison at the end
    # allows a +/-0.005 tolerance to absorb that rounding.
    ks = [1, 2, 1, 2, 1]
    sds = [1, 2, 3, 4, 5]
    outputs = []
    # 1,1
    y = np.array([[-0.075, -0.124, -0.075],
                  [-0.124, 1.796, -0.124],
                  [-0.075, -0.124, -0.075]])
    outputs.append(y)
    # 2,2
    y = np.array([[-0.023, -0.034, -0.038, -0.034, -0.023],
                  [-0.034, -0.049, -0.056, -0.049, -0.034],
                  [-0.038, -0.056, 1.937, -0.056, -0.038],
                  [-0.034, -0.049, -0.056, -0.049, -0.034],
                  [-0.023, -0.034, -0.038, -0.034, -0.023]])
    outputs.append(y)
    # 1,3
    y = np.array([[-0.107, -0.113, -0.107],
                  [-0.113, 1.880, -0.113],
                  [-0.107, -0.113, -0.107]])
    outputs.append(y)
    # 2,4
    y = np.array([[-0.035, -0.039, -0.04 , -0.039, -0.035],
                  [-0.039, -0.042, -0.044, -0.042, -0.039],
                  [-0.04 , -0.044, 1.955, -0.044, -0.04 ],
                  [-0.039, -0.042, -0.044, -0.042, -0.039],
                  [-0.035, -0.039, -0.04 , -0.039, -0.035]])
    outputs.append(y)
    # 1,5
    y = np.array([[-0.11 , -0.112, -0.11 ],
                  [-0.112, 1.886, -0.112],
                  [-0.11 , -0.112, -0.11 ]])
    outputs.append(y)
    # Check type, shape, dtype and values in turn, returning False on the
    # first mismatch.  Diagnostic messages are only printed when this module
    # is run as a script (hence the __name__ guards around each print).
    for k, sd, output in zip(ks, sds, outputs):
        if __name__ == "__main__":
            print "k:{}, sd:{}".format(k, sd)
        usr_out = make_sharp(k, sd)
        if not type(usr_out) == type(output):
            if __name__ == "__main__":
                print "Error- output has type {}. Expected type is {}.".format(
                    type(usr_out), type(output))
            return False
        if not usr_out.shape == output.shape:
            if __name__ == "__main__":
                print "Error- output has shape {}. Expected shape is {}.".format(
                    usr_out.shape, output.shape)
            return False
        if not usr_out.dtype == output.dtype:
            if __name__ == "__main__":
                print "Error- output has dtype {}. Expected dtype is {}.".format(
                    usr_out.dtype, output.dtype)
            return False
        if not np.all(np.abs(usr_out - output) < .005):
            if __name__ == "__main__":
                print "Error- output has value:\n{}\nExpected value:\n{}".format(
                    usr_out, output)
            return False
        if __name__ == "__main__":
            print "Passed."
    if __name__ == "__main__":
        print "Success."
    return True
if __name__ == "__main__":
    # Testing code
    # NOTE: the string below uses a backslash line-continuation inside the
    # literal, so the second line must stay unindented to avoid embedding
    # extra spaces in the printed message.
    print "Performing unit test. Answers will be accepted as long as they are \
within .005 of the input."
    test()
| fieraloca/CODEPROJ | PYTHON/COMP_PHOTO/hw1/part2.py | Python | mit | 3,427 | [
"Gaussian"
] | 1e6ec04582f447d97af8c6a83a1d6e83cd4571dbe42634dc8bf40ccc4a7704f2 |
#########################################################################################
# LSF.py
# 10.11.2014
# Author: A.T.
#########################################################################################
""" LSF.py is a DIRAC independent class representing LSF batch system.
LSF objects are used as backend batch system representation for
LocalComputingElement and SSHComputingElement classes
"""
__RCSID__ = "$Id$"
import re
import commands
import os
class LSF(object):
    """DIRAC-independent backend for the LSF batch system.

    Every public method takes keyword arguments only and returns a result
    dictionary carrying at least a 'Status' key (0 on success, non-zero on
    failure) and, on failure, a 'Message' key with the error text.  All
    interaction with LSF goes through the command-line tools (bsub, bkill,
    bjobs) via commands.getstatusoutput (Python 2 only).
    """

    def submitJob(self, **kwargs):
        """ Submit nJobs to the LSF batch system

        Required kwargs: Executable, OutputDir, ErrorDir, WorkDir,
        SubmitOptions, Queue.  Optional: NJobs (default 1), Preamble (a shell
        command executed before each bsub).  On success the result dict
        carries 'Jobs', the list of LSF job IDs parsed from bsub's output.
        """
        resultDict = {}
        MANDATORY_PARAMETERS = ['Executable', 'OutputDir', 'ErrorDir',
                                'WorkDir', 'SubmitOptions', 'Queue']
        for argument in MANDATORY_PARAMETERS:
            if argument not in kwargs:
                resultDict['Status'] = -1
                resultDict['Message'] = 'No %s' % argument
                return resultDict
        nJobs = kwargs.get('NJobs', 1)
        preamble = kwargs.get('Preamble')
        outputs = []
        outputDir = kwargs['OutputDir']
        errorDir = kwargs['ErrorDir']
        executable = kwargs['Executable']
        queue = kwargs['Queue']
        submitOptions = kwargs['SubmitOptions']
        # Paths may contain environment variables (e.g. $HOME); expand them
        # before handing them to bsub.
        outputDir = os.path.expandvars(outputDir)
        errorDir = os.path.expandvars(errorDir)
        executable = os.path.expandvars(executable)
        for _i in xrange(int(nJobs)):
            cmd = '%s; ' % preamble if preamble else ''
            cmd += "bsub -o %s -e %s -q %s -J DIRACPilot %s %s" % (outputDir,
                                                                   errorDir,
                                                                   queue,
                                                                   submitOptions,
                                                                   executable)
            status, output = commands.getstatusoutput(cmd)
            if status == 0:
                outputs.append(output)
            else:
                # Stop submitting on the first failure; 'output' then holds
                # the failing bsub's message.
                break
        if outputs:
            resultDict['Status'] = 0
            resultDict['Jobs'] = []
            for output in outputs:
                # bsub reports e.g. "Job <12345> is submitted ..."; extract
                # the numeric job id.
                match = re.search(r'Job <(\d*)>', output)
                if match:
                    resultDict['Jobs'].append(match.groups()[0])
        else:
            resultDict['Status'] = status
            resultDict['Message'] = output
        return resultDict

    def killJob(self, **kwargs):
        """ Kill jobs in the given list

        Required kwargs: JobIDList.  The result dict carries 'Successful' and
        'Failed' job-id lists; Status is 1 when at least one kill failed.
        """
        resultDict = {}
        MANDATORY_PARAMETERS = ['JobIDList']
        for argument in MANDATORY_PARAMETERS:
            if argument not in kwargs:
                resultDict['Status'] = -1
                resultDict['Message'] = 'No %s' % argument
                return resultDict
        jobIDList = kwargs.get('JobIDList')
        if not jobIDList:
            resultDict['Status'] = -1
            resultDict['Message'] = 'Empty job list'
            return resultDict
        successful = []
        failed = []
        for job in jobIDList:
            status, output = commands.getstatusoutput('bkill %s' % job)
            if status != 0:
                failed.append(job)
            else:
                successful.append(job)
        resultDict['Status'] = 0
        if failed:
            resultDict['Status'] = 1
            # NOTE(review): this reports the output of the *last* bkill call,
            # which may belong to a successful kill rather than a failure.
            resultDict['Message'] = output
        resultDict['Successful'] = successful
        resultDict['Failed'] = failed
        return resultDict

    def getCEStatus(self, **kwargs):
        """ Method to return information on running and pending jobs.

        Required kwargs: Queue.  Counts the PEND/PSUSP (waiting) and
        RUN/USUSP (running) lines of `bjobs -q <queue> -a`.
        """
        resultDict = {}
        MANDATORY_PARAMETERS = ['Queue']
        for argument in MANDATORY_PARAMETERS:
            if argument not in kwargs:
                resultDict['Status'] = -1
                resultDict['Message'] = 'No %s' % argument
                return resultDict
        queue = kwargs['Queue']
        cmd = "bjobs -q %s -a" % queue
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            resultDict['Status'] = status
            resultDict['Message'] = output
            return resultDict
        waitingJobs = 0
        runningJobs = 0
        lines = output.split("\n")
        for line in lines:
            # Suspended jobs count toward the state they were suspended from:
            # PSUSP with the pending jobs, USUSP with the running ones.
            if line.count("PEND") or line.count('PSUSP'):
                waitingJobs += 1
            if line.count("RUN") or line.count('USUSP'):
                runningJobs += 1
        # Final output
        resultDict['Status'] = 0
        resultDict["Waiting"] = waitingJobs
        resultDict["Running"] = runningJobs
        return resultDict

    def getJobStatus(self, **kwargs):
        """ Get the status information for the given list of jobs

        Required kwargs: JobIDList.  Returns, under 'Jobs', a dict mapping
        each job id to one of 'Done', 'Running', 'Waiting' or 'Unknown',
        derived from the STAT column of `bjobs` output.
        """
        resultDict = {}
        MANDATORY_PARAMETERS = ['JobIDList']
        for argument in MANDATORY_PARAMETERS:
            if argument not in kwargs:
                resultDict['Status'] = -1
                resultDict['Message'] = 'No %s' % argument
                return resultDict
        jobIDList = kwargs['JobIDList']
        if not jobIDList:
            resultDict['Status'] = -1
            resultDict['Message'] = 'Empty job list'
            return resultDict
        cmd = 'bjobs ' + ' '.join(jobIDList)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            resultDict['Status'] = status
            resultDict['Message'] = output
            return resultDict
        # Normalize line endings before scanning the tabular output.
        output = output.replace('\r', '')
        lines = output.split('\n')
        statusDict = {}
        for job in jobIDList:
            statusDict[job] = 'Unknown'
            for line in lines:
                if line.find(job) != -1:
                    if line.find('UNKWN') != -1:
                        statusDict[job] = 'Unknown'
                    else:
                        # STAT is the third whitespace-separated column.
                        lsfStatus = line.split()[2]
                        if lsfStatus in ['DONE', 'EXIT']:
                            statusDict[job] = 'Done'
                        elif lsfStatus in ['RUN', 'SSUSP']:
                            statusDict[job] = 'Running'
                        elif lsfStatus in ['PEND', 'PSUSP']:
                            statusDict[job] = 'Waiting'
        # Final output
        status = 0
        resultDict['Status'] = 0
        resultDict['Jobs'] = statusDict
        return resultDict
| chaen/DIRAC | Resources/Computing/BatchSystems/LSF.py | Python | gpl-3.0 | 5,616 | [
"DIRAC"
] | 8d3e4ccedccb69d91643ebc452090582edd2e8c0d0ccb1c498202fa7a3211dd3 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCheckmGenome(PythonPackage):
    """Assess the quality of microbial genomes recovered from isolates, single
    cells, and metagenomes"""

    homepage = "https://ecogenomics.github.io/CheckM"
    pypi = "checkm-genome/checkm-genome-1.0.11.tar.gz"

    # Both available versions are marked deprecated (the package below pins
    # Python 2.7, so newer Spack deployments should prefer a newer release).
    version('1.0.13', sha256='ffb7e4966c0fac07c7e6e7db6f6eb5b48587fa83987f8a68efbaff2afb7da82e', deprecated=True)
    version('1.0.11', sha256='e475d9817d12fa771dbccc80f47758b742fc67c25261dc8ca0c0dc898c2a5190', deprecated=True)

    # Non-Python tools that CheckM shells out to at runtime.
    depends_on('hmmer@3.1b1:', type=('build', 'run'))
    depends_on('pplacer', type=('build', 'run'))
    depends_on('prodigal@2.6.1:', type=('build', 'run'))
    # Python-2.7-only package.
    depends_on('python@2.7.0:2.7', type=('build', 'run'))
    depends_on('py-backports-functools-lru-cache', type=('build', 'run'), when='^python@:3.2')
    # Python library dependencies.
    depends_on('py-numpy@1.8.0:', type=('build', 'run'))
    depends_on('py-scipy@0.9.0:', type=('build', 'run'))
    depends_on('py-matplotlib@1.3.1:2.2.3', type=('build', 'run'))
    depends_on('py-pysam@0.8.3:', type=('build', 'run'))
    depends_on('py-dendropy@4.0.0:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-checkm-genome/package.py | Python | lgpl-2.1 | 1,317 | [
"pysam"
] | 0c9dae239955b771939676973e9233ad8fe0433d8f6659b96502ff1459ed00fc |
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing SAS, implemented as an easyblock
"""
import os
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.run import run_cmd_qa
class EB_SAS(EasyBlock):
    """Support for building/installing SAS."""

    def __init__(self, *args, **kwargs):
        """Custom constructor for SAS easyblock, initialize custom class variables."""
        super(EB_SAS, self).__init__(*args, **kwargs)

        # Use default SAS Installation Data File path
        self.license_file = ''
        # Set custom SAS Installation Data File path if defined and existing
        if self.cfg['license_file'] and os.path.isfile(self.cfg['license_file']):
            self.license_file = self.cfg['license_file']
            self.log.info("Custom SAS Installation Data File found: %s", self.license_file)

    def configure_step(self):
        """No custom configuration procedure for SAS."""
        pass

    def build_step(self):
        """No custom build procedure for SAS."""
        pass

    def install_step(self):
        """Custom install procedure for SAS: drive the interactive console
        installer by answering its prompts via run_cmd_qa."""
        # Literal prompts emitted by `setup.sh -console` mapped to the answers
        # to feed; an empty string accepts the installer's default.
        qa = {
            "SAS Home:": self.installdir,
            "Install SAS Software (default: Yes):": '',
            "Configure SAS Software (default: Yes):": '',
            "SAS Installation Data File:": self.license_file,
            "Press Enter to continue:": '',
            "Configure as a Unicode server (default: No):": 'N',
            "SAS/ACCESS Interface to MySQL (default: Yes):": 'N',
            "SAS/ACCESS Interface to Oracle (default: Yes):": 'N',
            "SAS/ACCESS Interface to Sybase (default: Yes):": 'N',
            "SAS/ACCESS Interface to SAP ASE (default: Yes):": 'N',
            "Use PAM Authentication (default: No):": 'N',
            "Port Number:": '',
            "Configure SAS Studio Basic (default: Yes):": 'N',
            "Press Enter to finish:": '',
            "Global Standards Library:": os.path.join(self.installdir, 'cstGlobalLibrary'),
            "Sample Library:": os.path.join(self.installdir, 'cstSampleLibrary'),
        }
        # Regex-matched multi-line menu prompts; numeric answers pick entries.
        std_qa = {
            r"Incomplete Deployment\s*(.*[^:])+Selection:": '2',  # 2: Ignore previous deployment and start again
            r"Select a language(.*[^:]\s*\n)+Selection:": '',
            r"Select Deployment Task\s*(.*[^:]\s*\n)+Selection:": '',
            r"Specify SAS Home\s*(.*[^:]\s*\n)+Selection:": '2',  # Create a new SAS Home
            r"Select Deployment Type\s*(.*[^:]\n)+Selection:": '2',  # 2: Install SAS Foundation
            r"Select Products to Install\s*(.*[^:]\n)+Selection:": '1',  # SAS Foundation
            r"Product\s*(.*[^:]\n)+Selections:": '',
            r"Select Language Support\s*(.*[^:]\n)+Selections:": '',
            r"Select Regional Settings\s*(.*[^:]\n)+Selection:": '',
            r"Select Support Option\s*(.*[^:]\n)+Selection:": '2',  # 2: Do Not Send
            r"Select SAS Foundation Products(.*[^:]\s*\n)+Selection:": '',
        }
        # Output patterns that mean "installer still busy, no answer expected".
        no_qa = [
            r"\.\.\.$",
        ]
        run_cmd_qa("./setup.sh -console", qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)

    def sanity_check_step(self):
        """Custom sanity check for SAS: the 'sas' binary and key directories
        must exist in the installation."""
        custom_paths = {
            'files': [os.path.join('SASFoundation', self.version, 'sas')],
            'dirs': ['licenses', os.path.join('SASFoundation', self.version, 'bin')],
        }
        super(EB_SAS, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_req_guess(self):
        """Custom path locations for SAS: put the SASFoundation directory on PATH."""
        return {
            'PATH': [os.path.join('SASFoundation', self.version)],
        }
| hpcuantwerpen/easybuild-easyblocks | easybuild/easyblocks/s/sas.py | Python | gpl-2.0 | 4,729 | [
"ASE"
] | 31210aa4d7910d8907dbf8b49484822444eb61d56588fc7411c933085a2305f0 |
# $Id$
#
# Copyright (C) 2008-2011 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import Chem
from rdkit import RDConfig
import numpy
import math
import sys
import copy
import pprint
from rdkit.six import cmp
# Module-level handle on RDKit's periodic table (used below to look up
# element symbols by atomic number).
periodicTable = Chem.GetPeriodicTable()
class Font(object):
    """Lightweight font specification passed along with canvas text calls.

    The class attributes hold the defaults; the constructor overrides any
    attribute whose argument is truthy (so passing 0, '' or None keeps the
    default value).
    """
    face = 'sans'
    size = '12'
    weight = 'normal'
    name = None

    def __init__(self, face=None, size=None, name=None, weight=None):
        for attrName, value in (('face', face), ('size', size),
                                ('name', name), ('weight', weight)):
            if value:
                setattr(self, attrName, value)
class DrawingOptions(object):
    """Bag of tunable parameters controlling how MolDrawing renders molecules."""

    dotsPerAngstrom = 30        # base resolution: canvas pixels per Angstrom
    useFraction = 0.85          # fraction of the canvas the drawing may occupy
    atomLabelFontFace = "sans"
    atomLabelFontSize = 12
    atomLabelMinFontSize = 7    # lower bound when the drawing is scaled down
    bondLineWidth = 1.2
    dblBondOffset = .25         # perpendicular offset of a double bond's 2nd line
    dblBondLengthFrac = .8      # 2nd line drawn at this fraction of bond length
    defaultColor = (1, 0, 0)
    selectColor = (1, 0, 0)     # color used for highlighted atoms/bonds
    bgColor = (1, 1, 1)         # background fill; None disables painting it
    colorBonds = True           # color bond halves by their end atoms' elements
    noCarbonSymbols = True      # suppress 'C' labels on plain carbons
    includeAtomNumbers = False  # draw atom indices instead of element symbols
    atomNumberOffset = 0
    radicalSymbol = u'\u2219'   # bullet character marking radical electrons
    dash = (4, 4)               # dash pattern for dashed/aromatic bond lines
    wedgeDashedBonds = True
    showUnknownDoubleBonds = True
    # used to adjust overall scaling for molecules that have been laid out with
    # non-standard bond lengths
    coordScale = 1.0
    # per-element RGB colors keyed by atomic number; 0 is the fallback entry
    elemDict = {
        1: (0.55, 0.55, 0.55),
        7: (0, 0, 1),
        8: (1, 0, 0),
        9: (.2, .8, .8),
        15: (1, .5, 0),
        16: (.8, .8, 0),
        17: (0, .8, 0),
        35: (.5, .3, .1),
        53: (.63, .12, .94),
        0: (.5, .5, .5),
    }
class MolDrawing(object):
    """Renders RDKit molecules onto an abstract canvas object.

    The canvas must provide the addCanvasLine/addCanvasPolygon/addCanvasText
    (and optionally addCanvasDashedWedge) primitives used by the methods below.
    """
    # per-molecule map: mol -> {atom index: canvas position} (filled by AddMol)
    atomPs = None
    # the target drawing surface
    canvas = None
    # (width, height) of the canvas in pixels
    canvasSize = None
    def __init__(self, canvas=None, drawingOptions=None):
        """Bind the drawing to *canvas* and pick up rendering options.

        canvas -- the target surface; when given its size is cached.
        drawingOptions -- a DrawingOptions instance; a default one is
            constructed when omitted.
        """
        self.canvas = canvas
        if canvas:
            self.canvasSize = canvas.size
        self.atomPs = {}
        if drawingOptions is None:
            self.drawingOptions = DrawingOptions()
        else:
            self.drawingOptions = drawingOptions
        self.boundingBoxes = {}
        if self.drawingOptions.bgColor is not None:
            # Paint the background as a filled, unstroked rectangle covering
            # the whole canvas.
            # NOTE(review): this assumes canvas is not None whenever bgColor
            # is set -- confirm callers always pass a canvas in that case.
            self.canvas.addCanvasPolygon(((0, 0),
                                          (canvas.size[0], 0),
                                          (canvas.size[0], canvas.size[1]),
                                          (0, canvas.size[1])),
                                         color=self.drawingOptions.bgColor,
                                         fill=True, stroke=False)
def transformPoint(self,pos):
res = [0,0]
res[0] = (pos[0] + self.molTrans[0])*self.currDotsPerAngstrom*self.drawingOptions.useFraction + self.drawingTrans[0]
res[1] = self.canvasSize[1]-((pos[1] + self.molTrans[1])*self.currDotsPerAngstrom*self.drawingOptions.useFraction + \
self.drawingTrans[1])
return res
def _getBondOffset(self,p1,p2):
# get the vector between the points:
dx = p2[0]-p1[0]
dy = p2[1]-p1[1]
# figure out the angle and the perpendicular:
ang = math.atan2(dy,dx)
perp = ang + math.pi/2.
# here's the offset for the parallel bond:
offsetX = math.cos(perp)*self.drawingOptions.dblBondOffset*self.currDotsPerAngstrom
offsetY = math.sin(perp)*self.drawingOptions.dblBondOffset*self.currDotsPerAngstrom
return perp,offsetX,offsetY
def _getOffsetBondPts(self,p1,p2,
offsetX,offsetY,
lenFrac=None):
if not lenFrac:
lenFrac = self.drawingOptions.dblBondLengthFrac
dx = p2[0]-p1[0]
dy = p2[1]-p1[1]
# ----
# now figure out where to start and end it:
# offset the start point:
fracP1 = p1[0]+offsetX,p1[1]+offsetY
# now move a portion of the way along the line to the neighbor:
frac = (1.-lenFrac)/2
fracP1 = fracP1[0]+dx*frac,\
fracP1[1]+dy*frac
fracP2 = fracP1[0]+dx*lenFrac,\
fracP1[1]+dy*lenFrac
return fracP1,fracP2
    def _offsetDblBond(self, p1, p2, bond, a1, a2, conf, dir=1,
                       lenFrac=None):
        """Compute endpoints of the offset (second) line of a double bond.

        p1/p2 are canvas positions of atoms a1/a2; *dir* selects which side
        of the bond the offset falls on; *lenFrac* optionally overrides the
        configured length fraction.  Returns (fracP1, fracP2).
        """
        perp, offsetX, offsetY = self._getBondOffset(p1, p2)
        offsetX = offsetX * dir
        offsetY = offsetY * dir
        # if we're a ring bond, we may need to flip over to the other side:
        if bond.IsInRing():
            bondIdx = bond.GetIdx()
            a1Idx = a1.GetIdx()
            a2Idx = a2.GetIdx()
            # find a ring bond from a1 to an atom other than a2:
            for otherBond in a1.GetBonds():
                if otherBond.GetIdx() != bondIdx and \
                   otherBond.IsInRing():
                    # only consider neighbor bonds that share a ring with
                    # this bond
                    sharedRing = False
                    for ring in self.bondRings:
                        if bondIdx in ring and otherBond.GetIdx() in ring:
                            sharedRing = True
                            break
                    if not sharedRing:
                        continue
                    a3 = otherBond.GetOtherAtom(a1)
                    if a3.GetIdx() != a2Idx:
                        p3 = self.transformPoint(conf.GetAtomPosition(a3.GetIdx()) * self.drawingOptions.coordScale)
                        dx2 = p3[0] - p1[0]
                        dy2 = p3[1] - p1[1]
                        # if the offset points away from the ring neighbor
                        # (negative projection), flip the offset to the ring
                        # interior side
                        dotP = dx2 * offsetX + dy2 * offsetY
                        if dotP < 0:
                            perp += math.pi
                            offsetX = math.cos(perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
                            offsetY = math.sin(perp) * self.drawingOptions.dblBondOffset * self.currDotsPerAngstrom
        fracP1, fracP2 = self._getOffsetBondPts(p1, p2,
                                                offsetX, offsetY,
                                                lenFrac=lenFrac)
        return fracP1, fracP2
def _getBondAttachmentCoordinates(self, p1, p2, labelSize):
newpos = [None, None]
if labelSize != None:
labelSizeOffset = [labelSize[0][0]/2 + (cmp(p2[0], p1[0]) * labelSize[0][2]), labelSize[0][1]/2]
if p1[1] == p2[1]:
newpos[0] = p1[0] + cmp(p2[0], p1[0]) * labelSizeOffset[0]
else:
if abs(labelSizeOffset[1] * (p2[0] - p1[0]) / (p2[1] - p1[1])) < labelSizeOffset[0]:
newpos[0] = p1[0] + cmp(p2[0], p1[0]) * abs(labelSizeOffset[1] * (p2[0] - p1[0]) / (p2[1] - p1[1]))
else:
newpos[0] = p1[0] + cmp(p2[0], p1[0]) * labelSizeOffset[0]
if p1[0] == p2[0]:
newpos[1] = p1[1] + cmp(p2[1], p1[1]) * labelSizeOffset[1]
else:
if abs(labelSizeOffset[0] * (p1[1] - p2[1]) / (p2[0] - p1[0])) < labelSizeOffset[1]:
newpos[1] = p1[1] + cmp(p2[1], p1[1]) * abs(labelSizeOffset[0] * (p1[1] - p2[1]) / (p2[0] - p1[0]))
else:
newpos[1] = p1[1] + cmp(p2[1], p1[1]) * labelSizeOffset[1]
else:
newpos = copy.deepcopy(p1)
return newpos
    def _drawWedgedBond(self, bond, pos, nbrPos,
                        width=None, color=None,
                        dash=None):
        """Draw a stereo wedge from *pos* (narrow end) to *nbrPos* (wide end).

        A truthy *dash* selects the hashed-wedge form; if the canvas cannot
        draw dashed wedges, a thick dashed line is used as a fallback.
        """
        if width is None:
            width = self.drawingOptions.bondLineWidth
        if color is None:
            color = self.drawingOptions.defaultColor
        # The wedge widens perpendicular to the bond; 0.75 narrows it a bit
        # relative to the double-bond offset returned by _getBondOffset.
        perp, offsetX, offsetY = self._getBondOffset(pos, nbrPos)
        offsetX *= .75
        offsetY *= .75
        poly = ((pos[0], pos[1]),
                (nbrPos[0] + offsetX, nbrPos[1] + offsetY),
                (nbrPos[0] - offsetX, nbrPos[1] - offsetY))
        #canvas.drawPolygon(poly,edgeColor=color,edgeWidth=1,fillColor=color,closed=1)
        if not dash:
            self.canvas.addCanvasPolygon(poly, color=color)
        elif self.drawingOptions.wedgeDashedBonds and self.canvas.addCanvasDashedWedge:
            self.canvas.addCanvasDashedWedge(poly[0], poly[1], poly[2], color=color)
        else:
            # Fallback: canvas lacks dashed-wedge support.
            self.canvas.addCanvasLine(pos, nbrPos, linewidth=width * 2, color=color,
                                      dashes=dash)
    def _drawBond(self, bond, atom, nbr, pos, nbrPos, conf,
                  width=None, color=None, color2=None, labelSize1=None, labelSize2=None):
        """Draw one bond between *atom* (at *pos*) and *nbr* (at *nbrPos*).

        color/color2 are the colors of the two bond halves; labelSize1/2 are
        the drawn-label records of the two atoms, used to pull the bond's
        endpoints back to the label edges.
        """
        if width is None:
            width = self.drawingOptions.bondLineWidth
        if color is None:
            color = self.drawingOptions.defaultColor
        # Work on copies so the attachment adjustment can't disturb the
        # cached atom positions.
        p1_raw = copy.deepcopy(pos)
        p2_raw = copy.deepcopy(nbrPos)
        newpos = self._getBondAttachmentCoordinates(p1_raw, p2_raw, labelSize1)
        newnbrPos = self._getBondAttachmentCoordinates(p2_raw, p1_raw, labelSize2)
        bType = bond.GetBondType()
        if bType == Chem.BondType.SINGLE:
            bDir = bond.GetBondDir()
            if bDir in (Chem.BondDir.BEGINWEDGE, Chem.BondDir.BEGINDASH):
                # if the bond is "backwards", change the drawing direction:
                if bond.GetBeginAtom().GetChiralTag() in (Chem.ChiralType.CHI_TETRAHEDRAL_CW,
                                                          Chem.ChiralType.CHI_TETRAHEDRAL_CCW):
                    p1, p2 = newpos, newnbrPos
                    wcolor = color
                else:
                    p2, p1 = newpos, newnbrPos
                    if color2 is not None:
                        wcolor = color2
                    else:
                        wcolor = self.drawingOptions.defaultColor
                if bDir == Chem.BondDir.BEGINWEDGE:
                    self._drawWedgedBond(bond, p1, p2, color=wcolor, width=width)
                elif bDir == Chem.BondDir.BEGINDASH:
                    self._drawWedgedBond(bond, p1, p2, color=wcolor, width=width,
                                         dash=self.drawingOptions.dash)
            else:
                self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
        elif bType == Chem.BondType.DOUBLE:
            # Unknown (STEREOANY) double bonds are drawn as crossed lines.
            crossBond = (self.drawingOptions.showUnknownDoubleBonds and \
                         bond.GetStereo() == Chem.BondStereo.STEREOANY)
            if not crossBond and \
               (bond.IsInRing() or (atom.GetDegree() != 1 and bond.GetOtherAtom(atom).GetDegree() != 1)):
                # Main line plus one shortened, offset parallel line.
                self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
                fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
                self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
            else:
                # Terminal double bond: two full-length lines straddling the
                # bond axis (swapped ends when drawing the crossed form).
                fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=.5,
                                               lenFrac=1.0)
                fp3, fp4 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=-.5,
                                               lenFrac=1.0)
                if crossBond:
                    fp2, fp4 = fp4, fp2
                self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
                self.canvas.addCanvasLine(fp3, fp4, linewidth=width, color=color, color2=color2)
        elif bType == Chem.BondType.AROMATIC:
            # Solid line plus a dashed offset line.
            self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
            fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
            self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2,
                                      dash=self.drawingOptions.dash)
        elif bType == Chem.BondType.TRIPLE:
            # Center line plus one offset line on each side.
            self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2)
            fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf)
            self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
            fp1, fp2 = self._offsetDblBond(newpos, newnbrPos, bond, atom, nbr, conf, dir=-1)
            self.canvas.addCanvasLine(fp1, fp2, linewidth=width, color=color, color2=color2)
        else:
            # Any other bond type: draw a finely dashed line.
            self.canvas.addCanvasLine(newpos, newnbrPos, linewidth=width, color=color, color2=color2,
                                      dash=(1, 2))
    def scaleAndCenter(self, mol, conf, coordCenter=False, canvasSize=None, ignoreHs=False):
        """Compute molTrans/drawingTrans (and rescale) to fit *mol* on canvas.

        coordCenter -- center on the centroid of the atoms instead of the
            midpoint of the bounding box.
        ignoreHs -- skip hydrogens when accumulating the extents.
        Side effects: sets self.molTrans / self.drawingTrans and may shrink
        self.currDotsPerAngstrom and self.currAtomLabelFontSize.
        """
        if canvasSize is None:
            canvasSize = self.canvasSize
        xAccum = 0
        yAccum = 0
        minX = 1e8
        minY = 1e8
        maxX = -1e8
        maxY = -1e8
        nAts = mol.GetNumAtoms()
        # NOTE(review): with coordCenter=True and nAts==0 this divides by
        # zero; callers presumably never pass an empty molecule -- confirm.
        for i in range(nAts):
            if ignoreHs and mol.GetAtomWithIdx(i).GetAtomicNum() == 1:
                continue
            pos = conf.GetAtomPosition(i) * self.drawingOptions.coordScale
            xAccum += pos[0]
            yAccum += pos[1]
            minX = min(minX, pos[0])
            minY = min(minY, pos[1])
            maxX = max(maxX, pos[0])
            maxY = max(maxY, pos[1])
        dx = abs(maxX - minX)
        dy = abs(maxY - minY)
        xSize = dx * self.currDotsPerAngstrom
        ySize = dy * self.currDotsPerAngstrom
        if coordCenter:
            molTrans = -xAccum / nAts, -yAccum / nAts
        else:
            molTrans = -(minX + (maxX - minX) / 2), -(minY + (maxY - minY) / 2)
        self.molTrans = molTrans
        # Shrink the scale (and proportionally the label font, bounded below
        # by atomLabelMinFontSize) if either extent exceeds 95% of the canvas.
        if xSize >= .95 * canvasSize[0]:
            scale = .9 * canvasSize[0] / xSize
            xSize *= scale
            ySize *= scale
            self.currDotsPerAngstrom *= scale
            self.currAtomLabelFontSize = max(self.currAtomLabelFontSize * scale,
                                             self.drawingOptions.atomLabelMinFontSize)
        if ySize >= .95 * canvasSize[1]:
            scale = .9 * canvasSize[1] / ySize
            xSize *= scale
            ySize *= scale
            self.currDotsPerAngstrom *= scale
            self.currAtomLabelFontSize = max(self.currAtomLabelFontSize * scale,
                                             self.drawingOptions.atomLabelMinFontSize)
        # Draw about the canvas center.
        drawingTrans = canvasSize[0] / 2, canvasSize[1] / 2
        self.drawingTrans = drawingTrans
def _drawLabel(self,label,pos,baseOffset,font,color=None,**kwargs):
if color is None:
color = self.drawingOptions.defaultColor
x1 = pos[0]
y1 = pos[1]
labelP = x1,y1
labelSize = self.canvas.addCanvasText(label,(x1,y1,baseOffset),font,color,**kwargs)
return labelSize
    def AddMol(self, mol, centerIt=True, molTrans=None, drawingTrans=None,
               highlightAtoms=[], confId=-1, flagCloseContactsDist=2,
               highlightMap=None, ignoreHs=False, highlightBonds=[], **kwargs):
        """Set the molecule to be drawn.

        Parameters:
          hightlightAtoms -- list of atoms to highlight (default [])
          highlightMap -- dictionary of (atom, color) pairs (default None)

        Notes:
          - specifying centerIt will cause molTrans and drawingTrans to be
            ignored
        """
        conf = mol.GetConformer(confId)
        if 'coordScale' in kwargs:
            self.drawingOptions.coordScale = kwargs['coordScale']

        # --- establish the molecule->canvas transform ---
        self.currDotsPerAngstrom = self.drawingOptions.dotsPerAngstrom
        self.currAtomLabelFontSize = self.drawingOptions.atomLabelFontSize
        if centerIt:
            self.scaleAndCenter(mol, conf, ignoreHs=ignoreHs)
        else:
            if molTrans is None:
                molTrans = (0, 0)
            self.molTrans = molTrans
            if drawingTrans is None:
                drawingTrans = (0, 0)
            self.drawingTrans = drawingTrans

        font = Font(face=self.drawingOptions.atomLabelFontFace, size=self.currAtomLabelFontSize)

        obds = None
        if not mol.HasProp('_drawingBondsWedged'):
            # this is going to modify the molecule, get ready to undo that
            obds = [x.GetBondDir() for x in mol.GetBonds()]
            Chem.WedgeMolBonds(mol, conf)

        includeAtomNumbers = kwargs.get('includeAtomNumbers', self.drawingOptions.includeAtomNumbers)
        self.atomPs[mol] = {}
        self.boundingBoxes[mol] = [0] * 4
        self.activeMol = mol
        self.bondRings = mol.GetRingInfo().BondRings()
        labelSizes = {}

        # --- first pass: place atoms, compute label text, draw labels ---
        for atom in mol.GetAtoms():
            labelSizes[atom.GetIdx()] = None
            if ignoreHs and atom.GetAtomicNum() == 1:
                drawAtom = False
            else:
                drawAtom = True
            idx = atom.GetIdx()
            pos = self.atomPs[mol].get(idx, None)
            if pos is None:
                pos = self.transformPoint(conf.GetAtomPosition(idx) * self.drawingOptions.coordScale)
                self.atomPs[mol][idx] = pos
                if drawAtom:
                    # grow the molecule's bounding box [minX, minY, maxX, maxY]
                    self.boundingBoxes[mol][0] = min(self.boundingBoxes[mol][0], pos[0])
                    self.boundingBoxes[mol][1] = min(self.boundingBoxes[mol][1], pos[1])
                    self.boundingBoxes[mol][2] = max(self.boundingBoxes[mol][2], pos[0])
                    self.boundingBoxes[mol][3] = max(self.boundingBoxes[mol][3], pos[1])
            if not drawAtom:
                continue
            # Sum of vectors to the neighbors; used below to orient the label.
            nbrSum = [0, 0]
            for bond in atom.GetBonds():
                nbr = bond.GetOtherAtom(atom)
                if ignoreHs and nbr.GetAtomicNum() == 1:
                    continue
                nbrIdx = nbr.GetIdx()
                if nbrIdx > idx:
                    nbrPos = self.atomPs[mol].get(nbrIdx, None)
                    if nbrPos is None:
                        nbrPos = self.transformPoint(conf.GetAtomPosition(nbrIdx) * self.drawingOptions.coordScale)
                        self.atomPs[mol][nbrIdx] = nbrPos
                        self.boundingBoxes[mol][0] = min(self.boundingBoxes[mol][0], nbrPos[0])
                        self.boundingBoxes[mol][1] = min(self.boundingBoxes[mol][1], nbrPos[1])
                        self.boundingBoxes[mol][2] = max(self.boundingBoxes[mol][2], nbrPos[0])
                        self.boundingBoxes[mol][3] = max(self.boundingBoxes[mol][3], nbrPos[1])
                else:
                    nbrPos = self.atomPs[mol][nbrIdx]
                nbrSum[0] += nbrPos[0] - pos[0]
                nbrSum[1] += nbrPos[1] - pos[1]
            iso = atom.GetIsotope()
            # Draw a label unless it is a plain, unannotated carbon and
            # carbon symbols are suppressed.
            labelIt = not self.drawingOptions.noCarbonSymbols or \
                atom.GetAtomicNum() != 6 or \
                atom.GetFormalCharge() != 0 or \
                atom.GetNumRadicalElectrons() or \
                includeAtomNumbers or \
                iso or \
                atom.HasProp('molAtomMapNumber') or \
                atom.GetDegree() == 0
            orient = ''
            if labelIt:
                baseOffset = 0
                if includeAtomNumbers:
                    symbol = str(atom.GetIdx())
                    symbolLength = len(symbol)
                else:
                    # Assemble the markup label from symbol, H count, charge,
                    # radical dots, isotope and atom-map number.
                    base = atom.GetSymbol()
                    symbolLength = len(base)
                    nHs = atom.GetTotalNumHs()
                    if nHs > 0:
                        if nHs > 1:
                            hs = 'H<sub>%d</sub>' % nHs
                            symbolLength += 1 + len(str(nHs))
                        else:
                            hs = 'H'
                            symbolLength += 1
                    else:
                        hs = ''
                    chg = atom.GetFormalCharge()
                    if chg != 0:
                        if chg == 1:
                            chg = '+'
                        elif chg == -1:
                            chg = '-'
                        elif chg > 1:
                            chg = '+%d' % chg
                        elif chg < -1:
                            chg = '-%d' % chg
                        symbolLength += len(chg)
                    else:
                        chg = ''
                    if chg:
                        chg = '<sup>%s</sup>' % chg
                    if atom.GetNumRadicalElectrons():
                        rad = self.drawingOptions.radicalSymbol * atom.GetNumRadicalElectrons()
                        rad = '<sup>%s</sup>' % rad
                        symbolLength += atom.GetNumRadicalElectrons()
                    else:
                        rad = ''
                    isotope = ''
                    isotopeLength = 0
                    if iso:
                        isotope = '<sup>%d</sup>' % atom.GetIsotope()
                        isotopeLength = len(str(atom.GetIsotope()))
                        symbolLength += isotopeLength
                    mapNum = ''
                    mapNumLength = 0
                    if atom.HasProp('molAtomMapNumber'):
                        mapNum = ':' + atom.GetProp('molAtomMapNumber')
                        mapNumLength = 1 + len(str(atom.GetProp('molAtomMapNumber')))
                        symbolLength += mapNumLength
                    deg = atom.GetDegree()
                    # This should be done in a better way in the future:
                    # 'baseOffset' should be determined by getting the size of 'isotope' and the size of 'base', or the size of 'mapNum' and the size of 'base'
                    # (depending on 'deg' and 'nbrSum[0]') in order to determine the exact position of the base
                    if deg == 0:
                        if periodicTable.GetElementSymbol(atom.GetAtomicNum()) in ('O', 'S', 'Se', 'Te', 'F', 'Cl', 'Br', 'I', 'At'):
                            symbol = '%s%s%s%s%s%s' % (hs, isotope, base, chg, rad, mapNum)
                        else:
                            symbol = '%s%s%s%s%s%s' % (isotope, base, hs, chg, rad, mapNum)
                    elif deg > 1 or nbrSum[0] < 1:
                        symbol = '%s%s%s%s%s%s' % (isotope, base, hs, chg, rad, mapNum)
                        baseOffset = 0.5 - (isotopeLength + len(base) / 2.) / symbolLength
                    else:
                        symbol = '%s%s%s%s%s%s' % (rad, chg, hs, isotope, base, mapNum)
                        baseOffset = -0.5 + (mapNumLength + len(base) / 2.) / symbolLength
                    # Orientation of a terminal atom's label relative to its
                    # single neighbor (W/E/N/S); 'C' (centered) otherwise.
                    if deg == 1:
                        if abs(nbrSum[1]) > 1:
                            islope = nbrSum[0] / abs(nbrSum[1])
                        else:
                            islope = nbrSum[0]
                        if abs(islope) > .3:
                            if islope > 0:
                                orient = 'W'
                            else:
                                orient = 'E'
                        elif abs(nbrSum[1]) > 10:
                            if nbrSum[1] > 0:
                                orient = 'N'
                            else:
                                orient = 'S'
                    else:
                        orient = 'C'
                # Pick the label color: explicit highlight map, generic
                # highlight, or the element's configured color.
                if highlightMap and idx in highlightMap:
                    color = highlightMap[idx]
                elif highlightAtoms and idx in highlightAtoms:
                    color = self.drawingOptions.selectColor
                else:
                    color = self.drawingOptions.elemDict.get(atom.GetAtomicNum(), (0, 0, 0))
                labelSize = self._drawLabel(symbol, pos, baseOffset, font, color=color, orientation=orient)
                labelSizes[atom.GetIdx()] = [labelSize, orient]

        # --- second pass: draw the bonds ---
        for bond in mol.GetBonds():
            atom, idx = bond.GetBeginAtom(), bond.GetBeginAtomIdx()
            nbr, nbrIdx = bond.GetEndAtom(), bond.GetEndAtomIdx()
            pos = self.atomPs[mol].get(idx, None)
            nbrPos = self.atomPs[mol].get(nbrIdx, None)
            if highlightBonds and bond.GetIdx() in highlightBonds:
                width = 2.0 * self.drawingOptions.bondLineWidth
                color = self.drawingOptions.selectColor
                color2 = self.drawingOptions.selectColor
            elif highlightAtoms and idx in highlightAtoms and nbrIdx in highlightAtoms:
                width = 2.0 * self.drawingOptions.bondLineWidth
                color = self.drawingOptions.selectColor
                color2 = self.drawingOptions.selectColor
            elif highlightMap is not None and idx in highlightMap and nbrIdx in highlightMap:
                width = 2.0 * self.drawingOptions.bondLineWidth
                color = highlightMap[idx]
                color2 = highlightMap[nbrIdx]
            else:
                width = self.drawingOptions.bondLineWidth
                if self.drawingOptions.colorBonds:
                    color = self.drawingOptions.elemDict.get(atom.GetAtomicNum(), (0, 0, 0))
                    color2 = self.drawingOptions.elemDict.get(nbr.GetAtomicNum(), (0, 0, 0))
                else:
                    color = self.drawingOptions.defaultColor
                    color2 = color
            self._drawBond(bond, atom, nbr, pos, nbrPos, conf,
                           color=color, width=width, color2=color2, labelSize1=labelSizes[idx], labelSize2=labelSizes[nbrIdx])

        # if we modified the bond wedging state, undo those changes now
        if obds:
            for i, d in enumerate(obds):
                mol.GetBondWithIdx(i).SetBondDir(d)

        # --- flag atom pairs that ended up too close together ---
        if flagCloseContactsDist > 0:
            tol = flagCloseContactsDist * flagCloseContactsDist
            for i, atomi in enumerate(mol.GetAtoms()):
                pi = numpy.array(self.atomPs[mol][i])
                for j in range(i + 1, mol.GetNumAtoms()):
                    pj = numpy.array(self.atomPs[mol][j])
                    d = pj - pi
                    dist2 = d[0] * d[0] + d[1] * d[1]
                    if dist2 <= tol:
                        # Draw a small red hollow square around the clash.
                        self.canvas.addCanvasPolygon(((pi[0] - 2 * flagCloseContactsDist,
                                                       pi[1] - 2 * flagCloseContactsDist),
                                                      (pi[0] + 2 * flagCloseContactsDist,
                                                       pi[1] - 2 * flagCloseContactsDist),
                                                      (pi[0] + 2 * flagCloseContactsDist,
                                                       pi[1] + 2 * flagCloseContactsDist),
                                                      (pi[0] - 2 * flagCloseContactsDist,
                                                       pi[1] + 2 * flagCloseContactsDist)),
                                                     color=(1., 0, 0),
                                                     fill=False, stroke=True)
| AlexanderSavelyev/rdkit | rdkit/Chem/Draw/MolDrawing.py | Python | bsd-3-clause | 22,658 | [
"RDKit"
] | c4d01fbe4925bbce66f08dd21ecfa1100664309ef60911631f3b57514b2a0811 |
# sql/expression.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`.ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`.ClauseElement` structures
together, and in other cases simply return a single :class:`.ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools
import re
from operator import attrgetter
from .. import util, exc, inspection
from . import operators
from .operators import ColumnOperators
from .visitors import Visitable, cloned_traverse
import operator
functions = util.importlater("sqlalchemy.sql", "functions")
sqlutil = util.importlater("sqlalchemy.sql", "util")
sqltypes = util.importlater("sqlalchemy", "types")
default = util.importlater("sqlalchemy.engine", "default")
# Public API of this module; controls what
# ``from sqlalchemy.sql.expression import *`` exports.
__all__ = [
    'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
    'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
    'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
    'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
    'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
    'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
    'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
    'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
    'table', 'text',
    'tuple_', 'type_coerce', 'union', 'union_all', 'update', ]

# Sentinel symbols.  PARSE_AUTOCOMMIT marks statements whose autocommit
# behavior is determined by inspecting the SQL text; NO_ARG distinguishes
# "argument not supplied" from an explicit ``None`` (see :func:`bindparam`).
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
NO_ARG = util.symbol('NO_ARG')
def nullsfirst(column):
    """Produce a ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.

    For example::

        someselect.order_by(desc(table1.mycol).nullsfirst())

    renders::

        ORDER BY mycol DESC NULLS FIRST
    """
    return UnaryExpression(column, modifier=operators.nullsfirst_op)
def nullslast(column):
    """Produce a ``NULLS LAST`` modifier for an ``ORDER BY`` expression.

    For example::

        someselect.order_by(desc(table1.mycol).nullslast())

    renders::

        ORDER BY mycol DESC NULLS LAST
    """
    return UnaryExpression(column, modifier=operators.nullslast_op)
def desc(column):
    """Produce a descending ``ORDER BY`` clause element.

    For example::

        someselect.order_by(desc(table1.mycol))

    renders::

        ORDER BY mycol DESC
    """
    return UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
    """Produce an ascending ``ORDER BY`` clause element.

    For example::

        someselect.order_by(asc(table1.mycol))

    renders::

        ORDER BY mycol ASC
    """
    return UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
    """Produce a ``LEFT OUTER JOIN`` between *left* and *right*.

    The result is a :class:`.Join` instance.  The same operation is
    available as the :meth:`~.FromClause.outerjoin` method on any
    :class:`.FromClause`.

    :param left: the left side of the join.

    :param right: the right side of the join.

    :param onclause: optional ``ON`` criterion; when omitted, it is
      derived from the foreign key relationships between the two sides.

    Further joins may be chained via the :meth:`.FromClause.join` or
    :meth:`.FromClause.outerjoin` methods of the returned
    :class:`.Join`.
    """
    return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
    """Produce a ``JOIN`` (by default, an inner join) between *left* and
    *right*.

    The result is a :class:`.Join` instance.  The same operation is
    available as the :meth:`~.FromClause.join` method on any
    :class:`.FromClause`.

    :param left: the left side of the join.

    :param right: the right side of the join.

    :param onclause: optional ``ON`` criterion; when omitted, it is
      derived from the foreign key relationships between the two sides.

    :param isouter: when True, render a ``LEFT OUTER JOIN`` instead of
      an inner join.

    Further joins may be chained via the :meth:`.FromClause.join` or
    :meth:`.FromClause.outerjoin` methods of the returned
    :class:`.Join`.
    """
    return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=None, **kwargs):
    """Returns a ``SELECT`` clause element.

    Similar functionality is also available via the :func:`select()`
    method on any :class:`.FromClause`.

    The returned object is an instance of :class:`.Select`.

    All arguments which accept :class:`.ClauseElement` arguments also
    accept string arguments, which will be converted as appropriate into
    either :func:`text()` or :func:`literal_column()` constructs.

    .. seealso::

        :ref:`coretutorial_selecting` - Core Tutorial description of
        :func:`.select`.

    :param columns:
      A list of :class:`.ClauseElement` objects, typically
      :class:`.ColumnElement` objects or subclasses, which will form the
      columns clause of the resulting statement.  For members which are
      instances of :class:`.Selectable`, the individual
      :class:`.ColumnElement` members of the :class:`.Selectable` will be
      added individually to the columns clause (e.g. a
      :class:`~sqlalchemy.schema.Table` contributes all of its
      :class:`~sqlalchemy.schema.Column` objects).

    :param whereclause:
      A :class:`.ClauseElement` expression which will form the ``WHERE``
      clause.

    :param from_obj:
      A list of :class:`.ClauseElement` objects which will be added to the
      ``FROM`` clause of the resulting statement.  "From" objects present
      in the columns and whereclause are located automatically; use this
      parameter only for objects that are not otherwise locatable, or to
      supersede located ones (e.g. a :class:`.Join` superseding its
      component tables).

    :param autocommit:
      Deprecated.  Use ``.execution_options(autocommit=<True|False>)``.

    :param bind=None:
      an :class:`~.base.Engine` or :class:`~.base.Connection` to bind the
      resulting :class:`.Select` to; otherwise binding is located from the
      contained :class:`.ClauseElement` members.

    :param correlate=True:
      when True, "froms" of this :class:`.Select` which also appear in an
      enclosing select's "froms" are omitted from this statement's
      ``FROM`` clause.

    :param distinct=False:
      when True, applies ``DISTINCT`` to the columns clause.  May also be
      a column expression or list of column expressions, which the
      Postgresql dialect renders as ``DISTINCT ON (<columns>)``.  Also
      available via the :meth:`~.Select.distinct` generative method.

    :param for_update=False:
      when True, applies ``FOR UPDATE``.  Dialect-specific string values
      are also accepted: MySQL ``"read"`` -> ``LOCK IN SHARE MODE``;
      Oracle/Postgresql ``"nowait"`` -> ``FOR UPDATE NOWAIT``; Postgresql
      ``"read"``/``"read_nowait"`` -> ``FOR SHARE`` / ``FOR SHARE
      NOWAIT``.

      .. versionadded:: 0.7.7

    :param group_by:
      a list of :class:`.ClauseElement` objects forming the ``GROUP BY``
      clause.

    :param having:
      a :class:`.ClauseElement` forming the ``HAVING`` clause when
      ``GROUP BY`` is used.

    :param limit=None:
      a numerical value compiling to a ``LIMIT`` expression (or a
      dialect-specific equivalent).

    :param offset=None:
      a numeric value compiling to an ``OFFSET`` expression (or a
      dialect-specific equivalent).

    :param order_by:
      a scalar or list of :class:`.ClauseElement` objects forming the
      ``ORDER BY`` clause.

    :param use_labels=False:
      when True, columns are labeled ``<tablename>_<column>`` to avoid
      name conflicts between tables; the "c" collection of the resulting
      :class:`.Select` uses these names as well.  Also available via
      :meth:`~.SelectBase.apply_labels`.

    """
    # ``from_obj`` previously defaulted to a shared mutable ``[]``; a
    # ``None`` sentinel avoids the classic mutable-default pitfall while
    # producing an identical Select for callers that omit the argument.
    if from_obj is None:
        from_obj = []
    return Select(columns, whereclause=whereclause, from_obj=from_obj,
                  **kwargs)
def subquery(alias, *args, **kwargs):
    """Build a :func:`select` and wrap it in an :class:`.Alias`.

    :param alias: the name to assign to the alias.

    All remaining positional and keyword arguments are forwarded to the
    :func:`select` function.
    """
    # NOTE: the first parameter deliberately shares its name with the
    # module-level :func:`alias` function; kept for API compatibility.
    return Select(*args, **kwargs).alias(alias)
def insert(table, values=None, inline=False, **kwargs):
    """Construct an :class:`.Insert` statement.

    Similar functionality is available via the
    :meth:`~.TableClause.insert` method on :class:`~.schema.Table`.

    :param table: the :class:`.TableClause` which is the subject of the
      insert.

    :param values: collection of values to be inserted; see
      :meth:`.Insert.values` for accepted formats.  May be omitted, in
      which case the VALUES clause is rendered dynamically at execution
      time from the parameters passed to :meth:`.Connection.execute`.

    :param inline: when True, SQL defaults are compiled 'inline' into
      the statement rather than pre-executed.

    If both ``values`` and compile-time bind parameters are present, the
    compile-time bind parameters win on a per-key basis.  Keys within
    ``values`` may be :class:`~sqlalchemy.schema.Column` objects or their
    string identifiers; each may reference a literal data value, a Column
    object, or a SELECT statement (correlated against this INSERT's table
    when it references it).

    .. seealso::

        :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial

        :ref:`inserts_and_updates` - SQL Expression Tutorial

    """
    return Insert(table, values, inline=inline, **kwargs)
def update(table, whereclause=None, values=None, inline=False, **kwargs):
    """Construct an :class:`.Update` statement.

    E.g.::

        from sqlalchemy import update

        stmt = update(users).where(users.c.id==5).\\
                values(name='user #5')

    Similar functionality is available via the
    :meth:`~.TableClause.update` method on :class:`.Table`::

        stmt = users.update().\\
                    where(users.c.id==5).\\
                    values(name='user #5')

    :param table: the :class:`.Table` to be updated.

    :param whereclause: optional SQL expression for the ``WHERE``
      condition; the generative :meth:`~Update.where()` method may be
      used instead.  The WHERE clause may refer to multiple tables: an
      ``UPDATE FROM`` clause (or, on MySQL, a multi-table update) is
      generated for databases supporting it, and the statement fails on
      databases without such support.  The SQL-standard alternative is a
      correlated subquery::

            users.update().values(name='ed').where(
                    users.c.name==select([addresses.c.email_address]).\\
                                where(addresses.c.user_id==users.c.id).\\
                                as_scalar()
                    )

      .. versionchanged:: 0.7.4
          The WHERE clause can refer to multiple tables.

    :param values: optional dictionary of ``SET`` conditions.  If left as
      ``None``, the ``SET`` conditions come from parameters passed at
      execution/compilation time (all columns when compiled standalone
      with no parameters).  The generative :meth:`.Update.values` method
      may be used instead.

    :param inline: when True, SQL defaults given via the ``default``
      keyword on :class:`.Column` are compiled 'inline' rather than
      pre-executed, so their values are not available from
      :meth:`.ResultProxy.last_updated_params`.

    If both ``values`` and compile-time bind parameters are present, the
    compile-time bind parameters win on a per-key basis.  Keys within
    ``values`` may be :class:`.Column` objects or their string
    identifiers (the "key" of the :class:`.Column`).  Normally the
    columns belong to the target :class:`.Table`, though MySQL
    multi-table updates may refer to columns of any table in the WHERE
    clause.  Values may be literal data values or SQL expressions (a
    related :class:`.Column`, a scalar-returning :func:`.select`, etc.);
    an embedded :func:`.select` should be *correlated* to the table being
    updated::

        users.update().values(
                name=select([addresses.c.email_address]).\\
                        where(addresses.c.user_id==users.c.id).\\
                        as_scalar()
            )

    .. seealso::

        :ref:`inserts_and_updates` - SQL Expression Language Tutorial

    """
    stmt = Update(table,
                  whereclause=whereclause,
                  values=values,
                  inline=inline,
                  **kwargs)
    return stmt
def delete(table, whereclause=None, **kwargs):
    """Construct a :class:`.Delete` statement.

    Similar functionality is available via the
    :meth:`~.TableClause.delete` method on :class:`~.schema.Table`.

    :param table: the table to delete rows from.

    :param whereclause: a :class:`.ClauseElement` forming the ``WHERE``
      condition; the generative :meth:`~Delete.where()` method may be
      used instead.

    .. seealso::

        :ref:`deletes` - SQL Expression Tutorial

    """
    return Delete(table, whereclause, **kwargs)
def and_(*clauses):
    """Combine the given clauses with the SQL ``AND`` operator.

    The ``&`` operator, overloaded on all :class:`.ColumnElement`
    subclasses, produces the same result.
    """
    # A single clause needs no conjunction wrapper.
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
    """Combine the given clauses with the SQL ``OR`` operator.

    The ``|`` operator, overloaded on all :class:`.ColumnElement`
    subclasses, produces the same result.
    """
    # A single clause needs no disjunction wrapper.
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
    """Produce the negation of the given clause, i.e. ``NOT(clause)``.

    The ``~`` operator, overloaded on all :class:`.ColumnElement`
    subclasses, produces the same result.
    """
    coerced = _literal_as_binds(clause)
    return operators.inv(coerced)
def distinct(expr):
    """Produce a ``DISTINCT`` clause.

    For example::

        distinct(a)

    renders::

        DISTINCT a
    """
    coerced = _literal_as_binds(expr)
    return UnaryExpression(coerced,
                           operator=operators.distinct_op,
                           type_=coerced.type)
def between(ctest, cleft, cright):
    """Produce a ``BETWEEN`` predicate clause.

    Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.

    The :func:`between()` method available on all
    :class:`.ColumnElement` subclasses provides similar functionality.
    """
    # Coerce a plain Python value into a bind-parameter expression first.
    return _literal_as_binds(ctest).between(cleft, cright)
def case(whens, value=None, else_=None):
    """Produce a ``CASE`` statement.

    :param whens: a sequence of pairs, or alternatively a dict, to be
      translated into "WHEN / THEN" clauses.

    :param value: optional, for simple case statements; produces a column
      expression as in "CASE <expr> WHEN ...".

    :param else\_: optional; produces the "ELSE" portion of the "CASE"
      statement.

    THEN and ELSE expressions given as strings are interpreted as bound
    values; use the :func:`literal_column` construct for textual SQL.
    WHEN criteria may only be literal strings when ``value`` is present,
    i.e. CASE table.somecol WHEN "x" THEN "y"; otherwise wrap raw strings
    in text(<string>) or literal(<string>).

    Usage examples::

      case([(orderline.c.qty > 100, item.c.specialprice),
            (orderline.c.qty > 10, item.c.bulkprice)
          ], else_=item.c.regularprice)
      case(value=emp.c.type, whens={
              'engineer': emp.c.salary * 1.1,
              'manager':  emp.c.salary * 3,
          })

    Using :func:`literal_column()`, for databases that do not support
    bind parameters in the ``then`` clause; a type may be specified which
    determines the type of the :func:`case()` construct overall::

      case([(orderline.c.qty > 100,
              literal_column("'greaterthan100'", String)),
            (orderline.c.qty > 10, literal_column("'greaterthan10'",
              String))
          ], else_=literal_column("'lethan10'", String))

    """
    return Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
    """Produce a ``CAST`` function, i.e. ``CAST(clause AS totype)``.

    Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, e.g.::

      cast(table.c.unit_price * table.c.qty, Numeric(10,4))

    or::

      cast(table.c.timestamp, DATE)
    """
    return Cast(clause, totype, **kwargs)
def extract(field, expr):
    """Produce the clause ``extract(field FROM expr)``."""
    return Extract(field, expr)
def collate(expression, collation):
    """Produce the clause ``expression COLLATE collation``.

    For example::

        collate(mycolumn, 'utf8_bin')

    renders::

        mycolumn COLLATE utf8_bin
    """
    left = _literal_as_binds(expression)
    right = _literal_as_text(collation)
    # The result type follows the collated expression, not the collation.
    return BinaryExpression(left, right, operators.collate,
                            type_=left.type)
def exists(*args, **kwargs):
    """Produce an ``EXISTS`` clause applied to a :class:`.Select` object.

    Calling styles::

        # use on an existing select()
        s = select([table.c.col1]).where(table.c.col2==5)
        s = exists(s)

        # construct a select() at once
        exists(['*'], **select_arguments).where(criterion)

        # columns argument is optional; generates "EXISTS (SELECT *)"
        # by default.
        exists().where(table.c.col2==5)
    """
    return Exists(*args, **kwargs)
def union(*selects, **kwargs):
    """Produce a ``UNION`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.  A similar :func:`union()`
    method is available on all :class:`.FromClause` subclasses.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)
def union_all(*selects, **kwargs):
    """Produce a ``UNION ALL`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.  A similar :func:`union_all()`
    method is available on all :class:`.FromClause` subclasses.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
def except_(*selects, **kwargs):
    """Produce an ``EXCEPT`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)
def except_all(*selects, **kwargs):
    """Produce an ``EXCEPT ALL`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)
def intersect(*selects, **kwargs):
    """Produce an ``INTERSECT`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)
def intersect_all(*selects, **kwargs):
    """Produce an ``INTERSECT ALL`` of multiple selectables.

    Returns a :class:`.CompoundSelect`.

    :param \*selects: a list of :class:`.Select` instances.

    :param \**kwargs: keyword arguments, same as those of :func:`select`.
    """
    return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs)
def alias(selectable, name=None):
    """Produce an :class:`.Alias` object.

    An :class:`.Alias` represents any :class:`.FromClause` with an
    alternate name assigned within SQL, typically via the ``AS`` clause,
    e.g. ``SELECT * FROM table AS aliasname``.  The same operation is
    available via the :meth:`~.FromClause.alias` method on all
    :class:`.FromClause` subclasses.

    Aliasing a :class:`.Table` renders it as ``tablename AS aliasname``
    in a SELECT; aliasing a :func:`.select` produces a named subquery,
    i.e. ``(select ...) AS aliasname``.

    :param selectable: any :class:`.FromClause` subclass, such as a
      table, select statement, etc.

    :param name: string name to be assigned as the alias.  When
      ``None``, a deterministic "anonymous" name is generated at compile
      time: unique against other constructs in the same statement, and
      stable across successive compilations of the same statement
      object.
    """
    return Alias(selectable, name=name)
def literal(value, type_=None):
    """Produce a literal clause, bound to a bind parameter.

    Literal clauses are created automatically when non-
    :class:`.ClauseElement` objects (strings, ints, dates, etc.) are
    used in a comparison with a :class:`.ColumnElement` subclass, such
    as a :class:`~sqlalchemy.schema.Column`.  Use this function to force
    the creation of such a clause, returned as a :class:`BindParameter`
    with a bound value.

    :param value: the value to be bound.  Any Python object supported by
      the underlying DB-API, or translatable via the given type argument.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      providing bind-parameter translation for this literal.
    """
    return BindParameter(None, value, type_=type_, unique=True)
def tuple_(*expr):
    """Produce a SQL tuple.

    The primary use is a composite IN construct::

        tuple_(table.c.col1, table.c.col2).in_(
            [(1, 2), (5, 12), (10, 19)]
        )

    .. warning::

        The composite IN construct is not supported by all backends; it
        is known to work on Postgresql and MySQL, but not SQLite.
        Unsupported backends raise a subclass of
        :class:`~sqlalchemy.exc.DBAPIError` when the expression is
        invoked.
    """
    return Tuple(*expr)
def type_coerce(expr, type_):
    """Coerce the given expression into the given type,
    on the Python side only.

    :func:`.type_coerce` is roughly similar to :func:`.cast`, except no
    "CAST" expression is rendered - the given type is only applied towards
    expression typing and against received result values.

    e.g.::

        from sqlalchemy.types import TypeDecorator
        import uuid

        class AsGuid(TypeDecorator):
            impl = String

            def process_bind_param(self, value, dialect):
                if value is not None:
                    return str(value)
                else:
                    return None

            def process_result_value(self, value, dialect):
                if value is not None:
                    return uuid.UUID(value)
                else:
                    return None

        conn.execute(
            select([type_coerce(mytable.c.ident, AsGuid)]).\\
                    where(
                        type_coerce(mytable.c.ident, AsGuid) ==
                        uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
                    )
        )
    """
    type_ = sqltypes.to_instance(type_)

    if hasattr(expr, '__clause_expr__'):
        # Bug fix: the target type must be threaded through the recursive
        # call.  Previously ``type_`` was dropped here, which raised
        # TypeError at runtime since it is a required positional argument.
        return type_coerce(expr.__clause_expr__(), type_)
    elif isinstance(expr, BindParameter):
        # Clone so the original bind parameter's type is left untouched.
        bp = expr._clone()
        bp.type = type_
        return bp
    elif not isinstance(expr, Visitable):
        # Plain Python value: promote to NULL or a typed literal.
        if expr is None:
            return null()
        else:
            return literal(expr, type_=type_)
    else:
        # Wrap any other clause element in an anonymous typed label.
        return Label(None, expr, type_=type_)
def label(name, obj):
    """Produce a :class:`Label` for the given :class:`.ColumnElement`.

    A label changes the name of an element in the columns clause of a
    ``SELECT`` statement, typically via the ``AS`` SQL keyword.  The
    :func:`label()` method on :class:`.ColumnElement` offers the same
    functionality more conveniently.

    :param name: the label name.

    :param obj: a :class:`.ColumnElement`.
    """
    return Label(name, obj)
def column(text, type_=None):
    """Produce a textual column clause, as would appear in the columns
    clause of a ``SELECT`` statement.

    Returns a :class:`.ColumnClause`, the "syntactical" portion of the
    schema-level :class:`~sqlalchemy.schema.Column` object, often used
    directly within :func:`~.expression.select` constructs or with
    lightweight :func:`~.expression.table` constructs.

    Note that :func:`~.expression.column` is not part of the
    ``sqlalchemy`` namespace; import it from the ``sql`` package::

        from sqlalchemy.sql import table, column

    :param text: the name of the column.  Quoting rules apply as to any
      other column name; use :func:`literal_column` for textual column
      constructs that must not be quoted.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      providing result-set translation for this column.

    See :class:`.ColumnClause` for further examples.
    """
    return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
    """Produce a textual column expression, as would appear in the
    columns clause of a ``SELECT`` statement.

    The returned object supports further expressions like any other
    column object: comparison, math and string operations.  The
    ``type_`` parameter determines proper expression behavior (e.g.
    whether ``+`` means string concatenation or numeric addition).

    :param text: the text of the expression; any SQL expression.
      Quoting rules are NOT applied; use :func:`column` for a
      column-name expression that should be quoted.

    :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
      providing result-set translation and expression semantics for this
      column.  Defaults to NullType when left as None.
    """
    return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
    """Produce a textual table clause.

    Returns a :class:`.TableClause`, the "syntactical" portion of the
    schema-level :class:`~.schema.Table` object, usable for lightweight
    table constructs.

    Note that :func:`~.expression.table` is not part of the
    ``sqlalchemy`` namespace; import it from the ``sql`` package::

        from sqlalchemy.sql import table, column

    :param name: name of the table.

    :param columns: a collection of :func:`~.expression.column`
      constructs.

    See :class:`.TableClause` for further examples.
    """
    return TableClause(name, *columns)
def bindparam(key, value=NO_ARG, type_=None, unique=False, required=NO_ARG,
              quote=None, callable_=None):
    """Produce a bind parameter clause with the given key.

    :param key: the key for this bind param, used in the generated SQL
      for dialects with named parameters.  May be modified during
      compilation if it collides with another :class:`BindParameter` of
      the same key, or requires truncation.

    :param value: initial value for this bind param; may be overridden by
      the dictionary of parameters sent to statement
      compilation/execution.  Defaults to ``None``; however if neither
      ``value`` nor ``callable`` are passed explicitly, ``required`` is
      set to ``True``, requiring a value at execution time.

      .. versionchanged:: 0.8 The ``required`` flag is set to ``True``
         automatically if ``value`` or ``callable`` is not passed.

    :param callable\_: a callable taking the place of "value", invoked at
      statement execution time to determine the ultimate value.  For
      scenarios where the bind value is unknown at construction time.

    :param type\_: a ``TypeEngine`` used to pre-process the value
      corresponding to this :class:`BindParameter` at execution time.

    :param unique: if True, the key name is modified if another
      :class:`BindParameter` of the same name already exists within the
      containing :class:`.ClauseElement`.

    :param required: if ``True``, a value is required at execution time.
      If not passed, derived from whether ``value`` or ``callable`` were
      supplied.

      .. versionchanged:: 0.8 ``required`` defaults based on the
         presence of ``value``/``callable`` when not specified.

    :param quote: True if this parameter name requires quoting and is
      not currently known as a SQLAlchemy reserved word; currently only
      applies to the Oracle backend.
    """
    # A ColumnClause supplies both the key and the type.
    if isinstance(key, ColumnClause):
        type_ = key.type
        key = key.name

    no_value = value is NO_ARG
    # ``required`` must be derived *before* the NO_ARG value is
    # normalized to None below.
    if required is NO_ARG:
        required = no_value and callable_ is None
    if no_value:
        value = None

    return BindParameter(key, value,
                         type_=type_,
                         callable_=callable_,
                         unique=unique,
                         required=required,
                         quote=quote)
def outparam(key, type_=None):
    """Produce an 'OUT' parameter for use in functions (stored
    procedures), on databases which support them.

    The ``outparam`` can be used like a regular function parameter.  The
    "output" value is available from the
    :class:`~sqlalchemy.engine.ResultProxy` via its ``out_parameters``
    attribute, which returns a dictionary of the values.
    """
    return BindParameter(key, None,
                         type_=type_,
                         unique=False,
                         isoutparam=True)
def text(text, bind=None, *args, **kwargs):
    """Create a SQL construct that is represented by a literal string.
    E.g.::
        t = text("SELECT * FROM users")
        result = connection.execute(t)
    The advantages :func:`text` provides over a plain string are
    backend-neutral support for bind parameters, per-statement
    execution options, as well as
    bind parameter and result-column typing behavior, allowing
    SQLAlchemy type constructs to play a role when executing
    a statement that is specified literally.
    Bind parameters are specified by name, using the format ``:name``.
    E.g.::
        t = text("SELECT * FROM users WHERE id=:user_id")
        result = connection.execute(t, user_id=12)
    To invoke SQLAlchemy typing logic for bind parameters, the
    ``bindparams`` list allows specification of :func:`bindparam`
    constructs which specify the type for a given name::
        t = text("SELECT id FROM users WHERE updated_at>:updated",
                    bindparams=[bindparam('updated', DateTime())]
                )
    Typing during result row processing is also an important concern.
    Result column types
    are specified using the ``typemap`` dictionary, where the keys
    match the names of columns.  These names are taken from what
    the DBAPI returns as ``cursor.description``::
        t = text("SELECT id, name FROM users",
                typemap={
                    'id':Integer,
                    'name':Unicode
                }
        )
    The :func:`text` construct is used internally for most cases when
    a literal string is specified for part of a larger query, such as
    within :func:`select()`, :func:`update()`,
    :func:`insert()` or :func:`delete()`.   In those cases, the same
    bind parameter syntax is applied::
        s = select([users.c.id, users.c.name]).where("id=:user_id")
        result = connection.execute(s, user_id=12)
    Using :func:`text` explicitly usually implies the construction
    of a full, standalone statement.   As such, SQLAlchemy refers
    to it as an :class:`.Executable` object, and it supports
    the :meth:`Executable.execution_options` method.  For example,
    a :func:`text` construct that should be subject to "autocommit"
    can be set explicitly so using the ``autocommit`` option::
        t = text("EXEC my_procedural_thing()").\\
                execution_options(autocommit=True)
    Note that SQLAlchemy's usual "autocommit" behavior applies to
    :func:`text` constructs - that is, statements which begin
    with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
    or a variety of other phrases specific to certain backends, will
    be eligible for autocommit if no transaction is in progress.
    :param text:
      the text of the SQL statement to be created.  use ``:<param>``
      to specify bind parameters; they will be compiled to their
      engine-specific format.
    :param autocommit:
      Deprecated.  Use .execution_options(autocommit=<True|False>)
      to set the autocommit option.
    :param bind:
      an optional connection or engine to be used for this text query.
    :param bindparams:
      a list of :func:`bindparam()` instances which can be used to define
      the types and/or initial values for the bind parameters within
      the textual statement; the keynames of the bindparams must match
      those within the text of the statement.  The types will be used
      for pre-processing on bind values.
    :param typemap:
      a dictionary mapping the names of columns represented in the
      columns clause of a ``SELECT`` statement  to type objects,
      which will be used to perform post-processing on columns within
      the result set.   This argument applies to any expression
      that returns result sets.
    """
    # thin constructor: all remaining positional/keyword arguments
    # (bindparams, typemap, autocommit) pass straight through to
    # TextClause, which performs the actual parsing of ':name' tokens.
    return TextClause(text, bind=bind, *args, **kwargs)
def over(func, partition_by=None, order_by=None):
    """Produce an OVER clause against a function.
    Used with aggregate or so-called "window" functions, on
    backends which support window functions.
    E.g.::
        from sqlalchemy import over
        over(func.row_number(), order_by='x')
    produces ``ROW_NUMBER() OVER(ORDER BY x)``.
    :param func: a :class:`.FunctionElement` construct, typically
     generated by :data:`~.expression.func`.
    :param partition_by: a column element or string, or a list
     of such, rendered as the PARTITION BY clause
     of the OVER construct.
    :param order_by: a column element or string, or a list
     of such, rendered as the ORDER BY clause
     of the OVER construct.
    Also available via the :meth:`.FunctionElement.over` method on
    the :data:`~.expression.func` construct itself.
    .. versionadded:: 0.7
    """
    window = Over(func, partition_by=partition_by, order_by=order_by)
    return window
def null():
    """Produce the :class:`Null` construct, which compiles to ``NULL``.
    """
    return Null()
def true():
    """Produce the :class:`True_` construct, which compiles to ``true``
    or the boolean equivalent for the target dialect.
    """
    return True_()
def false():
    """Produce the :class:`False_` construct, which compiles to ``false``
    or the boolean equivalent for the target dialect.
    """
    return False_()
class _FunctionGenerator(object):
    """Generate :class:`.Function` objects based on getattr calls."""
    def __init__(self, **opts):
        # accumulated dotted-name tokens, e.g. ['stats', 'yield_curve'].
        # note: double underscore means this attribute is name-mangled
        # to _FunctionGenerator__names.
        self.__names = []
        self.opts = opts
    def __getattr__(self, name):
        # passthru __ attributes; fixes pydoc
        if name.startswith('__'):
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
        elif name.endswith('_'):
            # a trailing underscore allows reserved words,
            # e.g. func.if_() renders as IF()
            name = name[0:-1]
        # each attribute access returns a new generator carrying the
        # extended name chain, so func.a.b.c composes incrementally
        f = _FunctionGenerator(**self.opts)
        f.__names = list(self.__names) + [name]
        return f
    def __call__(self, *c, **kwargs):
        o = self.opts.copy()
        o.update(kwargs)
        tokens = len(self.__names)
        if tokens == 2:
            package, fname = self.__names
        elif tokens == 1:
            package, fname = "_default", self.__names[0]
        else:
            package = None
        # prefer a registered "generic" function implementation, which
        # knows its return type; fall back to an anonymous Function
        if package is not None and \
            package in functions._registry and \
            fname in functions._registry[package]:
            func = functions._registry[package][fname]
            return func(*c, **o)
        return Function(self.__names[-1],
                        packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
func = _FunctionGenerator()
"""Generate SQL function expressions.
:data:`.func` is a special object instance which generates SQL
functions based on name-based attributes, e.g.::
>>> print func.count(1)
count(:param_1)
The element is a column-oriented SQL element like any other, and is
used in that way::
>>> print select([func.count(table.c.id)])
SELECT count(sometable.id) FROM sometable
Any name can be given to :data:`.func`. If the function name is unknown to
SQLAlchemy, it will be rendered exactly as is. For common SQL functions
which SQLAlchemy is aware of, the name may be interpreted as a *generic
function* which will be compiled appropriately to the target database::
>>> print func.current_timestamp()
CURRENT_TIMESTAMP
To call functions which are present in dot-separated packages,
specify them in the same manner::
>>> print func.stats.yield_curve(5, 10)
stats.yield_curve(:yield_curve_1, :yield_curve_2)
SQLAlchemy can be made aware of the return type of functions to enable
type-specific lexical and result-based behavior. For example, to ensure
that a string-based function returns a Unicode value and is similarly
treated as a string in expressions, specify
:class:`~sqlalchemy.types.Unicode` as the type:
>>> print func.my_string(u'hi', type_=Unicode) + ' ' + \
... func.my_string(u'there', type_=Unicode)
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a :data:`.func` call is usually an instance of
:class:`.Function`.
This object meets the "column" interface, including comparison and labeling
functions. The object can also be passed the :meth:`~.Connectable.execute`
method of a :class:`.Connection` or :class:`.Engine`, where it will be
wrapped inside of a SELECT statement first::
print connection.execute(func.current_timestamp()).scalar()
In a few exception cases, the :data:`.func` accessor
will redirect a name to a built-in expression such as :func:`.cast`
or :func:`.extract`, as these names have well-known meaning
but are not exactly the same as "functions" from a SQLAlchemy
perspective.
.. versionadded:: 0.8 :data:`.func` can return non-function expression
constructs for common quasi-functional names like :func:`.cast`
and :func:`.extract`.
Functions which are interpreted as "generic" functions know how to
calculate their return type automatically. For a listing of known generic
functions, see :ref:`generic_functions`.
"""
# "modifier" global - i.e. modifier.distinct
# produces unary modifier expressions, e.g. "DISTINCT x";
# group=False suppresses parenthesization of the argument list
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _truncated_label(unicode):
    """A unicode subclass used to identify symbolic names
    that may require truncation by the compiler."""
    def apply_map(self, map_):
        # symbolic names carry no anonymous %(...)s tokens;
        # return unchanged (contrast _anonymous_label.apply_map)
        return self
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
# (legacy alias; external compilers may reference the old name)
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
    """A unicode subclass used to identify anonymously
    generated names."""
    def __add__(self, other):
        # concatenation stays an _anonymous_label so that later
        # map substitution still applies to the combined string
        return _anonymous_label(
                    unicode(self) +
                    unicode(other))
    def __radd__(self, other):
        return _anonymous_label(
                    unicode(other) +
                    unicode(self))
    def apply_map(self, map_):
        # anonymous labels embed %(...)s tokens; substitute them here
        return self % map_
def _as_truncated(value):
    """Coerce ``value`` into a :class:`._truncated_label`.
    Values which are already :class:`._truncated_label` (including
    its subclass :class:`._anonymous_label`) are returned as-is.
    """
    if not isinstance(value, _truncated_label):
        value = _truncated_label(value)
    return value
def _string_or_unprintable(element):
    """Return ``element`` if it's a string, else its ``str()`` form,
    falling back to a placeholder when stringification itself fails.
    Used so that error-message rendering can never raise.
    """
    if isinstance(element, basestring):
        return element
    else:
        try:
            return str(element)
        except Exception:
            # was a bare except:, which would also swallow
            # KeyboardInterrupt / SystemExit; catch Exception only
            return "unprintable element %r" % element
def _clone(element, **kw):
    # default "clone" callable used by traversal utilities; extra
    # keyword arguments accepted by the traversal API are ignored here
    return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """Return those elements of ``a`` whose 'cloned' ancestry overlaps
    with that of ``b``.
    The returned set is in terms of the entities present within 'a'.
    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(
        element for element in a
        if overlap.intersection(element._cloned_set)
    )
def _cloned_difference(a, b):
    """Inverse of _cloned_intersection(): return those elements of ``a``
    whose 'cloned' ancestry does NOT overlap with that of ``b``.
    """
    all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(elem for elem in a
               if not all_overlap.intersection(elem._cloned_set))
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
    """Resolve ``element`` to a string key: strings pass through,
    ``__clause_element__`` adapters are unwrapped, then the ``.key``
    attribute is consulted; returns None when no key is available."""
    if isinstance(element, basestring):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    # getattr's default form absorbs AttributeError the same way the
    # original try/except did
    return getattr(element, 'key', None)
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
    """Coerce ``element`` into a clause construct suitable for textual
    SQL: existing constructs pass through, plain strings become
    :class:`.TextClause`, None/booleans become constant expressions."""
    if isinstance(element, Visitable):
        return element
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, basestring):
        return TextClause(unicode(element))
    if isinstance(element, (util.NoneType, bool)):
        return _const_expr(element)
    raise exc.ArgumentError(
        "SQL expression object or string expected."
    )
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
    """True when ``element`` is a plain value rather than a clause
    construct or a ``__clause_element__`` adapter."""
    return not (isinstance(element, Visitable) or
                hasattr(element, '__clause_element__'))
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
    """Unwrap ``element`` via ``__clause_element__`` if present and
    require the result to be a ColumnElement, raising ArgumentError
    otherwise; ``name`` identifies the argument in the error message."""
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, ColumnElement):
        return element
    raise exc.ArgumentError(
        "Column-based expression object expected for argument "
        "'%s'; got: '%s', type %s" % (name, element, type(element)))
def _literal_as_binds(element, name=None, type_=None):
    """Coerce a plain value into a unique bind parameter (or NULL for
    None), passing existing clause constructs through unchanged."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    if element is None:
        return null()
    return _BindParamClause(name, element, type_=type_, unique=True)
def _interpret_as_column_or_from(element):
    # existing clause constructs pass straight through
    if isinstance(element, Visitable):
        return element
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        # not inspectable; None and booleans become SQL constants
        if isinstance(element, (util.NoneType, bool)):
            return _const_expr(element)
    elif hasattr(insp, "selectable"):
        # e.g. an ORM entity which exposes a selectable
        return insp.selectable
    # fallback: render the stringified value as a literal column.
    # note this is reached both when insp is None (non-constant value)
    # and when insp exists but has no 'selectable'
    return literal_column(str(element))
def _interpret_as_from(element):
    # inspect() accommodates ORM entities and other objects which
    # expose a 'selectable'; plain strings become TextClause
    insp = inspection.inspect(element, raiseerr=False)
    if insp is None:
        if isinstance(element, basestring):
            return TextClause(unicode(element))
    elif hasattr(insp, "selectable"):
        return insp.selectable
    raise exc.ArgumentError("FROM expression expected")
def _const_expr(element):
    """Map None/False/True (or their SQL constant constructs) to the
    corresponding constant clause element."""
    if isinstance(element, (Null, False_, True_)):
        return element
    if element is None:
        return null()
    if element is False:
        return false()
    if element is True:
        return true()
    raise exc.ArgumentError(
        "Expected None, False, or True"
    )
def _type_from_args(args):
    # return the type of the first argument carrying a non-NULL type
    for a in args:
        if not isinstance(a.type, sqltypes.NullType):
            return a.type
    else:
        # NOTE(review): this returns the NullType *class*, not an
        # instance (the NULLTYPE singleton is used elsewhere for this
        # purpose); the type system tolerates classes, but confirm
        # whether sqltypes.NULLTYPE was intended here.
        return sqltypes.NullType
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative."""
    # per util.decorator convention, args[0] is the original 'self';
    # produce a copy via _generate(), apply fn to the copy, return it
    self = args[0]._generate()
    fn(self, *args[1:], **kw)
    return self
def is_column(col):
    """Return True when ``col`` is an instance of
    :class:`.ColumnElement`."""
    return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.
    """
    __visit_name__ = 'clause'
    # annotations dict; Annotated proxies override with populated values
    _annotations = {}
    # True for constructs which can be passed to execute() directly
    supports_execution = False
    # FROM objects this element refers to; overridden by subclasses
    _from_objects = []
    # Engine/Connection this element is bound to, if any
    bind = None
    # the element this one was cloned from, or None (see _clone())
    _is_clone_of = None
    is_selectable = False
    is_clause_element = True
    def _clone(self):
        """Create a shallow copy of this ClauseElement.
        This method may be used by a generative API.  Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # reset memoized attributes so they recompute against the copy
        ClauseElement._cloned_set._reset(c)
        ColumnElement.comparator._reset(c)
        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses.  the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self
        return c
    @property
    def _constructor(self):
        """return the 'constructor' for this ClauseElement.
        This is for the purposes for creating a new object of
        this type.   Usually, its just the element's __class__.
        However, the "Annotated" version of the object overrides
        to return the class of its proxied element.
        """
        return self.__class__
    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting all cloned ancestors of this
        ClauseElement.
        Includes this ClauseElement.  This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.
        """
        # walk the _is_clone_of chain back to the original element
        s = util.column_set()
        f = self
        while f is not None:
            s.add(f)
            f = f._is_clone_of
        return s
    def __getstate__(self):
        # drop the clone marker when pickling; it references another
        # element and is only meaningful within one expression graph
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d
    if util.jython:
        def __hash__(self):
            """Return a distinct hash code.
            ClauseElements may have special equality comparisons which
            makes us rely on them having unique hash codes for use in
            hash-based collections. Stock __hash__ doesn't guarantee
            unique values on platforms with moving GCs.
            """
            return id(self)
    def _annotate(self, values):
        """return a copy of this ClauseElement with annotations
        updated by the given dictionary.
        """
        return sqlutil.Annotated(self, values)
    def _with_annotations(self, values):
        """return a copy of this ClauseElement with annotations
        replaced by the given dictionary.
        """
        return sqlutil.Annotated(self, values)
    def _deannotate(self, values=None, clone=False):
        """return a copy of this :class:`.ClauseElement` with annotations
        removed.
        :param values: optional tuple of individual values
         to remove.
        """
        if clone:
            # clone is used when we are also copying
            # the expression for a deep deannotation
            return self._clone()
        else:
            # if no clone, since we have no annotations we return
            # self
            return self
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.
        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elements replaced.
        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::
          >>> clause = column('x') + bindparam('foo')
          >>> print clause.compile().params
          {'foo':None}
          >>> print clause.params({'foo':7}).compile().params
          {'foo':7}
        """
        return self._params(False, optionaldict, kwargs)
    def _params(self, unique, optionaldict, kwargs):
        # shared implementation for params()/unique_params(): merge the
        # optional positional dict into kwargs, then traverse a clone
        # assigning values onto matching bindparam elements
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")
        def visit_bindparam(bind):
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
                bind.required = False
            if unique:
                bind._convert_to_unique()
        return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.
        Subclasses should override the default behavior, which is a
        straight identity comparison.
        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`.ColumnElement`)
        """
        return self is other
    def _copy_internals(self, clone=_clone, **kw):
        """Reassign internal elements to be clones of themselves.
        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.
        The given clone function should be used, which may be applying
        additional transformations to the element (i.e. replacement
        traversal, cloned traversal, annotations).
        """
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`.ClauseElement`.
        This is used for visit traversal.
        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).
        """
        return []
    def self_group(self, against=None):
        """Apply a 'grouping' to this :class:`.ClauseElement`.
        This method is overridden by subclasses to return a
        "grouping" construct, i.e. parenthesis.   In particular
        it's used by "binary" expressions to provide a grouping
        around themselves when placed into a larger expression,
        as well as by :func:`.select` constructs when placed into
        the FROM clause of another :func:`.select`.  (Note that
        subqueries should be normally created using the
        :func:`.Select.alias` method, as many platforms require
        nested SELECT statements to be named).
        As expressions are composed together, the application of
        :meth:`self_group` is automatic - end-user code should never
        need to use this method directly.  Note that SQLAlchemy's
        clause constructs take operator precedence into account -
        so parenthesis might not be needed, for example, in
        an expression like ``x OR (y AND z)`` - AND takes precedence
        over OR.
        The base :meth:`self_group` method of :class:`.ClauseElement`
        just returns self.
        """
        return self
    def compile(self, bind=None, dialect=None, **kw):
        """Compile this SQL expression.
        The return value is a :class:`~.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.
        :param bind: An ``Engine`` or ``Connection`` from which a
            ``Compiled`` will be acquired. This argument takes precedence over
            this :class:`.ClauseElement`'s bound engine, if any.
        :param column_keys: Used for INSERT and UPDATE statements, a list of
            column names which should be present in the VALUES clause of the
            compiled statement. If ``None``, all columns from the target table
            object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
            will be acquired. This argument takes precedence over the `bind`
            argument as well as this :class:`.ClauseElement`'s bound engine,
            if any.
        :param inline: Used for INSERT statements, for a dialect which does
            not support inline retrieval of newly generated primary key
            columns, will force the expression used to create the new primary
            key value to be rendered inline within the INSERT statement's
            VALUES clause. This typically refers to Sequence execution but may
            also refer to any server-side default generation function
            associated with a primary key `Column`.
        """
        # resolve the dialect: explicit argument > bind's dialect >
        # this element's own bind > generic DefaultDialect
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                dialect = default.DefaultDialect()
        return self._compiler(dialect, bind=bind, **kw)
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # Py3K
        #return unicode(self.compile())
        # Py2K
        return unicode(self.compile()).encode('ascii', 'backslashreplace')
        # end Py2K
    def __and__(self, other):
        return and_(self, other)
    def __or__(self, other):
        return or_(self, other)
    def __invert__(self):
        return self._negate()
    def __nonzero__(self):
        # forbid "if clause:" - truthiness of a SQL expression is ambiguous
        raise TypeError("Boolean value of this clause is not defined")
    def _negate(self):
        # prefer a precomputed negation if the construct supplies one,
        # otherwise wrap in NOT (operators.inv)
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return UnaryExpression(
                        self.self_group(against=operators.inv),
                        operator=operators.inv,
                        negate=None)
    def __repr__(self):
        friendly = getattr(self, 'description', None)
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
# register with the inspection system: inspect() on a ClauseElement
# returns the element itself
inspection._self_inspects(ClauseElement)
class Immutable(object):
    """Mixin marking a ClauseElement as 'immutable' when expressions
    are cloned: parameter replacement is disallowed, and cloning
    returns the object itself rather than a copy."""
    def unique_params(self, *args, **kw):
        raise NotImplementedError("Immutable objects do not support copying")
    def params(self, *args, **kw):
        raise NotImplementedError("Immutable objects do not support copying")
    def _clone(self):
        # immutable elements are shared, never copied
        return self
class _DefaultColumnComparator(operators.ColumnOperators):
    """Defines comparison and math operations.
    See :class:`.ColumnOperators` and :class:`.Operators` for descriptions
    of all operations.
    """
    @util.memoized_property
    def type(self):
        # proxy the type of the underlying expression, memoized
        return self.expr.type
    def operate(self, op, *other, **kwargs):
        # dispatch via the 'operators' table below: o[0] is the
        # implementation method, o[1:] are extra args (e.g. the
        # negation operator) appended to the call
        o = self.operators[op.__name__]
        return o[0](self, self.expr, op, *(other + o[1:]), **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        # same dispatch as operate(), with operands reversed
        o = self.operators[op.__name__]
        return o[0](self, self.expr, op, other, reverse=True, *o[1:], **kwargs)
    def _adapt_expression(self, op, other_comparator):
        """evaluate the return type of <self> <op> <othertype>,
        and apply any adaptations to the given operator.
        This method determines the type of a resulting binary expression
        given two source types and an operator.   For example, two
        :class:`.Column` objects, both of the type :class:`.Integer`, will
        produce a :class:`.BinaryExpression` that also has the type
        :class:`.Integer` when compared via the addition (``+``) operator.
        However, using the addition operator with an :class:`.Integer`
        and a :class:`.Date` object will produce a :class:`.Date`, assuming
        "days delta" behavior by the database (in reality, most databases
        other than Postgresql don't accept this particular operation).
        The method returns a tuple of the form <operator>, <type>.
        The resulting operator and type will be those applied to the
        resulting :class:`.BinaryExpression` as the final operator and the
        right-hand side of the expression.
        Note that only a subset of operators make usage of
        :meth:`._adapt_expression`,
        including math operators and user-defined operators, but not
        boolean comparison or special SQL keywords like MATCH or BETWEEN.
        """
        return op, other_comparator.type
    def _boolean_compare(self, expr, op, obj, negate=None, reverse=False,
                         _python_is_types=(util.NoneType, bool),
                         **kwargs):
        if isinstance(obj, _python_is_types + (Null, True_, False_)):
            # allow x ==/!= True/False to be treated as a literal.
            # this comes out to "== / != true/false" or "1/0" if those
            # constants aren't supported and works on all platforms
            if op in (operators.eq, operators.ne) and \
                    isinstance(obj, (bool, True_, False_)):
                return BinaryExpression(expr,
                                obj,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
            else:
                # all other None/True/False uses IS, IS NOT
                if op in (operators.eq, operators.is_):
                    return BinaryExpression(expr, _const_expr(obj),
                                            operators.is_,
                                            negate=operators.isnot)
                elif op in (operators.ne, operators.isnot):
                    return BinaryExpression(expr, _const_expr(obj),
                                            operators.isnot,
                                            negate=operators.is_)
                else:
                    raise exc.ArgumentError(
                        "Only '=', '!=', 'is_()', 'isnot()' operators can "
                        "be used with None/True/False")
        else:
            # coerce plain values into bind parameters
            obj = self._check_literal(expr, op, obj)
        if reverse:
            return BinaryExpression(obj,
                            expr,
                            op,
                            type_=sqltypes.BOOLEANTYPE,
                            negate=negate, modifiers=kwargs)
        else:
            return BinaryExpression(expr,
                            obj,
                            op,
                            type_=sqltypes.BOOLEANTYPE,
                            negate=negate, modifiers=kwargs)
    def _binary_operate(self, expr, op, obj, reverse=False, result_type=None,
                        **kw):
        # math/string operators: coerce the operand, orient the sides,
        # and let _adapt_expression() determine the result type
        obj = self._check_literal(expr, op, obj)
        if reverse:
            left, right = obj, expr
        else:
            left, right = expr, obj
        if result_type is None:
            op, result_type = left.comparator._adapt_expression(
                op, right.comparator)
        return BinaryExpression(left, right, op, type_=result_type)
    def _scalar(self, expr, op, fn, **kw):
        # modifiers such as desc()/asc(): simply apply fn to the expression
        return fn(expr)
    def _in_impl(self, expr, op, seq_or_selectable, negate_op, **kw):
        seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
        if isinstance(seq_or_selectable, ScalarSelect):
            return self._boolean_compare(expr, op, seq_or_selectable,
                                  negate=negate_op)
        elif isinstance(seq_or_selectable, SelectBase):
            # TODO: if we ever want to support (x, y, z) IN (select x,
            # y, z from table), we would need a multi-column version of
            # as_scalar() to produce a multi- column selectable that
            # does not export itself as a FROM clause
            return self._boolean_compare(
                expr, op, seq_or_selectable.as_scalar(),
                negate=negate_op, **kw)
        elif isinstance(seq_or_selectable, (Selectable, TextClause)):
            return self._boolean_compare(expr, op, seq_or_selectable,
                                  negate=negate_op, **kw)
        # Handle non selectable arguments as sequences
        args = []
        for o in seq_or_selectable:
            if not _is_literal(o):
                if not isinstance(o, ColumnOperators):
                    raise exc.InvalidRequestError('in() function accept'
                            's either a list of non-selectable values, '
                            'or a selectable: %r' % o)
            elif o is None:
                o = null()
            else:
                o = expr._bind_param(op, o)
            args.append(o)
        if len(args) == 0:
            # Special case handling for empty IN's, behave like
            # comparison against zero row selectable.  We use != to
            # build the contradiction as it handles NULL values
            # appropriately, i.e. "not (x IN ())" should not return NULL
            # values for x.
            util.warn('The IN-predicate on "%s" was invoked with an '
                      'empty sequence. This results in a '
                      'contradiction, which nonetheless can be '
                      'expensive to evaluate.  Consider alternative '
                      'strategies for improved performance.' % expr)
            return expr != expr
        return self._boolean_compare(expr, op,
                              ClauseList(*args).self_group(against=op),
                              negate=negate_op)
    def _unsupported_impl(self, expr, op, *arg, **kw):
        raise NotImplementedError("Operator '%s' is not supported on "
                            "this expression" % op.__name__)
    def _neg_impl(self, expr, op, **kw):
        """See :meth:`.ColumnOperators.__neg__`."""
        return UnaryExpression(expr, operator=operators.neg)
    def _match_impl(self, expr, op, other, **kw):
        """See :meth:`.ColumnOperators.match`."""
        return self._boolean_compare(expr, operators.match_op,
                              self._check_literal(expr, operators.match_op,
                              other))
    def _distinct_impl(self, expr, op, **kw):
        """See :meth:`.ColumnOperators.distinct`."""
        return UnaryExpression(expr, operator=operators.distinct_op,
                                type_=expr.type)
    def _between_impl(self, expr, op, cleft, cright, **kw):
        """See :meth:`.ColumnOperators.between`."""
        return BinaryExpression(
                expr,
                ClauseList(
                    self._check_literal(expr, operators.and_, cleft),
                    self._check_literal(expr, operators.and_, cright),
                    operator=operators.and_,
                    group=False),
                operators.between_op)
    def _collate_impl(self, expr, op, other, **kw):
        return collate(expr, other)
    # a mapping of operators with the method they use, along with
    # their negated operator for comparison operators
    operators = {
        "add": (_binary_operate,),
        "mul": (_binary_operate,),
        "sub": (_binary_operate,),
        "div": (_binary_operate,),
        "mod": (_binary_operate,),
        "truediv": (_binary_operate,),
        "custom_op": (_binary_operate,),
        "concat_op": (_binary_operate,),
        "lt": (_boolean_compare, operators.ge),
        "le": (_boolean_compare, operators.gt),
        "ne": (_boolean_compare, operators.eq),
        "gt": (_boolean_compare, operators.le),
        "ge": (_boolean_compare, operators.lt),
        "eq": (_boolean_compare, operators.ne),
        "like_op": (_boolean_compare, operators.notlike_op),
        "ilike_op": (_boolean_compare, operators.notilike_op),
        "notlike_op": (_boolean_compare, operators.like_op),
        "notilike_op": (_boolean_compare, operators.ilike_op),
        "contains_op": (_boolean_compare, operators.notcontains_op),
        "startswith_op": (_boolean_compare, operators.notstartswith_op),
        "endswith_op": (_boolean_compare, operators.notendswith_op),
        "desc_op": (_scalar, desc),
        "asc_op": (_scalar, asc),
        "nullsfirst_op": (_scalar, nullsfirst),
        "nullslast_op": (_scalar, nullslast),
        "in_op": (_in_impl, operators.notin_op),
        "notin_op": (_in_impl, operators.in_op),
        "is_": (_boolean_compare, operators.is_),
        "isnot": (_boolean_compare, operators.isnot),
        "collate": (_collate_impl,),
        "match_op": (_match_impl,),
        "distinct_op": (_distinct_impl,),
        "between_op": (_between_impl, ),
        "neg": (_neg_impl,),
        "getitem": (_unsupported_impl,),
        "lshift": (_unsupported_impl,),
        "rshift": (_unsupported_impl,),
    }
    def _check_literal(self, expr, operator, other):
        # coerce the right-hand operand: untyped bindparams inherit the
        # left side's type; clause-element adapters and comparators are
        # unwrapped; selects become scalar; plain values become binds
        if isinstance(other, (ColumnElement, TextClause)):
            if isinstance(other, BindParameter) and \
                isinstance(other.type, sqltypes.NullType):
                # TODO: perhaps we should not mutate the incoming
                # bindparam() here and instead make a copy of it.
                # this might be the only place that we're mutating
                # an incoming construct.
                other.type = expr.type
            return other
        elif hasattr(other, '__clause_element__'):
            other = other.__clause_element__()
        elif isinstance(other, sqltypes.TypeEngine.Comparator):
            other = other.expr
        if isinstance(other, (SelectBase, Alias)):
            return other.as_scalar()
        elif not isinstance(other, (ColumnElement, TextClause)):
            return expr._bind_param(operator, other)
        else:
            return other
class ColumnElement(ClauseElement, ColumnOperators):
    """Represent a column-oriented SQL expression suitable for usage in the
    "columns" clause, WHERE clause etc. of a statement.

    While the most familiar kind of :class:`.ColumnElement` is the
    :class:`.Column` object, :class:`.ColumnElement` serves as the basis
    for any unit that may be present in a SQL expression, including
    the expressions themselves, SQL functions, bound parameters,
    literal expressions, keywords such as ``NULL``, etc.
    :class:`.ColumnElement` is the ultimate base class for all such elements.

    A :class:`.ColumnElement` provides the ability to generate new
    :class:`.ColumnElement` objects using Python expressions.  This means
    that Python operators such as ``==``, ``!=`` and ``<`` are overloaded
    to mimic SQL operations, and allow the instantiation of further
    :class:`.ColumnElement` instances which are composed from other, more
    fundamental :class:`.ColumnElement` objects.  For example, two
    :class:`.ColumnClause` objects can be added together with the addition
    operator ``+`` to produce a :class:`.BinaryExpression`.  Both
    :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
    of :class:`.ColumnElement`::

        >>> from sqlalchemy.sql import column
        >>> column('a') + column('b')
        <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
        >>> print column('a') + column('b')
        a + b

    :class:`.ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`.ColumnElement` may be associated with
    a :class:`.Selectable` which was derived from another :class:`.Selectable`.
    An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a
    :class:`~sqlalchemy.schema.Table`.  For the ambitious, an in-depth
    discussion of this concept can be found at
    `Expression Transformations <http://techspot.zzzeek.org/2008/01/23/expression-transformations/>`_.

    """
    __visit_name__ = 'column'

    # class-level defaults; subclasses such as Column override these
    primary_key = False
    foreign_keys = []
    quote = None
    _label = None
    _key_label = None
    _alt_names = ()

    @util.memoized_property
    def type(self):
        # default type when a subclass does not set one
        return sqltypes.NULLTYPE

    @util.memoized_property
    def comparator(self):
        # per-type comparator implementing the SQL operator behavior
        return self.type.comparator_factory(self)

    def __getattr__(self, key):
        # delegate unknown attributes to the type's comparator, so that
        # type-specific operators (e.g. String.contains) are available
        try:
            return getattr(self.comparator, key)
        except AttributeError:
            raise AttributeError(
                'Neither %r object nor %r object has an attribute %r' % (
                    type(self).__name__,
                    type(self.comparator).__name__,
                    key)
            )

    def operate(self, op, *other, **kwargs):
        # route operator protocol calls through the comparator
        return op(self.comparator, *other, **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        # reflected operator: operand order is swapped
        return op(other, self.comparator, **kwargs)

    def _bind_param(self, operator, obj):
        # wrap a literal Python value as an anonymous bound parameter,
        # typed relative to this expression
        return BindParameter(None, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)

    @property
    def expression(self):
        """Return a column expression.

        Part of the inspection interface; returns self.

        """
        return self

    @property
    def _select_iterable(self):
        return (self, )

    @util.memoized_property
    def base_columns(self):
        # the "root" (non-proxy) columns at the bottom of the proxy chain
        return util.column_set(c for c in self.proxy_set
                               if not hasattr(c, '_proxies'))

    @util.memoized_property
    def proxy_set(self):
        # transitive closure of this column plus everything it proxies for
        s = util.column_set([self])
        if hasattr(self, '_proxies'):
            for c in self._proxies:
                s.update(c.proxy_set)
        return s

    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`.ColumnElement`
        has a common ancestor to this :class:`.ColumnElement`."""

        return bool(self.proxy_set.intersection(othercolumn.proxy_set))

    def _compare_name_for_result(self, other):
        """Return True if the given column element compares to this one
        when targeting within a result row."""

        return hasattr(other, 'name') and hasattr(self, 'name') and \
            other.name == self.name

    def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
        """Create a new :class:`.ColumnElement` representing this
        :class:`.ColumnElement` as it appears in the select list of a
        descending selectable.

        """
        if name is None:
            # anonymous expression: label it, but key by its string form
            name = self.anon_label
            key = str(self)
        else:
            key = name
        co = ColumnClause(_as_truncated(name) if name_is_truncatable else name,
                          selectable,
                          type_=getattr(self,
                                        'type', None))
        co._proxies = [self]
        if selectable._is_clone_of is not None:
            # link the proxy back to the same column on the original
            # (pre-clone) selectable
            co._is_clone_of = \
                selectable._is_clone_of.columns.get(key)
        selectable._columns[key] = co
        return co

    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.

        Special arguments understood:

        :param use_proxies: when True, consider two columns that
          share a common base column as equivalent (i.e. shares_lineage())

        :param equivalents: a dictionary of columns as keys mapped to sets
          of columns. If the given "other" column is present in this
          dictionary, if any of the columns in the corresponding set() pass the
          comparison test, the result is True. This is used to expand the
          comparison to other columns that may be known to be equivalent to
          this one via foreign key or other criterion.

        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)

        # for/else: returns False only when no candidate matched
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif hash(oth) == hash(self):
                return True
        else:
            return False

    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.

        This is a shortcut to the :func:`~.expression.label` function.

        if 'name' is None, an anonymous label name will be generated.

        """
        return Label(name, self, self.type)

    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.

        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.

        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.

        """
        return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
                                'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary that stores a list of ColumnElement
    instances.

    Overrides the ``__eq__()`` method to produce SQL clauses between
    sets of correlated columns.

    Internally maintains two structures that must stay in sync:
    ``_data`` (ordered key -> column mapping) and ``_all_cols``
    (a column set used for identity-based membership tests).

    """

    def __init__(self, *cols):
        super(ColumnCollection, self).__init__()
        self._data.update((c.key, c) for c in cols)
        # assign via __dict__ since __setattr__ is disabled below
        self.__dict__['_all_cols'] = util.column_set(self)

    def __str__(self):
        return repr([str(c) for c in self])

    def replace(self, column):
        """add the given column to this collection, removing unaliased
        versions of this column  as well as existing columns with the
        same key.

            e.g.::

                t = Table('sometable', metadata, Column('col1', Integer))
                t.columns.replace(Column('col1', Integer, key='columnone'))

            will remove the original 'col1' from the collection, and add
            the new column under the name 'columnname'.

        Used by schema.Column to override columns during table reflection.

        """
        if column.name in self and column.key != column.name:
            # an unaliased column of the same name exists; drop it
            other = self[column.name]
            if other.name == other.key:
                del self._data[other.name]
                self._all_cols.remove(other)
        if column.key in self._data:
            # a column under the same key is being displaced
            self._all_cols.remove(self._data[column.key])
        self._all_cols.add(column)
        self._data[column.key] = column

    def add(self, column):
        """Add a column to this collection.

        The key attribute of the column will be used as the hash key
        for this dictionary.

        """
        self[column.key] = column

    def __delitem__(self, key):
        # deletion by key is unsupported; use remove()/clear()
        raise NotImplementedError()

    def __setattr__(self, key, object):
        # attribute assignment is unsupported; columns go through
        # __setitem__ (parameter name shadows the builtin ``object``,
        # retained for compatibility)
        raise NotImplementedError()

    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements
            # which have conflicting column names in their exported
            # columns collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn('Column %r on table %r being replaced by '
                          '%r, which has the same key.  Consider '
                          'use_labels for select() statements.' % (key,
                          getattr(existing, 'table', None), value))
            self._all_cols.remove(existing)
        # pop out memoized proxy_set as this
        # operation may very well be occurring
        # in a _make_proxy operation
        ColumnElement.proxy_set._reset(value)
        self._all_cols.add(value)
        self._data[key] = value

    def clear(self):
        self._data.clear()
        self._all_cols.clear()

    def remove(self, column):
        del self._data[column.key]
        self._all_cols.remove(column)

    def update(self, value):
        # value is an iterable of (key, column) pairs; rebuild the set
        # from scratch to stay consistent with _data
        self._data.update(value)
        self._all_cols.clear()
        self._all_cols.update(self._data.values())

    def extend(self, iter):
        self.update((c.key, c) for c in iter)

    # collections defining __eq__ without a compatible __hash__ are
    # explicitly unhashable
    __hash__ = None

    def __eq__(self, other):
        # produce AND(col_a == col_b, ...) for lineage-sharing pairs
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c == local)
        return and_(*l)

    def __contains__(self, other):
        # membership here is by string key; use contains_column() for
        # column-object membership
        if not isinstance(other, basestring):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)

    def __setstate__(self, state):
        # restore both structures on unpickle
        self.__dict__['_data'] = state['_data']
        self.__dict__['_all_cols'] = util.column_set(self._data.values())

    def contains_column(self, col):
        # this has to be done via set() membership
        return col in self._all_cols

    def as_immutable(self):
        return ImmutableColumnCollection(self._data, self._all_cols)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
    """A read-only view of a :class:`.ColumnCollection`.

    Shares ``data`` and ``colset`` with the originating collection, so
    it reflects subsequent changes made through the mutable parent.
    """
    def __init__(self, data, colset):
        util.ImmutableProperties.__init__(self, data)
        # bypass the disabled __setattr__ via direct __dict__ access
        self.__dict__['_all_cols'] = colset

    # mutating operations raise via the _immutable stub
    extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
    """An ordered set of columns providing the same correspondence-aware
    ``__eq__`` behavior as :class:`.ColumnCollection`."""

    def contains_column(self, col):
        """Column-object membership test, mirroring ColumnCollection."""
        return col in self

    def extend(self, cols):
        """Add every column in *cols*, preserving iteration order."""
        for member in cols:
            self.add(member)

    def __add__(self, other):
        """Concatenation yields a plain list of both collections."""
        return list(self) + list(other)

    def __eq__(self, other):
        """Produce an AND of equality clauses between lineage-sharing
        column pairs of the two collections."""
        pairs = [
            c == local
            for c in other
            for local in self
            if c.shares_lineage(local)
        ]
        return and_(*pairs)

    def __hash__(self):
        """Hash on the ordered tuple of members."""
        return hash(tuple(self))
class Selectable(ClauseElement):
    """mark a class as being selectable"""
    __visit_name__ = 'selectable'

    # flag consulted elsewhere in the package to recognize selectable
    # constructs without isinstance() checks
    is_selectable = True

    @property
    def selectable(self):
        # part of the inspection interface; a Selectable is its own
        # selectable
        return self
class FromClause(Selectable):
    """Represent an element that can be used within the ``FROM``
    clause of a ``SELECT`` statement.

    The most common forms of :class:`.FromClause` are the
    :class:`.Table` and the :func:`.select` constructs.  Key
    features common to all :class:`.FromClause` objects include:

    * a :attr:`.c` collection, which provides per-name access to a collection
      of :class:`.ColumnElement` objects.
    * a :attr:`.primary_key` attribute, which is a collection of all those
      :class:`.ColumnElement` objects that indicate the ``primary_key`` flag.
    * Methods to generate various derivations of a "from" clause, including
      :meth:`.FromClause.alias`, :meth:`.FromClause.join`,
      :meth:`.FromClause.select`.

    """
    __visit_name__ = 'fromclause'
    named_with_column = False
    _hide_froms = []
    quote = None
    schema = None

    # group-expirable memoization: expiring "_columns" also expires the
    # primary_key / foreign_keys memoizations (see _reset_exported)
    _memoized_property = util.group_expirable_memoized_property(["_columns"])

    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this
        :class:`.FromClause`."""

        # count over the first primary-key column when one exists,
        # else over the first column
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
            [func.count(col).label('tbl_row_count')],
            whereclause,
            from_obj=[self],
            **params)

    def select(self, whereclause=None, **params):
        """return a SELECT of this :class:`.FromClause`.

        .. seealso::

            :func:`~.sql.expression.select` - general purpose
            method which allows for arbitrary column lists.

        """
        return select([self], whereclause, **params)

    def join(self, right, onclause=None, isouter=False):
        """return a join of this :class:`.FromClause` against another
        :class:`.FromClause`."""

        return Join(self, right, onclause, isouter)

    def outerjoin(self, right, onclause=None):
        """return an outer join of this :class:`.FromClause` against another
        :class:`.FromClause`."""

        return Join(self, right, onclause, True)

    def alias(self, name=None):
        """return an alias of this :class:`.FromClause`.

        This is shorthand for calling::

            from sqlalchemy import alias
            a = alias(self, name=name)

        See :func:`~.expression.alias` for details.

        """
        return Alias(self, name)

    def is_derived_from(self, fromclause):
        """Return True if this FromClause is 'derived' from the given
        FromClause.

        An example would be an Alias of a Table is derived from that Table.

        """
        # this is essentially an "identity" check in the base class.
        # Other constructs override this to traverse through
        # contained elements.
        return fromclause in self._cloned_set

    def _is_lexical_equivalent(self, other):
        """Return True if this FromClause and the other represent
        the same lexical identity.

        This tests if either one is a copy of the other, or
        if they are the same via annotation identity.

        """
        return self._cloned_set.intersection(other._cloned_set)

    def replace_selectable(self, old, alias):
        """replace all occurrences of FromClause 'old' with the given Alias
        object, returning a copy of this :class:`.FromClause`.

        """
        return sqlutil.ClauseAdapter(alias).traverse(self)

    def correspond_on_equivalents(self, column, equivalents):
        """Return corresponding_column for the given column, or if None
        search for a match in the given dictionary.

        """
        col = self.corresponding_column(column, require_embedded=True)
        # NOTE(review): ``col`` is None inside this branch, so the
        # membership test is ``None in equivalents``; possibly
        # ``column in equivalents`` was intended -- verify.
        if col is None and col in equivalents:
            for equiv in equivalents[col]:
                nc = self.corresponding_column(equiv, require_embedded=True)
                if nc:
                    return nc
        return col

    def corresponding_column(self, column, require_embedded=False):
        """Given a :class:`.ColumnElement`, return the exported
        :class:`.ColumnElement` object from this :class:`.Selectable`
        which corresponds to that original
        :class:`~sqlalchemy.schema.Column` via a common ancestor
        column.

        :param column: the target :class:`.ColumnElement` to be matched

        :param require_embedded: only return corresponding columns for
         the given :class:`.ColumnElement`, if the given
         :class:`.ColumnElement` is actually present within a sub-element
         of this :class:`.FromClause`.  Normally the column will match if
         it merely shares a common ancestor with one of the exported
         columns of this :class:`.FromClause`.

        """
        def embedded(expanded_proxy_set, target_set):
            # True if every member of target_set is reachable from
            # expanded_proxy_set after expanding clones
            for t in target_set.difference(expanded_proxy_set):
                if not set(_expand_cloned([t])
                           ).intersection(expanded_proxy_set):
                    return False
            return True

        # don't dig around if the column is locally present
        if self.c.contains_column(column):
            return column
        col, intersect = None, None
        target_set = column.proxy_set
        cols = self.c
        for c in cols:
            expanded_proxy_set = set(_expand_cloned(c.proxy_set))
            i = target_set.intersection(expanded_proxy_set)
            if i and (not require_embedded
                      or embedded(expanded_proxy_set, target_set)):
                if col is None:

                    # no corresponding column yet, pick this one.

                    col, intersect = c, i
                elif len(i) > len(intersect):

                    # 'c' has a larger field of correspondence than
                    # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
                    # matches a1.c.x->table.c.x better than
                    # selectable.c.x->table.c.x does.

                    col, intersect = c, i
                elif i == intersect:

                    # they have the same field of correspondence. see
                    # which proxy_set has fewer columns in it, which
                    # indicates a closer relationship with the root
                    # column. Also take into account the "weight"
                    # attribute which CompoundSelect() uses to give
                    # higher precedence to columns based on vertical
                    # position in the compound statement, and discard
                    # columns that have no reference to the target
                    # column (also occurs with CompoundSelect)

                    col_distance = util.reduce(operator.add,
                            [sc._annotations.get('weight', 1) for sc in
                             col.proxy_set if sc.shares_lineage(column)])
                    c_distance = util.reduce(operator.add,
                            [sc._annotations.get('weight', 1) for sc in
                             c.proxy_set if sc.shares_lineage(column)])
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col

    @property
    def description(self):
        """a brief description of this FromClause.

        Used primarily for error message formatting.

        """
        return getattr(self, 'name', self.__class__.__name__ + " object")

    def _reset_exported(self):
        """delete memoized collections when a FromClause is cloned."""

        self._memoized_property.expire_instance(self)

    @_memoized_property
    def columns(self):
        """A named-based collection of :class:`.ColumnElement` objects
        maintained by this :class:`.FromClause`.

        The :attr:`.columns`, or :attr:`.c` collection, is the gateway
        to the construction of SQL expressions using table-bound or
        other selectable-bound columns::

            select([mytable]).where(mytable.c.somecolumn == 5)

        """

        if '_columns' not in self.__dict__:
            self._init_collections()
            self._populate_column_collection()
        return self._columns.as_immutable()

    @_memoized_property
    def primary_key(self):
        """Return the collection of Column objects which comprise the
        primary key of this FromClause."""

        # _init_collections assigns self.primary_key as an instance
        # attribute; the re-read below returns that populated collection,
        # which the memoization then caches
        self._init_collections()
        self._populate_column_collection()
        return self.primary_key

    @_memoized_property
    def foreign_keys(self):
        """Return the collection of ForeignKey objects which this
        FromClause references."""

        # same instance-attribute shadowing scheme as primary_key above
        self._init_collections()
        self._populate_column_collection()
        return self.foreign_keys

    c = property(attrgetter('columns'),
                 doc="An alias for the :attr:`.columns` attribute.")
    _select_iterable = property(attrgetter('columns'))

    def _init_collections(self):
        # must only run once per (fresh or expired) instance
        assert '_columns' not in self.__dict__
        assert 'primary_key' not in self.__dict__
        assert 'foreign_keys' not in self.__dict__

        self._columns = ColumnCollection()
        self.primary_key = ColumnSet()
        self.foreign_keys = set()

    @property
    def _cols_populated(self):
        return '_columns' in self.__dict__

    def _populate_column_collection(self):
        """Called on subclasses to establish the .c collection.

        Each implementation has a different way of establishing
        this collection.

        """

    def _refresh_for_new_column(self, column):
        """Given a column added to the .c collection of an underlying
        selectable, produce the local version of that column, assuming this
        selectable ultimately should proxy this column.

        this is used to "ping" a derived selectable to add a new column
        to its .c. collection when a Column has been added to one of the
        Table objects it ultimtely derives from.

        If the given selectable hasn't populated it's .c. collection yet,
        it should at least pass on the message to the contained selectables,
        but it will return None.

        This method is currently used by Declarative to allow Table
        columns to be added to a partially constructed inheritance
        mapping that may have already produced joins.  The method
        isn't public right now, as the full span of implications
        and/or caveats aren't yet clear.

        It's also possible that this functionality could be invoked by
        default via an event, which would require that
        selectables maintain a weak referencing collection of all
        derivations.

        """
        if not self._cols_populated:
            return None
        elif column.key in self.columns and self.columns[column.key] is column:
            return column
        else:
            return None
class BindParameter(ColumnElement):
    """Represent a bind parameter.

    Public constructor is the :func:`bindparam()` function.

    """

    __visit_name__ = 'bindparam'
    quote = None

    # set True for bindparams generated by INSERT/UPDATE "crud" processing
    _is_crud = False

    def __init__(self, key, value, type_=None, unique=False,
                 callable_=None,
                 isoutparam=False, required=False,
                 quote=None,
                 _compared_to_operator=None,
                 _compared_to_type=None):
        """Construct a BindParameter.

        :param key:
          the key for this bind param.  Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`BindParameter` objects exist with the same
          key, or if its length is too long and truncation is
          required.

        :param value:
          Initial value for this bind param.  This value may be
          overridden by the dictionary of parameters sent to statement
          compilation/execution.

        :param callable\_:
          A callable function that takes the place of "value".  The function
          will be called at statement execution time to determine the
          ultimate value.   Used for scenarios where the actual bind
          value cannot be determined at the point at which the clause
          construct is created, but embedded bind values are still desirable.

        :param type\_:
          A ``TypeEngine`` object that will be used to pre-process the
          value corresponding to this :class:`BindParameter` at
          execution time.

        :param unique:
          if True, the key name of this BindParamClause will be
          modified if another :class:`BindParameter` of the same name
          already has been located within the containing
          :class:`.ClauseElement`.

        :param quote:
          True if this parameter name requires quoting and is not
          currently known as a SQLAlchemy reserved word; this currently
          only applies to the Oracle backend.

        :param required:
          a value is required at execution time.

        :param isoutparam:
          if True, the parameter should be treated like a stored procedure
          "OUT" parameter.

        """
        if unique:
            # anonymize the key so that equal-named bindparams in the
            # same statement do not collide
            self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
                                        or 'param'))
        else:
            self.key = key or _anonymous_label('%%(%d param)s'
                                               % id(self))

        # identifying key that won't change across
        # clones, used to identify the bind's logical
        # identity
        self._identifying_key = self.key

        # key that was passed in the first place, used to
        # generate new keys
        self._orig_key = key or 'param'

        self.unique = unique
        self.value = value
        self.callable = callable_
        self.isoutparam = isoutparam
        self.required = required
        self.quote = quote
        if type_ is None:
            if _compared_to_type is not None:
                # derive the type from the expression this bind is being
                # compared against
                self.type = \
                    _compared_to_type.coerce_compared_value(
                        _compared_to_operator, value)
            else:
                # fall back to a type inferred from the Python value
                self.type = sqltypes._type_map.get(type(value),
                                                   sqltypes.NULLTYPE)
        elif isinstance(type_, type):
            # a type class was passed; instantiate it
            self.type = type_()
        else:
            self.type = type_

    @property
    def effective_value(self):
        """Return the value of this bound parameter,
        taking into account if the ``callable`` parameter
        was set.

        The ``callable`` value will be evaluated
        and returned if present, else ``value``.

        """
        if self.callable:
            return self.callable()
        else:
            return self.value

    def _clone(self):
        c = ClauseElement._clone(self)
        if self.unique:
            # a unique bind gets a fresh anonymous key per clone
            c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
                                     or 'param'))
        return c

    def _convert_to_unique(self):
        # promote a non-unique bind to unique, re-keying it
        if not self.unique:
            self.unique = True
            self.key = _anonymous_label('%%(%d %s)s' % (id(self),
                                        self._orig_key or 'param'))

    def compare(self, other, **kw):
        """Compare this :class:`BindParameter` to the given
        clause."""

        return isinstance(other, BindParameter) \
            and self.type._compare_type_affinity(other.type) \
            and self.value == other.value

    def __getstate__(self):
        """execute a deferred value for serialization purposes."""

        d = self.__dict__.copy()
        v = self.value
        if self.callable:
            # resolve the callable now; callables don't pickle
            v = self.callable()
            d['callable'] = None
        d['value'] = v
        return d

    def __repr__(self):
        return 'BindParameter(%r, %r, type_=%r)' % (self.key,
                                                    self.value, self.type)
class TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.

    Used by the ``Case`` statement.

    """

    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # parameter name shadows the builtin ``type``; retained for
        # backwards compatibility
        self.type = type
class Generative(object):
    """Mixin allowing a ClauseElement to produce modified copies of
    itself, as used by the @_generative decorator.
    """

    def _generate(self):
        """Return a shallow copy of this object: same class, copied
        attribute dictionary, ``__init__`` deliberately bypassed."""
        cls = self.__class__
        duplicate = cls.__new__(cls)
        duplicate.__dict__ = dict(self.__dict__)
        return duplicate
class Executable(Generative):
    """Mark a ClauseElement as supporting execution.

    :class:`.Executable` is a superclass for all "statement" types
    of objects, including :func:`select`, :func:`delete`, :func:`update`,
    :func:`insert`, :func:`text`.

    """

    supports_execution = True
    _execution_options = util.immutabledict()
    _bind = None

    @_generative
    def execution_options(self, **kw):
        """ Set non-SQL options for the statement which take effect during
        execution.

        Execution options can be set on a per-statement or
        per :class:`.Connection` basis.   Additionally, the
        :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
        access to execution options which they in turn configure upon
        connections.

        The :meth:`execution_options` method is generative.  A new
        instance of this statement is returned that contains the options::

            statement = select([table.c.x, table.c.y])
            statement = statement.execution_options(autocommit=True)

        Note that only a subset of possible execution options can be applied
        to a statement - these include "autocommit" and "stream_results",
        but not "isolation_level" or "compiled_cache".
        See :meth:`.Connection.execution_options` for a full list of
        possible options.

        .. seealso::

            :meth:`.Connection.execution_options()`

            :meth:`.Query.execution_options()`

        """
        if 'isolation_level' in kw:
            raise exc.ArgumentError(
                "'isolation_level' execution option may only be specified "
                "on Connection.execution_options(), or "
                "per-engine using the isolation_level "
                "argument to create_engine()."
            )
        if 'compiled_cache' in kw:
            raise exc.ArgumentError(
                "'compiled_cache' execution option may only be specified "
                "on Connection.execution_options(), not per statement."
            )
        # mutation occurs on the generated copy supplied by @_generative,
        # which the decorator returns to the caller
        self._execution_options = self._execution_options.union(kw)

    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`."""
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            # NOTE(review): message lacks a space between "Engine." and
            # "Use" -- candidate cosmetic fix
            msg = ('This %s is not directly bound to a Connection or Engine.'
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)

    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`, returning the
        result's scalar representation.

        """
        return self.execute(*multiparams, **params).scalar()

    @property
    def bind(self):
        """Returns the :class:`.Engine` or :class:`.Connection` to
        which this :class:`.Executable` is bound, or None if none found.

        This is a traversal which checks locally, then
        checks among the "from" clauses of associated objects
        until a bound engine or connection is found.

        """
        if self._bind is not None:
            return self._bind

        # for/else: None is returned only when no from-object is bound
        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None
# Backwards-compatibility alias: external code may still reference the
# old private name ``_Executable``; do not remove.
_Executable = Executable
class TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.

    Public constructor is the :func:`text()` function.

    """

    __visit_name__ = 'textclause'

    # matches ":name" bind markers, skipping "::" casts, escaped "\:" and
    # markers preceded by a word character
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = \
        Executable._execution_options.union(
            {'autocommit': PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    @property
    def selectable(self):
        return self

    _hide_froms = []

    def __init__(
        self,
        text='',
        bind=None,
        bindparams=None,
        typemap=None,
        autocommit=None,
    ):
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated.  '
                                 'Use .execution_options(autocommit=Tru'
                                 'e)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if typemap is not None:
            # normalize mapped types to instances
            for key in typemap.keys():
                typemap[key] = sqltypes.to_instance(typemap[key])

        def repl(m):
            # side effect: register a bindparam for each :name found
            self.bindparams[m.group(1)] = bindparam(m.group(1))
            return ':%s' % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)

        if bindparams is not None:
            # explicit bindparam() objects override the auto-generated ones
            for b in bindparams:
                self.bindparams[b.key] = b

    @property
    def type(self):
        if self.typemap is not None and len(self.typemap) == 1:
            # NOTE(review): iterating the dict yields its *key*; presumably
            # the single mapped type instance was intended -- verify.
            return list(self.typemap)[0]
        else:
            return sqltypes.NULLTYPE

    @property
    def comparator(self):
        return self.type.comparator_factory(self)

    def self_group(self, against=None):
        # parenthesize only when used as the target of IN
        if against is operators.in_op:
            return Grouping(self)
        else:
            return self

    def _copy_internals(self, clone=_clone, **kw):
        self.bindparams = dict((b.key, clone(b, **kw))
                               for b in self.bindparams.values())

    def get_children(self, **kwargs):
        return self.bindparams.values()
class Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.

    Public constructor is the :func:`null()` function.

    """

    __visit_name__ = 'null'

    def __init__(self):
        self.type = sqltypes.NULLTYPE

    def compare(self, other):
        # all Null constructs compare equal
        return isinstance(other, Null)
class False_(ColumnElement):
    """Represent the ``false`` keyword in a SQL statement.

    Public constructor is the :func:`false()` function.

    """

    __visit_name__ = 'false'

    def __init__(self):
        self.type = sqltypes.BOOLEANTYPE

    def compare(self, other):
        # all False_ constructs compare equal
        return isinstance(other, False_)
class True_(ColumnElement):
    """Represent the ``true`` keyword in a SQL statement.

    Public constructor is the :func:`true()` function.

    """

    __visit_name__ = 'true'

    def __init__(self):
        self.type = sqltypes.BOOLEANTYPE

    def compare(self, other):
        # all True_ constructs compare equal
        return isinstance(other, True_)
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.

    By default, is comma-separated, such as a column listing.

    """
    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        if self.group_contents:
            # self_group each member so precedence vs. our operator
            # is preserved; None members are dropped
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses if clause is not None]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses if clause is not None]

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag.  need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists.  flatten()
        # method ?
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).\
                                self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))

    def _copy_internals(self, clone=_clone, **kw):
        self.clauses = [clone(clause, **kw) for clause in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))

    def self_group(self, against=None):
        # parenthesize only when our operator binds less tightly than
        # the surrounding one
        if self.group and operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self

    def compare(self, other, **kw):
        """Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
        including a comparison of all the clause items.

        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            # single-element list compares like its sole element
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and \
                len(self.clauses) == len(other.clauses):
            # for/else: operators are compared only when every pair of
            # clauses matched
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    """A :class:`.ClauseList` of boolean expressions, itself usable as a
    column expression (e.g. the result of ``and_()`` / ``or_()``)."""

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        self.type = sqltypes.to_instance(kwargs.get('type_',
                                                    sqltypes.Boolean))

    @property
    def _select_iterable(self):
        return (self, )

    def self_group(self, against=None):
        # an empty boolean list never needs parenthesization
        if not self.clauses:
            return self
        else:
            return super(BooleanClauseList, self).self_group(against=against)
class Tuple(ClauseList, ColumnElement):
    """Represent a SQL tuple, e.g. ``(a, b) IN ((1, 2), (3, 4))``."""

    def __init__(self, *clauses, **kw):
        clauses = [_literal_as_binds(c) for c in clauses]
        # type defaults to that inferred from the member clauses
        self.type = kw.pop('type_', None)
        if self.type is None:
            self.type = _type_from_args(clauses)
        super(Tuple, self).__init__(*clauses, **kw)

    @property
    def _select_iterable(self):
        return (self, )

    def _bind_param(self, operator, obj):
        # a literal compared against a tuple becomes a tuple of binds
        return Tuple(*[
            BindParameter(None, o, _compared_to_operator=operator,
                          _compared_to_type=self.type, unique=True)
            for o in obj
        ]).self_group()
class Case(ColumnElement):
    """Represent a SQL ``CASE`` expression.

    ``whens`` is a sequence (or dict-like) of (condition, result) pairs;
    ``value`` produces the "simple" CASE form (``CASE value WHEN ...``);
    ``else_`` is the optional ``ELSE`` result.
    """

    __visit_name__ = 'case'

    def __init__(self, whens, value=None, else_=None):
        try:
            # accept dict-like whens as well as pair sequences
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass

        if value is not None:
            # simple CASE: the "when" side may be a literal to compare
            # against ``value``
            whenlist = [
                (_literal_as_binds(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # searched CASE: the "when" side must be a boolean expression
            whenlist = [
                (_no_literals(c).self_group(),
                 _literal_as_binds(r)) for (c, r) in whens
            ]

        if whenlist:
            # the CASE takes its type from the last "result" expression
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None

        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)

        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None

    def _copy_internals(self, clone=_clone, **kw):
        if self.value is not None:
            self.value = clone(self.value, **kw)
        self.whens = [(clone(x, **kw), clone(y, **kw))
                      for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_, **kw)

    def get_children(self, **kwargs):
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_

    @property
    def _from_objects(self):
        return list(itertools.chain(*[x._from_objects for x in
                                      self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
    """Base for SQL function-oriented constructs.
    .. seealso::
    :class:`.Function` - named SQL function.
    :data:`.func` - namespace which produces registered or ad-hoc
    :class:`.Function` instances.
    :class:`.GenericFunction` - allows creation of registered function
    types.
    """
    # no package qualifiers by default; subclasses/instances may override
    packagenames = ()
    def __init__(self, *clauses, **kwargs):
        """Construct a :class:`.FunctionElement`.
        Positional arguments are coerced into bound/literal
        expressions and stored as a grouped, comma-separated
        :class:`.ClauseList`.
        """
        args = [_literal_as_binds(c, self.name) for c in clauses]
        self.clause_expr = ClauseList(
            operator=operators.comma_op,
            group_contents=True, *args).\
            self_group()
    @property
    def columns(self):
        """Fulfill the 'columns' contract of :class:`.ColumnElement`.
        Returns a single-element list consisting of this object.
        """
        return [self]
    @util.memoized_property
    def clauses(self):
        """Return the underlying :class:`.ClauseList` which contains
        the arguments for this :class:`.FunctionElement`.
        """
        # unwrap the self_group() applied in __init__
        return self.clause_expr.element
    def over(self, partition_by=None, order_by=None):
        """Produce an OVER clause against this function.
        Used against aggregate or so-called "window" functions,
        for database backends that support window functions.
        The expression::
        func.row_number().over(order_by='x')
        is shorthand for::
        from sqlalchemy import over
        over(func.row_number(), order_by='x')
        See :func:`~.expression.over` for a full description.
        .. versionadded:: 0.7
        """
        return over(self, partition_by=partition_by, order_by=order_by)
    @property
    def _from_objects(self):
        # FROM objects come from the function's arguments
        return self.clauses._from_objects
    def get_children(self, **kwargs):
        return self.clause_expr,
    def _copy_internals(self, clone=_clone, **kw):
        # replace the argument list, then invalidate derived state:
        # exported column collections and the memoized ``clauses``
        self.clause_expr = clone(self.clause_expr, **kw)
        self._reset_exported()
        FunctionElement.clauses._reset(self)
    def select(self):
        """Produce a :func:`~.expression.select` construct
        against this :class:`.FunctionElement`.
        This is shorthand for::
        s = select([function_element])
        """
        s = select([self])
        # carry over any execution options set on this element
        if self._execution_options:
            s = s.execution_options(**self._execution_options)
        return s
    def scalar(self):
        """Execute this :class:`.FunctionElement` against an embedded
        'bind' and return a scalar value.
        This first calls :meth:`~.FunctionElement.select` to
        produce a SELECT construct.
        Note that :class:`.FunctionElement` can be passed to
        the :meth:`.Connectable.scalar` method of :class:`.Connection`
        or :class:`.Engine`.
        """
        return self.select().execute().scalar()
    def execute(self):
        """Execute this :class:`.FunctionElement` against an embedded
        'bind'.
        This first calls :meth:`~.FunctionElement.select` to
        produce a SELECT construct.
        Note that :class:`.FunctionElement` can be passed to
        the :meth:`.Connectable.execute` method of :class:`.Connection`
        or :class:`.Engine`.
        """
        return self.select().execute()
    def _bind_param(self, operator, obj):
        # anonymous bind parameter typed against this function's type
        return BindParameter(None, obj, _compared_to_operator=operator,
                             _compared_to_type=self.type, unique=True)
class Function(FunctionElement):
    """Describe a named SQL function.

    See the superclass :class:`.FunctionElement` for a description
    of public methods.

    .. seealso::

        :data:`.func` - namespace which produces registered or ad-hoc
        :class:`.Function` instances.

        :class:`.GenericFunction` - allows creation of registered function
        types.

    """
    __visit_name__ = 'function'

    def __init__(self, name, *clauses, **kw):
        """Construct a :class:`.Function`.

        The :data:`.func` construct is normally used to construct
        new :class:`.Function` instances.

        """
        pkgs = kw.pop('packagenames', None)
        self.packagenames = pkgs if pkgs else []
        # self.name must be assigned before FunctionElement.__init__,
        # which coerces the clause arguments using it
        self.name = name
        self._bind = kw.get('bind', None)
        self.type = sqltypes.to_instance(kw.get('type_', None))
        FunctionElement.__init__(self, *clauses, **kw)

    def _bind_param(self, operator, obj):
        # bind parameters compared to this function are keyed on
        # the function's own name
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)
class Cast(ColumnElement):
    """Represent a SQL CAST expression: ``CAST(clause AS type)``."""
    __visit_name__ = 'cast'

    def __init__(self, clause, totype, **kwargs):
        """Coerce ``clause`` into an expression cast to ``totype``.

        Extra keyword arguments are accepted for signature
        compatibility and otherwise ignored.
        """
        target_type = sqltypes.to_instance(totype)
        self.type = target_type
        self.clause = _literal_as_binds(clause, None)
        self.typeclause = TypeClause(target_type)

    def _copy_internals(self, clone=_clone, **kw):
        # clone both the casted expression and its type clause
        self.clause, self.typeclause = (
            clone(self.clause, **kw),
            clone(self.typeclause, **kw),
        )

    def get_children(self, **kwargs):
        return self.clause, self.typeclause

    @property
    def _from_objects(self):
        # FROM objects derive solely from the casted expression
        return self.clause._from_objects
class Extract(ColumnElement):
    """Represent a SQL EXTRACT clause, ``EXTRACT(field FROM expr)``."""
    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        """Store the field name and coerce ``expr`` to an expression.

        Extra keyword arguments are accepted for signature
        compatibility and otherwise ignored.
        """
        # result of EXTRACT is always typed as Integer here
        self.type = sqltypes.Integer()
        self.field = field
        self.expr = _literal_as_binds(expr, None)

    def _copy_internals(self, clone=_clone, **kw):
        self.expr = clone(self.expr, **kw)

    def get_children(self, **kwargs):
        return (self.expr,)

    @property
    def _from_objects(self):
        return self.expr._from_objects
class UnaryExpression(ColumnElement):
    """Define a 'unary' expression.
    A unary expression has a single column expression
    and an operator. The operator can be placed on the left
    (where it is called the 'operator') or right (where it is called the
    'modifier') of the column expression.
    """
    __visit_name__ = 'unary'
    def __init__(self, element, operator=None, modifier=None,
                            type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # group the operand against whichever operator applies, so
        # precedence-sensitive parenthesization happens up front
        self.element = _literal_as_text(element).\
            self_group(against=self.operator or self.modifier)
        self.type = sqltypes.to_instance(type_)
        self.negate = negate
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    def get_children(self, **kwargs):
        return self.element,
    def compare(self, other, **kw):
        """Compare this :class:`UnaryExpression` against the given
        :class:`.ClauseElement`."""
        return (
            isinstance(other, UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )
    def _negate(self):
        # when an explicit negation operator was supplied, produce a
        # new expression with operator/negate swapped; otherwise fall
        # back to the generic negation behavior of the superclass
        if self.negate is not None:
            return UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(UnaryExpression, self)._negate()
    def self_group(self, against=None):
        # parenthesize only when our operator has lower precedence
        # than the operator we're being placed against
        if self.operator and operators.is_precedent(self.operator,
                                                    against):
            return Grouping(self)
        else:
            return self
class BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``.
    A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expression::
    >>> from sqlalchemy.sql import column
    >>> column('a') + column('b')
    <sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
    >>> print column('a') + column('b')
    a + b
    """
    __visit_name__ = 'binary'
    def __init__(self, left, right, operator, type_=None,
                    negate=None, modifiers=None):
        # allow compatibility with libraries that
        # refer to BinaryExpression directly and pass strings
        if isinstance(operator, basestring):
            operator = operators.custom_op(operator)
        # keep the pre-coercion operands for __nonzero__'s hash test
        self._orig = (left, right)
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = sqltypes.to_instance(type_)
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers
    def __nonzero__(self):
        # truth value is defined only for ==/!= expressions, based on
        # hashes of the original operands; note ``operator`` here is
        # the stdlib module, not the instance attribute
        if self.operator in (operator.eq, operator.ne):
            return self.operator(hash(self._orig[0]), hash(self._orig[1]))
        else:
            raise TypeError("Boolean value of this clause is not defined")
    @property
    def is_comparison(self):
        return operators.is_comparison(self.operator)
    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects
    def _copy_internals(self, clone=_clone, **kw):
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right
    def compare(self, other, **kw):
        """Compare this :class:`BinaryExpression` against the
        given :class:`BinaryExpression`."""
        # operands may match directly, or swapped when the operator
        # is commutative
        return (
            isinstance(other, BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )
    def self_group(self, against=None):
        # parenthesize when placed against a higher-precedence operator
        if operators.is_precedent(self.operator, against):
            return Grouping(self)
        else:
            return self
    def _negate(self):
        # use the explicit negation operator when one was supplied;
        # otherwise defer to the generic NOT-wrapping behavior
        if self.negate is not None:
            return BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=sqltypes.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(BinaryExpression, self)._negate()
class Exists(UnaryExpression):
    """Represent an ``EXISTS`` clause, as a Boolean-typed unary
    expression wrapping a scalar select."""
    __visit_name__ = UnaryExpression.__visit_name__
    # the EXISTS expression itself contributes no FROM objects
    _from_objects = []
    def __init__(self, *args, **kwargs):
        # accept either an existing select / scalar select as the sole
        # argument, or arguments from which to build one (defaulting
        # to SELECT *)
        if args and isinstance(args[0], (SelectBase, ScalarSelect)):
            s = args[0]
        else:
            if not args:
                args = ([literal_column('*')],)
            s = select(*args, **kwargs).as_scalar().self_group()
        UnaryExpression.__init__(self, s, operator=operators.exists,
                                  type_=sqltypes.Boolean)
    def select(self, whereclause=None, **params):
        # wrap this EXISTS expression in a SELECT statement
        return select([self], whereclause, **params)
    def correlate(self, *fromclause):
        # return a copy whose inner select correlates the given FROMs
        e = self._clone()
        e.element = self.element.correlate(*fromclause).self_group()
        return e
    def correlate_except(self, *fromclause):
        # return a copy correlating everything except the given FROMs
        e = self._clone()
        e.element = self.element.correlate_except(*fromclause).self_group()
        return e
    def select_from(self, clause):
        """return a new :class:`.Exists` construct, applying the given
        expression to the :meth:`.Select.select_from` method of the select
        statement contained.
        """
        e = self._clone()
        e.element = self.element.select_from(clause).self_group()
        return e
    def where(self, clause):
        """return a new exists() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.
        """
        e = self._clone()
        e.element = self.element.where(clause).self_group()
        return e
class Join(FromClause):
    """represent a ``JOIN`` construct between two :class:`.FromClause`
    elements.
    The public constructor function for :class:`.Join` is the module-level
    :func:`join()` function, as well as the :func:`join()` method available
    off all :class:`.FromClause` subclasses.
    """
    __visit_name__ = 'join'
    def __init__(self, left, right, onclause=None, isouter=False):
        """Construct a new :class:`.Join`.
        The usual entrypoint here is the :func:`~.expression.join`
        function or the :meth:`.FromClause.join` method of any
        :class:`.FromClause` object.
        """
        self.left = _interpret_as_from(left)
        self.right = _interpret_as_from(right).self_group()
        # when no ON clause is given, derive one from foreign key
        # relationships between the two sides
        if onclause is None:
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
    @property
    def description(self):
        # includes id() of each side to disambiguate repeated names
        return "Join object on %s(%d) and %s(%d)" % (
            self.left.description,
            id(self.left),
            self.right.description,
            id(self.right))
    def is_derived_from(self, fromclause):
        # derived if either side (or the join itself) derives from it
        return fromclause is self or \
            self.left.is_derived_from(fromclause) or \
            self.right.is_derived_from(fromclause)
    def self_group(self, against=None):
        # a JOIN placed inside another expression is parenthesized
        return FromGrouping(self)
    def _populate_column_collection(self):
        # combine columns from both sides; the primary key is the
        # reduced set of PK columns given the ON clause
        columns = [c for c in self.left.columns] + \
            [c for c in self.right.columns]
        self.primary_key.extend(sqlutil.reduce_columns(
            (c for c in columns if c.primary_key), self.onclause))
        self._columns.update((col._label, col) for col in columns)
        self.foreign_keys.update(itertools.chain(
            *[col.foreign_keys for col in columns]))
    def _refresh_for_new_column(self, column):
        # ask each side in turn for the new column; if found and our
        # collections are already built, splice it in
        col = self.left._refresh_for_new_column(column)
        if col is None:
            col = self.right._refresh_for_new_column(column)
        if col is not None:
            if self._cols_populated:
                self._columns[col._label] = col
                self.foreign_keys.add(col)
                if col.primary_key:
                    self.primary_key.add(col)
            return col
        return None
    def _copy_internals(self, clone=_clone, **kw):
        # invalidate exported collections before swapping children
        self._reset_exported()
        self.left = clone(self.left, **kw)
        self.right = clone(self.right, **kw)
        self.onclause = clone(self.onclause, **kw)
    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause
    def _match_primaries(self, left, right):
        # when the left side is itself a Join, prefer its rightmost
        # element when searching for join conditions
        if isinstance(left, Join):
            left_right = left.right
        else:
            left_right = None
        return sqlutil.join_condition(left, right, a_subset=left_right)
    def select(self, whereclause=None, **kwargs):
        """Create a :class:`.Select` from this :class:`.Join`.
        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::
        from sqlalchemy import select
        j = select([j.left, j.right], **kw).\\
            where(whereclause).\\
            select_from(j)
        :param whereclause: the WHERE criterion that will be sent to
        the :func:`select()` function
        :param \**kwargs: all other kwargs are sent to the
        underlying :func:`select()` function.
        """
        collist = [self.left, self.right]
        return select(collist, whereclause, from_obj=[self], **kwargs)
    @property
    def bind(self):
        # prefer the left side's bind, falling back to the right
        return self.left.bind or self.right.bind
    def alias(self, name=None):
        """return an alias of this :class:`.Join`.
        Used against a :class:`.Join` object,
        :meth:`~.Join.alias` calls the :meth:`~.Join.select`
        method first so that a subquery against a
        :func:`.select` construct is generated.
        the :func:`~expression.select` construct also has the
        ``correlate`` flag set to ``False`` and will not
        auto-correlate inside an enclosing :func:`~expression.select`
        construct.
        The equivalent long-hand form, given a :class:`.Join` object
        ``j``, is::
        from sqlalchemy import select, alias
        j = alias(
            select([j.left, j.right]).\\
                select_from(j).\\
                with_labels(True).\\
                correlate(False),
            name=name
        )
        See :func:`~.expression.alias` for further details on
        aliases.
        """
        return self.select(use_labels=True, correlate=False).alias(name)
    @property
    def _hide_froms(self):
        # the join's two sides are hidden from enclosing FROM lists,
        # across all clones of this join
        return itertools.chain(*[_from_objects(x.left, x.right)
                for x in self._cloned_set])
    @property
    def _from_objects(self):
        return [self] + \
            self.onclause._from_objects + \
            self.left._from_objects + \
            self.right._from_objects
class Alias(FromClause):
    """Represents an table or selectable alias (AS).
    Represents an alias, as typically applied to any table or
    sub-select within a SQL statement using the ``AS`` keyword (or
    without the keyword on certain databases such as Oracle).
    This object is constructed from the :func:`~.expression.alias` module level
    function as well as the :meth:`.FromClause.alias` method available on all
    :class:`.FromClause` subclasses.
    """
    __visit_name__ = 'alias'
    named_with_column = True
    def __init__(self, selectable, name=None):
        # unwrap nested Alias chains to find the original selectable
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
        self.supports_execution = baseselectable.supports_execution
        if self.supports_execution:
            self._execution_options = baseselectable._execution_options
        self.element = selectable
        # generate an anonymous name when none given, seeded from the
        # original's name where available
        if name is None:
            if self.original.named_with_column:
                name = getattr(self.original, 'name', None)
            name = _anonymous_label('%%(%d %s)s' % (id(self), name
                                    or 'anon'))
        self.name = name
    @property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def as_scalar(self):
        # delegate to the aliased element; not all elements support it
        try:
            return self.element.as_scalar()
        except AttributeError:
            raise AttributeError("Element %s does not support "
                                 "'as_scalar()'" % self.element)
    def is_derived_from(self, fromclause):
        if fromclause in self._cloned_set:
            return True
        return self.element.is_derived_from(fromclause)
    def _populate_column_collection(self):
        # proxy each of the element's columns onto this alias
        for col in self.element.columns:
            col._make_proxy(self)
    def _refresh_for_new_column(self, column):
        # proxy a newly-appeared column only if our collections have
        # already been populated
        col = self.element._refresh_for_new_column(column)
        if col is not None:
            if not self._cols_populated:
                return None
            else:
                return col._make_proxy(self)
        else:
            return None
    def _copy_internals(self, clone=_clone, **kw):
        # don't apply anything to an aliased Table
        # for now. May want to drive this from
        # the given **kw.
        if isinstance(self.element, TableClause):
            return
        self._reset_exported()
        self.element = clone(self.element, **kw)
        # re-derive ``original`` from the (possibly new) element
        baseselectable = self.element
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
    def get_children(self, column_collections=True, **kw):
        if column_collections:
            for c in self.c:
                yield c
        yield self.element
    @property
    def _from_objects(self):
        return [self]
    @property
    def bind(self):
        return self.element.bind
class CTE(Alias):
    """Represent a Common Table Expression.

    The :class:`.CTE` object is obtained using the
    :meth:`.SelectBase.cte` method from any selectable.
    See that method for complete examples.

    .. versionadded:: 0.7.6

    """
    __visit_name__ = 'cte'

    def __init__(self, selectable,
                 name=None,
                 recursive=False,
                 cte_alias=False,
                 _restates=frozenset()):
        # the frozenset default is immutable, so sharing one instance
        # across constructions is safe
        self.recursive = recursive
        self.cte_alias = cte_alias
        self._restates = _restates
        super(CTE, self).__init__(selectable, name=name)

    def alias(self, name=None):
        # an alias of a CTE refers back to this CTE by its name
        return CTE(
            self.original,
            name=name,
            recursive=self.recursive,
            cte_alias=self.name,
        )

    def union(self, other):
        # record this CTE as restated so rendering can refer back to it
        combined = self.original.union(other)
        return CTE(
            combined,
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self]),
        )

    def union_all(self, other):
        # same as union(), but using UNION ALL semantics
        combined = self.original.union_all(other)
        return CTE(
            combined,
            name=self.name,
            recursive=self.recursive,
            _restates=self._restates.union([self]),
        )
class Grouping(ColumnElement):
    """Represent a parenthesized grouping within a column expression."""
    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        self.type = getattr(element, 'type', sqltypes.NULLTYPE)

    @property
    def _label(self):
        # fall back to an anonymous label when the element provides none
        inner = getattr(self.element, '_label', None)
        return inner or self.anon_label

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    def get_children(self, **kwargs):
        return (self.element,)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # proxy any other attribute access to the grouped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # __getattr__ delegation requires explicit pickle support
        return dict(element=self.element, type=self.type)

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']

    def compare(self, other, **kw):
        """Compare against another :class:`.Grouping` via the
        underlying elements."""
        if not isinstance(other, Grouping):
            return False
        return self.element.compare(other.element)
class FromGrouping(FromClause):
    """Represent a parenthesized grouping of a FROM clause."""
    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element

    def _init_collections(self):
        # collections are proxied from the element; nothing to build
        pass

    @property
    def columns(self):
        return self.element.columns

    @property
    def primary_key(self):
        return self.element.primary_key

    @property
    def foreign_keys(self):
        # this could be
        # self.element.foreign_keys
        # see SelectableTest.test_join_condition
        return set()

    @property
    def _hide_froms(self):
        return self.element._hide_froms

    def get_children(self, **kwargs):
        return (self.element,)

    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # delegate unknown attribute access to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # __getattr__ delegation requires explicit pickle support
        return dict(element=self.element)

    def __setstate__(self, state):
        self.element = state['element']
class Over(ColumnElement):
    """Represent an OVER clause.

    This is a special operator against a so-called
    "window" function, as well as any aggregate function,
    which produces results relative to the result set
    itself.  It's supported only by certain database
    backends.

    """
    __visit_name__ = 'over'

    order_by = None
    partition_by = None

    def __init__(self, func, partition_by=None, order_by=None):
        self.func = func
        # class-level None defaults remain in place when the
        # corresponding argument is not supplied
        if partition_by is not None:
            self.partition_by = ClauseList(*util.to_list(partition_by))
        if order_by is not None:
            self.order_by = ClauseList(*util.to_list(order_by))

    @util.memoized_property
    def type(self):
        # the OVER expression carries the underlying function's type
        return self.func.type

    def get_children(self, **kwargs):
        children = (self.func, self.partition_by, self.order_by)
        return [c for c in children if c is not None]

    def _copy_internals(self, clone=_clone, **kw):
        self.func = clone(self.func, **kw)
        for attr in ('partition_by', 'order_by'):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, clone(value, **kw))

    @property
    def _from_objects(self):
        present = [c for c in
                   (self.func, self.partition_by, self.order_by)
                   if c is not None]
        return list(itertools.chain(*[c._from_objects for c in present]))
class Label(ColumnElement):
    """Represents a column label (AS).
    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.
    This object is constructed from the :func:`label()` module level
    function as well as the :func:`label()` method available on all
    :class:`.ColumnElement` subclasses.
    """
    __visit_name__ = 'label'
    def __init__(self, name, element, type_=None):
        # unwrap nested Labels so we label the innermost element
        while isinstance(element, Label):
            element = element.element
        if name:
            self.name = name
        else:
            # generate an anonymous label seeded from the element name
            self.name = _anonymous_label('%%(%d %s)s' % (id(self),
                                getattr(element, 'name', 'anon')))
        self.key = self._label = self._key_label = self.name
        self._element = element
        self._type = type_
        self.quote = element.quote
        self._proxies = [element]
    @util.memoized_property
    def type(self):
        # explicit type_ takes precedence over the element's own type
        return sqltypes.to_instance(
            self._type or getattr(self._element, 'type', None)
        )
    @util.memoized_property
    def element(self):
        # the element as grouped against the AS operator
        return self._element.self_group(against=operators.as_)
    def self_group(self, against=None):
        # re-label the grouped form only when grouping changed it
        sub_element = self._element.self_group(against=against)
        if sub_element is not self._element:
            return Label(self.name,
                         sub_element,
                         type_=self._type)
        else:
            return self
    @property
    def primary_key(self):
        return self.element.primary_key
    @property
    def foreign_keys(self):
        return self.element.foreign_keys
    def get_children(self, **kwargs):
        return self.element,
    def _copy_internals(self, clone=_clone, **kw):
        self.element = clone(self.element, **kw)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _make_proxy(self, selectable, name=None, **kw):
        # proxy the underlying element under this label's name,
        # recording this Label in the proxy chain
        e = self.element._make_proxy(selectable,
                                     name=name if name else self.name)
        e._proxies.append(self)
        if self._type is not None:
            e.type = self._type
        return e
class ColumnClause(Immutable, ColumnElement):
    """Represents a generic column expression from any textual string.
    This includes columns associated with tables, aliases and select
    statements, but also any arbitrary text. May or may not be bound
    to an underlying :class:`.Selectable`.
    :class:`.ColumnClause` is constructed by itself typically via
    the :func:`~.expression.column` function. It may be placed directly
    into constructs such as :func:`.select` constructs::
    from sqlalchemy.sql import column, select
    c1, c2 = column("c1"), column("c2")
    s = select([c1, c2]).where(c1==5)
    There is also a variant on :func:`~.expression.column` known
    as :func:`~.expression.literal_column` - the difference is that
    in the latter case, the string value is assumed to be an exact
    expression, rather than a column name, so that no quoting rules
    or similar are applied::
    from sqlalchemy.sql import literal_column, select
    s = select([literal_column("5 + 7")])
    :class:`.ColumnClause` can also be used in a table-like
    fashion by combining the :func:`~.expression.column` function
    with the :func:`~.expression.table` function, to produce
    a "lightweight" form of table metadata::
    from sqlalchemy.sql import table, column
    user = table("user",
            column("id"),
            column("name"),
            column("description"),
    )
    The above construct can be created in an ad-hoc fashion and is
    not associated with any :class:`.schema.MetaData`, unlike it's
    more full fledged :class:`.schema.Table` counterpart.
    :param text: the text of the element.
    :param selectable: parent selectable.
    :param type: :class:`.types.TypeEngine` object which can associate
      this :class:`.ColumnClause` with a type.
    :param is_literal: if True, the :class:`.ColumnClause` is assumed to
      be an exact expression that will be delivered to the output with no
      quoting rules applied regardless of case sensitive settings. the
      :func:`literal_column()` function is usually used to create such a
      :class:`.ColumnClause`.
    """
    __visit_name__ = 'column'
    # schema-level attributes present for interface compatibility
    onupdate = default = server_default = server_onupdate = None
    # group of memoized properties expired together when .table changes
    _memoized_property = util.group_expirable_memoized_property()
    def __init__(self, text, selectable=None, type_=None, is_literal=False):
        self.key = self.name = text
        self.table = selectable
        self.type = sqltypes.to_instance(type_)
        self.is_literal = is_literal
    def _compare_name_for_result(self, other):
        # literal / table-less / non-proxied cases fall back to
        # name-based comparison; otherwise compare via proxy sets
        if self.is_literal or \
            self.table is None or \
            not hasattr(other, 'proxy_set') or (
            isinstance(other, ColumnClause) and other.is_literal
            ):
            return super(ColumnClause, self).\
                _compare_name_for_result(other)
        else:
            return other.proxy_set.intersection(self.proxy_set)
    def _get_table(self):
        return self.__dict__['table']
    def _set_table(self, table):
        # changing the parent table invalidates the memoized
        # _from_objects / _label / _key_label values
        self._memoized_property.expire_instance(self)
        self.__dict__['table'] = table
    table = property(_get_table, _set_table)
    @_memoized_property
    def _from_objects(self):
        t = self.table
        if t is not None:
            return [t]
        else:
            return []
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    @_memoized_property
    def _key_label(self):
        # a separate key-based label applies only when key != name
        if self.key != self.name:
            return self._gen_label(self.key)
        else:
            return self._label
    @_memoized_property
    def _label(self):
        return self._gen_label(self.name)
    def _gen_label(self, name):
        # build a "tablename_columnname" style label, qualified with
        # the schema when present; literal columns get no label
        t = self.table
        if self.is_literal:
            return None
        elif t is not None and t.named_with_column:
            if getattr(t, 'schema', None):
                label = t.schema.replace('.', '_') + "_" + \
                            t.name + "_" + name
            else:
                label = t.name + "_" + name
            # ensure the label name doesn't conflict with that
            # of an existing column
            if label in t.c:
                _label = label
                counter = 1
                while _label in t.c:
                    _label = label + "_" + str(counter)
                    counter += 1
                label = _label
            return _as_truncated(label)
        else:
            return name
    def _bind_param(self, operator, obj):
        # bind parameters compared to this column carry its name/type
        return BindParameter(self.name, obj,
                             _compared_to_operator=operator,
                             _compared_to_type=self.type,
                             unique=True)
    def _make_proxy(self, selectable, name=None, attach=True,
                    name_is_truncatable=False, **kw):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = self._constructor(
            _as_truncated(name or self.name) if \
            name_is_truncatable else \
            (name or self.name),
            selectable=selectable,
            type_=self.type,
            is_literal=is_literal
        )
        if name is None:
            c.key = self.key
        c._proxies = [self]
        # link the proxy to its counterpart on the clone parent, if any
        if selectable._is_clone_of is not None:
            c._is_clone_of = \
                selectable._is_clone_of.columns.get(c.key)
        if attach:
            selectable._columns[c.key] = c
        return c
class TableClause(Immutable, FromClause):
    """Represents a minimal "table" construct.
    The constructor for :class:`.TableClause` is the
    :func:`~.expression.table` function. This produces
    a lightweight table object that has only a name and a
    collection of columns, which are typically produced
    by the :func:`~.expression.column` function::
    from sqlalchemy.sql import table, column
    user = table("user",
            column("id"),
            column("name"),
            column("description"),
    )
    The :class:`.TableClause` construct serves as the base for
    the more commonly used :class:`~.schema.Table` object, providing
    the usual set of :class:`~.expression.FromClause` services including
    the ``.c.`` collection and statement generation methods.
    It does **not** provide all the additional schema-level services
    of :class:`~.schema.Table`, including constraints, references to other
    tables, or support for :class:`.MetaData`-level services. It's useful
    on its own as an ad-hoc construct used to generate quick SQL
    statements when a more fully fledged :class:`~.schema.Table`
    is not on hand.
    """
    __visit_name__ = 'table'
    named_with_column = True
    implicit_returning = False
    """:class:`.TableClause` doesn't support having a primary key or column
    -level defaults, so implicit returning doesn't apply."""
    _autoincrement_column = None
    """No PK or default support so no autoincrement column."""
    def __init__(self, name, *columns):
        super(TableClause, self).__init__()
        self.name = self.fullname = name
        self._columns = ColumnCollection()
        self.primary_key = ColumnSet()
        self.foreign_keys = set()
        for c in columns:
            self.append_column(c)
    def _init_collections(self):
        # collections are built eagerly in __init__
        pass
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def append_column(self, c):
        # register the column and point it back at this table
        self._columns[c.key] = c
        c.table = self
    def get_children(self, column_collections=True, **kwargs):
        if column_collections:
            return [c for c in self.c]
        else:
            return []
    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this
        :class:`.TableClause`."""
        # count against the first PK column when present, otherwise
        # the first column of the table
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
                    [func.count(col).label('tbl_row_count')],
                    whereclause,
                    from_obj=[self],
                    **params)
    def insert(self, values=None, inline=False, **kwargs):
        """Generate an :func:`.insert` construct against this
        :class:`.TableClause`.
        E.g.::
            table.insert().values(name='foo')
        See :func:`.insert` for argument and usage information.
        """
        return insert(self, values=values, inline=inline, **kwargs)
    def update(self, whereclause=None, values=None, inline=False, **kwargs):
        """Generate an :func:`.update` construct against this
        :class:`.TableClause`.
        E.g.::
            table.update().where(table.c.id==7).values(name='foo')
        See :func:`.update` for argument and usage information.
        """
        return update(self, whereclause=whereclause,
                      values=values, inline=inline, **kwargs)
    def delete(self, whereclause=None, **kwargs):
        """Generate a :func:`.delete` construct against this
        :class:`.TableClause`.
        E.g.::
            table.delete().where(table.c.id==7)
        See :func:`.delete` for argument and usage information.
        """
        return delete(self, whereclause, **kwargs)
    @property
    def _from_objects(self):
        return [self]
class SelectBase(Executable, FromClause):
    """Base class for :class:`.Select` and ``CompoundSelects``."""

    # class-level defaults; instance-level values are assigned in
    # __init__ only when the corresponding criterion is present.
    _order_by_clause = ClauseList()
    _group_by_clause = ClauseList()
    _limit = None
    _offset = None

    def __init__(self,
                 use_labels=False,
                 for_update=False,
                 limit=None,
                 offset=None,
                 order_by=None,
                 group_by=None,
                 bind=None,
                 autocommit=None):
        self.use_labels = use_labels
        self.for_update = for_update
        if autocommit is not None:
            # 'autocommit' keyword is accepted for backwards
            # compatibility only; it is translated into an
            # execution option.
            util.warn_deprecated('autocommit on select() is '
                                 'deprecated. Use .execution_options(a'
                                 'utocommit=True)')
            self._execution_options = \
                self._execution_options.union(
                    {'autocommit': autocommit})
        if limit is not None:
            self._limit = util.asint(limit)
        if offset is not None:
            self._offset = util.asint(offset)
        self._bind = bind
        if order_by is not None:
            self._order_by_clause = ClauseList(*util.to_list(order_by))
        if group_by is not None:
            self._group_by_clause = ClauseList(*util.to_list(group_by))

    def as_scalar(self):
        """return a 'scalar' representation of this selectable, which can be
        used as a column expression.

        Typically, a select statement which has only one column in its columns
        clause is eligible to be used as a scalar expression.

        The returned object is an instance of
        :class:`ScalarSelect`.

        """
        return ScalarSelect(self)

    @_generative
    def apply_labels(self):
        """return a new selectable with the 'use_labels' flag set to True.

        This will result in column expressions being generated using labels
        against their table name, such as "SELECT somecolumn AS
        tablename_somecolumn". This allows selectables which contain multiple
        FROM clauses to produce a unique set of column names regardless of
        name conflicts among the individual FROM clauses.

        """
        self.use_labels = True

    def label(self, name):
        """return a 'scalar' representation of this selectable, embedded as a
        subquery with a label.

        .. seealso::

            :meth:`~.SelectBase.as_scalar`.

        """
        return self.as_scalar().label(name)

    def cte(self, name=None, recursive=False):
        """Return a new :class:`.CTE`, or Common Table Expression instance.

        Common table expressions are a SQL standard whereby SELECT
        statements can draw upon secondary statements specified along
        with the primary statement, using a clause called "WITH".
        Special semantics regarding UNION can also be employed to
        allow "recursive" queries, where a SELECT statement can draw
        upon the set of rows that have previously been selected.

        SQLAlchemy detects :class:`.CTE` objects, which are treated
        similarly to :class:`.Alias` objects, as special elements
        to be delivered to the FROM clause of the statement as well
        as to a WITH clause at the top of the statement.

        .. versionadded:: 0.7.6

        :param name: name given to the common table expression.  Like
         :meth:`._FromClause.alias`, the name can be left as ``None``
         in which case an anonymous symbol will be used at query
         compile time.
        :param recursive: if ``True``, will render ``WITH RECURSIVE``.
         A recursive common table expression is intended to be used in
         conjunction with UNION ALL in order to derive rows
         from those already selected.

        The following examples illustrate two examples from
        Postgresql's documentation at
        http://www.postgresql.org/docs/8.4/static/queries-with.html.

        Example 1, non recursive::

            from sqlalchemy import Table, Column, String, Integer, MetaData, \\
                select, func

            metadata = MetaData()

            orders = Table('orders', metadata,
                Column('region', String),
                Column('amount', Integer),
                Column('product', String),
                Column('quantity', Integer)
            )

            regional_sales = select([
                orders.c.region,
                func.sum(orders.c.amount).label('total_sales')
            ]).group_by(orders.c.region).cte("regional_sales")

            top_regions = select([regional_sales.c.region]).\\
                where(
                    regional_sales.c.total_sales >
                    select([
                        func.sum(regional_sales.c.total_sales)/10
                    ])
                ).cte("top_regions")

            statement = select([
                orders.c.region,
                orders.c.product,
                func.sum(orders.c.quantity).label("product_units"),
                func.sum(orders.c.amount).label("product_sales")
            ]).where(orders.c.region.in_(
                select([top_regions.c.region])
            )).group_by(orders.c.region, orders.c.product)

            result = conn.execute(statement).fetchall()

        Example 2, WITH RECURSIVE::

            from sqlalchemy import Table, Column, String, Integer, MetaData, \\
                select, func

            metadata = MetaData()

            parts = Table('parts', metadata,
                Column('part', String),
                Column('sub_part', String),
                Column('quantity', Integer),
            )

            included_parts = select([
                parts.c.sub_part,
                parts.c.part,
                parts.c.quantity]).\\
                where(parts.c.part=='our part').\\
                cte(recursive=True)

            incl_alias = included_parts.alias()
            parts_alias = parts.alias()
            included_parts = included_parts.union_all(
                select([
                    parts_alias.c.part,
                    parts_alias.c.sub_part,
                    parts_alias.c.quantity
                ]).
                where(parts_alias.c.part==incl_alias.c.sub_part)
            )

            statement = select([
                included_parts.c.sub_part,
                func.sum(included_parts.c.quantity).
                label('total_quantity')
            ]).\\
            select_from(included_parts.join(parts,
                included_parts.c.part==parts.c.part)).\\
            group_by(included_parts.c.sub_part)

            result = conn.execute(statement).fetchall()

        .. seealso::

            :meth:`.orm.query.Query.cte` - ORM version of
            :meth:`.SelectBase.cte`.

        """
        return CTE(self, name=name, recursive=recursive)

    @_generative
    @util.deprecated('0.6',
                     message=":func:`.autocommit` is deprecated. Use "
                     ":func:`.Executable.execution_options` with the "
                     "'autocommit' flag.")
    def autocommit(self):
        """return a new selectable with the 'autocommit' flag set to
        True."""
        self._execution_options = \
            self._execution_options.union({'autocommit': True})

    def _generate(self):
        """Override the default _generate() method to also clear out
        exported collections."""
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        s._reset_exported()
        return s

    @_generative
    def limit(self, limit):
        """return a new selectable with the given LIMIT criterion
        applied."""
        self._limit = util.asint(limit)

    @_generative
    def offset(self, offset):
        """return a new selectable with the given OFFSET criterion
        applied."""
        self._offset = util.asint(offset)

    @_generative
    def order_by(self, *clauses):
        """return a new selectable with the given list of ORDER BY
        criterion applied.

        The criterion will be appended to any pre-existing ORDER BY
        criterion.

        """
        self.append_order_by(*clauses)

    @_generative
    def group_by(self, *clauses):
        """return a new selectable with the given list of GROUP BY
        criterion applied.

        The criterion will be appended to any pre-existing GROUP BY
        criterion.

        """
        self.append_group_by(*clauses)

    def append_order_by(self, *clauses):
        """Append the given ORDER BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing ORDER BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.SelectBase.order_by` method is preferred, as it provides
        standard :term:`method chaining`.

        """
        # a single None argument resets the ORDER BY entirely.
        if len(clauses) == 1 and clauses[0] is None:
            self._order_by_clause = ClauseList()
        else:
            if getattr(self, '_order_by_clause', None) is not None:
                clauses = list(self._order_by_clause) + list(clauses)
            self._order_by_clause = ClauseList(*clauses)

    def append_group_by(self, *clauses):
        """Append the given GROUP BY criterion applied to this selectable.

        The criterion will be appended to any pre-existing GROUP BY criterion.

        This is an **in-place** mutation method; the
        :meth:`~.SelectBase.group_by` method is preferred, as it provides
        standard :term:`method chaining`.

        """
        # a single None argument resets the GROUP BY entirely.
        if len(clauses) == 1 and clauses[0] is None:
            self._group_by_clause = ClauseList()
        else:
            if getattr(self, '_group_by_clause', None) is not None:
                clauses = list(self._group_by_clause) + list(clauses)
            self._group_by_clause = ClauseList(*clauses)

    @property
    def _from_objects(self):
        # a SELECT acts as its own FROM element when embedded.
        return [self]
class ScalarSelect(Generative, Grouping):
    """Represent a SELECT used as a column-level ('scalar') expression.

    Wraps a :class:`.SelectBase`; the datatype is taken from the
    wrapped statement's first column.
    """

    # a scalar select contributes no FROM elements of its own
    _from_objects = []

    def __init__(self, element):
        self.type = element._scalar_type()
        self.element = element

    @property
    def columns(self):
        message = ('Scalar Select expression has no columns; use this '
                   'object directly within a column-level expression.')
        raise exc.InvalidRequestError(message)

    c = columns

    @_generative
    def where(self, crit):
        """Apply a WHERE clause to the SELECT statement referred to
        by this :class:`.ScalarSelect`.
        """
        self.element = self.element.where(crit)

    def self_group(self, **kwargs):
        # already parenthesized via Grouping; no further grouping needed
        return self
class CompoundSelect(SelectBase):
    """Forms the basis of ``UNION``, ``UNION ALL``, and other
    SELECT-based set operations."""

    __visit_name__ = 'compound_select'

    # symbols naming the set-operation keyword to render
    UNION = util.symbol('UNION')
    UNION_ALL = util.symbol('UNION ALL')
    EXCEPT = util.symbol('EXCEPT')
    EXCEPT_ALL = util.symbol('EXCEPT ALL')
    INTERSECT = util.symbol('INTERSECT')
    INTERSECT_ALL = util.symbol('INTERSECT ALL')

    def __init__(self, keyword, *selects, **kwargs):
        self._auto_correlate = kwargs.pop('correlate', False)
        self.keyword = keyword
        self.selects = []
        # all member selects must agree on column count; tracked here
        numcols = None
        # some DBs do not like ORDER BY in the inner queries of a UNION, etc.
        for n, s in enumerate(selects):
            s = _clause_element_as_expr(s)
            if not numcols:
                numcols = len(s.c)
            elif len(s.c) != numcols:
                raise exc.ArgumentError('All selectables passed to '
                        'CompoundSelect must have identical numbers of '
                        'columns; select #%d has %d columns, select '
                        '#%d has %d' % (1, len(self.selects[0].c), n
                            + 1, len(s.c)))
            self.selects.append(s.self_group(self))
        SelectBase.__init__(self, **kwargs)

    def _scalar_type(self):
        # scalar type is that of the first member select
        return self.selects[0]._scalar_type()

    def self_group(self, against=None):
        return FromGrouping(self)

    def is_derived_from(self, fromclause):
        # derived if any member select is derived from the given clause
        for s in self.selects:
            if s.is_derived_from(fromclause):
                return True
        return False

    def _populate_column_collection(self):
        for cols in zip(*[s.c for s in self.selects]):
            # this is a slightly hacky thing - the union exports a
            # column that resembles just that of the *first* selectable.
            # to get at a "composite" column, particularly foreign keys,
            # you have to dig through the proxies collection which we
            # generate below.  We may want to improve upon this, such as
            # perhaps _make_proxy can accept a list of other columns
            # that are "shared" - schema.column can then copy all the
            # ForeignKeys in.  this would allow the union() to have all
            # those fks too.
            proxy = cols[0]._make_proxy(self,
                    name=cols[0]._label if self.use_labels else None,
                    key=cols[0]._key_label if self.use_labels else None)

            # hand-construct the "_proxies" collection to include all
            # derived columns place a 'weight' annotation corresponding
            # to how low in the list of select()s the column occurs, so
            # that the corresponding_column() operation can resolve
            # conflicts
            proxy._proxies = [c._annotate({'weight': i + 1}) for (i,
                              c) in enumerate(cols)]

    def _refresh_for_new_column(self, column):
        for s in self.selects:
            s._refresh_for_new_column(column)
        if not self._cols_populated:
            return None
        raise NotImplementedError("CompoundSelect constructs don't support "
                "addition of columns to underlying selectables")

    def _copy_internals(self, clone=_clone, **kw):
        self._reset_exported()
        self.selects = [clone(s, **kw) for s in self.selects]
        if hasattr(self, '_col_map'):
            del self._col_map
        for attr in ('_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr), **kw))

    def get_children(self, column_collections=True, **kwargs):
        return (column_collections and list(self.c) or []) \
            + [self._order_by_clause, self._group_by_clause] \
            + list(self.selects)

    def bind(self):
        # resolve the bind from an explicit setting, else the first
        # member select that has one.
        if self._bind:
            return self._bind
        for s in self.selects:
            e = s.bind
            if e:
                return e
        else:
            return None

    def _set_bind(self, bind):
        self._bind = bind

    bind = property(bind, _set_bind)
class HasPrefixes(object):
    """Mixin supplying the generative ``prefix_with()`` method."""

    _prefixes = ()

    @_generative
    def prefix_with(self, *expr, **kw):
        """Add one or more expressions following the statement keyword, i.e.
        SELECT, INSERT, UPDATE, or DELETE. Generative.

        This is used to support backend-specific prefix keywords such as
        those provided by MySQL, e.g.::

            stmt = table.insert().prefix_with("LOW_PRIORITY", dialect="mysql")

        Multiple prefixes can be specified by multiple calls
        to :meth:`.prefix_with`.  ``*expr`` are textual or
        :class:`.ClauseElement` constructs rendered after the statement
        keyword; the single optional keyword ``dialect`` names a dialect
        to which rendering of the prefix is limited.
        """
        dialect = kw.pop('dialect', None)
        if kw:
            unsupported = ",".join(kw)
            raise exc.ArgumentError("Unsupported argument(s): %s" %
                                    unsupported)
        self._setup_prefixes(expr, dialect)

    def _setup_prefixes(self, prefixes, dialect=None):
        # accumulate (clause, dialect) pairs; never replace existing ones
        additions = tuple((_literal_as_text(p), dialect) for p in prefixes)
        self._prefixes = self._prefixes + additions
class Select(HasPrefixes, SelectBase):
    """Represents a ``SELECT`` statement.

    .. seealso::

        :func:`~.expression.select` - the function which creates
        a :class:`.Select` object.

        :ref:`coretutorial_selecting` - Core Tutorial description
        of :func:`.select`.

    """

    __visit_name__ = 'select'

    # statement-level defaults; overridden per-instance as needed
    _prefixes = ()
    _hints = util.immutabledict()
    _distinct = False
    _from_cloned = None
    _correlate = ()
    _correlate_except = None
    _memoized_property = SelectBase._memoized_property
def __init__(self,
             columns,
             whereclause=None,
             from_obj=None,
             distinct=False,
             having=None,
             correlate=True,
             prefixes=None,
             **kwargs):
    """Construct a Select object.

    The public constructor for Select is the
    :func:`select` function; see that function for
    argument descriptions.

    Additional generative and mutator methods are available on the
    :class:`SelectBase` superclass.

    """
    self._auto_correlate = correlate
    if distinct is not False:
        if distinct is True:
            # plain DISTINCT keyword
            self._distinct = True
        else:
            # list form: DISTINCT ON (<expressions>)
            self._distinct = [
                _literal_as_text(e)
                for e in util.to_list(distinct)
            ]
    if from_obj is not None:
        self._from_obj = util.OrderedSet(
            _interpret_as_from(f)
            for f in util.to_list(from_obj))
    else:
        self._from_obj = util.OrderedSet()
    try:
        cols_present = bool(columns)
    except TypeError:
        raise exc.ArgumentError("columns argument to select() must "
                                "be a Python list or other iterable")
    if cols_present:
        self._raw_columns = []
        for c in columns:
            c = _interpret_as_column_or_from(c)
            if isinstance(c, ScalarSelect):
                # scalar subqueries in the columns clause render
                # parenthesized
                c = c.self_group(against=operators.comma_op)
            self._raw_columns.append(c)
    else:
        self._raw_columns = []
    if whereclause is not None:
        self._whereclause = _literal_as_text(whereclause)
    else:
        self._whereclause = None
    if having is not None:
        self._having = _literal_as_text(having)
    else:
        self._having = None
    if prefixes:
        self._setup_prefixes(prefixes)
    SelectBase.__init__(self, **kwargs)
@property
def _froms(self):
    # would love to cache this,
    # but there's just enough edge cases, particularly now that
    # declarative encourages construction of SQL expressions
    # without tables present, to just regen this each time.
    froms = []
    seen = set()
    translate = self._from_cloned

    def add(items):
        # append each item not already represented (by clone lineage),
        # applying the _from_cloned translation first.
        for item in items:
            if translate and item in translate:
                item = translate[item]
            if not seen.intersection(item._cloned_set):
                froms.append(item)
            seen.update(item._cloned_set)

    # FROMs derived from the columns clause, then the WHERE clause,
    # then explicit select_from() entries, in that order.
    add(_from_objects(*self._raw_columns))
    if self._whereclause is not None:
        add(_from_objects(self._whereclause))
    add(self._from_obj)
    return froms
def _get_display_froms(self, explicit_correlate_froms=None,
                       implicit_correlate_froms=None):
    """Return the full list of 'from' clauses to be displayed.

    Takes into account a set of existing froms which may be
    rendered in the FROM clause of enclosing selects; this Select
    may want to leave those absent if it is automatically
    correlating.

    """
    froms = self._froms

    # FROM elements hidden by others (e.g. tables folded into a JOIN)
    toremove = set(itertools.chain(*[
        _expand_cloned(f._hide_froms)
        for f in froms]))
    if toremove:
        # if we're maintaining clones of froms,
        # add the copies out to the toremove list.  only include
        # clones that are lexical equivalents.
        if self._from_cloned:
            toremove.update(
                self._from_cloned[f] for f in
                toremove.intersection(self._from_cloned)
                if self._from_cloned[f]._is_lexical_equivalent(f)
            )
        # filter out to FROM clauses not in the list,
        # using a list to maintain ordering
        froms = [f for f in froms if f not in toremove]

    # explicit correlation: remove candidates that also appear in the
    # enclosing statement's FROM list.
    # NOTE(review): the nested "if to_correlate:" below repeats the outer
    # truthiness test; kept as-is to preserve the original structure.
    if self._correlate:
        to_correlate = self._correlate
        if to_correlate:
            froms = [
                f for f in froms if f not in
                _cloned_intersection(
                    _cloned_intersection(froms,
                                         explicit_correlate_froms or ()),
                    to_correlate
                )
            ]

    # correlate_except: correlate everything except the named clauses
    if self._correlate_except is not None:
        froms = [
            f for f in froms if f not in
            _cloned_difference(
                _cloned_intersection(froms,
                                     explicit_correlate_froms or ()),
                self._correlate_except
            )
        ]

    # auto-correlation against an enclosing statement's FROM list;
    # only applies when more than one FROM would remain.
    if self._auto_correlate and \
            implicit_correlate_froms and \
            len(froms) > 1:
        froms = [
            f for f in froms if f not in
            _cloned_intersection(froms, implicit_correlate_froms)
        ]

        if not len(froms):
            raise exc.InvalidRequestError("Select statement '%s"
                    "' returned no FROM clauses due to "
                    "auto-correlation; specify "
                    "correlate(<tables>) to control "
                    "correlation manually." % self)

    return froms
def _scalar_type(self):
    # when used as a scalar expression, the SELECT takes on the type
    # of the first expression in its columns clause.
    first = self._raw_columns[0]
    return list(first._select_iterable)[0].type
@property
def froms(self):
    """Return the displayed list of FromClause elements."""
    # delegates to _get_display_froms() with no enclosing context
    return self._get_display_froms()
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
    """Add an indexing hint for the given selectable to this
    :class:`.Select`.

    The text of the hint is rendered in the appropriate
    location for the database backend in use, relative
    to the given :class:`.Table` or :class:`.Alias` passed as the
    ``selectable`` argument. The dialect implementation
    typically uses Python string substitution syntax
    with the token ``%(name)s`` to render the name of
    the table or alias. E.g. when using Oracle, the
    following::

        select([mytable]).\\
            with_hint(mytable, "+ index(%(name)s ix_mytable)")

    Would render SQL as::

        select /*+ index(mytable ix_mytable) */ ... from mytable

    The ``dialect_name`` option will limit the rendering of a particular
    hint to a particular backend. Such as, to add hints for both Oracle
    and Sybase simultaneously::

        select([mytable]).\\
            with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\
            with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')

    """
    # keyed by (selectable, dialect_name); '*' applies to all dialects
    self._hints = self._hints.union(
        {(selectable, dialect_name): text})
@property
def type(self):
    # a Select carries no implicit datatype; as_scalar() must be used
    # to treat it as a typed column expression.
    raise exc.InvalidRequestError("Select objects don't have a type. "
                "Call as_scalar() on this Select object "
                "to return a 'scalar' version of this Select.")
@_memoized_property.method
def locate_all_froms(self):
    """Return a collection of all FromClause elements referenced by this
    Select.

    This is a superset of that returned by the ``froms`` property,
    which is specifically for those FromClause elements that would
    actually be rendered.
    """
    direct = self._froms
    transitive = list(_from_objects(*direct))
    return direct + transitive
@property
def inner_columns(self):
    """an iterator of all ColumnElement expressions which would
    be rendered into the columns clause of the resulting SELECT statement.

    """
    return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
    """Return True if this Select is derived from the given clause,
    either directly (clone lineage) or via any referenced FROM element.
    """
    if self in fromclause._cloned_set:
        return True
    return any(f.is_derived_from(fromclause)
               for f in self.locate_all_froms())
def _copy_internals(self, clone=_clone, **kw):
    # Select() object has been cloned and probably adapted by the
    # given clone function.  Apply the cloning function to internal
    # objects

    # 1. keep a dictionary of the froms we've cloned, and what
    # they've become.  This is consulted later when we derive
    # additional froms from "whereclause" and the columns clause,
    # which may still reference the uncloned parent table.
    # as of 0.7.4 we also put the current version of _froms, which
    # gets cleared on each generation.  previously we were "baking"
    # _froms into self._from_obj.
    self._from_cloned = from_cloned = dict((f, clone(f, **kw))
            for f in self._from_obj.union(self._froms))

    # 3. update persistent _from_obj with the cloned versions.
    self._from_obj = util.OrderedSet(from_cloned[f] for f in
            self._from_obj)

    # the _correlate collection is done separately, what can happen
    # here is the same item is _correlate as in _from_obj but the
    # _correlate version has an annotation on it - (specifically
    # RelationshipProperty.Comparator._criterion_exists() does
    # this). Also keep _correlate liberally open with it's previous
    # contents, as this set is used for matching, not rendering.
    self._correlate = set(clone(f) for f in
                          self._correlate).union(self._correlate)

    # 4. clone other things.  The difficulty here is that Column
    # objects are not actually cloned, and refer to their original
    # .table, resulting in the wrong "from" parent after a clone
    # operation.  Hence _from_cloned and _from_obj supercede what is
    # present here.
    self._raw_columns = [clone(c, **kw) for c in self._raw_columns]
    for attr in '_whereclause', '_having', '_order_by_clause', \
            '_group_by_clause':
        if getattr(self, attr) is not None:
            setattr(self, attr, clone(getattr(self, attr), **kw))

    # erase exported column list, _froms collection,
    # etc.
    self._reset_exported()
def get_children(self, column_collections=True, **kwargs):
    """Return child elements as per the ClauseElement specification."""
    if column_collections:
        children = list(self.columns)
    else:
        children = []
    children.extend(self._raw_columns)
    children.extend(self._froms)
    for clause in (self._whereclause, self._having,
                   self._order_by_clause, self._group_by_clause):
        if clause is not None:
            children.append(clause)
    return children
@_generative
def column(self, column):
    """return a new select() construct with the given column expression
    added to its columns clause.

    """
    self.append_column(column)
def reduce_columns(self, only_synonyms=True):
    """Return a new :func`.select` construct with redundantly
    named, equivalently-valued columns removed from the columns clause.

    "Redundant" here means two columns where one refers to the
    other either based on foreign key, or via a simple equality
    comparison in the WHERE clause of the statement.   The primary purpose
    of this method is to automatically construct a select statement
    with all uniquely-named columns, without the need to use
    table-qualified labels as :meth:`.apply_labels` does.

    When columns are omitted based on foreign key, the referred-to
    column is the one that's kept.  When columns are omitted based on
    WHERE eqivalence, the first column in the columns clause is the
    one that's kept.

    :param only_synonyms: when True, limit the removal of columns
     to those which have the same name as the equivalent.   Otherwise,
     all columns that are equivalent to another are removed.

    .. versionadded:: 0.8

    """
    # delegate the equivalence analysis to sqlutil.reduce_columns(),
    # passing the WHERE clause and explicit FROMs as extra context.
    return self.with_only_columns(
        sqlutil.reduce_columns(
            self.inner_columns,
            only_synonyms=only_synonyms,
            *(self._whereclause, ) + tuple(self._from_obj)
        )
    )
@_generative
def with_only_columns(self, columns):
    """Return a new :func:`.select` construct with its columns
    clause replaced with the given columns.

    .. versionchanged:: 0.7.3
        Due to a bug fix, this method has a slight
        behavioral change as of version 0.7.3.
        Prior to version 0.7.3, the FROM clause of
        a :func:`.select` was calculated upfront and as new columns
        were added; in 0.7.3 and later it's calculated
        at compile time, fixing an issue regarding late binding
        of columns to parent tables.  This changes the behavior of
        :meth:`.Select.with_only_columns` in that FROM clauses no
        longer represented in the new list are dropped,
        but this behavior is more consistent in
        that the FROM clauses are consistently derived from the
        current columns clause.  The original intent of this method
        is to allow trimming of the existing columns list to be fewer
        columns than originally present; the use case of replacing
        the columns list with an entirely different one hadn't
        been anticipated until 0.7.3 was released; the usage
        guidelines below illustrate how this should be done.

    This method is exactly equivalent to as if the original
    :func:`.select` had been called with the given columns
    clause.   I.e. a statement::

        s = select([table1.c.a, table1.c.b])
        s = s.with_only_columns([table1.c.b])

    should be exactly equivalent to::

        s = select([table1.c.b])

    This means that FROM clauses which are only derived
    from the column list will be discarded if the new column
    list no longer contains that FROM::

        >>> table1 = table('t1', column('a'), column('b'))
        >>> table2 = table('t2', column('a'), column('b'))
        >>> s1 = select([table1.c.a, table2.c.b])
        >>> print s1
        SELECT t1.a, t2.b FROM t1, t2
        >>> s2 = s1.with_only_columns([table2.c.b])
        >>> print s2
        SELECT t2.b FROM t1

    The preferred way to maintain a specific FROM clause
    in the construct, assuming it won't be represented anywhere
    else (i.e. not in the WHERE clause, etc.) is to set it using
    :meth:`.Select.select_from`::

        >>> s1 = select([table1.c.a, table2.c.b]).\\
        ...         select_from(table1.join(table2,
        ...                 table1.c.a==table2.c.a))
        >>> s2 = s1.with_only_columns([table2.c.b])
        >>> print s2
        SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a

    Care should also be taken to use the correct
    set of column objects passed to :meth:`.Select.with_only_columns`.
    Since the method is essentially equivalent to calling the
    :func:`.select` construct in the first place with the given
    columns, the columns passed to :meth:`.Select.with_only_columns`
    should usually be a subset of those which were passed
    to the :func:`.select` construct, not those which are available
    from the ``.c`` collection of that :func:`.select`.  That
    is::

        s = select([table1.c.a, table1.c.b]).select_from(table1)
        s = s.with_only_columns([table1.c.b])

    and **not**::

        # usually incorrect
        s = s.with_only_columns([s.c.b])

    The latter would produce the SQL::

        SELECT b
        FROM (SELECT t1.a AS a, t1.b AS b
        FROM t1), t1

    Since the :func:`.select` construct is essentially being
    asked to select both from ``table1`` as well as itself.

    """
    self._reset_exported()
    rc = []
    for c in columns:
        c = _interpret_as_column_or_from(c)
        if isinstance(c, ScalarSelect):
            # scalar subqueries render parenthesized in a columns clause
            c = c.self_group(against=operators.comma_op)
        rc.append(c)
    self._raw_columns = rc
@_generative
def where(self, whereclause):
    """return a new select() construct with the given expression added to
    its WHERE clause, joined to the existing clause via AND, if any.

    """
    self.append_whereclause(whereclause)
@_generative
def having(self, having):
    """return a new select() construct with the given expression added to
    its HAVING clause, joined to the existing clause via AND, if any.

    """
    self.append_having(having)
@_generative
def distinct(self, *expr):
    """Return a new select() construct which will apply DISTINCT to its
    columns clause.

    When column expressions are supplied, the Postgresql dialect will
    render a ``DISTINCT ON (<expressions>)`` construct instead.
    """
    if not expr:
        # plain DISTINCT keyword
        self._distinct = True
        return
    criteria = [_literal_as_text(e) for e in expr]
    if isinstance(self._distinct, list):
        # extend an existing DISTINCT ON list
        self._distinct = self._distinct + criteria
    else:
        self._distinct = criteria
@_generative
def select_from(self, fromclause):
    """return a new :func:`.select` construct with the
    given FROM expression
    merged into its list of FROM objects.

    E.g.::

        table1 = table('t1', column('a'))
        table2 = table('t2', column('b'))
        s = select([table1.c.a]).\\
            select_from(
                table1.join(table2, table1.c.a==table2.c.b)
            )

    The "from" list is a unique set on the identity of each element,
    so adding an already present :class:`.Table` or other selectable
    will have no effect.   Passing a :class:`.Join` that refers
    to an already present :class:`.Table` or other selectable will have
    the effect of concealing the presence of that selectable as
    an individual element in the rendered FROM list, instead
    rendering it into a JOIN clause.

    While the typical purpose of :meth:`.Select.select_from` is to
    replace the default, derived FROM clause with a join, it can
    also be called with individual table elements, multiple times
    if desired, in the case that the FROM clause cannot be fully
    derived from the columns clause::

        select([func.count('*')]).select_from(table1)

    """
    self.append_from(fromclause)
@_generative
def correlate(self, *fromclauses):
    """return a new :class:`.Select` which will correlate the given FROM
    clauses to that of an enclosing :class:`.Select`.

    Calling this method turns off the :class:`.Select` object's
    default behavior of "auto-correlation".  Normally, FROM elements
    which appear in a :class:`.Select` that encloses this one via
    its :term:`WHERE clause`, ORDER BY, HAVING or
    :term:`columns clause` will be omitted from this :class:`.Select`
    object's :term:`FROM clause`.
    Setting an explicit correlation collection using the
    :meth:`.Select.correlate` method provides a fixed list of FROM objects
    that can potentially take place in this process.

    When :meth:`.Select.correlate` is used to apply specific FROM clauses
    for correlation, the FROM elements become candidates for
    correlation regardless of how deeply nested this :class:`.Select`
    object is, relative to an enclosing :class:`.Select` which refers to
    the same FROM object.  This is in contrast to the behavior of
    "auto-correlation" which only correlates to an immediate enclosing
    :class:`.Select`.   Multi-level correlation ensures that the link
    between enclosed and enclosing :class:`.Select` is always via
    at least one WHERE/ORDER BY/HAVING/columns clause in order for
    correlation to take place.

    If ``None`` is passed, the :class:`.Select` object will correlate
    none of its FROM entries, and all will render unconditionally
    in the local FROM clause.

    :param \*fromclauses: a list of one or more :class:`.FromClause`
     constructs, or other compatible constructs (i.e. ORM-mapped
     classes) to become part of the correlate collection.

    .. versionchanged:: 0.8.0 ORM-mapped classes are accepted by
       :meth:`.Select.correlate`.

    .. versionchanged:: 0.8.0 The :meth:`.Select.correlate` method no
       longer unconditionally removes entries from the FROM clause; instead,
       the candidate FROM entries must also be matched by a FROM entry
       located in an enclosing :class:`.Select`, which ultimately encloses
       this one as present in the WHERE clause, ORDER BY clause, HAVING
       clause, or columns clause of an enclosing :meth:`.Select`.

    .. versionchanged:: 0.8.2 explicit correlation takes place
       via any level of nesting of :class:`.Select` objects; in previous
       0.8 versions, correlation would only occur relative to the immediate
       enclosing :class:`.Select` construct.

    .. seealso::

        :meth:`.Select.correlate_except`

        :ref:`correlated_subqueries`

    """
    self._auto_correlate = False
    if fromclauses and fromclauses[0] is None:
        # correlate(None): correlate nothing
        self._correlate = ()
    else:
        self._correlate = set(self._correlate).union(
            _interpret_as_from(f) for f in fromclauses)
@_generative
def correlate_except(self, *fromclauses):
    """return a new :class:`.Select` which will omit the given FROM
    clauses from the auto-correlation process.

    Calling :meth:`.Select.correlate_except` turns off the
    :class:`.Select` object's default behavior of
    "auto-correlation" for the given FROM elements.  An element
    specified here will unconditionally appear in the FROM list, while
    all other FROM elements remain subject to normal auto-correlation
    behaviors.

    .. versionchanged:: 0.8.2 The :meth:`.Select.correlate_except`
       method was improved to fully prevent FROM clauses specified here
       from being omitted from the immediate FROM clause of this
       :class:`.Select`.

    If ``None`` is passed, the :class:`.Select` object will correlate
    all of its FROM entries.

    .. versionchanged:: 0.8.2 calling ``correlate_except(None)`` will
       correctly auto-correlate all FROM clauses.

    :param \*fromclauses: a list of one or more :class:`.FromClause`
     constructs, or other compatible constructs (i.e. ORM-mapped
     classes) to become part of the correlate-exception collection.

    .. seealso::

        :meth:`.Select.correlate`

        :ref:`correlated_subqueries`

    """
    self._auto_correlate = False
    if fromclauses and fromclauses[0] is None:
        # correlate_except(None): restore full auto-correlation
        self._correlate_except = ()
    else:
        self._correlate_except = set(self._correlate_except or ()).union(
            _interpret_as_from(f) for f in fromclauses)
def append_correlation(self, fromclause):
    """Append the given correlation expression to this select()
    construct.

    This is an **in-place** mutation method; the
    :meth:`~.Select.correlate` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    self._auto_correlate = False
    # note: the argument is iterated, i.e. a collection of clauses
    additions = set(_interpret_as_from(f) for f in fromclause)
    self._correlate = set(self._correlate).union(additions)
def append_column(self, column):
    """Append the given column expression to the columns clause of this
    select() construct.

    This is an **in-place** mutation method; the
    :meth:`~.Select.column` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    self._reset_exported()
    col = _interpret_as_column_or_from(column)
    if isinstance(col, ScalarSelect):
        # scalar subqueries render parenthesized in a columns clause
        col = col.self_group(against=operators.comma_op)
    self._raw_columns = self._raw_columns + [col]
def append_prefix(self, clause):
    """Append the given columns clause prefix expression to this select()
    construct.

    This is an **in-place** mutation method; the
    :meth:`~.Select.prefix_with` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    literal = _literal_as_text(clause)
    self._prefixes = self._prefixes + (literal,)
def append_whereclause(self, whereclause):
    """Append the given expression to this select() construct's WHERE
    criterion.

    The expression will be joined to existing WHERE criterion via AND.

    This is an **in-place** mutation method; the
    :meth:`~.Select.where` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    self._reset_exported()
    criterion = _literal_as_text(whereclause)
    if self._whereclause is None:
        self._whereclause = criterion
    else:
        self._whereclause = and_(self._whereclause, criterion)
def append_having(self, having):
    """Append the given expression to this select() construct's HAVING
    criterion.

    The expression will be joined to existing HAVING criterion via AND.

    This is an **in-place** mutation method; the
    :meth:`~.Select.having` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    criterion = _literal_as_text(having)
    if self._having is None:
        self._having = criterion
    else:
        self._having = and_(self._having, criterion)
def append_from(self, fromclause):
    """Append the given FromClause expression to this select() construct's
    FROM clause.

    This is an **in-place** mutation method; the
    :meth:`~.Select.select_from` method is preferred, as it provides
    standard :term:`method chaining`.
    """
    self._reset_exported()
    interpreted = _interpret_as_from(fromclause)
    self._from_obj = self._from_obj.union([interpreted])
@_memoized_property
def _columns_plus_names(self):
    # de-duplicated, flattened sequence of column expressions
    cols = util.unique_list(_select_iterables(self._raw_columns))
    if not self.use_labels:
        return [(None, c) for c in cols]
    # labeled mode: first occurrence of a label wins; later
    # collisions fall back to the column's anonymous label.
    seen = set()
    result = []
    for c in cols:
        name = c._label
        if name is None:
            result.append((None, c))
            continue
        if name in seen:
            name = c.anon_label
        else:
            seen.add(name)
        result.append((name, c))
    return result
    def _populate_column_collection(self):
        # Build the exported .c collection by proxying each raw column
        # onto this select statement.
        for name, c in self._columns_plus_names:
            if not hasattr(c, '_make_proxy'):
                # e.g. raw textual fragments which cannot be proxied
                continue
            if name is None:
                key = None
            elif self.use_labels:
                key = c._key_label
                if key is not None and key in self.c:
                    # key collision within the collection; use an
                    # anonymous label instead
                    key = c.anon_label
            else:
                key = None
            c._make_proxy(self, key=key,
                            name=name,
                            name_is_truncatable=True)
    def _refresh_for_new_column(self, column):
        # A column was added to one of our FROM objects after construction;
        # locate its proxy, and if our column collection is already
        # populated, export the new column onto this select as well.
        for fromclause in self._froms:
            col = fromclause._refresh_for_new_column(column)
            if col is not None:
                if col in self.inner_columns and self._cols_populated:
                    our_label = col._key_label if self.use_labels else col.key
                    if our_label not in self.c:
                        return col._make_proxy(self,
                                name=col._label if self.use_labels else None,
                                key=col._key_label if self.use_labels else None,
                                name_is_truncatable=True)
                # the column belongs to this FROM but needs no new proxy
                return None
        return None
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions and should not require explicit use.
"""
if isinstance(against, CompoundSelect):
return self
return FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return except_(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return intersect_all(self, other, **kwargs)
    def bind(self):
        # Resolve the Engine/Connection this statement is bound to: an
        # explicitly assigned bind wins; otherwise probe the first FROM
        # element or, lacking FROMs, each raw column, caching the first
        # engine found.
        if self._bind:
            return self._bind
        froms = self._froms
        if not froms:
            for c in self._raw_columns:
                e = c.bind
                if e:
                    self._bind = e
                    return e
        else:
            e = list(froms)[0].bind
            if e:
                self._bind = e
                return e
        return None
    def _set_bind(self, bind):
        self._bind = bind
    # expose bind as a read/write property using the two functions above
    bind = property(bind, _set_bind)
class UpdateBase(HasPrefixes, Executable, ClauseElement):
    """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
    """
    __visit_name__ = 'update_base'
    # DML statements autocommit by default under connectionless execution.
    _execution_options = \
        Executable._execution_options.union({'autocommit': True})
    # dialect-specific keyword arguments collected by subclasses
    kwargs = util.immutabledict()
    # (selectable, dialect_name) -> hint text; populated via with_hint()
    _hints = util.immutabledict()
    _prefixes = ()
    def _process_colparams(self, parameters):
        # Normalize the VALUES/SET parameter structure.  Returns a tuple
        # (parameters, is_multi); tuples/lists of values are converted to
        # dicts keyed positionally by the table's columns.
        def process_single(p):
            if isinstance(p, (list, tuple)):
                return dict(
                    (c.key, pval)
                    for c, pval in zip(self.table.c, p)
                )
            else:
                return p
        if isinstance(parameters, (list, tuple)) and \
                isinstance(parameters[0], (list, tuple, dict)):
            if not self._supports_multi_parameters:
                raise exc.InvalidRequestError(
                    "This construct does not support "
                    "multiple parameter sets.")
            return [process_single(p) for p in parameters], True
        else:
            return process_single(parameters), False
    def params(self, *arg, **kw):
        """Set the parameters for the statement.

        This method raises ``NotImplementedError`` on the base class,
        and is overridden by :class:`.ValuesBase` to provide the
        SET/VALUES clause of UPDATE and INSERT.
        """
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")
    def bind(self):
        """Return a 'bind' linked to this :class:`.UpdateBase`
        or a :class:`.Table` associated with it.
        """
        return self._bind or self.table.bind
    def _set_bind(self, bind):
        self._bind = bind
    # expose bind as a read/write property using the two functions above
    bind = property(bind, _set_bind)
    @_generative
    def returning(self, *cols):
        """Add a RETURNING or equivalent clause to this statement.

        The given list of columns represent columns within the table that is
        the target of the INSERT, UPDATE, or DELETE. Each element can be any
        column expression. :class:`~sqlalchemy.schema.Table` objects will be
        expanded into their individual columns.

        Upon compilation, a RETURNING clause, or database equivalent,
        will be rendered within the statement. For INSERT and UPDATE,
        the values are the newly inserted/updated values. For DELETE,
        the values are those of the rows which were deleted.

        Upon execution, the values of the columns to be returned
        are made available via the result set and can be iterated
        using ``fetchone()`` and similar. For DBAPIs which do not
        natively support returning values (i.e. cx_oracle),
        SQLAlchemy will approximate this behavior at the result level
        so that a reasonable amount of behavioral neutrality is
        provided.

        Note that not all databases/DBAPIs
        support RETURNING. For those backends with no support,
        an exception is raised upon compilation and/or execution.
        For those who do support it, the functionality across backends
        varies greatly, including restrictions on executemany()
        and other statements which return multiple rows. Please
        read the documentation notes for the database in use in
        order to determine the availability of RETURNING.
        """
        self._returning = cols
    @_generative
    def with_hint(self, text, selectable=None, dialect_name="*"):
        """Add a table hint for a single table to this
        INSERT/UPDATE/DELETE statement.

        .. note::
            :meth:`.UpdateBase.with_hint` currently applies only to
            Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use
            :meth:`.UpdateBase.prefix_with`.

        The text of the hint is rendered in the appropriate
        location for the database backend in use, relative
        to the :class:`.Table` that is the subject of this
        statement, or optionally to that of the given
        :class:`.Table` passed as the ``selectable`` argument.

        The ``dialect_name`` option will limit the rendering of a particular
        hint to a particular backend. Such as, to add a hint
        that only takes effect for SQL Server::

            mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")

        .. versionadded:: 0.7.6

        :param text: Text of the hint.
        :param selectable: optional :class:`.Table` that specifies
         an element of the FROM clause within an UPDATE or DELETE
         to be the subject of the hint - applies only to certain backends.
        :param dialect_name: defaults to ``*``, if specified as the name
         of a particular dialect, will apply these hints only when
         that dialect is in use.
        """
        if selectable is None:
            selectable = self.table
        self._hints = self._hints.union(
            {(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
    """Supplies support for :meth:`.ValuesBase.values` to
    INSERT and UPDATE constructs."""
    __visit_name__ = 'values_base'
    # subclasses (Insert) opt in to multi-row parameter lists
    _supports_multi_parameters = False
    _has_multi_parameters = False
    def __init__(self, table, values, prefixes):
        self.table = table
        self.parameters, self._has_multi_parameters = \
            self._process_colparams(values)
        if prefixes:
            self._setup_prefixes(prefixes)
    @_generative
    def values(self, *args, **kwargs):
        """specify a fixed VALUES clause for an INSERT statement, or the SET
        clause for an UPDATE.

        Note that the :class:`.Insert` and :class:`.Update` constructs support
        per-execution time formatting of the VALUES and/or SET clauses,
        based on the arguments passed to :meth:`.Connection.execute`. However,
        the :meth:`.ValuesBase.values` method can be used to "fix" a particular
        set of parameters into the statement.

        Multiple calls to :meth:`.ValuesBase.values` will produce a new
        construct, each one with the parameter list modified to include
        the new parameters sent. In the typical case of a single
        dictionary of parameters, the newly passed keys will replace
        the same keys in the previous construct. In the case of a list-based
        "multiple values" construct, each new list of values is extended
        onto the existing list of values.

        :param \**kwargs: key value pairs representing the string key
         of a :class:`.Column` mapped to the value to be rendered into the
         VALUES or SET clause::

            users.insert().values(name="some name")
            users.update().where(users.c.id==5).values(name="some name")

        :param \*args: Alternatively, a dictionary, tuple or list
         of dictionaries or tuples can be passed as a single positional
         argument in order to form the VALUES or
         SET clause of the statement. The single dictionary form
         works the same as the kwargs form::

            users.insert().values({"name": "some name"})

         If a tuple is passed, the tuple should contain the same number
         of columns as the target :class:`.Table`::

            users.insert().values((5, "some name"))

         The :class:`.Insert` construct also supports multiply-rendered VALUES
         construct, for those backends which support this SQL syntax
         (SQLite, Postgresql, MySQL). This mode is indicated by passing a list
         of one or more dictionaries/tuples::

            users.insert().values([
                {"name": "some name"},
                {"name": "some other name"},
                {"name": "yet another name"},
            ])

         In the case of an :class:`.Update`
         construct, only the single dictionary/tuple form is accepted,
         else an exception is raised. It is also an exception case to
         attempt to mix the single-/multiple- value styles together,
         either through multiple :meth:`.ValuesBase.values` calls
         or by sending a list + kwargs at the same time.

        .. note::
            Passing a multiple values list is *not* the same
            as passing a multiple values list to the :meth:`.Connection.execute`
            method. Passing a list of parameter sets to :meth:`.ValuesBase.values`
            produces a construct of this form::

                INSERT INTO table (col1, col2, col3) VALUES
                                (col1_0, col2_0, col3_0),
                                (col1_1, col2_1, col3_1),
                                ...

            whereas a multiple list passed to :meth:`.Connection.execute`
            has the effect of using the DBAPI
            `executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
            method, which provides a high-performance system of invoking
            a single-row INSERT statement many times against a series
            of parameter sets. The "executemany" style is supported by
            all database backends, as it does not depend on a special SQL
            syntax.

        .. versionadded:: 0.8
            Support for multiple-VALUES INSERT statements.

        .. seealso::

            :ref:`inserts_and_updates` - SQL Expression
            Language Tutorial

            :func:`~.expression.insert` - produce an ``INSERT`` statement

            :func:`~.expression.update` - produce an ``UPDATE`` statement

        """
        if self._has_multi_parameters and kwargs:
            raise exc.InvalidRequestError(
                "This construct already has multiple parameter sets.")
        if args:
            # a single positional dict/tuple/list carries the values
            if len(args) > 1:
                raise exc.ArgumentError(
                    "Only a single dictionary/tuple or list of "
                    "dictionaries/tuples is accepted positionally.")
            v = args[0]
        else:
            v = {}
        if self.parameters is None:
            self.parameters, self._has_multi_parameters = \
                self._process_colparams(v)
        else:
            if self._has_multi_parameters:
                # multi-row mode: new parameter lists extend the existing list
                self.parameters = list(self.parameters)
                p, self._has_multi_parameters = self._process_colparams(v)
                if not self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")
                self.parameters.extend(p)
            else:
                # single-row mode: newly passed keys replace existing ones
                self.parameters = self.parameters.copy()
                p, self._has_multi_parameters = self._process_colparams(v)
                if self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")
                self.parameters.update(p)
        if kwargs:
            if self._has_multi_parameters:
                # NOTE: "simultaenously" typo is preserved; this is a
                # runtime error message, not a comment.
                raise exc.ArgumentError(
                    "Can't pass kwargs and multiple parameter sets "
                    "simultaenously")
            else:
                self.parameters.update(kwargs)
class Insert(ValuesBase):
    """Represent an INSERT construct.

    The :class:`.Insert` object is created using the
    :func:`~.expression.insert()` function.

    .. seealso::

        :ref:`coretutorial_insert_expressions`

    """
    __visit_name__ = 'insert'
    # INSERT supports multi-row VALUES parameter lists
    _supports_multi_parameters = True
    def __init__(self,
                 table,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 **kwargs):
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        self.select = None
        self.inline = inline
        self._returning = returning
        # remaining kwargs are dialect-specific arguments
        self.kwargs = kwargs
    def get_children(self, **kwargs):
        if self.select is not None:
            return self.select,
        else:
            return ()
    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self.parameters = self.parameters.copy()
class Update(ValuesBase):
    """Represent an Update construct.

    The :class:`.Update` object is created using the :func:`update()` function.

    """
    __visit_name__ = 'update'
    def __init__(self,
                 table,
                 whereclause,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 **kwargs):
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        self.inline = inline
        # remaining kwargs are dialect-specific arguments
        self.kwargs = kwargs
    def get_children(self, **kwargs):
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
        self.parameters = self.parameters.copy()
    @_generative
    def where(self, whereclause):
        """return a new update() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.

        """
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause,
                    _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
    @property
    def _extra_froms(self):
        # Collect FROM objects referenced by the WHERE clause beyond the
        # target table itself (multi-table UPDATE support).
        # TODO: this could be made memoized
        # if the memoization is reset on each generative call.
        froms = []
        seen = set([self.table])
        if self._whereclause is not None:
            for item in _from_objects(self._whereclause):
                if not seen.intersection(item._cloned_set):
                    froms.append(item)
                    seen.update(item._cloned_set)
        return froms
class Delete(UpdateBase):
    """Represent a DELETE construct.

    The :class:`.Delete` object is created using the :func:`delete()` function.

    """
    __visit_name__ = 'delete'
    def __init__(self,
                 table,
                 whereclause,
                 bind=None,
                 returning=None,
                 prefixes=None,
                 **kwargs):
        self._bind = bind
        self.table = table
        self._returning = returning
        if prefixes:
            self._setup_prefixes(prefixes)
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        # remaining kwargs are dialect-specific arguments
        self.kwargs = kwargs
    def get_children(self, **kwargs):
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    @_generative
    def where(self, whereclause):
        """Add the given WHERE clause to a newly returned delete construct."""
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause,
                    _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
class _IdentifiedClause(Executable, ClauseElement):
    # Base for clauses identified by a name (savepoint operations); these
    # never autocommit.
    __visit_name__ = 'identified'
    _execution_options = \
        Executable._execution_options.union({'autocommit': False})
    quote = None
    def __init__(self, ident):
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    # Represents a SAVEPOINT statement for the identified savepoint.
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    # Represents a ROLLBACK TO SAVEPOINT statement.
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    # Represents a RELEASE SAVEPOINT statement.
    __visit_name__ = 'release_savepoint'
# old names for compatibility
# Deprecated private aliases retained so external code that imported the
# older underscore-prefixed class names keeps working.
_BindParamClause = BindParameter
_Label = Label
_SelectBase = SelectBase
_BinaryExpression = BinaryExpression
_Cast = Cast
_Null = Null
_False = False_
_True = True_
_TextClause = TextClause
_UnaryExpression = UnaryExpression
_Case = Case
_Tuple = Tuple
_Over = Over
_Generative = Generative
_TypeClause = TypeClause
_Extract = Extract
_Exists = Exists
_Grouping = Grouping
_FromGrouping = FromGrouping
_ScalarSelect = ScalarSelect
| VagrantApe/flaskMicroblog | venv/lib/python2.7/site-packages/sqlalchemy/sql/expression.py | Python | bsd-3-clause | 219,350 | [
"VisIt"
] | d49dbfd752acafc970883e2f6093b8b3e2c8f2e103322c8f2d2222e36703aaf3 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import inspect
import logging
from abc import abstractproperty
from collections import OrderedDict
from types import TypeType
from twitter.common.collections import OrderedSet
from pants.engine.addressable import Exactly
from pants.engine.selectors import Get, type_or_constraint_repr
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class _RuleVisitor(ast.NodeVisitor):
  """AST visitor that collects the constraints of every `Get(...)` call
  appearing in a visited function body."""
  def __init__(self):
    super(_RuleVisitor, self).__init__()
    self.gets = []
  def visit_Call(self, node):
    func = node.func
    if isinstance(func, ast.Name) and func.id == Get.__name__:
      self.gets.append(Get.extract_constraints(node))
def rule(output_type, input_selectors):
  """A @decorator that declares that a particular static function may be used as a TaskRule.

  :param Constraint output_type: The return/output type for the Rule. This may be either a
    concrete Python type, or an instance of `Exactly` representing a union of multiple types.
  :param list input_selectors: A list of Selector instances that matches the number of arguments
    to the @decorated function.
  """
  def wrapper(func):
    if not inspect.isfunction(func):
      raise ValueError('The @rule decorator must be applied innermost of all decorators.')
    # Capture the caller's frame so that names appearing inside Get(...)
    # calls can be resolved against the caller's globals/builtins.
    caller_frame = inspect.stack()[1][0]
    module_ast = ast.parse(inspect.getsource(func))
    def resolve_type(name):
      resolved = caller_frame.f_globals.get(name) or caller_frame.f_builtins.get(name)
      if not isinstance(resolved, (TypeType, Exactly)):
        raise ValueError('Expected either a `type` constructor or TypeConstraint instance; '
                         'got: {}'.format(name))
      return resolved
    # Walk the function's AST, collecting the Get(...) calls which declare
    # dependencies beyond the positional input_selectors.
    gets = OrderedSet()
    for node in ast.iter_child_nodes(module_ast):
      if isinstance(node, ast.FunctionDef) and node.name == func.__name__:
        rule_visitor = _RuleVisitor()
        rule_visitor.visit(node)
        gets.update(Get(resolve_type(p), resolve_type(s)) for p, s in rule_visitor.gets)
    # Attach the constructed TaskRule to the function for later indexing.
    func._rule = TaskRule(output_type, input_selectors, func, input_gets=list(gets))
    return func
  return wrapper
class Rule(AbstractClass):
  """Rules declare how to produce products for the product graph.

  A rule describes what dependencies must be provided to produce a particular product. They also act
  as factories for constructing the nodes within the graph.
  """
  @abstractproperty
  def output_constraint(self):
    """An output Constraint type for the rule."""
  @abstractproperty
  def input_selectors(self):
    """Collection of input selectors."""
class TaskRule(datatype('TaskRule', ['output_constraint', 'input_selectors', 'input_gets', 'func']), Rule):
  """A Rule that runs a task function when all of its input selectors are satisfied.

  TODO: Make input_gets non-optional when more/all rules are using them.
  """
  def __new__(cls, output_type, input_selectors, func, input_gets=None):
    # Validate result type.
    # A bare `type` is normalized into an Exactly constraint so downstream
    # code can treat output_constraint uniformly.
    if isinstance(output_type, Exactly):
      constraint = output_type
    elif isinstance(output_type, type):
      constraint = Exactly(output_type)
    else:
      raise TypeError("Expected an output_type for rule `{}`, got: {}".format(
        func.__name__, output_type))
    # Validate selectors.
    if not isinstance(input_selectors, list):
      raise TypeError("Expected a list of Selectors for rule `{}`, got: {}".format(
        func.__name__, type(input_selectors)))
    # Validate gets.
    input_gets = [] if input_gets is None else input_gets
    if not isinstance(input_gets, list):
      raise TypeError("Expected a list of Gets for rule `{}`, got: {}".format(
        func.__name__, type(input_gets)))
    # Create.  Selectors/gets are stored as tuples for immutability.
    return super(TaskRule, cls).__new__(cls, constraint, tuple(input_selectors), tuple(input_gets), func)
  def __str__(self):
    return '({}, {!r}, {})'.format(type_or_constraint_repr(self.output_constraint),
                                   self.input_selectors,
                                   self.func.__name__)
class SingletonRule(datatype('SingletonRule', ['output_constraint', 'value']), Rule):
  """A default rule for a product, which is thus a singleton for that product."""
  def __new__(cls, output_type, value):
    # Validate result type; bare types are normalized into Exactly.
    if isinstance(output_type, Exactly):
      constraint = output_type
    elif isinstance(output_type, type):
      constraint = Exactly(output_type)
    else:
      raise TypeError("Expected an output_type for rule; got: {}".format(output_type))
    # Create.
    return super(SingletonRule, cls).__new__(cls, constraint, value)
  @property
  def input_selectors(self):
    # Singletons have no inputs.
    return tuple()
  def __repr__(self):
    return '{}({}, {})'.format(type(self).__name__, type_or_constraint_repr(self.output_constraint), self.value)
class RootRule(datatype('RootRule', ['output_constraint']), Rule):
  """Represents a root input to an execution of a rule graph.

  Roots act roughly like parameters, in that in some cases the only source of a
  particular type might be when a value is provided as a root subject at the beginning
  of an execution.
  """
  @property
  def input_selectors(self):
    """A RootRule has no selector inputs: it is satisfied directly by a root subject.

    Exposed as a property to satisfy the `Rule.input_selectors` abstractproperty;
    without the decorator, attribute access returned the bound method rather than
    a collection (compare `SingletonRule.input_selectors`).
    """
    return []
class RuleIndex(datatype('RuleIndex', ['rules', 'roots'])):
  """Holds an index of Tasks and Singletons used to instantiate Nodes."""
  @classmethod
  def create(cls, rule_entries):
    """Creates a NodeBuilder with tasks indexed by their output type."""
    # NB make tasks ordered so that gen ordering is deterministic.
    serializable_rules = OrderedDict()
    serializable_roots = set()
    def add_task(product_type, rule):
      # Index `rule` under `product_type`, preserving insertion order.
      if product_type not in serializable_rules:
        serializable_rules[product_type] = OrderedSet()
      serializable_rules[product_type].add(rule)
    def add_rule(rule):
      if isinstance(rule, RootRule):
        # Root rules are tracked only by their output constraint.
        serializable_roots.add(rule.output_constraint)
        return
      # TODO: The heterogenity here has some confusing implications here:
      # see https://github.com/pantsbuild/pants/issues/4005
      for kind in rule.output_constraint.types:
        # NB Ensure that interior types from SelectDependencies work by
        # indexing on the list of types in the constraint.
        add_task(kind, rule)
      add_task(rule.output_constraint, rule)
    for entry in rule_entries:
      if isinstance(entry, Rule):
        add_rule(entry)
      elif hasattr(entry, '__call__'):
        # A plain callable must have been decorated with @rule, which
        # attaches the TaskRule as `_rule`.
        rule = getattr(entry, '_rule', None)
        if rule is None:
          raise TypeError("Expected callable {} to be decorated with @rule.".format(entry))
        add_rule(rule)
      else:
        raise TypeError("Unexpected rule type: {}. "
                        "Rules either extend Rule, or are static functions "
                        "decorated with @rule.".format(type(entry)))
    return cls(serializable_rules, serializable_roots)
| UnrememberMe/pants | src/python/pants/engine/rules.py | Python | apache-2.0 | 7,202 | [
"VisIt"
] | e5d38c84aa922115927ac91c15d083ace31c0146457793e5d7127eb4fa1feaa1 |
# -*- coding: utf-8 -*-
#
# hh_psc_alpha.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Example using Hodgkin-Huxley neuron
----------------------------------------
This example produces a rate-response (FI) curve of the Hodgkin-Huxley
neuron ``hh_psc_alpha`` in response to a range of different current (DC) stimulations.
The result is plotted using matplotlib.
Since a DC input affetcs only the neuron's channel dynamics, this routine
does not yet check correctness of synaptic response.
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
simtime = 1000
# Amplitude range, in pA
dcfrom = 0
dcstep = 20
dcto = 2000
h = 0.1 # simulation step size in mS
neuron = nest.Create('hh_psc_alpha')
sd = nest.Create('spike_detector')
nest.SetStatus(sd, {'to_memory': False})
nest.Connect(neuron, sd, syn_spec={'weight': 1.0, 'delay': h})
# Simulation loop
n_data = int(dcto / float(dcstep))
amplitudes = np.zeros(n_data)
event_freqs = np.zeros(n_data)
for i, amp in enumerate(range(dcfrom, dcto, dcstep)):
nest.SetStatus(neuron, {'I_e': float(amp)})
print("Simulating with current I={} pA".format(amp))
nest.Simulate(1000) # one second warm-up time for equilibrium state
nest.SetStatus(sd, {'n_events': 0}) # then reset spike counts
nest.Simulate(simtime) # another simulation call to record firing rate
n_events = nest.GetStatus(sd, keys={'n_events'})[0][0]
amplitudes[i] = amp
event_freqs[i] = n_events / (simtime / 1000.)
plt.plot(amplitudes, event_freqs)
plt.show()
| hakonsbm/nest-simulator | pynest/examples/hh_psc_alpha.py | Python | gpl-2.0 | 2,223 | [
"NEURON"
] | c4e0de05151fcea6b362f0d862319cecfaf3dee8fc547e77ee80a83f20d71622 |
from net_modules import keypoints_2d
import tensorflow as tf
import zutils.tf_math_funcs as tmf
import numpy as np
import zutils.pt_utils as ptu
import net_modules.auto_struct.utils as asu
import prettytensor as pt
import math
from zutils.py_utils import *
import zutils.tf_graph_utils as tgu
from net_modules.auto_struct.generic_encoder import Factory as BaseFactory
class BasicFactory(BaseFactory):
structure_param_num = 2
    def __init__(self, output_channels, options):
        """
        :param output_channels: output_channels for the encoding net
        :param options: option mapping controlling keypoint behavior
            (e.g. "base_gaussian_stddev", "lm_tps_probability")
        """
        super().__init__(output_channels, options)
        # initialize keypoint-specific configuration from self.options
        self.keypoint_init()
def keypoint_init(self):
self.input_size = None
self.base_gaussian_stddev = keypoints_2d.gaussian_2d_base_stddev
if "base_gaussian_stddev" in self.options and self.options["base_gaussian_stddev"] is not None:
self.base_gaussian_stddev = self.options["base_gaussian_stddev"]
self.enable_random_transform = True
if "lm_tps_probability" in self.options:
self.lm_tps_probability = self.options["lm_tps_probability"]
else:
self.lm_tps_probability = 0.3
def use_random_transform(self):
# random for transformation for train phase only
return (
self.enable_random_transform and
ptu.default_phase() == pt.Phase.train and
"keypoint_transform_loss_weight" in self.options and
rbool(self.options["keypoint_transform_loss_weight"]) and
not ("freeze_encoded_structure" in self.options and rbool(self.options["freeze_encoded_structure"]))
)
def image_size(self, img_h, img_w):
if self.target_input_size is None:
return img_h, img_w, img_h, img_w
if isinstance(self.input_size, (list, tuple)):
actual_h = self.input_size[0]
actual_w = self.input_size[1]
else:
actual_h = self.input_size
actual_w = self.input_size
if isinstance(self.target_input_size, (list, tuple)):
full_h = self.target_input_size[0]
full_w = self.target_input_size[1]
else:
full_h = self.target_input_size
full_w = self.target_input_size
return actual_h, actual_w, full_h, full_w
def augment_images(self, image_tensor):
if not hasattr(self, "target_size"):
if hasattr(self, "input_size") and self.input_size is not None:
self.target_size = self.input_size * 2
actual_h, actual_w, full_h, full_w = \
self.image_size(tmf.get_shape(image_tensor)[0], tmf.get_shape(image_tensor)[1])
# random data augmentation for transformation invariance
aug_cache = dict()
aug_cache["original_image"] = image_tensor
if not self.use_random_transform():
return image_tensor, aug_cache, None
batch_size = tmf.get_shape(image_tensor)[0]
# get the landmarks using current model
mos_tmp = asu.ModuleOutputStrip()
with tgu.EnableAuxLoss(False):
main_heatmap = self.input_to_heatmap(image_tensor, mos_tmp)
main_keypoint_param = self.heatmap2structure_basic(main_heatmap)
main_keypoint_param = main_keypoint_param[:, :, :2]
del mos_tmp
aug_cache["network_predefined"] = True # in the parent function reuse=True for network definition
with tf.variable_scope("transform_invariance"):
h = tmf.get_shape(image_tensor)[1]
w = tmf.get_shape(image_tensor)[2]
im = image_tensor
im_shape = tmf.get_shape(im)
# ---- RANDOM LANDMARK TPS TRANSFORM ----
lm_n_points = tmf.get_shape(main_keypoint_param)[1]
lm_rand_pt_std = 0.05 #0.1
lm_tps_cp = tf.random_normal(shape=[batch_size, lm_n_points, 2], stddev=lm_rand_pt_std)
lm_tps_cp *= np.sqrt(np.reshape([full_w/full_h, full_h/full_w], [1, 1, 2]))
# remark: y,x: y enlarge normalized coordinate according to aspect ratio, x shrink normalized coordinate
lm_tps_fp = self.coordinate_to_stn(main_keypoint_param, aspect_ratio=full_w/full_h)
lm_tps_fp = tf.stop_gradient(lm_tps_fp)
im_t_1 = pt.wrap(im).spatial_transformer_tps(
None, None, lm_tps_cp, out_size=[h, w], fp_more=lm_tps_fp
)
im_t_1 = tf.reshape(im_t_1, im_shape)
aug_cache["lm_tps"] = dict()
aug_cache["lm_tps"]["transform"] = lm_tps_cp
aug_cache["lm_tps"]["control_points"] = lm_tps_fp
aug_cache["lm_tps"]["num_points"] = lm_n_points
# ---- RANDOM TPS TRANSFORM ----
n_points = 7
rand_pt_std = 0.1 # 0.2
tps_transform = tf.random_normal(shape=[batch_size, n_points*n_points, 2], stddev=rand_pt_std)
im_t_2 = pt.wrap(im).spatial_transformer_tps(
n_points, n_points,
tps_transform,
out_size=[h, w],
)
im_t_2 = tf.reshape(im_t_2, im_shape)
aug_cache["tps"] = dict()
aug_cache["tps"]["transform"] = tps_transform
aug_cache["tps"]["num_points"] = n_points
# -------------- SELECT RANDOM TPS --------------------
global_step = tf.train.get_global_step()
lm_tps_step_lower = 5000
lm_tps_step_upper = 10000
lm_tps_random_upper_th = self.lm_tps_probability
lm_tps_random_th = tf.where(
global_step <= lm_tps_step_lower, tf.constant(0, dtype=tf.float32),
tf.where(
global_step > lm_tps_step_upper, tf.constant(1, dtype=tf.float32),
tf.to_float(global_step-lm_tps_step_lower)/(lm_tps_step_upper-lm_tps_step_lower)
) * lm_tps_random_upper_th
)
use_lm_tps = tf.random_uniform([batch_size]) < lm_tps_random_th
use_lm_tps = tf.zeros_like(use_lm_tps)
im_t = tf.where(
tf.tile(tmf.expand_dims(use_lm_tps, axis=-1, ndims=3), [1] + im_shape[1:]),
im_t_1, im_t_2
)
aug_cache["use_lm_tps"] = use_lm_tps
# ---- RANDOM SIMILARITY TRANSFORM ----
# generate random transformation and generate the image
trans_range = np.array([-0.15, 0.15]) # translation
rotation_std = 10 # degree
scale_std = 1.25 # scale
# canonicalize parameter range
rotation_std = rotation_std/180 * np.pi
scale_std = np.log(scale_std)
trans_range = trans_range * 2. # spatial transformer use [-1, 1] for the coordinates
# generate random transformation
rand_base_t = tf.random_uniform(shape=[batch_size, 2, 1])
rand_trans = rand_base_t*(trans_range[1]-trans_range[0]) + trans_range[0] # trans x, y
rand_rotation = tf.random_normal(shape=[batch_size, 1, 1]) * rotation_std
rand_scale = tf.exp(tf.random_normal(shape=[batch_size, 1, 1]) * scale_std)
if "keypoint_random_horizontal_mirroring" in self.options and \
self.options["keypoint_random_horizontal_mirroring"]:
horizontal_sign = tf.to_float(tf.random_uniform([batch_size, 1, 1]) > 0.5)
else:
horizontal_sign = 1.
if "keypoint_random_vertical_mirroring" in self.options and \
self.options["keypoint_random_vertical_mirroring"]:
vertical_sign = tf.to_float(tf.random_uniform([batch_size, 1], 1) > 0.5)
else:
vertical_sign = 1.
# concatenate parameters
rand_cos = tf.cos(rand_rotation)
rand_sin = tf.sin(rand_rotation)
rand_rot_matrix = tf.concat(
[
tf.concat([rand_cos, rand_sin], axis=1)*horizontal_sign,
tf.concat([-rand_sin, rand_cos], axis=1)*vertical_sign,
], axis=2)
rand_sim_matrix = tf.concat(
[rand_scale*rand_rot_matrix, rand_trans],
axis=2
)
transform = rand_sim_matrix
im_t = pt.wrap(im_t).spatial_transformer(
tf.reshape(transform, [batch_size, 6]), out_size=im_shape[1:3]
)
im_t = tf.reshape(im_t, im_shape)
aug_cache["sim_transform"] = transform
# fuse converted images
im_a = tf.concat([im, im_t], axis=0)
return im_a, aug_cache, None
def heatmap_postprocess(self, heatmap):
    """Project the raw heatmap tensor to the expected number of channels.

    The expected channel count is keypoint_num + 1 (one map per keypoint
    plus a background channel). If the incoming tensor already has that
    many channels it is returned unchanged; otherwise the raw feature map
    is stashed under extra_outputs["heatmap_extra"]["feature"] and a 1x1
    convolution with no activation (a pure linear channel projection) maps
    it to the expected count.

    :param heatmap: [batch, h, w, c] heatmap/feature tensor.
    :return: (heatmap with keypoint_num+1 channels, extra_outputs dict)
    """
    extra_outputs = dict()
    extra_outputs["heatmap_extra"] = dict()
    heatmap_ch = tmf.get_shape(heatmap)[3]
    expected_channels = self.options["keypoint_num"] + 1  # keypoints + background
    # FIX: compare against the already-computed expected_channels instead of
    # re-deriving self.options["keypoint_num"] + 1 a second time.
    if heatmap_ch != expected_channels:
        # keep the raw feature map available for downstream consumers
        extra_outputs["heatmap_extra"]["feature"] = heatmap
        if hasattr(self, "pt_defaults_scope_value"):
            pt_scope = pt.defaults_scope(**self.pt_defaults_scope_value())
        else:
            pt_scope = dummy_class_for_with()
        with pt_scope:
            heatmap = pt.wrap(heatmap).conv2d(1, expected_channels, activation_fn=None)
    return heatmap, extra_outputs
def heatmap_postpostprocess(self, heatmap, image_tensor=None, heatmap_extra=None):
    """Hook for extra heatmap post-processing; the base version is a no-op.

    Returns the heatmap unchanged, together with an extra-outputs dict
    containing empty "for_decoder" and "save" sections that subclasses
    may populate.
    """
    extra_outputs = {"for_decoder": {}, "save": {}}
    return heatmap, extra_outputs
def heatmap2structure_internal(self, heatmap_tensor):
    """Convert a keypoint heatmap into per-keypoint Gaussian parameters.

    :param heatmap_tensor: [batch, h, w, keypoint_num+1]; the last channel
        is the background map and is discarded here.
    :return: (keypoint_param, keypoint_prob) where keypoint_param is
        [batch, keypoint_num, 3] -- two mean coordinates followed by one
        isotropic stddev -- and keypoint_prob is an all-ones
        [batch, keypoint_num] tensor (no per-keypoint confidence is
        estimated here).
    """
    keypoint_map = heatmap_tensor[:, :, :, :-1]  # remove bg
    # convert keypoint map to coordinate
    keypoint_param = keypoints_2d.keypoint_map_to_gaussian_coordinate(
        keypoint_map,
        use_hard_max_as_anchors=self.options["use_hard_max_as_anchors"] if
        "use_hard_max_as_anchors" in self.options else None
    )
    # Remark: keypoint_param has been scaled according to aspect ratio
    # keypoint_map_shape = tmf.get_shape(keypoint_map)
    # batch_size = keypoint_map_shape[0]
    batch_size = tmf.get_shape(keypoint_map)[0]
    # every keypoint gets probability 1 -- uniform confidence
    keypoint_prob = tf.ones([batch_size, tmf.get_shape(keypoint_map)[3]], dtype=keypoint_map.dtype)
    # average the two stddev components (channels 2:4) into a single value,
    # turning the per-keypoint Gaussian into an isotropic one
    keypoint_param = tf.concat([
        keypoint_param[:, :, :2],
        tf.reduce_mean(keypoint_param[:, :, 2:4], axis=2, keep_dims=True)
    ], axis=2)  # use isotropic gaussian
    return keypoint_param, keypoint_prob
def heatmap2structure_basic(self, heatmap_tensor):
    """Extract only the structure parameters (probabilities dropped)."""
    params, _unused_prob = self.heatmap2structure_internal(heatmap_tensor)
    return params[:, :, :self.structure_param_num]
def heatmap2structure(self, heatmap_tensor):
    """Full structure pack: params, probs, the raw heatmap, and a None slot."""
    params, probs = self.heatmap2structure_internal(heatmap_tensor)
    return params, probs, heatmap_tensor, None
def heatmap2structure_poststep(self, structure_pack):
    """Add auxiliary keypoint losses and package the structure outputs.

    When enabled in self.options this registers:
      * a concentration loss penalizing spatially spread-out keypoint
        Gaussians (via their exponential spatial entropy), and
      * a separation loss penalizing pairs of keypoints that collapse onto
        each other (Gaussian vicinity kernel with the diagonal masked out).
    It also renders the keypoint Gaussians back into a map, crops it to the
    actual (unpadded) region and stores it for visualization/saving.

    :param structure_pack: (keypoint_param, keypoint_prob, heatmap_tensor).
        NOTE(review): heatmap2structure returns a 4-tuple ending in None;
        presumably the caller strips that slot first -- confirm.
    :return: (structure_param, extra_outputs) where structure_param is
        keypoint_param truncated to self.structure_param_num columns.
    """
    # extra outputs
    extra_outputs = dict()
    extra_outputs["for_decoder"] = dict()
    # get necessary information
    keypoint_param, keypoint_prob, heatmap_tensor = structure_pack
    # compute keypoints
    keypoint_map = heatmap_tensor[:, :, :, :-1]  # remove bg
    # get image size (NB: queried at heatmap resolution, so actual_*/full_*
    # are in heatmap pixels, which keeps the crop below consistent)
    actual_h, actual_w, full_h, full_w = self.image_size(
        tmf.get_shape(keypoint_map)[1], tmf.get_shape(keypoint_map)[2]
    )
    batch_size = tmf.get_shape(keypoint_param)[0]
    # with random-transform augmentation the batch holds [main | transformed]
    main_batch_size = batch_size // 2 if self.use_random_transform() else batch_size
    output_shape = tmf.get_shape(keypoint_map)
    out_h = output_shape[1]
    out_w = output_shape[2]
    out_ah = int(out_h * (actual_h/full_h))
    out_aw = int(out_w * (actual_w/full_w))
    # geometric-mean scaling between actual and padded extent
    out_scaling = math.sqrt((actual_h/full_h) * (actual_w/full_w))
    if "keypoint_concentration_loss_weight" in self.options and \
            rbool(self.options["keypoint_concentration_loss_weight"]):
        # mean (over batch) of summed per-keypoint exp-entropy
        gaussian_spatial_entropy = tf.reduce_mean(
            tf.reduce_sum(
                keypoints_2d.gaussian2d_exp_entropy(keypoint_param, stddev_scaling=out_scaling),
                axis=1
            ), axis=0)
        keypoint_concentration_loss = gaussian_spatial_entropy * self.options["keypoint_concentration_loss_weight"]
        keypoint_concentration_loss.disp_name = 'concentration'
        tf.add_to_collection(
            "aux_loss", keypoint_concentration_loss)
    if "keypoint_separation_loss_weight" in self.options and \
            rbool(self.options["keypoint_separation_loss_weight"]):
        assert "keypoint_separation_bandwidth" in self.options, "keypoint_separation_bandwidth must be defined"
        keypoint_separation_bandwidth = self.options["keypoint_separation_bandwidth"] * out_scaling
        keypoint_loc = keypoint_param[:, :, :2]
        # pairwise squared distances between keypoints: [batch, k, k]
        keypoint_dist = tf.reduce_sum(tf.square(
            tf.expand_dims(keypoint_loc, axis=1) - tf.expand_dims(keypoint_loc, axis=2)), axis=3)
        keypoint_vicinity = tf.exp(-keypoint_dist / (2*(keypoint_separation_bandwidth**2)))  # quadratic
        # zero out self-distances on the diagonal
        keypoint_vicinity = tf.where(
            tf.eye(tmf.get_shape(keypoint_loc)[1], batch_shape=[batch_size]) > 0,
            tf.zeros_like(keypoint_vicinity), keypoint_vicinity
        )
        keypoint_separation_loss = tf.reduce_sum(keypoint_vicinity) / batch_size
        keypoint_separation_loss *= self.options["keypoint_separation_loss_weight"]
        keypoint_separation_loss.disp_name = 'kp_separate'
        tgu.add_to_aux_loss(keypoint_separation_loss)
    # render the Gaussians back into a keypoint map at heatmap resolution
    regularized_map_full = keypoints_2d.gaussian_coordinate_to_keypoint_map(
        keypoint_param, tmf.get_shape(keypoint_map)[1], tmf.get_shape(keypoint_map)[2]
    )
    # heatmap for patch_features
    # NOTE(review): background_weights is computed but never used below --
    # looks like dead code; confirm before removing.
    background_weights = tf.pad(
        tf.ones([batch_size, out_ah, out_aw, 1], dtype=regularized_map_full.dtype) / (actual_h * actual_w),
        [
            [0, 0], [(out_h - out_ah) // 2, (out_h - out_ah) - (out_h - out_ah) // 2],
            [(out_w - out_aw) // 2, (out_w - out_aw) - (out_w - out_aw) // 2], [0, 0]
        ], mode="CONSTANT", constant_values=0
    )
    keypoint_param_for_patch_features = keypoint_param
    # NOTE(review): this is hard-wired to None, so the branch below can never
    # run -- it appears to be a disabled experiment; confirm intent.
    heatmap_stddev_for_patch_features = None
    if heatmap_stddev_for_patch_features is not None:
        keypoint_param_for_patch_features = tf.concat([
            keypoint_param[:, :, :2], heatmap_stddev_for_patch_features
        ], axis=2)
        regularized_map_full = keypoints_2d.gaussian_coordinate_to_keypoint_map(
            keypoint_param_for_patch_features, tmf.get_shape(keypoint_map)[1], tmf.get_shape(keypoint_map)[2]
        )
    # visualize the computed gaussian (main batch half only, cropped to the
    # actual region inside the padding)
    regularized_map = regularized_map_full[:main_batch_size]
    cropped_regularized_map = \
        regularized_map[:, (full_h-actual_h)//2:(full_h+actual_h)//2, (full_w-actual_w)//2:(full_w+actual_w)//2]
    extra_outputs["save"] = dict(
        regularized_map=cropped_regularized_map,
        keypoint_prob=keypoint_prob[:main_batch_size]
    )
    keypoint_param = keypoint_param[:, :, :self.structure_param_num]
    structure_param = keypoint_param
    return structure_param, extra_outputs
def cleanup_augmentation_patchfeatures(self, patch_features, aug_cache):
    """Keep only the main (untransformed) samples of an augmented batch."""
    main_batch_size = aug_cache["main_batch_size"]
    if tmf.get_shape(patch_features)[0] != main_batch_size:
        patch_features = patch_features[:main_batch_size]
    return patch_features
def cleanup_augmentation_structure(self, structure_param, aug_cache, condition_tensor=None):
    """Undo augmentation on keypoint params and add consistency losses.

    With random-transform augmentation the batch holds [original |
    transformed] halves; the transformed half's keypoints are mapped back
    through the inverse similarity and TPS transforms and a squared-error
    transform-consistency loss is registered. At training time, optional
    optical-flow losses are added: a transform loss tying keypoint motion to
    the flow field, and a (negated) strength loss encouraging keypoints to
    sit on strongly-moving pixels. Finally the keypoints are rescaled if
    padding (target_input_size) was applied.

    :param structure_param: [batch, keypoint_num, >=2] keypoint parameters.
    :param aug_cache: dict produced by the augmentation step ("original_image",
        "lm_tps", "tps", "use_lm_tps", "sim_transform", ...).
    :param condition_tensor: optional list of condition dicts; an entry with
        type == "optical_flow" provides "flow" [batch, h, w, 2] and "offset".
    :return: keypoint coordinate parameters for the main batch half.
    """
    actual_h, actual_w, full_h, full_w = self.image_size(
        tmf.get_shape(aug_cache["original_image"])[0],
        tmf.get_shape(aug_cache["original_image"])[1]
    )
    full_a = full_w / full_h  # aspect ratio of the padded image
    af_scaling = math.sqrt((actual_h / full_h)*(actual_w / full_w))
    if not self.use_random_transform():
        keypoint_param = structure_param
        batch_size = tmf.get_shape(structure_param)[0]
    else:
        with tf.variable_scope("transform_invariance"):
            lm_tps_cp = aug_cache["lm_tps"]["transform"]
            lm_tps_fp = aug_cache["lm_tps"]["control_points"]
            tps_transform = aug_cache["tps"]["transform"]
            tps_n_points = aug_cache["tps"]["num_points"]
            use_lm_tps = aug_cache["use_lm_tps"]
            transform = aug_cache["sim_transform"]
            batch_size = tmf.get_shape(structure_param)[0] // 2
            # keypoint_num = tmf.get_shape(structure_param)[1]
            # transform keypoints and match keypoints
            keypoint_param2 = structure_param[batch_size:, :, :2]
            keypoint_param = structure_param[:batch_size, :, :2]
            # keypoint matching
            kp1 = self.coordinate_to_stn(keypoint_param, aspect_ratio=full_a)
            kp2 = self.coordinate_to_stn(keypoint_param2, aspect_ratio=full_a)
            # invert the similarity transform first ...
            kp1h_from2 = (
                pt.wrap(kp2).
                coordinate_inv_transformer(transform)
            )
            # ... then invert whichever TPS warp (landmark-based or generic)
            # was actually applied per sample
            kp1from2 = tf.where(
                tf.tile(tmf.expand_dims(use_lm_tps, axis=-1, ndims=2), [1]+tmf.get_shape(kp2)[1:]),
                kp1h_from2.coordinate_inv_transformer_tps(None, None, lm_tps_cp, fp_more=lm_tps_fp),
                kp1h_from2.coordinate_inv_transformer_tps(tps_n_points, tps_n_points, tps_transform)
            )
            kp_diff_loss = tf.reduce_sum(
                tf.reduce_sum(tf.square(kp1from2 - kp1), axis=[0, 1]) *
                np.array([full_a, 1/full_a])) / (af_scaling * batch_size)
            # remark: x,y: [-1,1]x[-1,1] --> [-aspect,+aspect]x[-1/aspect,+1/aspect], note the square
            transform_invariant_loss = self.options["keypoint_transform_loss_weight"] * kp_diff_loss
            tgu.add_to_aux_loss(transform_invariant_loss, "enc_transform")
    # optical flow
    of_condition = None
    if condition_tensor is not None:
        assert condition_tensor is not None, "need optical flow condition"
        for v in condition_tensor:
            if v["type"] == "optical_flow":
                of_condition = v
    optical_flow_transform_loss_weight = None
    if "optical_flow_transform_loss_weight" in self.options:
        optical_flow_transform_loss_weight = self.options["optical_flow_transform_loss_weight"]
    if optical_flow_transform_loss_weight is None:
        # fall back to the keypoint transform weight when flow data is present
        if of_condition is not None and "keypoint_transform_loss_weight" in self.options:
            optical_flow_transform_loss_weight = self.options["keypoint_transform_loss_weight"]
    optical_flow_strength_loss_weight = None
    if "optical_flow_strength_loss_weight" in self.options:
        optical_flow_strength_loss_weight = self.options["optical_flow_strength_loss_weight"]
    if ptu.default_phase() == pt.Phase.train and \
            (rbool(optical_flow_transform_loss_weight) or rbool(optical_flow_strength_loss_weight)):
        assert of_condition is not None, "need optical flow condition"
        # coordinate before padding
        pre_keypoint_param = keypoint_param[:, :, :2]
        scaling_factor = np.array(self.target_input_size) / np.array(self.input_size)
        pre_keypoint_param = keypoints_2d.scale_keypoint_param(
            pre_keypoint_param, scaling_factor, src_aspect_ratio=full_a)
        # only use valid (offset == 0 marks samples without a next frame)
        ind_offset = tf.reshape(of_condition["offset"], [-1])
        flow_map = of_condition["flow"]  # [batch_size, h, w, 2]
        valid_mask = tf.not_equal(ind_offset, 0)
        # interpolation mask
        flow_h, flow_w = tmf.get_shape(flow_map)[1:3]
        if rbool(optical_flow_transform_loss_weight):
            # normalized Gaussian weights around each keypoint, used to
            # interpolate a per-keypoint flow vector from the flow field
            pre_interp_weights = keypoints_2d.gaussian_coordinate_to_keypoint_map(tf.concat([
                pre_keypoint_param,
                tf.ones_like(pre_keypoint_param[:, :, -1:]) / math.sqrt(flow_h * flow_w)
            ], axis=2), km_h=flow_h, km_w=flow_w)  # [batch_size, h, w, keypoint_num]
            pre_interp_weights /= tf.reduce_sum(pre_interp_weights, axis=[1, 2], keep_dims=True) + tmf.epsilon
            # pointwise flow
            next_ind = np.arange(batch_size) + ind_offset
            next_keypoint_param = tf.gather(pre_keypoint_param, next_ind)
            pointwise_flow = tf.reduce_sum(
                tf.expand_dims(flow_map, axis=3)*tf.expand_dims(pre_interp_weights, axis=4),
                axis=[1, 2]
            )
            # flow transform constraint: keypoint + flow should land on the
            # keypoint detected in the next frame
            next_keypoint_param_2 = pre_keypoint_param + pointwise_flow
            kp_of_trans_loss = tf.reduce_mean(tf.boolean_mask(
                tmf.sum_per_sample(tf.square(next_keypoint_param_2 - next_keypoint_param)),
                mask=valid_mask
            ))
            optical_flow_transform_loss = kp_of_trans_loss * optical_flow_transform_loss_weight
            tgu.add_to_aux_loss(optical_flow_transform_loss, "flow_trans")
        if rbool(optical_flow_strength_loss_weight):
            pre_interp_weights = keypoints_2d.gaussian_coordinate_to_keypoint_map(tf.concat([
                pre_keypoint_param,
                tf.ones_like(pre_keypoint_param[:, :, -1:]) * (1/16)  # self.base_gaussian_stddev
            ], axis=2), km_h=flow_h, km_w=flow_w)  # [batch_size, h, w, keypoint_num]
            pre_interp_weights /= tf.reduce_sum(pre_interp_weights, axis=[1, 2], keep_dims=True) + tmf.epsilon
            kp_of_strength_loss = tf.reduce_mean(tmf.sum_per_sample(
                tf.boolean_mask(pre_interp_weights, mask=valid_mask) *
                tf.sqrt(tf.reduce_sum(
                    tf.square(tf.boolean_mask(flow_map, mask=valid_mask)), axis=3, keep_dims=True))
            ))
            # kp_of_strength_loss = 1/(kp_of_strength_loss+1)
            # negate so that larger flow magnitude under keypoints is rewarded
            kp_of_strength_loss = -kp_of_strength_loss
            optical_flow_strength_loss = kp_of_strength_loss * optical_flow_strength_loss_weight
            tgu.add_to_aux_loss(optical_flow_strength_loss, "flow_strength")
    # scale the parameters based on the padding ------
    if self.target_input_size is not None:
        assert self.input_size is not None, "self.input_size must be specified if self.target_input_size"
        scaling_factor = np.array(self.target_input_size) / np.array(self.input_size)
        keypoint_param = keypoints_2d.scale_keypoint_param(
            keypoint_param, scaling_factor,
            src_aspect_ratio=full_a)
    return keypoint_param
def structure_param2euclidean(self, structure_param):
    """Map Gaussian keypoint parameters to a Euclidean reconstruction code."""
    recon_code = keypoints_2d.gaussian2dparam_to_recon_code(structure_param)
    return recon_code
def coordinate_to_stn(self, keypoint_param, aspect_ratio):
    """Convert aspect-scaled (y, x) keypoint coordinates to STN (x, y) in [-1, 1].

    The incoming coordinates are scaled according to the aspect ratio
    (see the remark in heatmap2structure_internal); channel 1 is divided
    and channel 0 multiplied by sqrt(aspect_ratio) to undo that scaling,
    each channel is mapped from [0, 1] to [-1, 1], and the two channels
    are swapped (yx -> xy) for the spatial transformer.
    """
    sqrt_a = math.sqrt(aspect_ratio)
    stn_x = keypoint_param[:, :, 1:2] / sqrt_a * 2 - 1.
    stn_y = keypoint_param[:, :, 0:1] * sqrt_a * 2 - 1.
    return tf.concat([stn_x, stn_y], axis=2)
Factory = BasicFactory
| YutingZhang/lmdis-rep | net_modules/auto_struct/keypoint_encoder.py | Python | apache-2.0 | 23,819 | [
"Gaussian"
] | d565a97a3318b3261d929139b4bcc7840c037aec7879efeb2314f1d702bda3d9 |
# -*- coding: utf-8 -*-
#
# evaluate_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Example for the quantal_stp_synapse
-----------------------------------
The quantal_stp_synapse is a stochastic version of the Tsodyks-Markram model
for synaptic short term plasticity (STP).
This script compares the two variants of the Tsodyks/Markram synapse in NEST.
This synapse model implements synaptic short-term depression and
short-term facilitation according to the quantal release model
described by Fuhrmann et al. [1] and Loebel et al. [2].
Each presynaptic spike will stochastically activate a fraction of
the available release sites. This fraction is binomially
distributed and the release probability per site is governed by the
Fuhrmann et al. (2002) model. The solution of the differential
equations is taken from Maass and Markram 2002 [3].
The connection weight is interpreted as the maximal weight that can
be obtained if all n release sites are activated.
Parameters:
The following parameters can be set in the status dictionary:
U double - Maximal fraction of available resources [0,1],
default=0.5
u double - available fraction of resources [0,1], default=0.5
p double - probability that a vesicle is available, default = 1.0
n long - total number of release sites, default = 1
a long - number of available release sites, default = n
tau_rec double - time constant for depression in ms, default=800 ms
tau_fac double - time constant for facilitation in ms, default=0 (off)
References:
[1] Fuhrmann, G., Segev, I., Markram, H., & Tsodyks, M. V. (2002). Coding of
information by activity-dependent synapses. Journal of Neurophysiology.
[2] Loebel, A., Silberberg, G., Helbig, D., Markram, H., Tsodyks,
M. V, & Richardson, M. J. E. (2009). Multiquantal release underlies
the distribution of synaptic efficacies in the neocortex. Frontiers
in Computational Neuroscience, 3:27. doi:10.3389/neuro.10.027.
[3] Maass, W., & Markram, H. (2002). Synapses as dynamic memory buffers.
'''
import nest
import nest.voltage_trace
import numpy
import pylab
# Reset the kernel so repeated runs of this script start from a clean state.
nest.ResetKernel()
'''
On average, the quantal_stp_synapse converges to the tsodyks2_synapse,
so we can compare the two by running multiple trials.
First we define the number of trials as well as the number of release sites.
'''
n_syn = 10.0  # number of synapses in a connection
n_trials = 100  # number of measurement trials
'''
Next, we define parameter sets for facilitation
'''
fac_params = {"U": 0.02, "u": 0.02, "tau_fac": 500.,
              "tau_rec": 200., "weight": 1.}
'''
Then, we assign the parameter set to the synapse models
'''
t1_params = fac_params  # for tsodyks2_synapse
t2_params = t1_params.copy()  # for quantal_stp_synapse
t1_params['x'] = t1_params['U']
t2_params['n'] = n_syn
'''
To make the responses comparable, we have to scale the weight by the number of
synapses.
'''
t2_params['weight'] = 1. / n_syn
'''
Next, we change the defaults of the various models to our parameters.
'''
nest.SetDefaults("tsodyks2_synapse", t1_params)
nest.SetDefaults("quantal_stp_synapse", t2_params)
nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3.})
'''
We create three different neurons.
Neuron one is the sender, the two other neurons receive the synapses.
'''
neuron = nest.Create("iaf_psc_exp", 3)
'''
The connection from neuron 1 to neuron 2 is a deterministic synapse.
'''
nest.Connect([neuron[0]], [neuron[1]], syn_spec="tsodyks2_synapse")
'''
The connection from neuron 1 to neuron 3 has a stochastic quantal_stp_synapse.
'''
nest.Connect([neuron[0]], [neuron[2]], syn_spec="quantal_stp_synapse")
'''
The voltmeter will show us the synaptic responses in neurons 2 and 3.
'''
voltmeter = nest.Create("voltmeter", 2)
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True})
'''
One dry run to bring all synapses into their rest state.
The default initialization does not achieve this. In large network simulations
this problem does not show, but in small simulations like this,
we would see it.
'''
nest.SetStatus([neuron[0]], "I_e", 376.0)  # drive the sender with constant current
nest.Simulate(500.0)
nest.SetStatus([neuron[0]], "I_e", 0.0)  # switch the drive off again
nest.Simulate(1000.0)
'''
Only now do we connect the voltmeter to the neurons.
'''
nest.Connect([voltmeter[0]], [neuron[1]])
nest.Connect([voltmeter[1]], [neuron[2]])
'''
This loop runs over the n_trials trials and performs a standard protocol
of a high-rate response, followed by a pause and then a recovery response.
'''
for t in range(n_trials):
    nest.SetStatus([neuron[0]], "I_e", 376.0)
    nest.Simulate(500.0)
    nest.SetStatus([neuron[0]], "I_e", 0.0)
    nest.Simulate(1000.0)
'''
Flush the last voltmeter events from the queue by simulating one time-step.
'''
nest.Simulate(.1)
'''
Extract the reference trace.
'''
vm = numpy.array(nest.GetStatus([voltmeter[1]], 'events')[0]['V_m'])
vm_reference = numpy.array(nest.GetStatus([voltmeter[0]], 'events')[0]['V_m'])
# 1500 samples per trial: 1500 ms simulated per trial, presumably recorded at
# the voltmeter's default 1 ms interval -- verify against the 'interval' status.
vm.shape = (n_trials, 1500)
vm_reference.shape = (n_trials, 1500)
'''
Now compute the mean of all trials and plot against trials and references.
'''
vm_mean = numpy.array([numpy.mean(vm[:, i]) for (i, j) in enumerate(vm[0, :])])
vm_ref_mean = numpy.array([numpy.mean(vm_reference[:, i])
                           for (i, j) in enumerate(vm_reference[0, :])])
pylab.plot(vm_mean)
pylab.plot(vm_ref_mean)
'''
Finally, print the mean-squared error between the trial-average and
the reference trace. The value should be < 10^-9.
'''
print (numpy.mean((vm_ref_mean - vm_mean) ** 2))
| HBPNeurorobotics/nest-simulator | pynest/examples/evaluate_quantal_stp_synapse.py | Python | gpl-2.0 | 6,275 | [
"NEURON"
] | d3d62bb934bfc908205a604aa68b0a4f49de5b29e531e16091e9dce6c528b536 |
###############################################################################
# begin : Sun Jan 8 21:24:38 BRST 2012
# copyright : (C) 2012 by Ricardo Niederberger Cabral,
# : (C) 2016 Dmitry Litvinenko
# email : ricardo dot cabral at imgseek dot net
# : anti1869@gmail.com
#
###############################################################################
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
# Undocumented members declaration moved to /docs/api.rst
# Now using Sphinx as autodoc builder for Read The Docs (as it's most popular at the moment).
# See http://sphinx-doc.org and https://readthedocs.org
import time
import logging
import os
from typing import Sequence
from sunhead.conf import settings
from isk.backends.factory import backend
from isk.urldownloader import url_to_file
logger = logging.getLogger(__name__)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'  # Whether this module is imported by the readthedocs.org builder
if not on_rtd:  # when building docs on RTD, extension modules it cannot handle must not be loaded
    # TODO: Check rtd stuff here
    pass
# Globals
daemon_start_time = time.time()  # epoch seconds at which this daemon process started
has_shutdown = False  # presumably flipped to True once a shutdown has begun -- confirm against the shutdown path
SUPPORTED_IMAGE_EXTENSIONS = backend.supported_image_extensions  # image formats the active backend can index
def query_img_id(db_id: int, image_id: int, numres: int = 12, sketch: bool = False, fast: bool = False) -> tuple:
    """
    Find the images most similar to an already-indexed image.

    :param db_id: Database space id.
    :param image_id: Id of the (already indexed) target image.
    :param numres: Number of results to return; the target image itself is
        part of the result list.
    :param sketch: False for photographs, True for hand-sketched images or
        low-resolution vector images.
    :param fast: If True, only the average color of each image is considered
        and image geometry/features are ignored; the search is faster.
    :since: 0.7
    :return: tuple of [image id, score] pairs (id is Integer, score is Double).
    """
    # TODO: the old, inefficient balancer was removed here; implement a better one.
    return tuple(backend.query_img_id(db_id, image_id, numres, sketch, fast))
def query_img_blob(dbId, data, numres=12, sketch=0, fast=False):
    """
    Find the images most similar to a target image given as raw binary data.
    Most common image formats are supported.

    :type dbId: number
    :param dbId: Database space id.
    :type data: binary data
    :param data: Target image file binary data (``.data`` attribute is read).
    :type numres: number
    :param numres: Number of results to return; the target image is part of
        the result list.
    :type sketch: number
    :param sketch: 0 for photographs, 1 for hand-sketched images or
        low-resolution vector images.
    :type fast: boolean
    :param fast: If true, only the average color is considered; faster search.
    :rtype: array
    :since: 0.9.3
    :return: array of [image id, score] pairs (id is Integer, score is Double).
    """
    return backend.query_img_blob(int(dbId), data.data, int(numres), sketch, fast)
def query_img_path(dbId, path, numres=12, sketch=0, fast=False):
    """
    Find the images most similar to a target image given by its full path on
    the server filesystem.

    :type dbId: number
    :param dbId: Database space id.
    :type path: string
    :param path: Target image path on the server disk.
    :type numres: number
    :param numres: Number of results to return; the target image is part of
        the result list.
    :type sketch: number
    :param sketch: 0 for photographs, 1 for hand-sketched images or
        low-resolution vector images.
    :type fast: boolean
    :param fast: If true, only the average color is considered; faster search.
    :rtype: array
    :since: 0.9.3
    :return: array of [image id, score] pairs (id is Integer, score is Double).
    """
    return backend.query_img_path(int(dbId), path, int(numres), sketch, fast)
def add_img_blob(dbId, id, data):
    """
    Add an image to a database space from raw binary data; the image is
    processed and indexed.

    :type dbId: number
    :param dbId: Database space id.
    :type id: number
    :param id: Target image id; the indexed image is referred to by this id
        from now on.
    :type data: binary
    :param data: Image binary data (``.data`` attribute is read).
    :rtype: number
    :since: 0.9.3
    :return: 1 in case of success, False if indexing failed.
    """
    dbId = int(dbId)
    id = int(id)
    # BUGFIX: res must be initialized before the try; previously, if the
    # backend call raised, the except path executed `return res` with res
    # unbound, raising UnboundLocalError instead of reporting the failure.
    res = False
    try:
        # TODO id should be unsigned long int or something even bigger, also must review swig declarations
        res = backend.add_image_blob(dbId, data.data, id)
    except Exception as e:
        if str(e) == 'image already in db':
            # duplicate adds are expected; log quietly (warn() is deprecated)
            logger.warning(e)
        else:
            logger.error(e)
        return res
    return res
def add_img(db_id: int, image_id: int, filename: str, file_is_url: bool = False) -> bool:
    """
    Add an image to a database space. The image file is read, processed and
    indexed; after indexing the file may be removed from the file system.

    :param db_id: Database space id.
    :param image_id: Target image id; the indexed image is referred to by
        this id from now on.
    :param filename: Full file path of the image to be indexed. Should be one
        of the supported formats ('jpeg', 'jpg', 'gif', 'png', 'rgb', 'pbm',
        'pgm', 'ppm', 'tiff', 'tif', 'rast', 'xbm', 'bmp'). For better
        results the image should be at least 128x128; bigger images are
        scaled down.
    :param file_is_url: if true, filename is interpreted as an HTTP url; the
        remote image is downloaded to a temporary file (same directory as the
        database file) before indexing, and that temporary file is always
        removed afterwards.
    :since: 0.7
    :return: True in case of success.
    """
    if file_is_url:  # download it first
        # TODO: May be this need to be deprecated
        temp_fname = os.path.expanduser(settings.core.get('database', 'databasePath')) + \
            ('_tmp_%d_%d.jpg' % (db_id, image_id))
        url_to_file(filename, temp_fname)
        filename = temp_fname
    res = False
    try:
        # TODO id should be unsigned long int or something even bigger, also must review swig declarations
        res = backend.add_image(db_id, filename, image_id)
    except Exception as e:
        if str(e) == 'image already in db':
            logger.warning(e)  # duplicate adds are expected; warn() is deprecated
        else:
            logger.error(e)
        return res
    finally:
        # BUGFIX: clean up the downloaded temp file on every path; previously
        # the exception path returned without deleting it, leaking files.
        if file_is_url:
            os.remove(filename)
    return res
def remove_img(db_id: int, id: int) -> bool:
    """
    Remove an image from a database space.

    :param db_id: Database space id.
    :param id: Target image id.
    :since: 0.7
    :return: truthy in case of success.
    """
    result = backend.remove_img(db_id, id)
    return result
def remove_img_bulk(dbId, ids):
    """
    Neat shortcut to remove a whole bunch of images from the database.

    Every id in *ids* is attempted even if an earlier removal fails.

    :type dbId: number
    :param dbId: Database space id.
    :type ids: list
    :param ids: List of image ids.
    :since: 0.10
    :return: True if all images were removed.
    """
    # Evaluate all removals eagerly first: all() on a generator would
    # short-circuit and skip remaining removals after the first failure.
    results = [remove_img(dbId, image_id) for image_id in ids]
    # DOCFIX: the docstring previously documented a nonexistent `idList` param.
    return all(results)
def is_img_on_db(dbId, id):
    """
    Tell whether an image id exists on a database space.

    :type dbId: number
    :param dbId: Database space id.
    :type id: number
    :param id: Target image id.
    :rtype: boolean
    :since: 0.7
    :return: true if the image id exists.
    """
    return backend.is_image_on_db(int(dbId), int(id))
def get_img_dimensions(dbId, id):
    """
    Return the original dimensions an image had when it was indexed.

    :type dbId: number
    :param dbId: Database space id.
    :type id: number
    :param id: Target image id.
    :rtype: array
    :since: 0.7
    :return: array in the form M{[width, height]}.
    """
    return backend.get_image_dimensions(int(dbId), int(id))
def calc_img_avgl_diff(dbId, id1, id2):
    """
    Average-luminance difference ratio (over the three color channels)
    between two indexed images.

    :type dbId: number
    :param dbId: Database space id.
    :type id1: number
    :param id1: Target image 1 id.
    :type id2: number
    :param id2: Target image 2 id.
    :rtype: number
    :since: 0.7
    :return: float difference; the smaller, the more similar.
    """
    return backend.calc_avgl_diff(int(dbId), int(id1), int(id2))
def calc_img_diff(dbId, id1, id2):
    """
    Image similarity difference ratio between two indexed images.

    One value alone for an image pair doesn't mean much; these values should
    be compared pairwise against each other. The smaller the value (i.e. the
    more negative), the more similar the two images. Comparing an image
    against itself is a degenerate case and its value should be ignored.

    :type dbId: number
    :param dbId: Database space id.
    :type id1: number
    :param id1: Target image 1 id.
    :type id2: number
    :param id2: Target image 2 id.
    :rtype: number
    :since: 0.7
    :return: float difference; the smaller, the more similar.
    """
    return backend.calc_diff(int(dbId), int(id1), int(id2))
def get_img_avgl(dbId, id):
    """
    Average color levels of an image on the three YIQ color channels.

    :type dbId: number
    :param dbId: Database space id.
    :type id: number
    :param id: Target image id.
    :rtype: array of double
    :since: 0.7
    :return: values for the YIQ color channels.
    """
    return backend.get_image_avgl(int(dbId), int(id))
def get_db_img_id_list(db_id: int) -> tuple:
    """
    List the ids of all images on a database space.

    :param db_id: Database space id.
    :since: 0.7
    :return: array of image ids.
    """
    return backend.get_img_id_list(db_id)
def add_keyword_img(db_id: int, image_id: int, keyword_id: int) -> bool:
    """
    Attach a keyword to an image.

    :param db_id: Database space id.
    :param image_id: Target image id.
    :param keyword_id: Keyword id.
    :since: 0.7
    :return: True if the operation was successful.
    """
    return backend.add_keyword_img(db_id, image_id, keyword_id)
def add_keyword_img_bulk(dbId, data):
    """
    Attach keywords to images in bulk, saving network round-trips.

    *data* maps keyword ids to lists of image ids::

        {
            keyword1_id: [img1_id, img2_id],
            keyword2_id: [img1_id, img3_id],
            ...
        }

    Entries whose keyword or image id cannot be converted to int are skipped
    and counted as failures; all remaining assignments are still attempted.

    :param dbId: id of the image database to use.
    :param data: keyword -> image-id-list mapping in the format above.
    :since: 0.10
    :return: True only if every assignment succeeded.
    """
    all_ok = True
    for keyword, id_list in data.items():
        try:
            keyword_id = int(keyword)
        except ValueError:
            all_ok = False
            continue
        for img in id_list:
            try:
                img_id = int(img)
            except ValueError:
                all_ok = False
                continue
            all_ok &= add_keyword_img(dbId, img_id, keyword_id)
    return bool(all_ok)
def get_ids_bloom_filter(dbId):
    """
    Build a bloom filter over all image ids on the given database space.

    :type dbId: number
    :param dbId: Database space id.
    :rtype: bloom filter
    :since: 0.7
    :return: bloom filter containing all image ids on the given db id.
    """
    # NOTE: the backend method name is camelCase, unlike its siblings.
    return backend.getIdsBloomFilter(int(dbId))
def get_cluster_keywords(dbId, numClusters, keywords):
    """
    Cluster the given keywords (delegates to backend.get_cluster_keywords).

    NOTE(review): the previous docstring here was copy-pasted from
    is_img_on_db and described the wrong function.

    :type dbId: number
    :param dbId: Database space id.
    :param numClusters: presumably the desired number of clusters -- confirm
        against the backend implementation.
    :param keywords: keywords to cluster.
    :since: 0.7
    :return: clustering result as returned by the backend.
    """
    dbId = int(dbId)
    return backend.get_cluster_keywords(dbId, numClusters, keywords)
def get_cluster_db(dbId, numClusters):
    """
    Cluster the images of a database space (delegates to
    backend.get_cluster_db).

    NOTE(review): the previous docstring here was copy-pasted from
    is_img_on_db and described the wrong function.

    :type dbId: number
    :param dbId: Database space id.
    :param numClusters: presumably the desired number of clusters -- confirm
        against the backend implementation.
    :since: 0.7
    :return: clustering result as returned by the backend.
    """
    dbId = int(dbId)
    return backend.get_cluster_db(dbId, numClusters)
def get_keywords_popular(dbId, numres):
    """
    Fetch the most popular keywords of a database space (delegates to
    backend.get_keywords_popular).

    NOTE(review): the previous docstring here was copy-pasted from
    is_img_on_db and described the wrong function.

    :type dbId: number
    :param dbId: Database space id.
    :param numres: presumably the number of top keywords to return -- confirm
        against the backend implementation.
    :since: 0.7
    :return: result as returned by the backend.
    """
    dbId = int(dbId)
    return backend.get_keywords_popular(dbId, numres)
def get_keywords_visual_distance(dbId, distanceType, keywords):
    """
    Compute a visual distance over the given keywords (delegates to
    backend.get_keywords_visual_distance).

    NOTE(review): the previous docstring here was copy-pasted from
    is_img_on_db and described the wrong function.

    :type dbId: number
    :param dbId: Database space id.
    :param distanceType: presumably selects the distance metric -- confirm
        against the backend implementation.
    :param keywords: keywords to compare.
    :since: 0.7
    :return: result as returned by the backend.
    """
    dbId = int(dbId)
    return backend.get_keywords_visual_distance(dbId, distanceType, keywords)
# TODO: Add defaults here and explicit kw_join_type
def get_all_imgs_by_keywords(db_id, numres: int, kw_join_type: int, keyword_id_list: Sequence[int]) -> list:
    """
    List all images tagged with the given keywords.

    :param db_id: Database space id.
    :param numres: Number of results desired.
    :param kw_join_type: Logical operator over the keywords: 1 for AND, 0 for OR.
    :param keyword_id_list: Keyword ids to match; an empty/falsy sequence is
        replaced by the sentinel (0,).
    :rtype: array
    :since: 0.7
    :return: array of image ids.
    """
    effective_ids = keyword_id_list if keyword_id_list else (0,)
    return backend.get_all_imgs_by_keywords(db_id, numres, kw_join_type, effective_ids)
def query_img_id_fast_keywords(dbId, imgId, numres, kwJoinType, keywords):
    """
    Fast query (only considers average color) for similar images considering keywords.

    :type dbId: number
    :param dbId: Database space id.
    :type imgId: number
    :param imgId: Target image id. If '0', random images containing the target keywords will be returned.
    :type numres: number
    :param numres: Number of results desired
    :type kwJoinType: number
    :param kwJoinType: logical operator for keywords: 1 for AND, 0 for OR
    :type keywords: string
    :param keywords: comma separated list of keyword ids.
    :rtype: array
    :since: 0.7
    :return: array of arrays: M{[[image id 1, score],[image id 2, score],[image id 3, score], ...]}
        (id is Integer, score is Double)
    """
    dbId = int(dbId)
    imgId = int(imgId)
    # Parse the comma-separated id string into ints, skipping empty tokens.
    keywordIds = [int(x) for x in keywords.split(',') if len(x) > 0]
    # BUG FIX: the parsed keywordIds list was previously discarded and the raw
    # keywords string forwarded to the backend. Pass the parsed ids instead,
    # consistent with query_img_id_keywords below.
    return backend.query_img_id_fast_keywords(dbId, imgId, numres, kwJoinType, keywordIds)
def query_img_id_keywords(dbId, imgId, numres, kwJoinType, keywords):
    """
    Query for similar images, narrowing the search space by keywords.

    :type dbId: number
    :param dbId: Database space id.
    :type imgId: number
    :param imgId: Target image id. If '0', random images containing the target keywords will be returned.
    :type numres: number
    :param numres: Number of results desired
    :type kwJoinType: number
    :param kwJoinType: logical operator for keywords: 1 for AND, 0 for OR
    :type keywords: string
    :param keywords: comma separated list of keyword ids.
    :rtype: array
    :since: 0.7
    :return: array of arrays: M{[[image id 1, score],[image id 2, score],[image id 3, score], ...]}
        (id is Integer, score is Double)
    """
    db = int(dbId)
    img = int(imgId)
    # Split the comma-separated id string; empty tokens are dropped.
    keyword_ids = [int(token) for token in keywords.split(',') if token]
    return backend.query_img_id_keywords(db, img, numres, kwJoinType, keyword_ids)
def query_img_id_keywords_bulk(dbId, imgKwList, numres, kwJoinType):
    """
    Bulk variant of query_img_id_keywords. Pass a list of tuples::

        [
            (img1_id, 'kw1_id,kw2_id'),
            (img2_id, 'kw3_id'),
            ...
        ]

    and receive a list of results::

        [
            (img1_id, [(img2_id, img2_score), (img5_id, img5_score), ...]),
            (img2_id, [(img16_id, img16_score), ...]),
        ]

    This saves network overhead compared to calling queryImgIDKeywords
    one-by-one.

    :type dbId: number
    :param dbId: Database space id
    :type imgKwList: tuple
    :param imgKwList: List of queries in the format described above. Keywords
        is a comma-separated list of keyword ids.
    :type numres: number
    :param numres: Number of results desired
    :type kwJoinType: number
    :param kwJoinType: logical operator for keywords: 1 for AND, 0 for OR
    :since: 0.10
    :return: List of (image id, query result) pairs in the format above.
    """
    return [
        (image_id, query_img_id_keywords(dbId, image_id, numres, kwJoinType, kws))
        for image_id, kws in imgKwList
    ]
def most_popular_keywords(dbId, imgs, excludedKwds, count, mode):
    """
    Return the most frequent keywords associated with a given set of images.

    :type dbId: number
    :param dbId: Database space id.
    :type imgs: string
    :param imgs: Comma separated list of target image ids
    :type excludedKwds: string
    :param excludedKwds: Comma separated list of keyword ids to be excluded
        from the frequency count
    :type count: number
    :param count: Number of keyword results desired
    :type mode: number
    :param mode: ignored, will be used on future versions.
    :rtype: array
    :since: 0.7
    :return: array of keyword ids and frequencies:
        [kwd1_id, kwd1_freq, kwd2_id, kwd2_freq, ...]
    """
    db = int(dbId)
    # Both id lists arrive as comma-separated strings; empty tokens dropped.
    excluded_ids = [int(token) for token in excludedKwds.split(',') if token]
    image_ids = [int(token) for token in imgs.split(',') if token]
    return backend.most_popular_keywords(db, image_ids, excluded_ids, count, mode)
def get_keywords_img(dbId, imgId):
    """
    Return all keywords currently associated with an image.

    :type dbId: number
    :param dbId: Database space id.
    :type imgId: number
    :param imgId: Target image id.
    :rtype: array
    :since: 0.7
    :return: array of keyword ids
    """
    return backend.get_keywords_img(int(dbId), int(imgId))
def remove_all_keyword_img(db_id: int, image_id: int) -> bool:
    """
    Remove every keyword association this image has.

    Known issue: keyword based queries will keep considering the image to be
    associated with these keywords until the database is saved and restored.

    :param db_id: Database space id.
    :param image_id: Target image id.
    :rtype: boolean
    :since: 0.7
    :return: True if operation succeeded
    """
    # Note: the backend function name uses the plural "keywords".
    return backend.remove_all_keywords_img(db_id, image_id)
def remove_all_keyword_img_bulk(dbId, imgIdList):
    """
    Remove all keyword associations for every image in the list.

    Convenience shortcut for calling removeAllKeywordImg in a loop; saves
    network overhead.

    :type dbId: number
    :param dbId: Database space id.
    :type imgIdList: list
    :param imgIdList: List of target image ids.
    :rtype: boolean
    :since: 0.10
    :return: True if all calls succeeded
    """
    outcome = True
    for image_id in imgIdList:
        # Aggregate with &= so every image is still processed after a failure
        # (no short-circuiting), matching the original semantics.
        outcome &= remove_all_keyword_img(dbId, image_id)
    return outcome
def remove_keyword_img(db_id: int, image_id: int, keyword_id: int) -> bool:
    """
    Remove the association of one keyword with an image.

    Known issue: keyword based queries will keep considering the image to be
    associated with this keyword until the database is saved and restored.

    :param db_id: Database space id.
    :param image_id: Target image id.
    :param keyword_id: Keyword id.
    :since: 0.7
    :return: True if operation succeeded
    """
    return backend.remove_keyword_img(db_id, image_id, keyword_id)
def add_keywords_img(dbId, imgId, hashes):
    """
    Associate keywords with an image.

    :type dbId: number
    :param dbId: Database space id.
    :type imgId: number
    :param imgId: Target image id.
    :type hashes: list of number
    :param hashes: Keyword hashes to associate
    :rtype: boolean
    :since: 0.7
    :return: true if image id exists
    """
    return backend.add_keywords_img(int(dbId), int(imgId), hashes)
def add_dir(db_id: int, path: str, recursive: bool, fname_as_id: bool = False) -> bool:
    """
    Visit a directory (optionally recursively) and add supported images to a
    database space.

    :param db_id: Database space id.
    :param path: Target filesystem full path of the initial dir.
    :param recursive: True if should visit recursively
    :param fname_as_id: Whether to use file names as id. If false, id will be
        assigned automatically.
    :since: 0.7
    :return: count of images succesfully added
    """
    return backend.add_dir(db_id, path, recursive, fname_as_id)
# Public API surface: the callables this module exposes to the service/RPC
# layer. Keep in sync with the function definitions above.
exporting = (
    query_img_id,
    add_img,
    remove_img,
    remove_img_bulk,
    is_img_on_db,
    get_img_dimensions,
    calc_img_avgl_diff,
    calc_img_diff,
    get_img_avgl,
    get_db_img_id_list,
    add_dir,
    add_keyword_img,
    add_keywords_img,
    add_keyword_img_bulk,
    remove_keyword_img,
    remove_all_keyword_img,
    remove_all_keyword_img_bulk,
    get_keywords_img,
    query_img_id_keywords,
    query_img_id_keywords_bulk,
    query_img_id_fast_keywords,
    get_all_imgs_by_keywords,
    get_keywords_visual_distance,
    get_keywords_popular,
    get_ids_bloom_filter,
    most_popular_keywords,
    query_img_blob,
    query_img_path,
    add_img_blob,
    get_cluster_db,
    get_cluster_keywords,
)
| anti1869/isk | src/isk/api/images.py | Python | gpl-3.0 | 23,743 | [
"VisIt"
] | 92522c247e395af5ca2668d05b32c2633a327f14867dab8bd66199b462672159 |
"""This module creates a Pygame surface from a source surface that
has "end caps" on its corners. The caps remain unscaled in the
destination surface and the rest is scaled/tiled.
This was inspired by Android's NinePatch and iOS'
resizableImageWithCapInsets
"""
import pygame
AUTHOR = 'Brian Hammond <brian@fictorial.com>'
LICENSE = 'MIT'
COPYRIGHT = 'Copyright (C) 2012 Fictorial LLC'
__version__ = '1.0.0'
def resize_with_caps(src, dst_size, cap_insets=None, grow='scale'):
    """Stretch nine-grid source surface to surface of desired size.

    src
        The source surface.

    dst_size
        The destination surface size (width, height). If height is
        0 maintains aspect ratio of source surface.

    cap_insets
        The size of each of the 4 end caps (left, top, right,
        bottom).

        If None, the left and right end caps are taken as 1/2 the
        source surface width; and, the top and bottom end caps are
        taken as 1/2 the source surface height. In this case it's
        expected that the center stretchy part is 1x1 pixel.

    grow
        The method used to grow portions of the source image that
        are not end caps. The default is 'scale' which means the
        relevant source surface portions are scaled before being
        copied to the destination surface. The other option is
        'tile' which instead tiles the relevant source surface
        portions into the destination surface.

    Source and destination surfaces are laid out as follows.

        A B C
        D E F
        G H I

    A, C, G, and I are the end caps; B and H stretch horizontally;
    D and F stretch vertically; and E stretches in both directions.

    Returns the destination surface.
    """
    # Naming convention used below:
    # s:source, d:destination,
    # c:cap, m:middle/stretchable portion
    # l:left, t:top, b:bottom, r:right
    # w:width, h:height

    sw, sh = src.get_size()

    if cap_insets is None:
        assert sw % 2 == 1 and sh % 2 == 1
        # BUG FIX: the original `cl, cr = sw // 2` (and `ct, cb = sh // 2`)
        # attempted to unpack an int and raised TypeError. Each pair of caps
        # is half of the corresponding source dimension.
        cl = cr = sw // 2
        ct = cb = sh // 2
    else:
        cl, ct, cr, cb = cap_insets

    dw, dh = dst_size
    if dh == 0:
        # Preserve the source aspect ratio when no height is given.
        dh = int(sh * dw / float(sw))

    dst = pygame.surface.Surface((dw, dh), pygame.SRCALPHA, 32)

    smw = sw - cl - cr
    smh = sh - cb - ct
    dmw = dw - cl - cr
    dmh = dh - cb - ct

    r = pygame.Rect

    # Render the four unscaled caps: A, C, G, I in that order.
    dst.blit(src, r(0, 0, cl, ct), r(0, 0, cl, ct))
    dst.blit(src, r(dw - cr, 0, cr, ct), r(sw - cr, 0, cr, ct))
    dst.blit(src, r(0, dh - cb, cl, cb), r(0, sh - cb, cl, cb))
    dst.blit(src, r(dw - cr, dh - cb, cr, cb), r(sw - cr, sh - cb, cr, cb))

    # Extract zero-copy subsurfaces from src for the growable portions.
    B = src.subsurface(r(cl, 0, smw, ct))
    D = src.subsurface(r(0, ct, cl, smh))
    E = src.subsurface(r(cl, ct, smw, smh))
    F = src.subsurface(r(sw - cr, ct, cr, smh))
    H = src.subsurface(r(cl, sh - cb, smw, cb))

    if grow == 'scale' or grow == 'stretch':
        sc = pygame.transform.smoothscale
        dst.blit(sc(B, (dmw, ct)), (cl, 0))
        dst.blit(sc(D, (cl, dmh)), (0, ct))
        dst.blit(sc(E, (dmw, dmh)), (cl, ct))
        dst.blit(sc(F, (cr, dmh)), (dw - cr, ct))
        dst.blit(sc(H, (dmw, cb)), (cl, dh - cb))
    elif grow == 'tile':
        n_across = dmw // smw
        rem_px_across = dmw - n_across * smw
        n_down = dmh // smh
        rem_px_down = dmh - n_down * smh

        def render_across(tile, y, h):
            # Tile horizontally starting at the left cap edge; the final
            # partial tile (if any) is clipped to the remaining pixels.
            x = cl
            for i in range(int(n_across)):
                dst.blit(tile, (x, y))
                x += smw
            if rem_px_across > 0:
                dst.blit(tile, (x, y), r(0, 0, rem_px_across, h))

        render_across(B, 0, ct)
        # NOTE(review): the scale branch places H at y = dh - cb, but this
        # branch uses y = dh - smh. Preserved as-is; confirm before changing.
        render_across(H, dh - smh, cb)

        def render_down(tile, x, w):
            # Tile vertically starting below the top cap edge; the final
            # partial tile (if any) is clipped to the remaining pixels.
            y = ct
            for i in range(int(n_down)):
                dst.blit(tile, (x, y))
                y += smh
            if rem_px_down > 0:
                dst.blit(tile, (x, y), r(0, 0, w, rem_px_down))

        render_down(D, 0, cl)
        # NOTE(review): likewise the scale branch uses x = dw - cr here.
        render_down(F, dw - smw, cr)

        # Tile the center region E both across and down.
        y = ct
        for i in range(int(n_down)):
            render_across(E, y, smh)
            y += smh
        if rem_px_down > 0:
            x = cl
            for i in range(int(n_across)):
                dst.blit(E, (x, y), r(0, 0, smw, rem_px_down))
                x += smw
            if rem_px_across > 0:
                dst.blit(E, (x, y), r(0, 0, rem_px_across, rem_px_down))

    return dst
if __name__ == '__main__':
    # Visual demo: left column shows the source images, right column shows
    # the stretched/tiled results produced by resize_with_caps.
    pygame.init()
    screen = pygame.display.set_mode((640, 800))
    title = 'L: sources; R: stretch, tile, tile w/ leftovers, stretched button'
    pygame.display.set_caption(title)

    # 24px caps on all four sides of the template image.
    template = pygame.image.load('template.png').convert_alpha()
    template_cap_insets = (24, 24, 24, 24)
    # Exact multiple of the tile size.
    template_tiled = resize_with_caps(template, (24 * 15, 24 * 9),
                                      template_cap_insets, 'tile')
    # Non-multiple size: exercises the partial-tile ("leftover") code path.
    template_tiled1 = resize_with_caps(template, (24 * 7 + 4, 24 * 6 + 6),
                                       template_cap_insets, 'tile')
    template_stretched = resize_with_caps(template, (24 * 15, 24 * 9),
                                          template_cap_insets, 'stretch')

    #button = pygame.image.load('button.png').convert_alpha()
    #button_stretched = resize_with_caps(button, (450, 120), (10, 9), 'scale')

    button = pygame.image.load('textfield.png').convert_alpha()
    button_cap_insets = (1, 6, 1, 4)
    button_stretched = resize_with_caps(button, (450, 120),
                                        button_cap_insets, 'scale')

    clock = pygame.time.Clock()
    running = True
    while running:
        # Cap the loop at 4 FPS; dt is computed but currently unused.
        dt = clock.tick(4) / 1000.0
        for event in pygame.event.get():
            # Quit on window close or Escape.
            if event.type == pygame.QUIT:
                running = False
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                running = False
        if not running:
            break
        screen.fill((255, 255, 255))
        screen.blit(template, (10, 10))
        screen.blit(template_stretched, (150, 10))
        screen.blit(template_tiled, (150, 24 * 9 + 20))
        screen.blit(template_tiled1, (150, 2 * 24 * 9 + 30))
        screen.blit(button, (10, 640))
        screen.blit(button_stretched, (150, 640))
        pygame.display.flip()
| fictorial/pygame-capresize | capresize.py | Python | mit | 6,358 | [
"Brian"
] | 70b2303f935685a276694cb946e192bb21d1b49b432d3a9b6958ef4025191a26 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import linalg
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import mvn_linear_operator as mvn_linop
from tensorflow.python.framework import ops
__all__ = [
"MultivariateNormalDiagPlusLowRank",
]
class MultivariateNormalDiagPlusLowRank(
    mvn_linop.MultivariateNormalLinearOperator):
  """The multivariate normal distribution on `R^k`.

  The Multivariate Normal distribution is defined over `R^k` and parameterized
  by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
  `scale` matrix; `covariance = scale @ scale.T` where `@` denotes
  matrix-multiplication.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,
  y = inv(scale) @ (x - loc),
  Z = (2 pi)**(0.5 k) |det(scale)|,
  ```

  where:

  * `loc` is a vector in `R^k`,
  * `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
  * `Z` denotes the normalization constant, and,
  * `||y||**2` denotes the squared Euclidean norm of `y`.

  A (non-batch) `scale` matrix is:

  ```none
  scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
      scale_perturb_factor @ diag(scale_perturb_diag) @ scale_perturb_factor.T
  ```

  where:

  * `scale_diag.shape = [k]`,
  * `scale_identity_multiplier.shape = []`,
  * `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
  * `scale_perturb_diag.shape = [r]`.

  Additional leading dimensions (if any) will index batches.

  If both `scale_diag` and `scale_identity_multiplier` are `None`, then
  `scale` is the Identity matrix.

  The MultivariateNormal distribution is a member of the [location-scale
  family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ MultivariateNormal(loc=0, scale=1)   # Identity scale, zero shift.
  Y = scale @ X + loc
  ```

  #### Examples

  ```python
  ds = tf.contrib.distributions

  # Initialize a single 3-variate Gaussian with covariance `cov = S @ S.T`,
  # `S = diag(d) + U @ diag(m) @ U.T`. The perturbation, `U @ diag(m) @ U.T`,
  # is a rank-2 update.
  mu = [-0.5, 0, 0.5]   # shape: [3]
  d = [1.5, 0.5, 2]     # shape: [3]
  U = [[1., 2],
       [-1, 1],
       [2, -0.5]]       # shape: [3, 2]
  m = [4., 5]           # shape: [2]
  mvn = ds.MultivariateNormalDiagPlusLowRank(
      loc=mu,
      scale_diag=d,
      scale_perturb_factor=U,
      scale_perturb_diag=m)

  # Evaluate this on an observation in `R^3`, returning a scalar.
  mvn.prob([-1, 0, 1]).eval()  # shape: []

  # Initialize a 2-batch of 3-variate Gaussians; `S = diag(d) + U @ U.T`.
  mu = [[1., 2, 3],
        [11, 22, 33]]      # shape: [b, k] = [2, 3]
  U = [[[1., 2],
        [3, 4],
        [5, 6]],
       [[0.5, 0.75],
        [1.0, 0.25],
        [1.5, 1.25]]]      # shape: [b, k, r] = [2, 3, 2]
  m = [[0.1, 0.2],
       [0.4, 0.5]]         # shape: [b, r] = [2, 2]

  mvn = ds.MultivariateNormalDiagPlusLowRank(
      loc=mu,
      scale_perturb_factor=U,
      scale_perturb_diag=m)

  mvn.covariance().eval()   # shape: [2, 3, 3]
  # ==> [[[ 15.63   31.57    48.51]
  #       [ 31.57   69.31   105.05]
  #       [ 48.51  105.05   162.59]]
  #
  #      [[  2.59    1.41     3.35]
  #       [  1.41    2.71     3.34]
  #       [  3.35    3.34     8.35]]]

  # Compute the pdf of two `R^3` observations (one from each batch);
  # return a length-2 vector.
  x = [[-0.9, 0, 0.1],
       [-10, 0, 9]]     # shape: [2, 3]
  mvn.prob(x).eval()    # shape: [2]
  ```

  """

  def __init__(self,
               loc=None,
               scale_diag=None,
               scale_identity_multiplier=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalDiagPlusLowRank"):
    """Construct Multivariate Normal distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with
    this.

    Recall that `covariance = scale @ scale.T`. A (non-batch) `scale` matrix
    is:

    ```none
    scale = diag(scale_diag + scale_identity_multiplier ones(k)) +
        scale_perturb_factor @ diag(scale_perturb_diag) @
        scale_perturb_factor.T
    ```

    where:

    * `scale_diag.shape = [k]`,
    * `scale_identity_multiplier.shape = []`,
    * `scale_perturb_factor.shape = [k, r]`, typically `k >> r`, and,
    * `scale_perturb_diag.shape = [r]`.

    Additional leading dimensions (if any) will index batches.

    If both `scale_diag` and `scale_identity_multiplier` are `None`, then
    `scale` is the Identity matrix.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]`
        where `b >= 0` and `k` is the event size.
      scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
        matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
        and characterizes `b`-batches of `k x k` diagonal matrices added to
        `scale`. When both `scale_identity_multiplier` and `scale_diag` are
        `None` then `scale` is the `Identity`.
      scale_identity_multiplier: Non-zero, floating-point `Tensor`
        representing a scaled-identity-matrix added to `scale`. May have shape
        `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
        `k x k` identity matrices added to `scale`. When both
        `scale_identity_multiplier` and `scale_diag` are `None` then `scale`
        is the `Identity`.
      scale_perturb_factor: Floating-point `Tensor` representing a rank-`r`
        perturbation added to `scale`. May have shape `[B1, ..., Bb, k, r]`,
        `b >= 0`, and characterizes `b`-batches of rank-`r` updates to
        `scale`. When `None`, no rank-`r` update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing a diagonal
        matrix inside the rank-`r` perturbation added to `scale`. May have
        shape `[B1, ..., Bb, r]`, `b >= 0`, and characterizes `b`-batches of
        `r x r` diagonal matrices inside the perturbation added to `scale`.
        When `None`, an identity matrix is used inside the perturbation. Can
        only be specified if `scale_perturb_factor` is also specified.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if at most `scale_identity_multiplier` is specified.
    """
    # Capture constructor args for distribution bookkeeping. Must be the
    # first statement so no other locals leak into the snapshot.
    parameters = locals()
    def _convert_to_tensor(x, name):
      # None passes through untouched; anything else becomes a Tensor.
      return None if x is None else ops.convert_to_tensor(x, name=name)
    with ops.name_scope(name):
      with ops.name_scope("init", values=[
          loc, scale_diag, scale_identity_multiplier, scale_perturb_factor,
          scale_perturb_diag]):
        # A low-rank update is requested if either perturbation arg is given.
        has_low_rank = (scale_perturb_factor is not None or
                        scale_perturb_diag is not None)
        scale = distribution_util.make_diag_scale(
            loc=loc,
            scale_diag=scale_diag,
            scale_identity_multiplier=scale_identity_multiplier,
            validate_args=validate_args,
            assert_positive=has_low_rank)
        scale_perturb_factor = _convert_to_tensor(
            scale_perturb_factor,
            name="scale_perturb_factor")
        scale_perturb_diag = _convert_to_tensor(
            scale_perturb_diag,
            name="scale_perturb_diag")
        if has_low_rank:
          # Wrap the diagonal operator with the rank-r perturbation:
          # scale + u @ diag_update @ u.T
          scale = linalg.LinearOperatorLowRankUpdate(
              scale,
              u=scale_perturb_factor,
              diag_update=scale_perturb_diag,
              is_diag_update_positive=scale_perturb_diag is None,
              is_non_singular=True,  # Implied by is_positive_definite=True.
              is_self_adjoint=True,
              is_positive_definite=True,
              is_square=True)
    super(MultivariateNormalDiagPlusLowRank, self).__init__(
        loc=loc,
        scale=scale,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=name)
    self._parameters = parameters
| dyoung418/tensorflow | tensorflow/contrib/distributions/python/ops/mvn_diag_plus_low_rank.py | Python | apache-2.0 | 9,729 | [
"Gaussian"
] | b6711f901630a54d9d20741ceb0a53783a5c8c1dbea1e310fd8204565fd2e20a |
# ----------------------------------------------------------------------
# LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
# http://lammps.sandia.gov, Sandia National Laboratories
# Steve Plimpton, sjplimp@sandia.gov
#
# Copyright (2003) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
#
# See the README file in the top-level LAMMPS directory.
# -------------------------------------------------------------------------
# ----------------------------------------------------------------------
# Contributing author: Nicholas Lubbers (LANL)
# -------------------------------------------------------------------------
import numpy as np
import torch
def calc_n_params(model):
    """Return the total number of scalar parameters in a torch module."""
    total = 0
    for param in model.parameters():
        total += param.nelement()
    return total
class TorchWrapper(torch.nn.Module):
    """Adapter that lets the LAMMPS ML-IAP interface drive a PyTorch model.

    LAMMPS hands in numpy arrays and expects the per-atom energies and the
    descriptor gradients ("beta") to be written back into caller-owned
    output buffers in place.
    """

    def __init__(self, model, n_descriptors, n_elements, n_params=None, device=None, dtype=torch.float64):
        """Wrap *model* and record descriptor/element/parameter counts.

        :param model: torch module mapping (bispectrum, elems) -> energies
        :param n_descriptors: number of descriptors per atom
        :param n_elements: number of element types
        :param n_params: parameter count; computed from *model* if None
        :param device: torch device for evaluation (None = default)
        :param dtype: working dtype (LAMMPS uses float64)
        """
        super().__init__()
        self.model = model
        self.device = device
        self.dtype = dtype

        # Put model on device and convert to dtype
        self.to(self.dtype)
        self.to(self.device)

        if n_params is None:
            n_params = calc_n_params(model)

        self.n_params = n_params
        self.n_descriptors = n_descriptors
        self.n_elements = n_elements

    def forward(self, elems, bispectrum, beta, energy):
        """Evaluate energies and descriptor gradients for LAMMPS.

        :param elems: per-atom element types as numpy ints (1-based from
            LAMMPS; shifted to 0-based here)
        :param bispectrum: per-atom descriptors (numpy array)
        :param beta: output buffer, overwritten in place with
            d(energy)/d(bispectrum)
        :param energy: output buffer, overwritten in place with per-atom
            energies
        """
        # Descriptors need gradients so beta can be obtained via autograd.
        bispectrum = torch.from_numpy(bispectrum).to(dtype=self.dtype, device=self.device).requires_grad_(True)
        elems = torch.from_numpy(elems).to(dtype=torch.long, device=self.device) - 1

        with torch.autograd.enable_grad():
            energy_nn = self.model(bispectrum, elems)
            if energy_nn.ndim > 1:
                energy_nn = energy_nn.flatten()

            # beta = dE/d(descriptors), via autograd on the summed energy.
            beta_nn = torch.autograd.grad(energy_nn.sum(), bispectrum)[0]

        # Copy results back into the caller-owned numpy buffers.
        beta[:] = beta_nn.detach().cpu().numpy().astype(np.float64)
        energy[:] = energy_nn.detach().cpu().numpy().astype(np.float64)
class IgnoreElems(torch.nn.Module):
    """Adapter that discards the element-type argument before calling a subnet.

    Useful for models that consume only the bispectrum descriptors and do not
    distinguish element types.
    """

    def __init__(self, subnet):
        super().__init__()
        # The wrapped network; receives only the bispectrum input.
        self.subnet = subnet

    def forward(self, bispectrum, elems):
        """Evaluate the wrapped subnet; ``elems`` is intentionally unused."""
        del elems
        return self.subnet(bispectrum)
| rbberger/lammps | python/lammps/mliap/pytorch.py | Python | gpl-2.0 | 2,356 | [
"LAMMPS"
] | b7ee4c982ecbfa0797a5477ef6aba9cd871795964aa0518098b34703a590bf2e |
"""SyGMa: Systematically Generating potential Metabolites"""
from builtins import str
import argparse
import sygma
import sys
from rdkit import Chem, RDLogger
RDLogger.logger().setLevel(RDLogger.ERROR)
import logging
logging.basicConfig()
logger = logging.getLogger('sygma')
def run_sygma(args, file=sys.stdout):
    """Run the SyGMa metabolite-prediction scenario described by *args*.

    Builds a two-phase scenario, applies it to the parent molecule and writes
    the scored metabolic tree to *file* as SDF or SMILES.
    """
    logger.setLevel(args.loglevel.upper())
    scenario = sygma.Scenario([
        [sygma.ruleset['phase1'], args.phase1],
        [sygma.ruleset['phase2'], args.phase2],
    ])
    parent = Chem.MolFromSmiles(args.parentmol)
    tree = scenario.run(parent)
    tree.calc_scores()
    if args.outputtype == "sdf":
        tree.write_sdf(file)
    elif args.outputtype == "smiles":
        lines = [" ".join((smiles, str(score))) for smiles, score in tree.to_smiles()]
        file.write("\n".join(lines) + "\n")
    return None
def get_sygma_parser():
    """Build the command-line argument parser for the sygma script."""
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument('--version', action='version', version='%(prog)s ' + sygma.__version__)
    ap.add_argument('-o', '--outputtype', help="Molecule output type (default: %(default)s)", default="sdf", type=str)
    ap.add_argument('-1', '--phase1', help="Number of phase 1 cycles (default: %(default)s)", default=1, type=int)
    ap.add_argument('-2', '--phase2', help="Number of phase 2 cycles (default: %(default)s)", default=1, type=int)
    # BUG FIX: the choices list contained ' error' with a leading space, so
    # the documented value "error" was rejected by argparse.
    ap.add_argument('-l', '--loglevel', help="Set logging level (default: %(default)s)", default='info',
                    choices=['debug', 'info', 'warn', 'error'])
    ap.add_argument('parentmol', help="Smiles string of parent molecule structure", type=str)
    return ap
def main():
    """Entry point for the sygma command-line script."""
    parser = get_sygma_parser()
    parsed = parser.parse_args(sys.argv[1:])
    return run_sygma(parsed)
if __name__ == "__main__":
main()
| 3D-e-Chem/sygma | sygma/script/__init__.py | Python | gpl-3.0 | 1,855 | [
"RDKit"
] | ba4d2a3ec04659c8a97a48a97684139b30a3d62ea84ea08225c6d2a0cf869257 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This package contains all battery-related application classes, including
representations of InsertionElectrodes and ConversionElectrodes.
"""
| vorwerkc/pymatgen | pymatgen/apps/battery/__init__.py | Python | mit | 240 | [
"pymatgen"
] | bd47d8926c67427fad03da70ebd9f9c8139413fb5738dce3071b925406ceb909 |
import logging
import numpy as np
import pytest
import nengo
from nengo.utils.functions import piecewise
from nengo.utils.numpy import filtfilt
from nengo.utils.testing import Plotter, allclose
logger = logging.getLogger(__name__)
def test_args(nl):
    """Smoke test: Connection accepts eval_points/synapse/function/transform."""
    N = 10
    d1, d2 = 3, 2

    with nengo.Network(label='test_args'):
        A = nengo.Ensemble(nl(N), dimensions=d1)
        B = nengo.Ensemble(nl(N), dimensions=d2)
        nengo.Connection(
            A, B,
            eval_points=np.random.normal(size=(500, d1)),
            synapse=0.01,
            function=np.sin,
            transform=np.random.normal(size=(d2, d1)))
def test_node_to_neurons(Simulator, nl_nodirect):
    """An inhibitory node->neurons connection silences the ensemble for t >= 2.5."""
    name = 'node_to_neurons'
    N = 30

    m = nengo.Network(label=name, seed=123)
    with m:
        a = nengo.Ensemble(nl_nodirect(N), dimensions=1)
        inn = nengo.Node(output=np.sin)
        # Inhibition signal steps from 0 to 1 at t = 2.5 s.
        inh = nengo.Node(piecewise({0: 0, 2.5: 1}))
        nengo.Connection(inn, a)
        # Broadcast a strong negative current to every neuron once inh is 1.
        nengo.Connection(inh, a.neurons, transform=[[-2.5]]*N)

        inn_p = nengo.Probe(inn, 'output')
        a_p = nengo.Probe(a, 'decoded_output', synapse=0.1)
        inh_p = nengo.Probe(inh, 'output')

    sim = Simulator(m)
    sim.run(5.0)
    t = sim.trange()
    ideal = np.sin(t)
    ideal[t >= 2.5] = 0

    with Plotter(Simulator, nl_nodirect) as plt:
        plt.plot(t, sim.data[inn_p], label='Input')
        plt.plot(t, sim.data[a_p], label='Neuron approx, synapse=0.1')
        plt.plot(t, sim.data[inh_p], label='Inhib signal')
        plt.plot(t, ideal, label='Ideal output')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_connection.test_' + name + '.pdf')
        plt.close()

    # Output should be fully suppressed by the end of the run.
    assert np.allclose(sim.data[a_p][-10:], 0, atol=.1, rtol=.01)
def test_ensemble_to_neurons(Simulator, nl_nodirect):
    """An ensemble-decoded inhibitory signal silences another ensemble."""
    name = 'ensemble_to_neurons'
    N = 30

    m = nengo.Network(label=name, seed=123)
    with m:
        a = nengo.Ensemble(nl_nodirect(N), dimensions=1)
        b = nengo.Ensemble(nl_nodirect(N), dimensions=1)
        inn = nengo.Node(output=np.sin)
        # Inhibition signal steps from 0 to 1 at t = 2.5 s, routed through b.
        inh = nengo.Node(piecewise({0: 0, 2.5: 1}))
        nengo.Connection(inn, a)
        nengo.Connection(inh, b)
        # b's decoded value drives negative current into a's neurons.
        nengo.Connection(b, a.neurons, transform=[[-2.5]]*N)

        inn_p = nengo.Probe(inn, 'output')
        a_p = nengo.Probe(a, 'decoded_output', synapse=0.1)
        b_p = nengo.Probe(b, 'decoded_output', synapse=0.1)
        inh_p = nengo.Probe(inh, 'output')

    sim = Simulator(m)
    sim.run(5.0)
    t = sim.trange()
    ideal = np.sin(t)
    ideal[t >= 2.5] = 0

    with Plotter(Simulator, nl_nodirect) as plt:
        plt.plot(t, sim.data[inn_p], label='Input')
        plt.plot(t, sim.data[a_p], label='Neuron approx, pstc=0.1')
        plt.plot(
            t, sim.data[b_p], label='Neuron approx of inhib sig, pstc=0.1')
        plt.plot(t, sim.data[inh_p], label='Inhib signal')
        plt.plot(t, ideal, label='Ideal output')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_connection.test_' + name + '.pdf')
        plt.close()

    # a is suppressed to 0 while b tracks the unit inhibition signal.
    assert np.allclose(sim.data[a_p][-10:], 0, atol=.1, rtol=.01)
    assert np.allclose(sim.data[b_p][-10:], 1, atol=.1, rtol=.01)
def test_neurons_to_ensemble(Simulator, nl_nodirect):
    """Raw neuron outputs can project into ensembles (with/without transform)."""
    name = 'neurons_to_ensemble'
    N = 20

    m = nengo.Network(label=name, seed=123)
    with m:
        a = nengo.Ensemble(nl_nodirect(N * 2), dimensions=2)
        b = nengo.Ensemble(nl_nodirect(N * 3), dimensions=3)
        c = nengo.Ensemble(nl_nodirect(N), dimensions=N*2)
        # Strong negative weights from every neuron of a into b.
        nengo.Connection(a.neurons, b, transform=-10 * np.ones((3, N*2)))
        # Direct (identity) neuron projection into a high-dimensional ensemble.
        nengo.Connection(a.neurons, c)

        a_p = nengo.Probe(a, 'decoded_output', synapse=0.01)
        b_p = nengo.Probe(b, 'decoded_output', synapse=0.01)
        c_p = nengo.Probe(c, 'decoded_output', synapse=0.01)

    sim = Simulator(m)
    sim.run(5.0)
    t = sim.trange()

    with Plotter(Simulator, nl_nodirect) as plt:
        plt.plot(t, sim.data[a_p], label='A')
        plt.plot(t, sim.data[b_p], label='B')
        plt.plot(t, sim.data[c_p], label='C')
        plt.savefig('test_connection.test_' + name + '.pdf')
        plt.close()

    # The strongly negative input must drive b's representation below zero.
    assert np.all(sim.data[b_p][-10:] < 0)
def test_neurons_to_node(Simulator, nl_nodirect):
    """An unfiltered neurons->node connection relays spikes one step late."""
    name = 'neurons_to_node'
    N = 30

    m = nengo.Network(label=name, seed=123)
    with m:
        a = nengo.Ensemble(nl_nodirect(N), dimensions=1)
        out = nengo.Node(lambda t, x: x, size_in=N)
        # synapse=None passes raw spike values through unfiltered.
        nengo.Connection(a.neurons, out, synapse=None)

        a_spikes = nengo.Probe(a, 'spikes')
        out_p = nengo.Probe(out, 'output')

    sim = Simulator(m)
    sim.run(0.6)
    t = sim.trange()

    with Plotter(Simulator, nl_nodirect) as plt:
        ax = plt.subplot(111)
        try:
            # rasterplot is optional (requires matplotlib support in nengo).
            from nengo.matplotlib import rasterplot
            rasterplot(t, sim.data[a_spikes], ax=ax)
            rasterplot(t, sim.data[out_p], ax=ax)
        except ImportError:
            pass
        plt.savefig('test_connection.test_' + name + '.pdf')
        plt.close()

    # Node output equals the spike train delayed by exactly one timestep.
    assert np.allclose(sim.data[a_spikes][:-1], sim.data[out_p][1:])
def test_neurons_to_neurons(Simulator, nl_nodirect):
    """A dense negative neurons->neurons connection silences the target."""
    name = 'neurons_to_neurons'
    N1, N2 = 30, 50

    m = nengo.Network(label=name, seed=123)
    with m:
        a = nengo.Ensemble(nl_nodirect(N1), dimensions=1)
        b = nengo.Ensemble(nl_nodirect(N2), dimensions=1)
        inp = nengo.Node(output=1)
        nengo.Connection(inp, a)
        # All-to-all inhibitory weights from a's neurons onto b's neurons.
        nengo.Connection(
            a.neurons, b.neurons, transform=-1 * np.ones((N2, N1)))

        inp_p = nengo.Probe(inp, 'output')
        a_p = nengo.Probe(a, 'decoded_output', synapse=0.1)
        b_p = nengo.Probe(b, 'decoded_output', synapse=0.1)

    sim = Simulator(m)
    sim.run(5.0)
    t = sim.trange()

    with Plotter(Simulator, nl_nodirect) as plt:
        plt.plot(t, sim.data[inp_p], label='Input')
        plt.plot(t, sim.data[a_p], label='A, represents input')
        plt.plot(t, sim.data[b_p], label='B, should be 0')
        plt.legend(loc=0, prop={'size': 10})
        plt.savefig('test_connection.test_' + name + '.pdf')
        plt.close()

    # a tracks the unit input; b is fully inhibited.
    assert np.allclose(sim.data[a_p][-10:], 1, atol=.1, rtol=.01)
    assert np.allclose(sim.data[b_p][-10:], 0, atol=.1, rtol=.01)
def test_weights(Simulator, nl):
    """A connection solved in weight space still computes the transform."""
    name = 'test_weights'
    n1, n2 = 100, 50

    def func(t):
        # Two-dimensional reference signal.
        return np.array([np.sin(4 * t), np.cos(12 * t)])

    transform = np.array([[0.6, -0.4]])

    m = nengo.Network(label=name, seed=3902)
    with m:
        u = nengo.Node(output=func)
        a = nengo.Ensemble(nl(n1), dimensions=2, radius=1.5)
        b = nengo.Ensemble(nl(n2), dimensions=1)
        bp = nengo.Probe(b)

        nengo.Connection(u, a)
        # weight_solver solves for full neuron-to-neuron weights instead of
        # factored decoders.
        nengo.Connection(a, b, transform=transform,
                         weight_solver=nengo.decoders.lstsq_L2nz)

    sim = Simulator(m)
    sim.run(2.)
    t = sim.trange()
    x = func(t).T
    y = np.dot(x, transform.T)
    # Smooth the simulated output before comparing against the ideal.
    z = filtfilt(sim.data[bp], 10, axis=0)
    assert allclose(t, y.flatten(), z.flatten(),
                    plotter=Plotter(Simulator, nl),
                    filename='test_connection.' + name + '.pdf',
                    atol=0.1, rtol=0, buf=100, delay=10)
def test_dimensionality_errors(nl_nodirect):
    """Connections with mismatched pre/post dimensionality, bad transform
    shapes, or inconsistent slicing must raise ValueError at build time.
    """
    N = 10
    with nengo.Network(label="test_dimensionality_error"):
        n01 = nengo.Node(output=[1])
        n02 = nengo.Node(output=[1, 1])
        n21 = nengo.Node(output=[1], size_in=2)
        e1 = nengo.Ensemble(nl_nodirect(N), 1)
        e2 = nengo.Ensemble(nl_nodirect(N), 2)
        # these should work
        nengo.Connection(n01, e1)
        nengo.Connection(n02, e2)
        nengo.Connection(e2, n21)
        nengo.Connection(n21, e1)
        nengo.Connection(e1.neurons, n21, transform=np.random.randn(2, N))
        nengo.Connection(e2, e1, function=lambda x: x[0])
        # these should not work
        with pytest.raises(ValueError):
            nengo.Connection(n02, e1)
        with pytest.raises(ValueError):
            nengo.Connection(e1, e2)
        with pytest.raises(ValueError):
            nengo.Connection(e2.neurons, e1, transform=np.random.randn(1, N+1))
        with pytest.raises(ValueError):
            nengo.Connection(e2.neurons, e1, transform=np.random.randn(2, N))
        with pytest.raises(ValueError):
            nengo.Connection(e2, e1, function=lambda x: x, transform=[[1]])
        with pytest.raises(ValueError):
            nengo.Connection(n21, e2, transform=np.ones((2, 2)))
        # these should not work because of indexing mismatches
        with pytest.raises(ValueError):
            nengo.Connection(n02[0], e2)
        with pytest.raises(ValueError):
            nengo.Connection(n02, e2[0])
        with pytest.raises(ValueError):
            nengo.Connection(n02[1], e2[0], transform=[[1, 2], [3, 4]])
        with pytest.raises(ValueError):
            nengo.Connection(n02, e2[0], transform=[[1], [2]])
        with pytest.raises(ValueError):
            nengo.Connection(e2[0], e2, transform=[[1, 2]])
def test_slicing(Simulator, nl_nodirect):
    """Pre/post slicing on connections must expand the user transform into
    the correct full-size ``transform_full`` while leaving ``transform``
    as given by the user.
    """
    name = 'connection_slicing'
    N = 30
    with nengo.Network(label=name):
        neurons3 = nl_nodirect(3)
        ens1 = nengo.Ensemble(nl_nodirect(N), dimensions=1)
        ens2 = nengo.Ensemble(nl_nodirect(N), dimensions=2)
        ens3 = nengo.Ensemble(nl_nodirect(N), dimensions=3)
        node1 = nengo.Node(output=[0])
        node2 = nengo.Node(output=[0, 0])
        node3 = nengo.Node(output=[0, 0, 0])
        # Pre slice with default transform -> 1x3 transform
        conn = nengo.Connection(node3[2], ens1)
        assert np.all(conn.transform == np.array(1))
        assert np.all(conn.transform_full == np.array([[0, 0, 1]]))
        # Post slice with 1x1 transform -> 1x2 transform
        conn = nengo.Connection(node2[0], ens1, transform=-2)
        assert np.all(conn.transform == np.array(-2))
        assert np.all(conn.transform_full == np.array([[-2, 0]]))
        # Post slice with 2x1 tranfsorm -> 3x1 transform
        conn = nengo.Connection(node1, ens3[::2], transform=[[1], [2]])
        assert np.all(conn.transform == np.array([[1], [2]]))
        assert np.all(conn.transform_full == np.array([[1], [0], [2]]))
        # Both slices with 2x1 transform -> 3x2 transform
        conn = nengo.Connection(ens2[0], neurons3[1:], transform=[[1], [2]])
        assert np.all(conn.transform == np.array([[1], [2]]))
        assert np.all(conn.transform_full == np.array(
            [[0, 0], [1, 0], [2, 0]]))
        # Full slices that can be optimized away
        conn = nengo.Connection(ens3[:], ens3, transform=2)
        assert np.all(conn.transform == np.array(2))
        assert np.all(conn.transform_full == np.array(2))
        # Pre slice with 1x1 transform on 2x2 slices -> 2x3 transform
        conn = nengo.Connection(neurons3[:2], ens2, transform=-1)
        assert np.all(conn.transform == np.array(-1))
        assert np.all(conn.transform_full == np.array(
            [[-1, 0, 0], [0, -1, 0]]))
        # Both slices with 1x1 transform on 2x2 slices -> 3x3 transform
        conn = nengo.Connection(neurons3[1:], neurons3[::2], transform=-1)
        assert np.all(conn.transform == np.array(-1))
        assert np.all(conn.transform_full == np.array([[0, -1, 0],
                                                       [0, 0, 0],
                                                       [0, 0, -1]]))
        # Both slices with 2x2 transform -> 3x3 transform
        conn = nengo.Connection(node3[[0, 2]], neurons3[1:],
                                transform=[[1, 2], [3, 4]])
        assert np.all(conn.transform == np.array([[1, 2], [3, 4]]))
        assert np.all(conn.transform_full == np.array([[0, 0, 0],
                                                       [1, 0, 2],
                                                       [3, 0, 4]]))
        # Both slices with 2x3 transform -> 3x3 transform... IN REVERSE!
        conn = nengo.Connection(neurons3[::-1], neurons3[[2, 0]],
                                transform=[[1, 2, 3], [4, 5, 6]])
        assert np.all(conn.transform == np.array([[1, 2, 3], [4, 5, 6]]))
        assert np.all(conn.transform_full == np.array([[6, 5, 4],
                                                       [0, 0, 0],
                                                       [3, 2, 1]]))
        # Both slices using lists
        conn = nengo.Connection(neurons3[[1, 0, 2]], neurons3[[2, 1]],
                                transform=[[1, 2, 3], [4, 5, 6]])
        assert np.all(conn.transform == np.array([[1, 2, 3], [4, 5, 6]]))
        assert np.all(conn.transform_full == np.array([[0, 0, 0],
                                                       [5, 4, 6],
                                                       [2, 1, 3]]))
def test_shortfilter(Simulator, nl):
    """Connections whose synapse is shorter than dt must still yield an
    acyclic operator graph; an explicit synapse=None self-loop may not.
    """
    # Testing the case where the connection filter is < dt
    m = nengo.Network()
    with m:
        a = nengo.Ensemble(neurons=nl(10), dimensions=1)
        nengo.Connection(a, a)
        b = nengo.Ensemble(neurons=nl(10), dimensions=1)
        nengo.Connection(a, b)
        nengo.Connection(b, a)
    # Building must succeed (i.e. no cycle) with the default synapse.
    Simulator(m, dt=.01)
    # This test passes if there are no cycles in the op graph
    # We will still get a cycle if the user explicitly sets the
    # filter to None
    with m:
        d = nengo.Ensemble(neurons=nengo.Direct(10), dimensions=1)
        nengo.Connection(d, d, synapse=None)
    with pytest.raises(ValueError):
        Simulator(m, dt=.01)
def test_function_output_size(Simulator, nl_nodirect):
    """Try a function that outputs both 0-d and 1-d arrays"""
    def bad_function(x):
        # Returns the 1-d array x when positive, but the Python scalar 0
        # otherwise -- deliberately inconsistent output shape.
        return x if x > 0 else 0
    model = nengo.Network(seed=9)
    with model:
        u = nengo.Node(output=lambda t: t - 1)
        a = nengo.Ensemble(neurons=nl_nodirect(100), dimensions=1)
        b = nengo.Ensemble(neurons=nl_nodirect(100), dimensions=1)
        nengo.Connection(u, a)
        nengo.Connection(a, b, function=bad_function)
        up = nengo.Probe(u, synapse=None)
        bp = nengo.Probe(b, synapse=0.03)
    sim = Simulator(model)
    sim.run(2.)
    t = sim.trange()
    # Reference: rectified input, filtered like the probe on b.
    x = nengo.utils.numpy.filt(sim.data[up].clip(0, np.inf), 0.03 / sim.dt)
    y = sim.data[bp]
    with Plotter(Simulator, nl_nodirect) as plt:
        plt.plot(t, x, 'k')
        plt.plot(t, y)
        plt.savefig('test_connection.test_function_output_size.pdf')
        plt.close()
    assert np.allclose(x, y, atol=0.1)
if __name__ == "__main__":
    # Allow running this test file directly with verbose pytest output.
    nengo.log(debug=True)
    pytest.main([__file__, '-v'])
| ZeitgeberH/nengo | nengo/tests/test_connection.py | Python | gpl-3.0 | 14,583 | [
"NEURON"
] | 22b2eb75c43cd931d467a5e0e7dafb94b93e5be5cca6c6439646b5244c012731 |
from modshogun import GMM, RealFeatures
from numpy import zeros
from numpy.ma.extras import unique
from numpy.random import randint
from kameleon_mcmc.distribution.Discrete import Discrete
from kameleon_mcmc.distribution.Gaussian import Gaussian
from kameleon_mcmc.distribution.MixtureDistribution import MixtureDistribution
from kameleon_mcmc.mcmc.samplers.StandardMetropolis import StandardMetropolis
class GMMMetropolis(StandardMetropolis):
    '''
    Runs StandardMetropolis for a number of iterations, performs a couple of
    EM instances to fit a Gaussian Mixture Model which is subsequently used
    as a static proposal distribution
    '''
    def __init__(self, distribution, num_components, num_sample_discard=1000,
                 num_samples_gmm=1000, num_samples_when_to_switch=40000, num_runs_em=1):
        # num_components: number of Gaussians in the fitted mixture
        # num_sample_discard: burn-in prefix of the chain never used for fitting
        # num_samples_gmm: number of indices drawn (before dedup) for the EM fit
        # num_samples_when_to_switch: chain iteration at which the static
        #     GMM proposal replaces the standard MH proposal
        # num_runs_em: number of random EM restarts; highest likelihood wins
        StandardMetropolis.__init__(self, distribution)
        self.num_components = num_components
        self.num_sample_discard = num_sample_discard
        self.num_samples_gmm = num_samples_gmm
        self.num_samples_when_to_switch = num_samples_when_to_switch
        self.num_runs_em = num_runs_em
        # start with empty proposal, is changed to something in adapt method
        self.proposal = None
    def __str__(self):
        # Human-readable parameter dump, including the superclass part.
        s = self.__class__.__name__ + "=["
        s += "num_components=" + str(self.num_components)
        s += ", num_sample_discard=" + str(self.num_sample_discard)
        s += ", num_samples_gmm=" + str(self.num_samples_gmm)
        s += ", num_runs_em=" + str(self.num_runs_em)
        s += ", " + StandardMetropolis.__str__(self)
        s += "]"
        return s
    def adapt(self, mcmc_chain, step_output):
        """Fit the GMM proposal once, at the pre-specified iteration, from a
        random (deduplicated) subsample of the post-burn-in chain history."""
        # only learn the proposal once, at a pre-specified iteration
        if mcmc_chain.iteration == self.num_samples_when_to_switch:
            iter_no = mcmc_chain.iteration
            # Uniform indices in [num_sample_discard, iter_no).
            inds = randint(iter_no - self.num_sample_discard, size=self.num_samples_gmm) + self.num_sample_discard
            unique_inds = unique(inds)
            self.proposal = self.fit_gmm(mcmc_chain.samples[unique_inds])
            #idx_left = self.num_sample_discard
            #idx_right = self.num_sample_discard + self.num_samples_gmm
            #samples = mcmc_chain.samples[idx_left:idx_right]
            #self.proposal = self.fit_gmm(samples)
    def construct_proposal(self, y):
        # fixed proposal exists from a certain iteration, return std MH otherwise
        # was created in adapt method
        if self.proposal is not None:
            return self.proposal
        else:
            return StandardMetropolis.construct_proposal(self, y)
    def fit_gmm(self, samples):
        """
        Runs a couple of em instances on random starting points and returns
        internal GMM representation of best instance
        """
        features = RealFeatures(samples.T)
        gmms = []
        log_likelihoods = zeros(self.num_runs_em)
        for i in range(self.num_runs_em):
            # set up Shogun's GMM class and run em (corresponds to random
            # initialisation)
            gmm = GMM(self.num_components)
            gmm.set_features(features)
            log_likelihoods[i] = gmm.train_em()
            gmms.append(gmm)
        max_idx = log_likelihoods.argmax()
        # construct Gaussian mixture components in internal representation
        components = []
        for i in range(self.num_components):
            mu = gmms[max_idx].get_nth_mean(i)
            Sigma = gmms[max_idx].get_nth_cov(i)
            components.append(Gaussian(mu, Sigma))
        # construct a Gaussian mixture model based on the best EM run
        pie = gmms[max_idx].get_coef()
        proposal = MixtureDistribution(components[0].dimension,
                                       self.num_components, components,
                                       Discrete(pie))
        return proposal
| karlnapf/kameleon-mcmc | kameleon_mcmc/mcmc/samplers/GMMMetropolis.py | Python | bsd-2-clause | 3,990 | [
"Gaussian"
] | b1fe96b4394fa4a792547f98188a8f372a8988a6d2809c222a84d3189b485e57 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI templates to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cnos_template
author: "Dave Kasberg (@dkasberg)"
short_description: Manage switch configuration using templates on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way
to execute a set of CNOS commands on a switch by evaluating the current running configuration
and executing the commands only if the specific settings have not been already configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
Note The command file must contain the Ansible keyword {{ inventory_hostname }} in its
filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_template. These are written in the main.yml file of the tasks directory.
---
- name: Replace Config CLI command template with values
template:
src: demo_template.j2
dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
vlanid1: 13
slot_chassis_number1: "1/2"
portchannel_interface_number1: 100
portchannel_mode1: "active"
- name: Applying CLI commands on Switches
cnos_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Template Applied.]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Apply a rendered CNOS CLI command file to a Lenovo switch over SSH.

    Reads the command file line by line, sends each non-comment line to the
    device, saves the running configuration, appends the combined device
    output to ``outputfile``, and exits through the Ansible module API
    (``exit_json`` on success, ``fail_json`` if any command produced an
    error response).
    """
    module = AnsibleModule(
        argument_spec=dict(
            commandfile=dict(required=True),
            outputfile=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    commandfile = module.params['commandfile']
    outputfile = module.params['outputfile']
    deviceType = module.params['deviceType']
    hostIP = module.params['host']
    output = ""
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send commands one by one; the context manager guarantees the command
    # file is closed even when we bail out early on an error response
    # (the original opened it without ever closing it).
    with open(commandfile, "r") as f:
        for line in f:
            # Omit the comment lines in template file
            if not line.startswith("#"):
                command = line
                if not line.endswith("\n"):
                    command = command + "\n"
                response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
                errorMsg = cnos.checkOutputForError(response)
                output = output + response
                if (errorMsg is not None):
                    break  # To cater to Mufti case
    # Write to memory
    output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
    # Append the transcript to the results file. Using 'with' both closes the
    # handle reliably and avoids shadowing the builtin 'file'.
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if (errorMsg is None):
        module.exit_json(changed=True, msg="Template Applied")
    else:
        module.fail_json(msg=errorMsg)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| t0mk/ansible | lib/ansible/modules/network/lenovo/cnos_template.py | Python | gpl-3.0 | 7,136 | [
"VisIt"
] | f6ad882e69c53d73b5b9759faf71619e240e3683f2e88ad9d69d84071c8a6d1a |
from __future__ import division
import functools
import numpy as np
import scipy.sparse as sp
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
# Registries of the matrix generators and projection transformers that the
# parametrized checks below iterate over.
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)

all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
                           all_DenseRandomProjection)
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
    """Build a random sparse matrix with uniformly placed non-zero entries
    drawn from a standard normal distribution (fixed seed 0).

    Returns the same data as a dense ndarray and as a CSR matrix.
    """
    rng = np.random.RandomState(0)
    values = rng.randn(n_nonzeros)
    row_idx = rng.randint(n_samples, size=n_nonzeros)
    col_idx = rng.randint(n_features, size=n_nonzeros)
    sparse = sp.coo_matrix((values, (row_idx, col_idx)),
                           shape=(n_samples, n_features))
    return sparse.toarray(), sparse.tocsr()
def densify(matrix):
    """Return *matrix* as a dense ndarray, converting only when sparse."""
    return matrix.toarray() if sp.issparse(matrix) else matrix
# Shared fixture: a 10 x 1000 matrix with roughly 1% non-zero entries,
# available both dense (``data``) and in CSR form (``data_csr``).
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
    """eps must lie strictly inside (0, 1) and n_samples must be positive."""
    for n_samples_, eps in [(100, 1.1), (100, 0.0), (100, -0.1), (0, 0.5)]:
        assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                      n_samples_, eps)
def test_input_size_jl_min_dim():
    """Mismatched n_samples/eps array shapes raise; matching shapes work.

    The original test repeated the exact same ``(3, 2)`` mismatch twice;
    the second call now checks the opposite ``(2, 3)`` direction.
    """
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  3 * [100], 2 * [0.9])
    assert_raises(ValueError, johnson_lindenstrauss_min_dim,
                  2 * [100], 3 * [0.9])
    # Same-shape array arguments are broadcast elementwise and accepted.
    johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
                                  np.full((10, 10), 0.5))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
    """Non-positive matrix dimensions must be rejected with ValueError."""
    invalid_shapes = [(0, 0), (-1, 1), (1, -1), (1, 0), (-1, 0)]
    for n_components, n_features in invalid_shapes:
        assert_raises(ValueError, random_matrix, n_components, n_features)
def check_size_generated(random_matrix):
    """The generated matrix must have shape (n_components, n_features)."""
    for shape in [(1, 5), (5, 1), (5, 5), (1, 1)]:
        assert_equal(random_matrix(*shape).shape, shape)
def check_zero_mean_and_unit_norm(random_matrix):
    """Every generator should produce columns with (approximately) zero
    mean and unit Euclidean norm."""
    column = densify(random_matrix(10000, 1, random_state=0))
    assert_array_almost_equal(np.mean(column), 0, 3)
    assert_array_almost_equal(np.linalg.norm(column), 1.0, 1)
def check_input_with_sparse_random_matrix(random_matrix):
    """Density values outside (0, 1] must be rejected with ValueError."""
    n_components, n_features = 5, 10
    for bad_density in (-1., 0.0, 1.1):
        assert_raises(ValueError, random_matrix,
                      n_components, n_features, density=bad_density)
@pytest.mark.parametrize("random_matrix", all_random_matrix)
def test_basic_property_of_random_matrix(random_matrix):
    """Check basic properties (input validation, output shape, zero-mean
    unit-norm columns) of every random matrix generator."""
    check_input_size_random_matrix(random_matrix)
    check_size_generated(random_matrix)
    check_zero_mean_and_unit_norm(random_matrix)
@pytest.mark.parametrize("random_matrix", all_sparse_random_matrix)
def test_basic_property_of_sparse_random_matrix(random_matrix):
    """Sparse generators additionally validate the density parameter, and
    at density=1.0 behave like a dense generator."""
    check_input_with_sparse_random_matrix(random_matrix)
    random_matrix_dense = functools.partial(random_matrix, density=1.0)
    check_zero_mean_and_unit_norm(random_matrix_dense)
def test_gaussian_random_matrix():
    """Check some statistical properties of the Gaussian random matrix."""
    # Check that the random matrix follow the proper distribution.
    # Let's say that each element of a_{ij} of A is taken from
    # a_ij ~ N(0.0, 1 / n_components).
    #
    n_components = 100
    n_features = 1000
    A = gaussian_random_matrix(n_components, n_features, random_state=0)
    assert_array_almost_equal(0.0, np.mean(A), 2)
    assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    """Check some statistical properties of the sparse random matrix
    (Achlioptas-style: values in {0, +-sqrt(s)/sqrt(n_components)})."""
    n_components = 100
    n_features = 500

    for density in [0.3, 1.]:
        s = 1 / density

        A = sparse_random_matrix(n_components,
                                 n_features,
                                 density=density,
                                 random_state=0)
        A = densify(A)

        # Check possible values
        values = np.unique(A)
        assert_in(np.sqrt(s) / np.sqrt(n_components), values)
        assert_in(- np.sqrt(s) / np.sqrt(n_components), values)

        if density == 1.0:
            assert_equal(np.size(values), 2)
        else:
            assert_in(0., values)
            assert_equal(np.size(values), 3)

        # Check that the random matrix follow the proper distribution.
        # Let's say that each element of a_{ij} of A is taken from
        #
        # - -sqrt(s) / sqrt(n_components)   with probability 1 / 2s
        # -  0                              with probability 1 - 1 / s
        # - +sqrt(s) / sqrt(n_components)   with probability 1 / 2s
        #
        assert_almost_equal(np.mean(A == 0.0),
                            1 - 1 / s, decimal=2)
        assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)
        assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
                            1 / (2 * s), decimal=2)

        # Sample variance of the indicator of each value matches the
        # Bernoulli variance p * (1 - p).
        assert_almost_equal(np.var(A == 0.0, ddof=1),
                            (1 - 1 / s) * 1 / s, decimal=2)
        assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
        assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
                                   ddof=1),
                            (1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
    """Fitting a sparse projection with density outside (0, 1] must raise."""
    for RandomProjection in all_SparseRandomProjection:
        for bad_density in (1.1, 0, -0.1):
            assert_raises(ValueError,
                          RandomProjection(density=bad_density).fit, data)
def test_random_projection_transformer_invalid_input():
    """Too-small input for 'auto' sizing, or a negative n_components,
    must raise ValueError on fit."""
    for RandomProjection in all_RandomProjection:
        assert_raises(ValueError,
                      RandomProjection(n_components='auto').fit, [[0, 1, 2]])
        assert_raises(ValueError,
                      RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
    """Calling transform on an unfitted projection must raise ValueError."""
    for RandomProjection in all_RandomProjection:
        assert_raises(ValueError,
                      RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
    """When the JL bound exceeds the original dimensionality, 'auto'
    sizing must fail with an informative error message."""
    data, _ = make_sparse_random_data(1000, 100, 1000)

    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=0.1)
        expected_msg = (
            'eps=0.100000 and n_samples=1000 lead to a target dimension'
            ' of 5920 which is larger than the original space with'
            ' n_features=100')
        assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
    """Pairwise squared distances must be preserved within the eps bound
    promised by the Johnson-Lindenstrauss lemma."""
    data, _ = make_sparse_random_data(8, 5000, 15000)
    eps = 0.2

    original_distances = euclidean_distances(data, squared=True)
    original_distances = original_distances.ravel()
    non_identical = original_distances != 0.0

    # remove 0 distances to avoid division by 0
    original_distances = original_distances[non_identical]

    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
        projected = rp.fit_transform(data)

        projected_distances = euclidean_distances(projected, squared=True)
        projected_distances = projected_distances.ravel()

        # remove 0 distances to avoid division by 0
        projected_distances = projected_distances[non_identical]

        distances_ratio = projected_distances / original_distances

        # check that the automatically tuned values for the density respect the
        # contract for eps: pairwise distances are preserved according to the
        # Johnson-Lindenstrauss lemma
        assert_less(distances_ratio.max(), 1 + eps)
        assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
    """dense_output=True forces ndarray output even for sparse input;
    dense_output=False keeps sparse input sparse."""
    for SparseRandomProjection in all_SparseRandomProjection:
        # when using sparse input, the projected data can be forced to be a
        # dense numpy array
        rp = SparseRandomProjection(n_components=10, dense_output=True,
                                    random_state=0)
        rp.fit(data)
        assert isinstance(rp.transform(data), np.ndarray)

        sparse_data = sp.csr_matrix(data)
        assert isinstance(rp.transform(sparse_data), np.ndarray)

        # the output can be left to a sparse matrix instead
        rp = SparseRandomProjection(n_components=10, dense_output=False,
                                    random_state=0)
        rp = rp.fit(data)
        # output for dense input will stay dense:
        assert isinstance(rp.transform(data), np.ndarray)

        # output for sparse output will be sparse:
        assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
    """'auto' sizing must pick the JL dimension from the training data,
    transforms must be deterministic once fitted, and explicit
    n_components/density must be honored."""
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components='auto',
                              random_state=0,
                              eps=0.5).fit(data)

        # the number of components is adjusted from the shape of the training
        # set
        assert_equal(rp.n_components, 'auto')
        assert_equal(rp.n_components_, 110)

        if RandomProjection in all_SparseRandomProjection:
            assert_equal(rp.density, 'auto')
            assert_almost_equal(rp.density_, 0.03, 2)

        assert_equal(rp.components_.shape, (110, n_features))

        projected_1 = rp.transform(data)
        assert_equal(projected_1.shape, (n_samples, 110))

        # once the RP is 'fitted' the projection is always the same
        projected_2 = rp.transform(data)
        assert_array_equal(projected_1, projected_2)

        # fit transform with same random seed will lead to the same results
        rp2 = RandomProjection(random_state=0, eps=0.5)
        projected_3 = rp2.fit_transform(data)
        assert_array_equal(projected_1, projected_3)

        # Try to transform with an input X of size different from fitted.
        assert_raises(ValueError, rp.transform, data[:, 1:5])

        # it is also possible to fix the number of components and the density
        # level
        if RandomProjection in all_SparseRandomProjection:
            rp = RandomProjection(n_components=100, density=0.001,
                                  random_state=0)
            projected = rp.fit_transform(data)
            assert_equal(projected.shape, (n_samples, 100))
            assert_equal(rp.components_.shape, (100, n_features))
            assert_less(rp.components_.nnz, 115)  # close to 1% density
            assert_less(85, rp.components_.nnz)  # close to 1% density
def test_warning_n_components_greater_than_n_features():
    """Projecting into more dimensions than the input has must warn."""
    n_features = 20
    data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
    for RandomProjection in all_RandomProjection:
        rp = RandomProjection(n_components=n_features + 1)
        assert_warns(DataDimensionalityWarning, rp.fit, data)
def test_works_with_sparse_data():
    """Fitting on CSR input must yield the same components as dense input."""
    n_features = 20
    data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))

    for RandomProjection in all_RandomProjection:
        rp_dense = RandomProjection(n_components=3,
                                    random_state=1).fit(data)
        rp_sparse = RandomProjection(n_components=3,
                                     random_state=1).fit(sp.csr_matrix(data))
        assert_array_almost_equal(densify(rp_dense.components_),
                                  densify(rp_sparse.components_))
| vortex-ape/scikit-learn | sklearn/tests/test_random_projection.py | Python | bsd-3-clause | 14,002 | [
"Gaussian"
] | d133c22d005ee551be2f8b3f814987354b52f7c9e4e4d6c6ab493733ca9dc9a0 |
from aces.materials.POSCAR import structure as Material
class structure(Material):
    """Bi4I4 material (minimized tilt cell).

    The geometry is supplied as a relaxed CONTCAR in Cartesian coordinates;
    ``csetup`` defines the high-symmetry points and path for band-structure
    calculations.
    """
    def getPOSCAR(self):
        # The relaxed CONTCAR is the authoritative geometry.  An older
        # OVITO-exported direct-coordinate POSCAR string that used to follow
        # this return statement was unreachable dead code and was removed.
        return self.directContcar()
    def directContcar(self):
        # Relaxed Bi4I4 cell (VASP CONTCAR format, Cartesian coordinates).
        return """Bi I
1.0000000000000000
1.2100935260191532 4.2769506840430891 -0.0000000000002100
-6.9113483941381544 4.2651169123495434 -0.0693201522270231
-2.8949119255432874 0.8190681722870106 10.6394026495327889
Bi I
4 4
Cartesian
5.3710962420928592 0.7899993138004117 0.1436195283796707
-4.7433324006001136 3.6517121378367969 2.8903912576196218
3.0585140192273870 1.4443067826475851 7.7490113919129584
-7.0559147016282306 4.3060196287987855 10.4957834084168393
0.7403966147082210 2.1001803062913327 2.3048554208558620
-2.4252150981847973 2.9958385737917417 8.3345475372193807
-6.3902670151061267 4.1176853897580532 6.7091683613127611
4.7054486842779610 0.9783333778457488 3.9302339690377104
"""
    def csetup(self):
        # High-symmetry k-points (fractional coordinates) and band path for
        # band-structure calculations.  A stray, unused import of
        # ase.dft.kpoints.ibz_points was removed.
        self.bandpoints = {'Gamma': [0, 0, 0], 'Y': [0, 0.5, 0],
                           'M': [0, 0.5, 0.5], 'L': [0, 0.67, 0.33],
                           'X': [0.5, 0, 0], 'Z': [0, 0, .5]}
        self.bandpath = ['Gamma', 'Y', 'X', 'L', 'Z', 'M', 'Gamma']
"ASE",
"OVITO"
] | 539a1800a26e2abea1702135c1e632524cd645ba71056c5abe5e0cb3dc0d92ec |
"""
This class includes all of the functions that used by MeshOperations.py for the creation of the mesh.
"""
from StandardModules import *
import sys
from PyQt4.QtGui import *
from export_geo import *
class PreMesh(DefineDomain):
"""
Once the user has selected something from the drop-downs and clicked OK these functions retrieve the layer names as well as the source files of the
layers.
"""
    def getNetCDFDropDownOptions(self):
        # Record the selected NetCDF layer's display name, its index in the
        # drop-down, and the source file path stored as the item's data.
        self.singleNetCDFLayerText = self.dlg.ui.singleNetCDFLayerDropDown.currentText()
        self.singleNetCDFLayerIndex = self.dlg.ui.singleNetCDFLayerDropDown.findText(self.singleNetCDFLayerText)
        self.singleNetCDFLayerFileName = self.dlg.ui.singleNetCDFLayerDropDown.itemData(self.singleNetCDFLayerIndex).toString()
    def getShapeDropDownOptions(self):
        # Record the selected domain Shapefile layer's display name, its
        # drop-down index, and the source file path stored as the item's data.
        self.domainShapefileLayerText = self.dlg.ui.domainShapefileLayerDropDown.currentText()
        self.domainShapefileLayerIndex = self.dlg.ui.domainShapefileLayerDropDown.findText(self.domainShapefileLayerText)
        self.domainShapefileLayerFileName = self.dlg.ui.domainShapefileLayerDropDown.itemData(self.domainShapefileLayerIndex).toString()
    def getMeshingAlgorithm(self):
        # Record the meshing algorithm chosen in the drop-down (display text).
        self.meshingAlgorithmText = self.dlg.ui.meshingAlgorithmDropDown.currentText()
"""
Uses getGeoFile to convert the given domain Shapefile layer into a .geo file and edits its name.
"""
    def convertShape(self):
        # Convert the selected domain shapefile ('<base>.shp') into a Gmsh
        # geometry file and remember its path as '<base>.geo'.
        getGeoFile(str(self.domainShapefileLayerFileName), str(self.domainShapefileLayerFileName[:-4]))
        self.geoFileName = '%s.geo' % self.domainShapefileLayerFileName[:-4]
    def define_bounds(self, ok):
        # Thin delegation to the base-class boundary definition; 'ok' carries
        # whether the user enabled the boundary-ID group in the dialog.
        DefineDomain.define_bounds(self, ok)
'''
Runs all the modules for id definition and runs an export module to create the geofile
Organises all the data for the id definitions and export. exports either to sphere or
plane.
'''
    def runIdDef(self):
        """Gather boundary-ID settings from the dialog, run the boundary
        definition, and export the domain to '<base>_idBoundary.geo'."""
        self.defID = int(str(self.dlg.ui.Default_Id.text()))
        self.domainSavePath = '%s_idBoundary' % self.domainShapefileLayerFileName[:-4]
        self.domainText = self.domainShapefileLayerFileName[:-4]
        idText = self.dlg.ui.IdDropdown.currentText()
        idIndex = self.dlg.ui.IdDropdown.findText(idText)
        self.idFilePath = self.dlg.ui.IdDropdown.itemData(idIndex).toString()
        # Optional threshold used when assigning boundary IDs.
        self.threshold = 0.0
        if self.dlg.ui.define_th.isChecked():
            self.threshold = float(str(self.dlg.ui.Threshold.text()))
        self.define_bounds(self.dlg.ui.grpDefID.isChecked())
        # Write the Geo.
        data = [self.domainData.regionIDs,self.domainData.shapes,self.boundaryIDList,self.domainData.points]
        write_geo_file(self.domainSavePath,data)
"""
Retrieve the information from the drop-down boxes.
"""
def getFiles(self):
if self.dlg.ui.singleNetCDFChooseFilesRadioButton.isChecked():
self.singleNetCDFLayerFileName = self.dlg.ui.singleNetCDFChooseFilesLineEdit.text()
if ".nc" in str(self.singleNetCDFLayerFileName):
self.singleNetCDFLayerFileName = '%s' % self.singleNetCDFLayerFileName
else:
self.singleNetCDFLayerFileName = '%s.nc' % self.singleNetCDFLayerFileName
else:
self.getNetCDFDropDownOptions()
self.postviewFileName = '%s_meshing_posfile.pos' % self.singleNetCDFLayerFileName[:-3]
if self.dlg.ui.chooseGeoFileRadioButton.isChecked():
self.geoFileName = self.dlg.ui.chooseGeoFileLineEdit.text()
else:
self.getShapeDropDownOptions()
self.runIdDef()
self.geoFileName = '%s.geo' % self.domainSavePath
"""
Generates a PostView file for the use as mesh-size metric for planar domains. The three functions for the three
types of coordinate system used in NetCDFs: lat-lon, x-y, and x/y start/stop with x/y step.
"""
def writePosFile(self):
input_file = str(self.singleNetCDFLayerFileName)
output_file = str(self.postviewFileName)
# Lon-lat.
def create_pos(netcdf_file):
file = NetCDF.NetCDFFile(netcdf_file, 'r')
lon = file.variables['lon'][:]
lat = file.variables['lat'][:]
field = file.variables['z'][:, :]
pos_string = """View "background_edgelength" {\n"""
for i in range(0,len(lon)):
for j in range(0,len(lat)):
lat_p1 = lat[j]
lon_p1 = lon[i]
depth = abs(field[j][i])
# If a NetCDF has 0 value elements Gmsh will attempt to create an impossibly small mesh resulting in slow
# operation. This ensures that the .pos file created is usable.
if depth == 0:
depth = 0.001
line = "SP("+str(lon_p1)+","+str(lat_p1)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
pos_string = pos_string+"};"
return pos_string
# X/Y range.
def create_pos_xyrange(netcdf_file):
file = NetCDF.NetCDFFile(netcdf_file, 'r')
xMin = file.variables['x_range'][0]; xMax = file.variables['x_range'][1]
yMin = file.variables['y_range'][0]; yMax = file.variables['y_range'][1]
xSpace = file.variables['spacing'][0]; ySpace = file.variables['spacing'][1]
field = file.variables['z']
pos_string = """View "background_edgelength" {\n"""
y = yMax; count = 0; step = 1
xList = linspace(xMin, xMax, (1/xSpace)); yList = linspace(yMin, yMax, (1/ySpace))
while y >= yMin:
x = xMin
while x <= xMax and count < len(field):
depth = abs(field[count])
if depth == 0:
depth = 0.001
line = "SP("+str(x)+","+str(y)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
x += step*xSpace; count += step
y -= step*ySpace
pos_string = pos_string+"};"
return pos_string
# X-Y.
def create_pos_xy(netcdf_file):
# read netcdf file
file = NetCDF.NetCDFFile(netcdf_file, 'r')
x = file.variables['x'][:]
y = file.variables['y'][:]
field = file.variables['z'][:, :]
pos_string = """View "background_edgelength" {\n"""
for i in range(len(x)):
for j in range(len(y)):
y_p1 = y[j]
x_p1 = x[i]
depth = abs(field[j][i])
if depth == 0:
depth = 0.001
line = "SP("+str(x_p1)+","+str(y_p1)+",0){"+str(depth)+"};\n"
pos_string = pos_string+line
pos_string = pos_string+"};"
return pos_string
print "Writing PostView File..."
# Check the file variables so that the appropriate function can be called.
file = NetCDF.NetCDFFile(input_file, 'r')
variableNames = file.variables.keys()
if 'lon' in variableNames:
pos_string = create_pos(input_file)
elif 'x_range' in variableNames:
pos_string = create_pos_xyrange(input_file)
elif 'x' in variableNames:
pos_string = create_pos_xy(input_file)
else:
raise ErrorMessages.UnsuportedRasterVariableError(variableNames) #should work
f = open(output_file,'w')
f.write(pos_string)
f.close()
print "PostView File Written."
"""
Not in use. This functionality is now possible within RasterCalc.
Performed the calculation of the minimum of multiple NetCDF files using grdmath and imported the resulting file into QGIS
in pseudolcolour.
"""
def calculateMinimum(self):
# Get all of the active NetCDF layers.
self.activeNetCDFs = []
for layer in self.activeLayers:
if '.nc' in str(layer.source()):
self.activeNetCDFs.append([layer.name(), QVariant(str(layer.source()))])
for i in range(len(list(self.activeNetCDFs)) - 1):
# For the first iteration we need to use the top layer and the layer below and output to /tmp/tmp.tif.
if i == 0:
# Min of overlapping regions.
call (["/usr/lib/gmt/bin/grdmath", str(list(self.activeNetCDFs)[i][1].toString()), str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", "/tmp/tmp.tif"])
# After the first iteration we want to use the newly created tmp file and the next layer down.
if i > 0 and i < range(len(list(self.activeNetCDFs)) - 1)[-1]:
# Min of the newly created tmp and the next layer.
call (["/usr/lib/gmt/bin/grdmath", "/tmp/tmp.tif", str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", "/tmp/tmp.tif"])
# For the last iteration we need to convert the .tif to a .nc with the correct filename rather than tmp.tif. Uses the bottom layers name
# plus -minimum.nc.
if i == range(len(list(self.activeNetCDFs)) - 1)[-1]:
saveName = str(list(self.activeNetCDFs)[i + 1][1].toString())
saveName = saveName.replace(".nc", "-minimum.nc")
call (["/usr/lib/gmt/bin/grdmath", "/tmp/tmp.tif", str(list(self.activeNetCDFs)[i + 1][1].toString()) \
, "MIN", "=", saveName])
# If check box is selected it will add the layer to canvas as pseudocolour.
if self.dlg.ui.addLayerToCanvasCheckBox.isChecked():
# Add the layer and convert it to pseudocolour.
fileInfo = QFileInfo(saveName)
baseName = fileInfo.baseName()
self.iface.addRasterLayer(saveName, baseName)
self.qgisCanvas = qgis.utils.iface.mapCanvas()
self.activeLayers = self.qgisCanvas.layers()
for layer in self.activeLayers:
if saveName in str(layer.source()):
layer.setDrawingStyle(QgsRasterLayer.SingleBandPseudoColor)
layer.setColorShadingAlgorithm(QgsRasterLayer.PseudoColorShader)
| janhui/test_engine | release/mesh_netcdf/PreMeshingFunctions.py | Python | lgpl-2.1 | 9,107 | [
"NetCDF"
] | d0615cfd993dd7d53728ada2b59d7cd1aacbcee2206fec58b7a0fc25a8e281ce |
"""
Very simple test case to verify bok-choy integration.
"""
from bok_choy.web_app_test import WebAppTest
from edxapp_pages.lms.info import InfoPage
class InfoPageTest(WebAppTest):
    """
    Smoke test: each top-level info page in the LMS should load.
    """

    @property
    def page_object_classes(self):
        return [InfoPage]

    def test_info(self):
        # Visit every known info section in turn.
        for section in InfoPage.sections():
            self.ui.visit('lms.info', section=section)
| mjg2203/edx-platform-seas | common/test/bok_choy/tests/test_info_pages.py | Python | agpl-3.0 | 463 | [
"VisIt"
] | e837668f67331abe10d6a083945ead8aa508812e14366dcc16343921d24d3513 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines the VaspInputSet abstract base class and a concrete
implementation for the parameters developed and tested by the core team
of pymatgen, including the Materials Virtual Lab, Materials Project and the MIT
high throughput project. The basic concept behind an input set is to specify
a scheme to generate a consistent set of VASP inputs from a structure
without further user intervention. This ensures comparability across
runs.
Read the following carefully before implementing new input sets:
1. 99% of what needs to be done can be done by specifying user_incar_settings
to override some of the defaults of various input sets. Unless there is an
extremely good reason to add a new set, DO NOT add one. E.g., if you want
to turn the hubbard U off, just set "LDAU": False as a user_incar_setting.
2. All derivative input sets should inherit from one of the usual MPRelaxSet or
MITRelaxSet, and proper superclass delegation should be used where possible.
In particular, you are not supposed to implement your own as_dict or
from_dict for derivative sets unless you know what you are doing.
Improperly overriding the as_dict and from_dict protocols is the major
cause of implementation headaches. If you need an example, look at how the
MPStaticSet or MPNonSCFSets are constructed.
The above are recommendations. The following are UNBREAKABLE rules:
1. All input sets must take in a structure or list of structures as the first
argument.
2. user_incar_settings, user_kpoints_settings and user_<whatever>_settings are
ABSOLUTE. Any new sets you implement must obey this. If a user wants to
override your settings, you assume he knows what he is doing. Do not
magically override user supplied settings. You can issue a warning if you
think the user is wrong.
3. All input sets must save all supplied args and kwargs as instance variables.
E.g., self.my_arg = my_arg and self.kwargs = kwargs in the __init__. This
ensures the as_dict and from_dict work correctly.
"""
import abc
import glob
import os
import re
import shutil
import warnings
from copy import deepcopy
from itertools import chain
from pathlib import Path
from typing import List, Union, Optional
from zipfile import ZipFile
import numpy as np
from monty.dev import deprecated
from monty.io import zopen
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.periodic_table import Specie, Element
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.lobster import Lobsterin
from pymatgen.io.vasp.inputs import Incar, Poscar, Potcar, Kpoints, VaspInput
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
__author__ = (
"Shyue Ping Ong, Wei Chen, Will Richards, Geoffroy Hautier, " "Anubhav Jain"
)
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "May 28 2016"
MODULE_DIR = Path(__file__).resolve().parent
class VaspInputSet(MSONable, metaclass=abc.ABCMeta):
    """
    Base class representing a set of Vasp input parameters with a structure
    supplied as init parameters. Typically, you should not inherit from this
    class. Start from DictSet or MPRelaxSet or MITRelaxSet.
    """

    @property
    @abc.abstractmethod
    def incar(self):
        """Incar object"""
        pass

    @property
    @abc.abstractmethod
    def kpoints(self):
        """Kpoints object"""
        pass

    @property
    @abc.abstractmethod
    def poscar(self):
        """Poscar object"""
        pass

    @property
    def potcar_symbols(self):
        """
        List of POTCAR symbols.
        """
        elements = self.poscar.site_symbols
        potcar_symbols = []
        settings = self._config_dict["POTCAR"]

        # Config may map elements either to plain symbols or to
        # {"symbol": ...} dicts; handle both layouts.
        if isinstance(settings[elements[-1]], dict):
            for el in elements:
                potcar_symbols.append(settings[el]["symbol"] if el in settings else el)
        else:
            for el in elements:
                potcar_symbols.append(settings.get(el, el))

        return potcar_symbols

    @property
    def potcar(self):
        """
        Potcar object.
        """
        potcar = Potcar(self.potcar_symbols, functional=self.potcar_functional)

        # warn if the selected POTCARs do not correspond to the chosen
        # potcar_functional
        for psingle in potcar:
            if self.potcar_functional not in psingle.identify_potcar()[0]:
                warnings.warn(
                    "POTCAR data with symbol {} is not known by pymatgen to\
                    correspond with the selected potcar_functional {}. This POTCAR\
                    is known to correspond with functionals {}. Please verify that\
                    you are using the right POTCARs!"
                    .format(psingle.symbol,
                            self.potcar_functional,
                            psingle.identify_potcar(mode='data')[0]),
                    BadInputSetWarning,
                )

        return potcar

    @property  # type: ignore
    @deprecated(message="Use the get_vasp_input() method instead.")
    def all_input(self):
        """
        Returns all input files as a dict of {filename: vasp object}

        Returns:
            dict of {filename: object}, e.g., {'INCAR': Incar object, ...}
        """
        return {
            "INCAR": self.incar,
            "KPOINTS": self.kpoints,
            "POSCAR": self.poscar,
            "POTCAR": self.potcar,
        }

    def get_vasp_input(self) -> VaspInput:
        """
        Returns:
            VaspInput
        """
        return VaspInput(
            incar=self.incar,
            kpoints=self.kpoints,
            poscar=self.poscar,
            potcar=self.potcar,
        )

    def write_input(self, output_dir, make_dir_if_not_present=True, include_cif=False,
                    potcar_spec=False, zip_output=False):
        """
        Writes a set of VASP input to a directory.

        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
            potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
                This is intended to help sharing an input set with people who might
                not have a license to specific Potcar files. Given a "POTCAR.spec",
                the specific POTCAR file can be re-generated using pymatgen with the
                "generate_potcar" function in the pymatgen CLI.
            zip_output (bool): If True, output will be zipped into a file with the
                same name as the InputSet (e.g., MPStaticSet.zip)
        """
        if potcar_spec:
            if make_dir_if_not_present and not os.path.exists(output_dir):
                os.makedirs(output_dir)

            with zopen(os.path.join(output_dir, "POTCAR.spec"), "wt") as f:
                f.write("\n".join(self.potcar_symbols))

            for k, v in {"INCAR": self.incar,
                         "POSCAR": self.poscar,
                         "KPOINTS": self.kpoints
                         }.items():
                if v is not None:
                    with zopen(os.path.join(output_dir, k), "wt") as f:
                        f.write(v.__str__())
        else:
            vinput = self.get_vasp_input()
            vinput.write_input(output_dir, make_dir_if_not_present=make_dir_if_not_present)

        cifname = ""
        if include_cif:
            s = vinput["POSCAR"].structure
            cifname = Path(output_dir) / ("%s.cif" % re.sub(r"\s", "", s.formula))
            s.to(filename=cifname)

        if zip_output:
            filename = self.__class__.__name__ + ".zip"
            # BUG FIX: the input files were written into output_dir, but the
            # previous code created the archive in -- and collected members
            # from -- the current working directory. Both the archive and
            # its members must be resolved against output_dir.
            with ZipFile(os.path.join(output_dir, filename), "w") as zf:
                for fname in ["INCAR", "POSCAR", "KPOINTS", "POTCAR",
                              "POTCAR.spec", os.path.basename(str(cifname))]:
                    if not fname:
                        # cifname is "" when include_cif is False; skip it
                        # rather than resolving to output_dir itself.
                        continue
                    fpath = os.path.join(output_dir, fname)
                    try:
                        zf.write(fpath, arcname=fname)
                        os.remove(fpath)
                    except FileNotFoundError:
                        pass

    def as_dict(self, verbosity=2):
        """
        Args:
            verbosity: Verbosity for generated dict. If 1, structure is
            excluded.

        Returns:
            MSONable dict
        """
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
def _load_yaml_config(fname):
    """Load <fname>.yaml from the module directory, recursively merging in
    any PARENT config. Keys absent from the child are inherited; dict-valued
    parent entries are merged into the child's dict (parent entries win on
    key collisions, matching the established behavior)."""
    config = loadfn(str(MODULE_DIR / ("%s.yaml" % fname)))
    if "PARENT" in config:
        parent = _load_yaml_config(config["PARENT"])
        for key, parent_val in parent.items():
            if key not in config:
                config[key] = parent_val
            elif isinstance(parent_val, dict):
                merged = config.get(key, {})
                merged.update(parent_val)
                config[key] = merged
    return config
class DictSet(VaspInputSet):
    """
    Concrete implementation of VaspInputSet that is initialized from a dict
    settings. This allows arbitrary settings to be input. In general,
    this is rarely used directly unless there is a source of settings in yaml
    format (e.g., from a REST interface). It is typically used by other
    VaspInputSets for initialization.

    Special consideration should be paid to the way the MAGMOM initialization
    for the INCAR is done. The initialization differs depending on the type of
    structure and the configuration settings. The order in which the magmom is
    determined is as follows:

    1. If the site itself has a magmom setting, that is used.
    2. If the species on the site has a spin setting, that is used.
    3. If the species itself has a particular setting in the config file, that
       is used, e.g., Mn3+ may have a different magmom than Mn4+.
    4. Lastly, the element symbol itself is checked in the config file. If
       there are no settings, VASP's default of 0.6 is used.
    """

    def __init__(
        self,
        structure,
        config_dict,
        files_to_transfer=None,
        user_incar_settings=None,
        user_kpoints_settings=None,
        user_potcar_settings=None,
        constrain_total_magmom=False,
        sort_structure=True,
        potcar_functional=None,
        user_potcar_functional=None,
        force_gamma=False,
        reduce_structure=None,
        vdw=None,
        use_structure_charge=False,
        standardize=False,
        sym_prec=0.1,
        international_monoclinic=True,
    ):
        """
        Args:
            structure (Structure): The Structure to create inputs for.
            config_dict (dict): The config dictionary to use.
            files_to_transfer (dict): A dictionary of {filename: filepath}. This
                allows the transfer of files from a previous calculation.
            user_incar_settings (dict): User INCAR settings. This allows a user
                to override INCAR settings, e.g., setting a different MAGMOM for
                various elements or species. Note that in the new scheme,
                ediff_per_atom and hubbard_u are no longer args. Instead, the
                config_dict supports EDIFF_PER_ATOM and EDIFF keys. The former
                scales with # of atoms, the latter does not. If both are
                present, EDIFF is preferred. To force such settings, just supply
                user_incar_settings={"EDIFF": 1e-5, "LDAU": False} for example.
                The keys 'LDAUU', 'LDAUJ', 'LDAUL' are special cases since
                pymatgen defines different values depending on what anions are
                present in the structure, so these keys can be defined in one
                of two ways, e.g. either {"LDAUU":{"O":{"Fe":5}}} to set LDAUU
                for Fe to 5 in an oxide, or {"LDAUU":{"Fe":5}} to set LDAUU to
                5 regardless of the input structure.
                If a None value is given, that key is unset. For example,
                {"ENCUT": None} will remove ENCUT from the incar settings.
            user_kpoints_settings (dict or Kpoints): Allow user to override kpoints
                setting by supplying a dict E.g., {"reciprocal_density": 1000}.
                User can also supply Kpoints object. Default is None.
            user_potcar_settings (dict: Allow user to override POTCARs. E.g.,
                {"Gd": "Gd_3"}. This is generally not recommended. Default is None.
            constrain_total_magmom (bool): Whether to constrain the total magmom
                (NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
                species. Defaults to False.
            sort_structure (bool): Whether to sort the structure (using the
                default sort order of electronegativity) before generating input
                files. Defaults to True, the behavior you would want most of the
                time. This ensures that similar atomic species are grouped
                together.
            user_potcar_functional (str): Functional to use. Default (None) is to use
                the functional in the config dictionary. Valid values:
                "PBE", "PBE_52", "PBE_54", "LDA", "LDA_52", "LDA_54", "PW91",
                "LDA_US", "PW91_US".
            force_gamma (bool): Force gamma centered kpoint generation. Default
                (False) is to use the Automatic Density kpoint scheme, which
                will use the Gamma centered generation scheme for hexagonal
                cells, and Monkhorst-Pack otherwise.
            reduce_structure (None/str): Before generating the input files,
                generate the reduced structure. Default (None), does not
                alter the structure. Valid values: None, "niggli", "LLL".
            vdw: Adds default parameters for van-der-Waals functionals supported
                by VASP to INCAR. Supported functionals are: DFT-D2, undamped
                DFT-D3, DFT-D3 with Becke-Jonson damping, Tkatchenko-Scheffler,
                Tkatchenko-Scheffler with iterative Hirshfeld partitioning,
                MBD@rSC, dDsC, Dion's vdW-DF, DF2, optPBE, optB88, optB86b and
                rVV10.
            use_structure_charge (bool): If set to True, then the public
                variable used for setting the overall charge of the
                structure (structure.charge) is used to set the NELECT
                variable in the INCAR
                Default is False (structure's overall charge is not used)
            standardize (float): Whether to standardize to a primitive standard
                cell. Defaults to False.
            sym_prec (float): Tolerance for symmetry finding.
            international_monoclinic (bool): Whether to use international convention
                (vs Curtarolo) for monoclinic. Defaults True.
        """
        if reduce_structure:
            structure = structure.get_reduced_structure(reduce_structure)
        if sort_structure:
            structure = structure.get_sorted_structure()
        self._structure = structure
        self._config_dict = deepcopy(config_dict)
        self.files_to_transfer = files_to_transfer or {}
        self.constrain_total_magmom = constrain_total_magmom
        self.sort_structure = sort_structure
        self.force_gamma = force_gamma
        self.reduce_structure = reduce_structure
        self.user_incar_settings = user_incar_settings or {}
        self.user_kpoints_settings = user_kpoints_settings or {}
        self.user_potcar_settings = user_potcar_settings
        self.vdw = vdw.lower() if vdw is not None else None
        self.use_structure_charge = use_structure_charge
        self.standardize = standardize
        self.sym_prec = sym_prec
        self.international_monoclinic = international_monoclinic

        if (
            self.user_incar_settings.get("KSPACING")
            and user_kpoints_settings is not None
        ):
            warnings.warn(
                "You have specified KSPACING and also supplied kpoints "
                "settings. KSPACING only has effect when there is no "
                "KPOINTS file. Since both settings were given, pymatgen"
                "will generate a KPOINTS file and ignore KSPACING."
                "Remove the `user_kpoints_settings` argument to enable KSPACING.",
                BadInputSetWarning,
            )

        if self.vdw:
            vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
            try:
                self._config_dict["INCAR"].update(vdw_par[self.vdw])
            except KeyError:
                raise KeyError(
                    "Invalid or unsupported van-der-Waals "
                    "functional. Supported functionals are "
                    "%s." % vdw_par.keys()
                )
        # read the POTCAR_FUNCTIONAL from the .yaml
        self.potcar_functional = self._config_dict.get("POTCAR_FUNCTIONAL", "PBE")

        if potcar_functional is not None and user_potcar_functional is not None:
            raise ValueError(
                "Received both 'potcar_functional' and "
                "'user_potcar_functional arguments. 'potcar_functional "
                "is deprecated."
            )
        if potcar_functional:
            warnings.warn(
                "'potcar_functional' argument is deprecated. Use "
                "'user_potcar_functional' instead.",
                DeprecationWarning,
            )
            self.potcar_functional = potcar_functional
        elif user_potcar_functional:
            self.potcar_functional = user_potcar_functional

        # warn if a user is overriding POTCAR_FUNCTIONAL
        if self.potcar_functional != self._config_dict.get("POTCAR_FUNCTIONAL"):
            warnings.warn(
                "Overriding the POTCAR functional is generally not recommended "
                " as it significantly affect the results of calculations and "
                "compatibility with other calculations done with the same "
                "input set. Note that some POTCAR symbols specified in "
                "the configuration file may not be available in the selected "
                "functional.",
                BadInputSetWarning,
            )

        if self.user_potcar_settings:
            warnings.warn(
                "Overriding POTCARs is generally not recommended as it "
                "significantly affect the results of calculations and "
                "compatibility with other calculations done with the same "
                "input set. In many instances, it is better to write a "
                "subclass of a desired input set and override the POTCAR in "
                "the subclass to be explicit on the differences.",
                BadInputSetWarning,
            )
            for k, v in self.user_potcar_settings.items():
                self._config_dict["POTCAR"][k] = v

    @property
    def structure(self) -> Structure:
        """
        :return: Structure
        """
        if self.standardize and self.sym_prec:
            return standardize_structure(
                self._structure,
                sym_prec=self.sym_prec,
                international_monoclinic=self.international_monoclinic,
            )
        else:
            return self._structure

    @property
    def incar(self) -> Incar:
        """
        :return: Incar
        """
        settings = dict(self._config_dict["INCAR"])
        for k, v in self.user_incar_settings.items():
            if v is None:
                try:
                    del settings[k]
                except KeyError:
                    settings[k] = v
            elif k == "KSPACING" and self.user_kpoints_settings != {}:
                pass  # Ignore KSPACING if user_kpoints_settings are given
            else:
                settings[k] = v
        structure = self.structure
        incar = Incar()
        comp = structure.composition
        elements = sorted(
            [el for el in comp.elements if comp[el] > 0], key=lambda e: e.X
        )
        most_electroneg = elements[-1].symbol
        poscar = Poscar(structure)
        hubbard_u = settings.get("LDAU", False)

        for k, v in settings.items():
            if k == "MAGMOM":
                mag = []
                for site in structure:
                    if hasattr(site, "magmom"):
                        mag.append(site.magmom)
                    elif hasattr(site.specie, "spin"):
                        mag.append(site.specie.spin)
                    elif str(site.specie) in v:
                        mag.append(v.get(str(site.specie)))
                    else:
                        mag.append(v.get(site.specie.symbol, 0.6))
                incar[k] = mag
            elif k in ("LDAUU", "LDAUJ", "LDAUL"):
                if hubbard_u:
                    if hasattr(structure[0], k.lower()):
                        m = dict(
                            [
                                (site.specie.symbol, getattr(site, k.lower()))
                                for site in structure
                            ]
                        )
                        incar[k] = [m[sym] for sym in poscar.site_symbols]
                    # lookup specific LDAU if specified for most_electroneg atom
                    elif most_electroneg in v.keys() and isinstance(
                        v[most_electroneg], dict
                    ):
                        incar[k] = [
                            v[most_electroneg].get(sym, 0)
                            for sym in poscar.site_symbols
                        ]
                    # else, use fallback LDAU value if it exists
                    else:
                        incar[k] = [
                            v.get(sym, 0)
                            if isinstance(v.get(sym, 0), (float, int))
                            else 0
                            for sym in poscar.site_symbols
                        ]
            elif k.startswith("EDIFF") and k != "EDIFFG":
                if "EDIFF" not in settings and k == "EDIFF_PER_ATOM":
                    incar["EDIFF"] = float(v) * structure.num_sites
                else:
                    incar["EDIFF"] = float(settings["EDIFF"])
            else:
                incar[k] = v

        has_u = hubbard_u and sum(incar["LDAUU"]) > 0
        if has_u:
            # modify LMAXMIX if LSDA+U and you have d or f electrons
            # note that if the user explicitly sets LMAXMIX in settings it will
            # override this logic.
            if "LMAXMIX" not in settings.keys():
                # contains f-electrons
                if any([el.Z > 56 for el in structure.composition]):
                    incar["LMAXMIX"] = 6
                # contains d-electrons
                elif any([el.Z > 20 for el in structure.composition]):
                    incar["LMAXMIX"] = 4
        else:
            for key in list(incar.keys()):
                if key.startswith("LDAU"):
                    del incar[key]

        if self.constrain_total_magmom:
            nupdown = sum([mag if abs(mag) > 0.6 else 0 for mag in incar["MAGMOM"]])
            incar["NUPDOWN"] = nupdown

        if self.use_structure_charge:
            incar["NELECT"] = self.nelect

        # Ensure adequate number of KPOINTS are present for the tetrahedron
        # method (ISMEAR=-5). If KSPACING is in the INCAR file the number
        # of kpoints is not known before calling VASP, but a warning is raised
        # when the KSPACING value is > 0.5 (2 reciprocal Angstrom).
        # An error handler in Custodian is available to
        # correct overly large KSPACING values (small number of kpoints)
        # if necessary.
        # if "KSPACING" not in self.user_incar_settings.keys():
        if self.kpoints is not None:
            # np.prod (np.product was removed in NumPy 2.0).
            if np.prod(self.kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
                incar["ISMEAR"] = 0

        # BUG FIX: the closing parenthesis was misplaced as
        # incar.get("ISMEAR", 0 == -5), which tested the mere *presence*
        # (truthiness) of ISMEAR rather than comparing it to -5.
        if self.user_incar_settings.get("KSPACING", 0) > 0.5 and incar.get(
            "ISMEAR", 0
        ) == -5:
            warnings.warn(
                "Large KSPACING value detected with ISMEAR = -5. Ensure that VASP "
                "generates an adequate number of KPOINTS, lower KSPACING, or "
                "set ISMEAR = 0",
                BadInputSetWarning,
            )

        if all([k.is_metal for k in structure.composition.keys()]):
            if incar.get("NSW", 0) > 0 and incar.get("ISMEAR", 1) < 1:
                warnings.warn(
                    "Relaxation of likely metal with ISMEAR < 1 "
                    "detected. Please see VASP recommendations on "
                    "ISMEAR for metals.",
                    BadInputSetWarning,
                )

        return incar

    @property
    def poscar(self) -> Poscar:
        """
        :return: Poscar
        """
        return Poscar(self.structure)

    @property
    def nelect(self) -> float:
        """
        Gets the default number of electrons for a given structure.
        """
        # if structure is not sorted this can cause problems, so must take
        # care to remove redundant symbols when counting electrons
        site_symbols = list(set(self.poscar.site_symbols))
        structure = self.structure
        nelect = 0.0
        for ps in self.potcar:
            if ps.element in site_symbols:
                site_symbols.remove(ps.element)
                nelect += (
                    structure.composition.element_composition[ps.element] * ps.ZVAL
                )

        if self.use_structure_charge:
            return nelect - structure.charge
        else:
            return nelect

    @property
    def kpoints(self) -> Union[Kpoints, None]:
        """
        Returns a KPOINTS file using the fully automated grid method. Uses
        Gamma centered meshes for hexagonal cells and Monk grids otherwise.

        If KSPACING is set in user_incar_settings (or the INCAR file), no
        file is created because VASP will automatically generate the kpoints.

        Algorithm:
            Uses a simple approach scaling the number of divisions along each
            reciprocal lattice vector proportional to its length.
        """
        # Return None if KSPACING is present in the INCAR, because this will
        # cause VASP to generate the kpoints automatically
        if self.user_incar_settings.get("KSPACING") or self._config_dict["INCAR"].get(
            "KSPACING"
        ):
            if self.user_kpoints_settings == {}:
                return None

        settings = self.user_kpoints_settings or self._config_dict.get("KPOINTS")

        if isinstance(settings, Kpoints):
            return settings

        # Return None if KSPACING is present in the INCAR, because this will
        # cause VASP to generate the kpoints automatically
        # (NOTE: already covered by the broader check above; kept for safety.)
        if (
            self.user_incar_settings.get("KSPACING")
            and self.user_kpoints_settings == {}
        ):
            return None

        # If grid_density is in the kpoints_settings use
        # Kpoints.automatic_density
        if settings.get("grid_density"):
            return Kpoints.automatic_density(
                self.structure, int(settings["grid_density"]), self.force_gamma
            )

        # If reciprocal_density is in the kpoints_settings use
        # Kpoints.automatic_density_by_vol
        elif settings.get("reciprocal_density"):
            return Kpoints.automatic_density_by_vol(
                self.structure, int(settings["reciprocal_density"]), self.force_gamma
            )

        # If length is in the kpoints_settings use Kpoints.automatic
        elif settings.get("length"):
            return Kpoints.automatic(settings["length"])

        # Raise error. Unsure of which kpoint generation to use
        else:
            raise ValueError(
                "Invalid KPoint Generation algo : Supported Keys are "
                "grid_density: for Kpoints.automatic_density generation, "
                "reciprocal_density: for KPoints.automatic_density_by_vol "
                "generation, and length : for Kpoints.automatic generation"
            )

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        return self.__class__.__name__

    def write_input(
        self,
        output_dir: str,
        make_dir_if_not_present: bool = True,
        include_cif: bool = False,
        potcar_spec: bool = False,
        zip_output: bool = False,
    ):
        """
        Writes out all input to a directory.

        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            include_cif (bool): Whether to write a CIF file in the output
                directory for easier opening by VESTA.
            potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
                This is intended to help sharing an input set with people who might
                not have a license to specific Potcar files. Given a "POTCAR.spec",
                the specific POTCAR file can be re-generated using pymatgen with the
                "generate_potcar" function in the pymatgen CLI.
        """
        super().write_input(
            output_dir=output_dir,
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif,
            potcar_spec=potcar_spec,
            zip_output=zip_output
        )
        for k, v in self.files_to_transfer.items():
            with zopen(v, "rb") as fin, zopen(str(Path(output_dir) / k), "wb") as fout:
                shutil.copyfileobj(fin, fout)
class MITRelaxSet(DictSet):
    """
    Standard implementation of VaspInputSet utilizing parameters in the MIT
    High-throughput project.
    The parameters are chosen specifically for a high-throughput project,
    which means in general pseudopotentials with fewer electrons were chosen.

    Please refer::

        A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
        K. A. Persson, G. Ceder. A high-throughput infrastructure for density
        functional theory calculations. Computational Materials Science,
        2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
    """

    CONFIG = _load_yaml_config("MITRelaxSet")

    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure to generate inputs for.
        :param kwargs: Passed through unchanged to DictSet.
        """
        # Retain kwargs so as_dict()/from_dict() round-trips correctly.
        self.kwargs = kwargs
        super().__init__(structure, MITRelaxSet.CONFIG, **kwargs)
class MPRelaxSet(DictSet):
    """
    Implementation of VaspInputSet utilizing parameters in the public
    Materials Project. Typically, the pseudopotentials chosen contain more
    electrons than the MIT parameters, and the k-point grid is ~50% more dense.
    The LDAUU parameters are also different due to the different psps used,
    which result in different fitted values.
    """

    CONFIG = _load_yaml_config("MPRelaxSet")

    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure to generate inputs for.
        :param kwargs: Passed through unchanged to DictSet.
        """
        # Retain kwargs so as_dict()/from_dict() round-trips correctly.
        self.kwargs = kwargs
        super().__init__(structure, MPRelaxSet.CONFIG, **kwargs)
class MPScanRelaxSet(DictSet):
    """
    Class for writing a relax input set using the Strongly Constrained and
    Appropriately Normed (SCAN) semilocal density functional.

    Notes:
        1. This functional is only available from VASP.5.4.3 upwards.
        2. Meta-GGA calculations require POTCAR files that include
           information on the kinetic energy density of the core-electrons,
           i.e. "PBE_52" or "PBE_54". Make sure the POTCARs include the
           following lines (see VASP wiki for more details):

           $ grep kinetic POTCAR
           kinetic energy-density
           mkinetic energy-density pseudized
           kinetic energy density (partial)
    """

    CONFIG = _load_yaml_config("MPSCANRelaxSet")

    def __init__(self, structure, bandgap=0, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            bandgap (int): Bandgap of the structure in eV. The bandgap is used to
                compute the appropriate k-point density and determine the
                smearing settings.

                Metallic systems (default, bandgap = 0) use a KSPACING value of 0.22
                and Methfessel-Paxton order 2 smearing (ISMEAR=2, SIGMA=0.2).

                Non-metallic systems (bandgap > 0) use the tetrahedron smearing
                method (ISMEAR=-5, SIGMA=0.05). The KSPACING value is
                calculated from the bandgap via Eqs. 25 and 29 of Wisesa, McGill,
                and Mueller [1] (see References). Note that if 'user_incar_settings'
                or 'user_kpoints_settings' override KSPACING, the calculation from
                bandgap is not performed.
            vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combing the SCAN functional
                with the rVV10 non-local correlation functional. rvv10 is the only
                dispersion correction available for SCAN at this time.
            **kwargs: Same as those supported by DictSet.

        References:
            [1] P. Wisesa, K.A. McGill, T. Mueller, Efficient generation of
            generalized Monkhorst-Pack grids through the use of informatics,
            Phys. Rev. B. 93 (2016) 1-10. doi:10.1103/PhysRevB.93.155109.
        """
        super().__init__(structure, MPScanRelaxSet.CONFIG, **kwargs)
        self.bandgap = bandgap
        self.kwargs = kwargs

        # Meta-GGA POTCARs must carry kinetic energy density information,
        # which only the PBE_52/PBE_54 releases do.
        if self.potcar_functional not in ["PBE_52", "PBE_54"]:
            raise ValueError("SCAN calculations require PBE_52 or PBE_54!")

        updates = {}
        # Select the KSPACING and smearing parameters based on the bandgap.
        if self.bandgap == 0:
            # Metallic system: fixed KSPACING, Methfessel-Paxton order 2.
            updates["KSPACING"] = 0.22
            updates["SIGMA"] = 0.2
            updates["ISMEAR"] = 2
        else:
            rmin = 25.22 - 1.87 * bandgap  # Eq. 25
            kspacing = 2 * np.pi * 1.0265 / (rmin - 1.0183)  # Eq. 29
            # Cap the KSPACING at a max of 0.44, per internal benchmarking.
            updates["KSPACING"] = min(kspacing, 0.44)
            updates["ISMEAR"] = -5
            updates["SIGMA"] = 0.05

        # Don't overwrite things the user has supplied explicitly.
        user_incar = kwargs.get("user_incar_settings", {})
        for key in ("KSPACING", "ISMEAR", "SIGMA"):
            if user_incar.get(key):
                del updates[key]

        if self.vdw:
            if self.vdw != "rvv10":
                warnings.warn(
                    "Use of van der waals functionals other than rVV10 "
                    "with SCAN is not supported at this time. "
                )
                # Delete any vdw parameters that may have been added to the INCAR.
                vdw_par = loadfn(str(MODULE_DIR / "vdW_parameters.yaml"))
                for k in vdw_par[self.vdw]:
                    try:
                        del self._config_dict["INCAR"][k]
                    except KeyError:
                        pass

        self._config_dict["INCAR"].update(updates)
class MPMetalRelaxSet(MPRelaxSet):
    """
    Implementation of VaspInputSet utilizing parameters in the public
    Materials Project, but with tuning for metals. Key differences from
    MPRelaxSet are a denser k-point grid (reciprocal_density=200) and
    Methfessel-Paxton smearing (ISMEAR=1, SIGMA=0.2), which is better
    suited to systems without a band gap.
    """

    CONFIG = _load_yaml_config("MPRelaxSet")

    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, **kwargs)
        # Override the base MPRelaxSet config with metal-appropriate
        # smearing and a denser k-mesh.
        self._config_dict["INCAR"].update({"ISMEAR": 1, "SIGMA": 0.2})
        self._config_dict["KPOINTS"].update({"reciprocal_density": 200})
        self.kwargs = kwargs
class MPHSERelaxSet(DictSet):
    """
    Relaxation input set equivalent to MPRelaxSet, except that the
    parameters are tuned for the HSE screened hybrid functional.
    """

    # Parameters come from the packaged MPHSERelaxSet.yaml config.
    CONFIG = _load_yaml_config("MPHSERelaxSet")

    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            **kwargs: Any keyword argument supported by DictSet.
        """
        super().__init__(structure, MPHSERelaxSet.CONFIG, **kwargs)
        # Keep the raw user options for later introspection/serialization.
        self.kwargs = kwargs
class MPStaticSet(MPRelaxSet):
    """
    Creates input files for a static calculation, optionally carrying over
    the INCAR and KPOINTS of a previous (e.g. relaxation) run.
    """

    def __init__(
        self,
        structure,
        prev_incar=None,
        prev_kpoints=None,
        lepsilon=False,
        lcalcpol=False,
        reciprocal_density=100,
        small_gap_multiply=None,
        **kwargs
    ):
        """
        Args:
            structure (Structure): Structure from previous run.
            prev_incar (Incar): Incar file from previous run.
            prev_kpoints (Kpoints): Kpoints from previous run.
            lepsilon (bool): Whether to add static dielectric calculation
            lcalcpol (bool): Whether to turn on the LCALCPOL tag (Berry-phase
                polarization calculation).
            reciprocal_density (int): For static calculations, we usually set the
                reciprocal density by volume. This is a convenience arg to change
                that, rather than using user_kpoints_settings. Defaults to 100,
                which is ~50% more than that of standard relaxation calculations.
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            **kwargs: kwargs supported by MPRelaxSet.
        """
        super().__init__(structure, **kwargs)
        # File paths are accepted as well as Incar/Kpoints objects.
        if isinstance(prev_incar, str):
            prev_incar = Incar.from_file(prev_incar)
        if isinstance(prev_kpoints, str):
            prev_kpoints = Kpoints.from_file(prev_kpoints)

        self.prev_incar = prev_incar
        self.prev_kpoints = prev_kpoints
        self.reciprocal_density = reciprocal_density
        self.kwargs = kwargs
        self.lepsilon = lepsilon
        self.lcalcpol = lcalcpol
        self.small_gap_multiply = small_gap_multiply

    @property
    def incar(self):
        """
        :return: Incar
        """
        parent_incar = super().incar
        # Start from the previous run's INCAR when available; otherwise
        # from this set's own (parent) INCAR.
        incar = (
            Incar(self.prev_incar)
            if self.prev_incar is not None
            else Incar(parent_incar)
        )

        # Force static-run settings: no ionic steps, tetrahedron smearing,
        # charge/potential output on, wavefunction output off.
        incar.update(
            {
                "IBRION": -1,
                "ISMEAR": -5,
                "LAECHG": True,
                "LCHARG": True,
                "LORBIT": 11,
                "LVHAR": True,
                "LWAVE": False,
                "NSW": 0,
                "ICHARG": 0,
                "ALGO": "Normal",
            }
        )

        if self.lepsilon:
            # Static dielectric calculation via DFPT.
            incar["IBRION"] = 8
            incar["LEPSILON"] = True

            # LPEAD=T: numerical evaluation of overlap integral prevents
            # LRF_COMMUTATOR errors and can lead to better expt. agreement
            # but produces slightly different results
            incar["LPEAD"] = True

            # Note that DFPT calculations MUST unset NSW. NSW = 0 will fail
            # to output ionic.
            incar.pop("NSW", None)
            incar.pop("NPAR", None)

        if self.lcalcpol:
            incar["LCALCPOL"] = True

        for k in ["MAGMOM", "NUPDOWN"] + list(
            self.kwargs.get("user_incar_settings", {}).keys()
        ):
            # For these parameters as well as user specified settings, override
            # the incar settings.
            if parent_incar.get(k, None) is not None:
                incar[k] = parent_incar[k]
            else:
                incar.pop(k, None)

        # use new LDAUU when possible b/c the Poscar might have changed
        # representation
        if incar.get("LDAU"):
            u = incar.get("LDAUU", [])
            j = incar.get("LDAUJ", [])
            # NOTE(review): `y` is unused; this is just summing U - J over
            # all entries of the LDAUU list.
            if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:
                for tag in ("LDAUU", "LDAUL", "LDAUJ"):
                    incar.update({tag: parent_incar[tag]})
            # ensure to have LMAXMIX for GGA+U static run
            if "LMAXMIX" not in incar:
                incar.update({"LMAXMIX": parent_incar["LMAXMIX"]})

        # Compare ediff between previous and staticinputset values,
        # choose the tighter ediff
        incar["EDIFF"] = min(incar.get("EDIFF", 1), parent_incar["EDIFF"])
        return incar

    @property
    def kpoints(self) -> Optional[Kpoints]:
        """
        :return: Kpoints
        """
        self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
        kpoints = super().kpoints

        # Prefer to use k-point scheme from previous run
        # except for when lepsilon = True is specified
        if kpoints is not None:
            if self.prev_kpoints and self.prev_kpoints.style != kpoints.style:
                if (self.prev_kpoints.style == Kpoints.supported_modes.Monkhorst) and (
                    not self.lepsilon
                ):
                    # Keep Monkhorst grids even in each direction: odd
                    # divisions are bumped up by one.
                    k_div = [kp + 1 if kp % 2 == 1 else kp for kp in kpoints.kpts[0]]
                    kpoints = Kpoints.monkhorst_automatic(k_div)
                else:
                    kpoints = Kpoints.gamma_automatic(kpoints.kpts[0])
        return kpoints

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar
        self.prev_kpoints = vasprun.kpoints

        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized "
                "structure."
            )

        self._structure = get_structure_from_prev_run(vasprun, outcar)

        # multiply the reciprocal density if needed
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = (
                    self.reciprocal_density * self.small_gap_multiply[1]
                )
        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for static calculations from a
        directory of previous Vasp run.

        Args:
            prev_calc_dir (str): Directory containing the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPStaticSet, other than prev_incar
                and prev_structure and prev_kpoints which are determined from
                the prev_calc_dir.
        """
        # The dummy structure is immediately replaced by the real one in
        # override_from_prev_calc.
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPHSEBSSet(MPHSERelaxSet):
    """
    Implementation of a VaspInputSet for HSE band structure computations.

    Remember that HSE band structures must be self-consistent in VASP. A
    band structure along symmetry lines for instance needs BOTH a uniform
    grid with appropriate weights AND a path along the lines with weight 0.

    Thus, the "Uniform" mode is just like regular static SCF but allows
    adding custom kpoints (e.g., corresponding to known VBM/CBM) to the
    uniform grid that have zero weight (e.g., for better gap estimate).

    The "Gap" mode behaves just like the "Uniform" mode, however, if starting
    from a previous calculation, the VBM and CBM k-points will automatically
    be added to ``added_kpoints``.

    The "Line" mode is just like Uniform mode, but additionally adds
    k-points along symmetry lines with zero weight.
    """

    def __init__(
        self,
        structure,
        user_incar_settings=None,
        added_kpoints=None,
        mode="Gap",
        reciprocal_density=None,
        copy_chgcar=True,
        kpoints_line_density=20,
        **kwargs
    ):
        """
        Args:
            structure (Structure): Structure to compute
            user_incar_settings (dict): A dict specifying additional incar
                settings
            added_kpoints (list): a list of kpoints (list of 3 number list)
                added to the run. The k-points are in fractional coordinates
            mode (str): "Line" - generate k-points along symmetry lines for
                bandstructure. "Uniform" - generate uniform k-points grid.
            reciprocal_density (int): k-point density to use for uniform mesh.
                Defaults to 50 when neither this argument nor
                user_kpoints_settings provides a value.
            copy_chgcar (bool): Whether to copy the CHGCAR of a previous run.
            kpoints_line_density (int): k-point density for high symmetry lines
            **kwargs (dict): Any other parameters to pass into DictSet.
        """
        super().__init__(structure, **kwargs)
        self.user_incar_settings = user_incar_settings or {}
        # Static-run overrides appropriate for a self-consistent HSE run.
        self._config_dict["INCAR"].update(
            {
                "NSW": 0,
                "ISMEAR": 0,
                "SIGMA": 0.05,
                "ISYM": 3,
                "LCHARG": False,
                "NELMIN": 5,
            }
        )
        self.added_kpoints = added_kpoints if added_kpoints is not None else []
        self.mode = mode

        # BUG FIX: previously an explicit `reciprocal_density` argument was
        # ignored (reset to 50) whenever user_kpoints_settings did not also
        # contain the key. Precedence is now: explicit argument >
        # user_kpoints_settings > default of 50.
        if reciprocal_density:
            self.reciprocal_density = reciprocal_density
        elif "reciprocal_density" in self.user_kpoints_settings:
            self.reciprocal_density = self.user_kpoints_settings["reciprocal_density"]
        else:
            self.reciprocal_density = 50

        self.kpoints_line_density = kpoints_line_density
        self.copy_chgcar = copy_chgcar

    @property
    def kpoints(self) -> Kpoints:
        """
        :return: Kpoints
        """
        kpts = []  # type: List[Union[int, float, None]]
        weights = []  # type: List[Union[float, None]]
        all_labels = []  # type: List[Union[str, None]]
        structure = self.structure

        # for both modes, include the Uniform mesh w/standard weights
        grid = Kpoints.automatic_density_by_vol(structure, self.reciprocal_density).kpts
        ir_kpts = SpacegroupAnalyzer(structure, symprec=0.1).get_ir_reciprocal_mesh(
            grid[0]
        )
        for k in ir_kpts:
            kpts.append(k[0])
            weights.append(int(k[1]))
            all_labels.append(None)

        # for both modes, include any user-added kpoints w/zero weight
        for k in self.added_kpoints:
            kpts.append(k)
            weights.append(0.0)
            all_labels.append("user-defined")

        # for line mode only, add the symmetry lines w/zero weight
        if self.mode.lower() == "line":
            kpath = HighSymmKpath(structure)
            frac_k_points, labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density, coords_are_cartesian=False
            )
            for frac_kpt, label in zip(frac_k_points, labels):
                kpts.append(frac_kpt)
                weights.append(0.0)
                all_labels.append(label)

        comment = (
            "HSE run along symmetry lines"
            if self.mode.lower() == "line"
            else "HSE run on uniform grid"
        )

        return Kpoints(
            comment=comment,
            style=Kpoints.supported_modes.Reciprocal,
            num_kpts=len(kpts),
            kpts=kpts,
            kpts_weights=weights,
            labels=all_labels,
        )

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self._structure = get_structure_from_prev_run(vasprun, outcar)

        # note: recommend not standardizing the cell because we want to retain
        # k-points
        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_calc is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized "
                "structure."
            )

        # In "Gap" mode, add the VBM and CBM k-points of the previous run
        # with zero weight for a better gap estimate.
        if self.mode.lower() == "gap":
            added_kpoints = []
            bs = vasprun.get_band_structure()
            vbm, cbm = bs.get_vbm()["kpoint"], bs.get_cbm()["kpoint"]
            if vbm:
                added_kpoints.append(vbm.frac_coords)
            if cbm:
                added_kpoints.append(cbm.frac_coords)
            self.added_kpoints.extend(added_kpoints)

        files_to_transfer = {}
        if self.copy_chgcar:
            # Pick the latest CHGCAR (sorted so e.g. CHGCAR.relax2 wins).
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        self.files_to_transfer.update(files_to_transfer)

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for HSE calculations from a
        directory of previous Vasp run.

        Args:
            prev_calc_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPHSEBSStaticSet, other than
                prev_structure which is determined from the previous calc dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNonSCFSet(MPRelaxSet):
    """
    Init a MPNonSCFSet. Typically, you would use the classmethod
    from_prev_calc to initialize from a previous SCF run.
    """

    def __init__(
        self,
        structure,
        prev_incar=None,
        mode="line",
        nedos=2001,
        dedos=0.005,
        reciprocal_density=100,
        sym_prec=0.1,
        kpoints_line_density=20,
        optics=False,
        copy_chgcar=True,
        nbands_factor=1.2,
        small_gap_multiply=None,
        **kwargs
    ):
        """
        Args:
            structure (Structure): Structure to compute
            prev_incar (Incar/string): Incar file from previous run.
            mode (str): Line, Uniform or Boltztrap mode supported.
            nedos (int): nedos parameter. Default to 2001.
            dedos (float): setting nedos=0 and uniform mode in from_prev_calc,
                an automatic nedos will be calculated using the total energy range
                divided by the energy step dedos
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            sym_prec (float): Symmetry precision (for Uniform mode).
            kpoints_line_density (int): Line density for Line mode.
            optics (bool): whether to add dielectric function
            copy_chgcar: Whether to copy the old CHGCAR when starting from a
                previous calculation.
            nbands_factor (float): Multiplicative factor for NBANDS when starting
                from a previous calculation. Choose a higher number if you are
                doing an LOPTICS calculation.
            small_gap_multiply ([float, float]): When starting from a previous
                calculation, if the gap is less than 1st index, multiply the default
                reciprocal_density by the 2nd index.
            **kwargs: kwargs supported by MPRelaxSet.
        """
        super().__init__(structure, **kwargs)
        # Accept a file path for the previous INCAR as well as an object.
        if isinstance(prev_incar, str):
            prev_incar = Incar.from_file(prev_incar)
        self.prev_incar = prev_incar
        self.kwargs = kwargs
        self.nedos = nedos
        self.dedos = dedos
        self.reciprocal_density = reciprocal_density
        self.sym_prec = sym_prec
        self.kpoints_line_density = kpoints_line_density
        self.optics = optics
        self.mode = mode.lower()
        self.copy_chgcar = copy_chgcar
        self.nbands_factor = nbands_factor
        self.small_gap_multiply = small_gap_multiply

        if self.mode.lower() not in ["line", "uniform", "boltztrap"]:
            # NOTE(review): the closing quote in this message is misplaced;
            # it renders as "'Boltztrap!" instead of "'Boltztrap'!".
            raise ValueError(
                "Supported modes for NonSCF runs are 'Line', "
                "'Uniform' and 'Boltztrap!"
            )

        if (self.mode.lower() != "uniform" or nedos < 2000) and optics:
            warnings.warn(
                "It is recommended to use Uniform mode with a high "
                "NEDOS for optics calculations."
            )

    @property
    def incar(self) -> Incar:
        """
        :return: Incar
        """
        incar = super().incar
        # Layer the previous run's INCAR on top of this set's defaults.
        if self.prev_incar is not None:
            incar.update({k: v for k, v in self.prev_incar.items()})

        # Overwrite necessary INCAR parameters from previous runs
        # (non-SCF: frozen ions, fixed charge density read from CHGCAR).
        incar.update(
            {
                "IBRION": -1,
                "LCHARG": False,
                "LORBIT": 11,
                "LWAVE": False,
                "NSW": 0,
                "ISYM": 0,
                "ICHARG": 11,
            }
        )

        if self.mode.lower() == "uniform":
            # use tetrahedron method for DOS and optics calculations
            incar.update({"ISMEAR": -5, "ISYM": 2})
        else:
            # if line mode, can't use ISMEAR=-5; also use small sigma to avoid
            # partial occupancies for small band gap materials.
            # finally, explicit k-point generation (needed for bolztrap mode)
            # is incompatible with ISMEAR = -5.
            incar.update({"ISMEAR": 0, "SIGMA": 0.01})

        # User settings take precedence over everything above.
        incar.update(self.kwargs.get("user_incar_settings", {}))

        # NOTE(review): substring test — only correct because self.mode is
        # already validated to be one of line/uniform/boltztrap; `==` is the
        # intended comparison.
        if self.mode.lower() in "uniform":
            # Set smaller steps for DOS and optics output
            incar["NEDOS"] = self.nedos

        if self.optics:
            incar["LOPTICS"] = True

        incar.pop("MAGMOM", None)

        return incar

    @property
    def kpoints(self) -> Optional[Kpoints]:
        """
        :return: Kpoints
        """
        # override pymatgen kpoints if provided
        user_kpoints = self.kwargs.get("user_kpoints_settings", None)
        if isinstance(user_kpoints, Kpoints):
            return user_kpoints

        if self.mode.lower() == "line":
            # Explicit zero-setup path along the high-symmetry lines.
            kpath = HighSymmKpath(self.structure)
            frac_k_points, k_points_labels = kpath.get_kpoints(
                line_density=self.kpoints_line_density, coords_are_cartesian=False
            )
            kpoints = Kpoints(
                comment="Non SCF run along symmetry lines",
                style=Kpoints.supported_modes.Reciprocal,
                num_kpts=len(frac_k_points),
                kpts=frac_k_points,
                labels=k_points_labels,
                kpts_weights=[1] * len(frac_k_points),
            )
        elif self.mode.lower() == "boltztrap":
            # Boltztrap needs the irreducible mesh written out explicitly.
            kpoints = Kpoints.automatic_density_by_vol(
                self.structure, self.reciprocal_density
            )
            mesh = kpoints.kpts[0]
            ir_kpts = SpacegroupAnalyzer(
                self.structure, symprec=self.sym_prec
            ).get_ir_reciprocal_mesh(mesh)
            kpts = []
            weights = []
            for k in ir_kpts:
                kpts.append(k[0])
                weights.append(int(k[1]))
            kpoints = Kpoints(
                comment="Non SCF run on uniform grid",
                style=Kpoints.supported_modes.Reciprocal,
                num_kpts=len(ir_kpts),
                kpts=kpts,
                kpts_weights=weights,
            )
        else:
            # Uniform mode: delegate to the parent's automatic mesh with the
            # requested density.
            self._config_dict["KPOINTS"]["reciprocal_density"] = self.reciprocal_density
            return super().kpoints

        return kpoints

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar

        # Get a Magmom-decorated structure
        self._structure = get_structure_from_prev_run(vasprun, outcar)

        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized"
                " structure. copy_chgcar is enforced to be false."
            )
            self.copy_chgcar = False

        # Turn off spin when magmom for every site is smaller than 0.02.
        if outcar and outcar.magnetization:
            site_magmom = np.array([i["tot"] for i in outcar.magnetization])
            ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
        elif vasprun.is_spin:
            ispin = 2
        else:
            ispin = 1

        # Pad NBANDS relative to the previous run (important for LOPTICS).
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
        self.prev_incar.update({"ISPIN": ispin, "NBANDS": nbands})

        files_to_transfer = {}

        if self.copy_chgcar:
            # Pick the latest CHGCAR (sorted so e.g. CHGCAR.relax2 wins).
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])

        self.files_to_transfer.update(files_to_transfer)

        # multiply the reciprocal density if needed:
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = (
                    self.reciprocal_density * self.small_gap_multiply[1]
                )
                self.kpoints_line_density = (
                    self.kpoints_line_density * self.small_gap_multiply[1]
                )

        # automatic setting of nedos using the total energy range and the energy step dedos
        if self.nedos == 0:
            emax = max([eigs.max() for eigs in vasprun.eigenvalues.values()])
            emin = min([eigs.min() for eigs in vasprun.eigenvalues.values()])
            self.nedos = int((emax - emin) / self.dedos)

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for NonSCF calculations from a
        directory of previous static Vasp run.

        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPNonSCFSet, other than structure,
                prev_incar and prev_chgcar which are determined from the
                prev_calc_dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPSOCSet(MPStaticSet):
    """
    An input set for running spin-orbit coupling (SOC) calculations.
    """

    def __init__(
        self,
        structure,
        saxis=(0, 0, 1),
        copy_chgcar=True,
        nbands_factor=1.2,
        reciprocal_density=100,
        small_gap_multiply=None,
        magmom=None,
        **kwargs
    ):
        """
        Args:
            structure (Structure): the structure must have the 'magmom' site
                property and each magnetic moment value must have 3
                components. eg: ``magmom = [[0,0,2], ...]``
            saxis (tuple): magnetic moment orientation
            copy_chgcar: Whether to copy the old CHGCAR. Defaults to True.
            nbands_factor (float): Multiplicative factor for NBANDS. Choose a
                higher number if you are doing an LOPTICS calculation.
            reciprocal_density (int): density of k-mesh by reciprocal volume.
            small_gap_multiply ([float, float]): If the gap is less than
                1st index, multiply the default reciprocal_density by the 2nd
                index.
            magmom (list[list[float]]): Override for the structure magmoms.
            **kwargs: kwargs supported by MPStaticSet.
        """
        # BUG FIX: the original guard combined the two checks with `and`,
        # which (a) raised AttributeError instead of the intended ValueError
        # when the first site had no magmom at all, and (b) silently accepted
        # scalar (collinear) magmoms. SOC requires 3-component moments, so
        # either condition must reject the structure.
        if not hasattr(structure[0], "magmom") or not isinstance(
            structure[0].magmom, list
        ):
            raise ValueError(
                "The structure must have the 'magmom' site "
                "property and each magnetic moment value must have 3 "
                "components. eg:- magmom = [0,0,2]"
            )

        super().__init__(structure, reciprocal_density=reciprocal_density, **kwargs)
        self.saxis = saxis
        self.copy_chgcar = copy_chgcar
        self.nbands_factor = nbands_factor
        self.small_gap_multiply = small_gap_multiply
        self.magmom = magmom

    @property
    def incar(self) -> Incar:
        """
        :return: Incar
        """
        incar = super().incar
        # Layer the previous run's INCAR on top of the static defaults.
        if self.prev_incar is not None:
            incar.update({k: v for k, v in self.prev_incar.items()})

        # Overwrite necessary INCAR parameters from previous runs:
        # noncollinear run reading a fixed charge density (ICHARG=11).
        incar.update(
            {"ISYM": -1, "LSORBIT": "T", "ICHARG": 11, "SAXIS": list(self.saxis)}
        )
        # User settings take precedence.
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar

    def override_from_prev_calc(self, prev_calc_dir="."):
        """
        Update the input set to include settings from a previous calculation.

        Args:
            prev_calc_dir (str): The path to the previous calculation directory.

        Returns:
            The input set with the settings (structure, k-points, incar, etc)
            updated using the previous VASP run.
        """
        vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
        self.prev_incar = vasprun.incar

        # Remove magmoms from previous INCAR, since we will prefer
        # the final calculated magmoms
        # TODO: revisit in context of MPStaticSet incar logic
        # BUG FIX: the original deleted the lowercase key "magmom" after
        # checking for "MAGMOM", which raised KeyError whenever the tag
        # was actually present.
        if "MAGMOM" in self.prev_incar:
            del self.prev_incar["MAGMOM"]

        # Get a magmom-decorated structure
        self._structure = get_structure_from_prev_run(vasprun, outcar)

        if self.standardize:
            warnings.warn(
                "Use of standardize=True with from_prev_run is not "
                "recommended as there is no guarantee the copied "
                "files will be appropriate for the standardized"
                " structure. copy_chgcar is enforced to be false."
            )
            self.copy_chgcar = False

        # override magmom if provided
        if self.magmom:
            self._structure = self._structure.copy(
                site_properties={"magmom": self.magmom}
            )

        # magmom has to be 3D for SOC calculation.
        if hasattr(self._structure[0], "magmom"):
            if not isinstance(self._structure[0].magmom, list):
                # Promote collinear moments to 3-component vectors along z.
                self._structure = self._structure.copy(
                    site_properties={
                        "magmom": [[0, 0, site.magmom] for site in self._structure]
                    }
                )
        else:
            raise ValueError(
                "Neither the previous structure has magmom "
                "property nor magmom provided"
            )

        # Pad NBANDS relative to the previous run.
        nbands = int(np.ceil(vasprun.parameters["NBANDS"] * self.nbands_factor))
        self.prev_incar.update({"NBANDS": nbands})

        files_to_transfer = {}
        if self.copy_chgcar:
            # Pick the latest CHGCAR (sorted so e.g. CHGCAR.relax2 wins).
            chgcars = sorted(glob.glob(str(Path(prev_calc_dir) / "CHGCAR*")))
            if chgcars:
                files_to_transfer["CHGCAR"] = str(chgcars[-1])
        self.files_to_transfer.update(files_to_transfer)

        # multiply the reciprocal density if needed:
        if self.small_gap_multiply:
            gap = vasprun.eigenvalue_band_properties[0]
            if gap <= self.small_gap_multiply[0]:
                self.reciprocal_density = (
                    self.reciprocal_density * self.small_gap_multiply[1]
                )

        return self

    @classmethod
    def from_prev_calc(cls, prev_calc_dir, **kwargs):
        """
        Generate a set of Vasp input files for SOC calculations from a
        directory of previous static Vasp run. SOC calc requires all 3
        components for MAGMOM for each atom in the structure.

        Args:
            prev_calc_dir (str): The directory contains the outputs(
                vasprun.xml and OUTCAR) of previous vasp run.
            **kwargs: All kwargs supported by MPSOCSet, other than structure,
                prev_incar and prev_chgcar which are determined from the
                prev_calc_dir.
        """
        input_set = cls(_dummy_structure, **kwargs)
        return input_set.override_from_prev_calc(prev_calc_dir=prev_calc_dir)
class MPNMRSet(MPStaticSet):
    """
    Static input set for NMR calculations: chemical shift ("cs") or
    electric field gradient ("efg").
    """

    def __init__(
        self,
        structure,
        mode="cs",
        isotopes=None,
        prev_incar=None,
        reciprocal_density=100,
        **kwargs
    ):
        """
        Args:
            structure (Structure): Structure to compute
            mode (str): The NMR calculation to run
                "cs": for Chemical Shift
                "efg" for Electric Field Gradient
            isotopes (list): list of Isotopes for quadrupole moments
            prev_incar (Incar): Incar file from previous run.
            reciprocal_density (int): density of k-mesh by reciprocal
                volume (defaults to 100)
            **kwargs: kwargs supported by MPStaticSet.
        """
        self.mode = mode
        self.isotopes = isotopes or []
        super().__init__(
            structure,
            prev_incar=prev_incar,
            reciprocal_density=reciprocal_density,
            **kwargs
        )

    @property
    def incar(self):
        """
        :return: Incar
        """
        incar = super().incar
        mode = self.mode.lower()

        if mode == "cs":
            # Linear-response chemical shift settings.
            incar.update(
                {
                    "LCHIMAG": True,
                    "EDIFF": -1.0e-10,
                    "ISYM": 0,
                    "LCHARG": False,
                    "LNMR_SYM_RED": True,
                    "NELMIN": 10,
                    "NSLPLINE": True,
                    "PREC": "ACCURATE",
                    "SIGMA": 0.01,
                }
            )
        elif mode == "efg":
            # Map element symbol -> requested isotope (e.g. "Li" -> "Li-7"),
            # then look up the quadrupole moment for each POSCAR species.
            isotope_by_symbol = {iso.split("-")[0]: iso for iso in self.isotopes}
            quad_efg = [
                Specie(symbol).get_nmr_quadrupole_moment(
                    isotope_by_symbol.get(symbol, None)
                )
                for symbol in self.poscar.site_symbols
            ]
            incar.update(
                {
                    "ALGO": "FAST",
                    "EDIFF": -1.0e-10,
                    "ISYM": 0,
                    "LCHARG": False,
                    "LEFG": True,
                    "QUAD_EFG": quad_efg,
                    "NELMIN": 10,
                    "PREC": "ACCURATE",
                    "SIGMA": 0.01,
                }
            )

        # User settings take precedence.
        incar.update(self.kwargs.get("user_incar_settings", {}))
        return incar
class MVLElasticSet(MPRelaxSet):
    """
    MVL denotes VASP input sets that are implemented by the Materials Virtual
    Lab (http://www.materialsvirtuallab.org) for various research.

    This input set is used to calculate elastic constants in VASP. It is used
    in the following work::

        Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
        "Elastic Properties of Alkali Superionic Conductor Electrolytes
        from First Principles Calculations", J. Electrochem. Soc.
        2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes

    To read the elastic constants, you may use the Outcar class which parses the
    elastic constants.
    """

    def __init__(self, structure, potim=0.015, **kwargs):
        """
        Args:
            structure (Structure): Input structure.
            potim (float): POTIM parameter. The default of 0.015 is usually fine,
                but some structures may require a smaller step.
            kwargs:
                Parameters supported by MPRelaxSet.
        """
        super().__init__(structure, **kwargs)
        # IBRION=6 with NFREE=2 enables the finite-difference elastic
        # constant calculation; POTIM sets the displacement step.
        self._config_dict["INCAR"].update({"IBRION": 6, "NFREE": 2, "POTIM": potim})
        self._config_dict["INCAR"].pop("NPAR", None)
class MVLGWSet(DictSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research. This is a
flexible input set for GW calculations.
Note that unlike all other input sets in this module, the PBE_54 series of
functional is set as the default. These have much improved performance for
GW calculations.
A typical sequence is mode="STATIC" -> mode="DIAG" -> mode="GW" ->
mode="BSE". For all steps other than the first one (static), the
recommendation is to use from_prev_calculation on the preceding run in
the series.
"""
CONFIG = _load_yaml_config("MVLGWSet")
SUPPORTED_MODES = ("DIAG", "GW", "STATIC", "BSE")
def __init__(
self,
structure,
prev_incar=None,
nbands=None,
reciprocal_density=100,
mode="STATIC",
copy_wavecar=True,
nbands_factor=5,
ncores=16,
**kwargs
):
r"""
Args:
structure (Structure): Input structure.
prev_incar (Incar/string): Incar file from previous run.
mode (str): Supported modes are "STATIC" (default), "DIAG", "GW",
and "BSE".
nbands (int): For subsequent calculations, it is generally
recommended to perform NBANDS convergence starting from the
NBANDS of the previous run for DIAG, and to use the exact same
NBANDS for GW and BSE. This parameter is used by
from_previous_calculation to set nband.
copy_wavecar: Whether to copy the old WAVECAR, WAVEDER and associated
files when starting from a previous calculation.
nbands_factor (int): Multiplicative factor for NBANDS when starting
from a previous calculation. Only applies if mode=="DIAG".
Need to be tested for convergence.
ncores (int): Numbers of cores used for the calculation. VASP will alter
NBANDS if it was not dividable by ncores. Only applies if
mode=="DIAG".
**kwargs: All kwargs supported by DictSet. Typically,
user_incar_settings is a commonly used option.
"""
super().__init__(structure, MVLGWSet.CONFIG, **kwargs)
self.prev_incar = prev_incar
self.nbands = nbands
self.reciprocal_density = reciprocal_density
self.mode = mode.upper()
if self.mode not in MVLGWSet.SUPPORTED_MODES:
raise ValueError(
"%s not one of the support modes : %s"
% (self.mode, MVLGWSet.SUPPORTED_MODES)
)
self.kwargs = kwargs
self.copy_wavecar = copy_wavecar
self.nbands_factor = nbands_factor
self.ncores = ncores
@property
def kpoints(self):
"""
Generate gamma center k-points mesh grid for GW calc,
which is requested by GW calculation.
"""
return Kpoints.automatic_density_by_vol(
self.structure, self.reciprocal_density, force_gamma=True
)
@property
def incar(self):
"""
:return: Incar
"""
parent_incar = super().incar
incar = (
Incar(self.prev_incar)
if self.prev_incar is not None
else Incar(parent_incar)
)
if self.mode == "DIAG":
# Default parameters for diagonalization calculation.
incar.update({"ALGO": "Exact", "NELM": 1, "LOPTICS": True, "LPEAD": True})
elif self.mode == "GW":
# Default parameters for GW calculation.
incar.update({"ALGO": "GW0", "NELM": 1, "NOMEGA": 80, "ENCUTGW": 250})
incar.pop("EDIFF", None)
incar.pop("LOPTICS", None)
incar.pop("LPEAD", None)
elif self.mode == "BSE":
# Default parameters for BSE calculation.
incar.update({"ALGO": "BSE", "ANTIRES": 0, "NBANDSO": 20, "NBANDSV": 20})
if self.nbands:
incar["NBANDS"] = self.nbands
# Respect user set INCAR.
incar.update(self.kwargs.get("user_incar_settings", {}))
return incar
def override_from_prev_calc(self, prev_calc_dir="."):
"""
Update the input set to include settings from a previous calculation.
Args:
prev_calc_dir (str): The path to the previous calculation directory.
Returns:
The input set with the settings (structure, k-points, incar, etc)
updated using the previous VASP run.
"""
vasprun, outcar = get_vasprun_outcar(prev_calc_dir)
self.prev_incar = vasprun.incar
self._structure = vasprun.final_structure
if self.standardize:
warnings.warn(
"Use of standardize=True with from_prev_run is not "
"recommended as there is no guarantee the copied "
"files will be appropriate for the standardized "
"structure."
)
self.nbands = int(vasprun.parameters["NBANDS"])
if self.mode.upper() == "DIAG":
self.nbands = int(
np.ceil(self.nbands * self.nbands_factor / self.ncores) * self.ncores
)
# copy WAVECAR, WAVEDER (derivatives)
files_to_transfer = {}
if self.copy_wavecar:
for fname in ("WAVECAR", "WAVEDER", "WFULL"):
w = sorted(glob.glob(str(Path(prev_calc_dir) / (fname + "*"))))
if w:
if fname == "WFULL":
for f in w:
fname = Path(f).name
fname = fname.split(".")[0]
files_to_transfer[fname] = f
else:
files_to_transfer[fname] = str(w[-1])
self.files_to_transfer.update(files_to_transfer)
return self
@classmethod
def from_prev_calc(cls, prev_calc_dir, mode="DIAG", **kwargs):
    """
    Generate a set of Vasp input files for GW or BSE calculations from a
    directory of previous Exact Diag Vasp run.

    Args:
        prev_calc_dir (str): The directory contains the outputs(
            vasprun.xml of previous vasp run.
        mode (str): Supported modes are "STATIC", "DIAG" (default), "GW",
            and "BSE".
        **kwargs: All kwargs supported by MVLGWSet, other than structure,
            prev_incar and mode, which are determined from the
            prev_calc_dir.
    """
    # Build with a placeholder structure; the real structure is read from
    # the previous calculation directory.
    return cls(_dummy_structure, mode=mode, **kwargs).override_from_prev_calc(
        prev_calc_dir=prev_calc_dir
    )
class MVLSlabSet(MPRelaxSet):
    """
    Class for writing a set of slab vasp runs, including both slabs (along
    the c direction) and orient unit cells (bulk), to ensure the same
    KPOINTS, POTCAR and INCAR criterion.
    """

    def __init__(
        self,
        structure,
        k_product=50,
        bulk=False,
        auto_dipole=False,
        set_mix=True,
        sort_structure=True,
        **kwargs
    ):
        """
        :param structure: Structure
        :param k_product: default to 50, kpoint number * length for a & b
            directions, also for c direction in bulk calculations
        :param bulk:
        :param auto_dipole:
        :param set_mix:
        :param sort_structure:
        :param kwargs: Other kwargs supported by :class:`DictSet`.
        """
        super().__init__(structure, **kwargs)

        if sort_structure:
            structure = structure.get_sorted_structure()

        self.k_product = k_product
        self.bulk = bulk
        self.auto_dipole = auto_dipole
        self.kwargs = kwargs
        self.set_mix = set_mix
        self.kpt_calc = None

        # INCAR overrides shared by bulk and slab runs.
        incar_updates = {
            "EDIFF": 1e-4,
            "EDIFFG": -0.02,
            "ENCUT": 400,
            "ISMEAR": 0,
            "SIGMA": 0.05,
            "ISIF": 3,
        }
        if not self.bulk:
            # Slabs relax ions only (ISIF=2) and write the local potential
            # (LVTOT) so vacuum-level data is available.
            incar_updates["ISIF"] = 2
            incar_updates["LVTOT"] = True
            if self.set_mix:
                # Conservative density-mixing parameters for slab convergence.
                incar_updates.update({"AMIN": 0.01, "AMIX": 0.2, "BMIX": 0.001})
            incar_updates["NELMIN"] = 8
            if self.auto_dipole:
                # Place the dipole correction at the slab's center of mass.
                weights = [site.species.weight for site in structure]
                com = np.average(structure.frac_coords, weights=weights, axis=0)
                incar_updates["IDIPOL"] = 3
                incar_updates["LDIPOL"] = True
                incar_updates["DIPOL"] = com
        self._config_dict["INCAR"].update(incar_updates)

    @property
    def kpoints(self):
        """
        k_product, default to 50, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        # To get input sets, the input structure has to have the same number
        # of required parameters as a Structure object (ie. 4). Slab
        # attributes aren't going to affect the VASP inputs anyways so
        # converting the slab into a structure should not matter.
        kpt = super().kpoints
        kpt.comment = "Automatic mesh"
        kpt.style = "Gamma"

        # k_product = kpts[0][0] * a: subdivisions scale inversely with the
        # lattice lengths, rounded to the nearest integer.
        a, b, c = self.structure.lattice.abc
        divisions = [
            int(self.k_product / a + 0.5),
            int(self.k_product / b + 0.5),
            1,
        ]
        self.kpt_calc = divisions

        # Bulk cells are also sampled along c; slabs keep a single kpoint.
        if self.bulk:
            divisions[2] = int(self.k_product / c + 0.5)

        kpt.kpts[0] = divisions
        return kpt

    def as_dict(self, verbosity=2):
        """
        :param verbosity: Verbosity of dict. E.g., whether to include Structure.
        :return: MSONAble dict
        """
        d = MSONable.as_dict(self)
        if verbosity == 1:
            d.pop("structure", None)
        return d
class MVLGBSet(MPRelaxSet):
    """
    Class for writing a vasp input files for grain boundary calculations, slab
    or bulk.
    """

    def __init__(
        self, structure, k_product=40, slab_mode=False, is_metal=True, **kwargs
    ):
        r"""
        Args:
            structure(Structure): provide the structure
            k_product: Kpoint number * length for a & b directions, also for c
                direction in bulk calculations. Default to 40.
            slab_mode (bool): Defaults to False. Use default (False) for a
                bulk supercell. Use True if you are performing calculations on a
                slab-like (i.e., surface) of the GB, for example, when you are
                calculating the work of separation.
            is_metal (bool): Defaults to True. This determines whether an ISMEAR of
                1 is used (for metals) or not (for insulators and semiconductors)
                by default. Note that it does *not* override user_incar_settings,
                which can be set by the user to be anything desired.
            **kwargs:
                Other kwargs supported by :class:`MPRelaxSet`.
        """
        super().__init__(structure, **kwargs)
        self.k_product = k_product
        self.slab_mode = slab_mode
        self.is_metal = is_metal

    @property
    def kpoints(self):
        """
        k_product, default to 40, is kpoint number * length for a & b
        directions, also for c direction in bulk calculations
        Automatic mesh & Gamma is the default setting.
        """
        kpt = super().kpoints
        kpt.comment = "Generated by pymatgen's MVLGBSet"
        kpt.style = "Gamma"

        # k_product = kpts[0][0] * a: one subdivision per k_product/length,
        # rounded to the nearest integer.
        grid = [
            int(self.k_product / length + 0.5)
            for length in self.structure.lattice.abc
        ]
        if self.slab_mode:
            # Slab-like cells use a single kpoint along c.
            grid[2] = 1
        kpt.kpts[0] = grid
        return kpt

    @property
    def incar(self):
        """
        :return: Incar
        """
        incar = super().incar

        # Relaxation defaults for grain-boundary supercells. The default
        # setting targets metallic systems; for insulators/semiconductors
        # ISMEAR needs to be changed (is_metal=False leaves the parent value).
        incar.update(
            {
                "LCHARG": False,
                "NELM": 60,
                "PREC": "Normal",
                "EDIFFG": -0.02,
                "ICHARG": 0,
                "NSW": 200,
                "EDIFF": 0.0001,
            }
        )

        if self.is_metal:
            incar["ISMEAR"] = 1
            incar["LDAU"] = False

        if self.slab_mode:
            # Bulk GB relaxation keeps full optimization (ISIF=3); slab-like
            # relaxation fixes the cell and relaxes ions only (ISIF=2).
            incar["ISIF"] = 2
            incar["NELMIN"] = 8

        # User INCAR settings always take precedence.
        incar.update(self.user_incar_settings)
        return incar
class MVLRelax52Set(DictSet):
    """
    Implementation of VaspInputSet utilizing the public Materials Project
    parameters for INCAR & KPOINTS and VASP's recommended PAW potentials for
    POTCAR.

    Keynotes from VASP manual:
        1. Recommended potentials for calculations using vasp.5.2+
        2. If dimers with short bonds are present in the compound (O2, CO,
           N2, F2, P2, S2, Cl2), it is recommended to use the h potentials.
           Specifically, C_h, O_h, N_h, F_h, P_h, S_h, Cl_h
        3. Released on Oct 28, 2018 by VASP. Please refer to VASP
           Manual 1.2, 1.3 & 10.2.1 for more details.
    """

    CONFIG = _load_yaml_config("MVLRelax52Set")

    def __init__(self, structure, **kwargs):
        """
        Args:
            structure (Structure): input structure.
            potcar_functional (str): choose from "PBE_52" and "PBE_54".
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # Default to PBE_52 potentials unless the caller picked a functional.
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, MVLRelax52Set.CONFIG, **kwargs)
        else:
            super().__init__(
                structure,
                MVLRelax52Set.CONFIG,
                user_potcar_functional="PBE_52",
                **kwargs
            )

        # Only the VASP-recommended PAW sets are valid for this input set.
        if self.potcar_functional not in ("PBE_52", "PBE_54"):
            raise ValueError("Please select from PBE_52 and PBE_54!")

        self.kwargs = kwargs
class MITNEBSet(MITRelaxSet):
    """
    Class for writing NEB inputs. Note that EDIFF is not on a per atom
    basis for this input set.
    """

    def __init__(self, structures, unset_encut=False, **kwargs):
        """
        Args:
            structures: List of Structure objects.
            unset_encut (bool): Whether to unset ENCUT.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        if len(structures) < 3:
            raise ValueError("You need at least 3 structures for an NEB.")
        kwargs["sort_structure"] = False
        super().__init__(structures[0], **kwargs)
        self.structures = self._process_structures(structures)

        # BUGFIX: this attribute was previously hard-coded to False, so it
        # never reflected the constructor argument and misrepresented the
        # input set on introspection/serialization. Store the actual value.
        self.unset_encut = unset_encut
        if unset_encut:
            self._config_dict["INCAR"].pop("ENCUT", None)

        if "EDIFF" not in self._config_dict["INCAR"]:
            # NEB uses a total-energy EDIFF rather than a per-atom one.
            self._config_dict["INCAR"]["EDIFF"] = self._config_dict["INCAR"].pop(
                "EDIFF_PER_ATOM"
            )

        # NEB specific defaults: number of intermediate images, RMM-DIIS
        # ionic relaxation, no symmetry, and no CHGCAR/LDA+U.
        defaults = {
            "IMAGES": len(structures) - 2,
            "IBRION": 1,
            "ISYM": 0,
            "LCHARG": False,
            "LDAU": False,
        }
        self._config_dict["INCAR"].update(defaults)

    @property
    def poscar(self):
        """
        :return: Poscar for structure of first end point.
        """
        return Poscar(self.structures[0])

    @property
    def poscars(self):
        """
        :return: List of Poscars.
        """
        return [Poscar(s) for s in self.structures]

    @staticmethod
    def _process_structures(structures):
        """
        Remove any atom jumps across the cell.

        NOTE(review): translates sites of the input structures (all but the
        first) in place so that successive images stay continuous along the
        path — callers should not rely on the inputs being left untouched.
        """
        input_structures = structures
        structures = [input_structures[0]]
        for s in input_structures[1:]:
            prev = structures[-1]
            for i in range(len(s)):
                # A fractional-coordinate jump larger than half a cell means
                # the atom wrapped across the periodic boundary; unwrap it.
                t = np.round(prev[i].frac_coords - s[i].frac_coords)
                if np.any(np.abs(t) > 0.5):
                    s.translate_sites([i], t, to_unit_cell=False)
            structures.append(s)
        return structures

    def write_input(
        self,
        output_dir,
        make_dir_if_not_present=True,
        write_cif=False,
        write_path_cif=False,
        write_endpoint_inputs=False,
    ):
        """
        NEB inputs has a special directory structure where inputs are in 00,
        01, 02, ....

        Args:
            output_dir (str): Directory to output the VASP input files
            make_dir_if_not_present (bool): Set to True if you want the
                directory (and the whole path) to be created if it is not
                present.
            write_cif (bool): If true, writes a cif along with each POSCAR.
            write_path_cif (bool): If true, writes a cif for each image.
            write_endpoint_inputs (bool): If true, writes input files for
                running endpoint calculations.
        """
        output_dir = Path(output_dir)
        if make_dir_if_not_present and not output_dir.exists():
            output_dir.mkdir(parents=True)
        self.incar.write_file(str(output_dir / "INCAR"))
        self.kpoints.write_file(str(output_dir / "KPOINTS"))
        self.potcar.write_file(str(output_dir / "POTCAR"))

        # One zero-padded subdirectory (00, 01, ...) per image.
        for i, p in enumerate(self.poscars):
            d = output_dir / str(i).zfill(2)
            if not d.exists():
                d.mkdir(parents=True)
            p.write_file(str(d / "POSCAR"))
            if write_cif:
                p.structure.to(filename=str(d / "{}.cif".format(i)))

        if write_endpoint_inputs:
            # Endpoints get plain relaxation inputs with the same user settings.
            end_point_param = MITRelaxSet(
                self.structures[0], user_incar_settings=self.user_incar_settings
            )
            for image in ["00", str(len(self.structures) - 1).zfill(2)]:
                end_point_param.incar.write_file(str(output_dir / image / "INCAR"))
                end_point_param.kpoints.write_file(str(output_dir / image / "KPOINTS"))
                end_point_param.potcar.write_file(str(output_dir / image / "POTCAR"))

        if write_path_cif:
            # Merge all image sites into one structure for visualizing the path.
            sites = set()
            lat = self.structures[0].lattice
            for site in chain(*(s.sites for s in self.structures)):
                sites.add(PeriodicSite(site.species, site.frac_coords, lat))
            nebpath = Structure.from_sites(sorted(sites))
            nebpath.to(filename=str(output_dir / "path.cif"))
class MITMDSet(MITRelaxSet):
    """
    Class for writing a vasp md run. This DOES NOT do multiple stage
    runs.
    """

    def __init__(
        self,
        structure,
        start_temp,
        end_temp,
        nsteps,
        time_step=2,
        spin_polarized=False,
        **kwargs
    ):
        r"""
        Args:
            structure (Structure): Input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        super().__init__(structure, **kwargs)

        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.time_step = time_step
        self.spin_polarized = spin_polarized
        self.kwargs = kwargs

        # Low-precision NVT molecular-dynamics defaults (Nose thermostat via
        # SMASS=0), replacing the relaxation-oriented parent settings.
        md_settings = {
            "TEBEG": start_temp,
            "TEEND": end_temp,
            "NSW": nsteps,
            "EDIFF_PER_ATOM": 0.000001,
            "LSCALU": False,
            "LCHARG": False,
            "LPLANE": False,
            "LWAVE": True,
            "ISMEAR": 0,
            "NELMIN": 4,
            "LREAL": True,
            "BMIX": 1,
            "MAXMIX": 20,
            "NELM": 500,
            "NSIM": 4,
            "ISYM": 0,
            "ISIF": 0,
            "IBRION": 0,
            "NBLOCK": 1,
            "KBLOCK": 100,
            "SMASS": 0,
            "POTIM": time_step,
            "PREC": "Low",
            "ISPIN": 2 if spin_polarized else 1,
            "LDAU": False,
        }

        # Fall back to the VASP/POTCAR default ENCUT.
        self._config_dict["INCAR"].pop("ENCUT", None)
        if not spin_polarized:
            # MAGMOM is meaningless for non-spin-polarized runs.
            self._config_dict["INCAR"].pop("MAGMOM", None)
        self._config_dict["INCAR"].update(md_settings)

    @property
    def kpoints(self):
        """
        :return: Kpoints
        """
        # Gamma-point-only sampling for MD supercells.
        return Kpoints.gamma_automatic()
class MPMDSet(MPRelaxSet):
    """
    This a modified version of the old MITMDSet pre 2018/03/12.

    This set serves as the basis for the amorphous skyline paper.

    (1) Aykol, M.; Dwaraknath, S. S.; Sun, W.; Persson, K. A. Thermodynamic
    Limit for Synthesis of Metastable Inorganic Materials. Sci. Adv. 2018,
    4 (4).

    Class for writing a vasp md run. This DOES NOT do multiple stage runs.
    Precision remains normal, to increase accuracy of stress tensor.
    """

    def __init__(
        self,
        structure,
        start_temp,
        end_temp,
        nsteps,
        spin_polarized=False,
        time_step=None,
        **kwargs
    ):
        r"""
        Args:
            structure (Structure): Input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps (int): Number of time steps for simulations. NSW parameter.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            time_step (float): The time step (POTIM parameter) in fs. Defaults
                to None, which keeps the historical behavior: 2 fs, reduced to
                0.5 fs with NSW quadrupled when hydrogen is present in the
                structure. An explicit value is used as-is with no hydrogen
                adjustment.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # NOTE: the previous docstring documented a ``time_step`` parameter
        # that the signature did not accept; it is now a real, optional,
        # backward-compatible keyword argument.

        # Normal-precision MD defaults (better stress tensor than "Low").
        defaults = {
            "TEBEG": start_temp,
            "TEEND": end_temp,
            "NSW": nsteps,
            "EDIFF_PER_ATOM": 0.00001,
            "LSCALU": False,
            "LCHARG": False,
            "LPLANE": False,
            "LWAVE": True,
            "ISMEAR": 0,
            "NELMIN": 4,
            "LREAL": True,
            "BMIX": 1,
            "MAXMIX": 20,
            "NELM": 500,
            "NSIM": 4,
            "ISYM": 0,
            "ISIF": 0,
            "IBRION": 0,
            "NBLOCK": 1,
            "KBLOCK": 100,
            "SMASS": 0,
            "POTIM": 2,
            "PREC": "Normal",
            "ISPIN": 2 if spin_polarized else 1,
            "LDAU": False,
            "ADDGRID": True,
        }

        if time_step is None:
            # Historical default: hydrogen needs a smaller time step;
            # compensate with 4x as many steps.
            if Element("H") in structure.species:
                defaults["POTIM"] = 0.5
                defaults["NSW"] = defaults["NSW"] * 4
        else:
            # Honor an explicitly requested time step without adjustment.
            defaults["POTIM"] = time_step

        super().__init__(structure, **kwargs)

        self.start_temp = start_temp
        self.end_temp = end_temp
        self.nsteps = nsteps
        self.spin_polarized = spin_polarized
        self.time_step = time_step
        self.kwargs = kwargs

        # Use the VASP/POTCAR default ENCUT.
        self._config_dict["INCAR"].pop("ENCUT", None)
        if defaults["ISPIN"] == 1:
            # MAGMOM is meaningless for non-spin-polarized runs.
            self._config_dict["INCAR"].pop("MAGMOM", None)
        self._config_dict["INCAR"].update(defaults)

    @property
    def kpoints(self):
        """
        :return: Kpoints
        """
        # Gamma-point-only sampling for MD supercells.
        return Kpoints.gamma_automatic()
class MVLNPTMDSet(MITMDSet):
    """
    Class for writing a vasp md run in NPT ensemble.

    Notes:
        To eliminate Pulay stress, the default ENCUT is set to a rather large
        value of ENCUT, which is 1.5 * ENMAX.
    """

    def __init__(
        self,
        structure,
        start_temp,
        end_temp,
        nsteps,
        time_step=2,
        spin_polarized=False,
        **kwargs
    ):
        r"""
        Args:
            structure (Structure): input structure.
            start_temp (int): Starting temperature.
            end_temp (int): Final temperature.
            nsteps(int): Number of time steps for simulations. NSW parameter.
            time_step (int): The time step for the simulation. The POTIM
                parameter. Defaults to 2fs.
            spin_polarized (bool): Whether to do spin polarized calculations.
                The ISPIN parameter. Defaults to False.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # NPT-AIMD defaults (Langevin thermostat, MDALGO=3, variable cell);
        # any user INCAR settings take precedence over these.
        npt_defaults = {
            "IALGO": 48,
            "ISIF": 3,
            "LANGEVIN_GAMMA": [10] * structure.ntypesp,
            "LANGEVIN_GAMMA_L": 1,
            "MDALGO": 3,
            "PMASS": 10,
            "PSTRESS": 0,
            "SMASS": 0,
        }
        npt_defaults.update(kwargs.get("user_incar_settings", {}))
        kwargs["user_incar_settings"] = npt_defaults

        super().__init__(
            structure, start_temp, end_temp, nsteps, time_step, spin_polarized, **kwargs
        )

        # Raise the cutoff to 1.5x the largest POTCAR ENMAX to reduce
        # Pulay stress in the variable-cell run.
        enmax_values = [
            self.potcar[i].keywords["ENMAX"] for i in range(structure.ntypesp)
        ]
        self._config_dict["INCAR"]["ENCUT"] = max(enmax_values) * 1.5
class MVLScanRelaxSet(MPRelaxSet):
    """
    Class for writing a relax input set using Strongly Constrained and
    Appropriately Normed (SCAN) semilocal density functional.

    Notes:
        1. This functional is only available from VASP.5.4.3 upwards.
        2. Meta-GGA calculations require POTCAR files that include
           information on the kinetic energy density of the core-electrons,
           i.e. "PBE_52" or "PBE_54". Make sure the POTCAR including the
           following lines (see VASP wiki for more details):

           $ grep kinetic POTCAR
           kinetic energy-density
           mkinetic energy-density pseudized
           kinetic energy density (partial)
    """

    def __init__(self, structure, **kwargs):
        r"""
        Args:
            structure (Structure): input structure.
            vdw (str): set "rVV10" to enable SCAN+rVV10, which is a versatile
                van der Waals density functional by combing the SCAN functional
                with the rVV10 non-local correlation functional.
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        # Choose PBE_52 unless the user specifies something else.
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, **kwargs)
        else:
            super().__init__(structure, user_potcar_functional="PBE_52", **kwargs)

        # Meta-GGA needs kinetic-energy-density information in the POTCAR.
        if self.potcar_functional not in ("PBE_52", "PBE_54"):
            raise ValueError("SCAN calculations required PBE_52 or PBE_54!")

        scan_updates = {
            "ADDGRID": True,
            "EDIFF": 1e-05,
            "EDIFFG": -0.05,
            "LASPH": True,
            "LDAU": False,
            "METAGGA": "SCAN",
            "NELM": 200,
        }
        if kwargs.get("vdw", "").lower() == "rvv10":
            # This is the correct BPARAM for SCAN+rVV10.
            scan_updates["BPARAM"] = 15.7
        self._config_dict["INCAR"].update(scan_updates)
class LobsterSet(MPRelaxSet):
    """
    Input set to prepare VASP runs that can be digested by Lobster (See cohp.de)
    """

    # Reuses the MPRelaxSet YAML configuration as the baseline.
    CONFIG = _load_yaml_config("MPRelaxSet")

    def __init__(
        self,
        structure: Structure,
        isym: int = -1,
        ismear: int = -5,
        reciprocal_density: int = None,
        address_basis_file: str = None,
        user_supplied_basis: dict = None,
        **kwargs
    ):
        """
        Args:
            structure (Structure): input structure.
            isym (int): ISYM entry for INCAR, only isym=-1 and isym=0 are allowed
            ismear (int): ISMEAR entry for INCAR, only ismear=-5 and ismear=0 are allowed
            reciprocal_density (int): density of k-mesh by reciprocal volume
            user_supplied_basis (dict): dict including basis functions for all elements in structure,
                e.g. {"Fe": "3d 3p 4s", "O": "2s 2p"}; if not supplied, a standard basis is used
            address_basis_file (str): address to a file similar to "BASIS_PBE_54_standaard.yaml"
                in pymatgen.io.lobster.lobster_basis
            **kwargs: Other kwargs supported by :class:`DictSet`.
        """
        warnings.warn(
            "Make sure that all parameters are okay! This is a brand new implementation."
        )

        # Lobster requires wavefunctions without symmetry reduction and a
        # compatible smearing scheme; reject anything else up front.
        if not (isym == -1 or isym == 0):
            raise ValueError("Lobster cannot digest WAVEFUNCTIONS with symmetry")
        if not (ismear == -5 or ismear == 0):
            raise ValueError("Lobster usually works with ismear=-5 or ismear=0")

        # newest potcars are preferred
        # Choose PBE_54 unless the user specifies a different potcar_functional
        if kwargs.get("potcar_functional") or kwargs.get("user_potcar_functional"):
            super().__init__(structure, **kwargs)
        else:
            super().__init__(structure, user_potcar_functional="PBE_54", **kwargs)

        # reciprocal density
        # NOTE(review): because of the `or` below, an explicit
        # reciprocal_density argument is required to avoid the 310 fallback
        # even when user_kpoints_settings already contains
        # "reciprocal_density" — confirm this precedence is intentional.
        if self.user_kpoints_settings is not None:
            if (
                not reciprocal_density
                or "reciprocal_density" not in self.user_kpoints_settings
            ):
                # test, if this is okay
                self.reciprocal_density = 310
            else:
                self.reciprocal_density = (
                    reciprocal_density
                    or self.user_kpoints_settings["reciprocal_density"]
                )
        else:
            if not reciprocal_density:
                # test, if this is okay
                self.reciprocal_density = 310
            else:
                self.reciprocal_density = reciprocal_density

        # might need to be adapted in the future
        ediff_per_atom = 5e-05

        self.isym = isym
        self.ismear = ismear
        self.user_supplied_basis = user_supplied_basis
        self.address_basis_file = address_basis_file
        # predefined basis! Check if the basis is okay! (charge spilling and bandoverlaps!)
        if user_supplied_basis is None and address_basis_file is None:
            basis = Lobsterin.get_basis(
                structure=structure, potcar_symbols=self.potcar_symbols
            )
        elif address_basis_file is not None:
            basis = Lobsterin.get_basis(
                structure=structure,
                potcar_symbols=self.potcar_symbols,
                address_basis_file=address_basis_file,
            )
        elif user_supplied_basis is not None:
            # test if all elements from structure are in user_supplied_basis
            for atomtype in structure.symbol_set:
                if atomtype not in user_supplied_basis:
                    raise ValueError(
                        "There are no basis functions for the atom type "
                        + str(atomtype)
                    )
            basis = [key + " " + value for key, value in user_supplied_basis.items()]

        # Lobsterin computes the NBANDS needed to span the chosen basis.
        lobsterin = Lobsterin(settingsdict={"basisfunctions": basis})
        nbands = lobsterin._get_nbands(structure=structure)

        # Static-run settings: keep the WAVECAR, no ionic steps, LORBIT=11
        # for projections, and the validated ISYM/ISMEAR values.
        update_dict = {
            "EDIFF_PER_ATOM": ediff_per_atom,
            "NSW": 0,
            "LWAVE": True,
            "ISYM": isym,
            "NBANDS": nbands,
            "IBRION": -1,
            "ISMEAR": ismear,
            "LORBIT": 11,
            "ICHARG": 0,
            "ALGO": "Normal",
        }
        self._config_dict["INCAR"].update(update_dict)
        self._config_dict["KPOINTS"].update(
            {"reciprocal_density": self.reciprocal_density}
        )
def get_vasprun_outcar(path, parse_dos=True, parse_eigen=True):
    """
    :param path: Path to get the vasprun.xml and OUTCAR.
    :param parse_dos: Whether to parse dos. Defaults to True.
    :param parse_eigen: Whether to parse eigenvalue. Defaults to True.
    :return:
    """
    path = Path(path)
    vruns = glob.glob(str(path / "vasprun.xml*"))
    outcars = glob.glob(str(path / "OUTCAR*"))

    if not vruns or not outcars:
        raise ValueError(
            "Unable to get vasprun.xml/OUTCAR from prev calculation in %s" % path
        )

    def _pick(candidates, preferred):
        # Prefer the unsuffixed file; otherwise take the lexicographically
        # last match (e.g. the highest relaxation suffix).
        return preferred if preferred in candidates else sorted(candidates)[-1]

    vsfile = _pick(vruns, str(path / "vasprun.xml"))
    outcarfile = _pick(outcars, str(path / "OUTCAR"))

    return (
        Vasprun(vsfile, parse_dos=parse_dos, parse_eigen=parse_eigen),
        Outcar(outcarfile),
    )
def get_structure_from_prev_run(vasprun, outcar=None):
    """
    Process structure from previous run.

    Args:
        vasprun (Vasprun): Vasprun that contains the final structure
            from previous run.
        outcar (Outcar): Outcar that contains the magnetization info from
            previous run.

    Returns:
        Returns the magmom-decorated structure that can be passed to get
        Vasp input files, e.g. get_kpoints.
    """
    structure = vasprun.final_structure
    site_properties = {}

    # magmom: prefer the OUTCAR magnetization; fall back to the input MAGMOM.
    if vasprun.is_spin:
        if outcar and outcar.magnetization:
            site_properties["magmom"] = [m["tot"] for m in outcar.magnetization]
        else:
            site_properties["magmom"] = vasprun.parameters["MAGMOM"]

    # ldau: expand the per-species U/J/L values into per-site lists, assigning
    # each newly-seen species the next value from the INCAR list.
    if vasprun.parameters.get("LDAU", False):
        for key in ("LDAUU", "LDAUJ", "LDAUL"):
            vals = vasprun.incar[key]
            per_species = {}
            per_site = []
            next_idx = 0
            for site in structure:
                symbol = site.specie.symbol
                if symbol not in per_species:
                    per_species[symbol] = vals[next_idx]
                    next_idx += 1
                per_site.append(per_species[symbol])
            if len(per_site) != len(structure):
                raise ValueError(
                    "length of list {} not the same as" "structure".format(per_site)
                )
            site_properties[key.lower()] = per_site

    return structure.copy(site_properties=site_properties)
def standardize_structure(structure, sym_prec=0.1, international_monoclinic=True):
    """
    Get the symmetrically standardized structure.

    Args:
        structure (Structure): The structure.
        sym_prec (float): Tolerance for symmetry finding for standardization.
        international_monoclinic (bool): Whether to use international
            convention (vs Curtarolo) for monoclinic. Defaults True.

    Returns:
        The symmetrized structure.
    """
    new_structure = SpacegroupAnalyzer(
        structure, symprec=sym_prec
    ).get_primitive_standard_structure(
        international_monoclinic=international_monoclinic
    )

    # The primitive structure finding has had several bugs in the past;
    # defend through validation of volume-per-atom and a structure match.
    vpa_old = structure.volume / structure.num_sites
    vpa_new = new_structure.volume / new_structure.num_sites
    if abs(vpa_old - vpa_new) / vpa_old > 0.02:
        raise ValueError(
            "Standardizing cell failed! VPA old: {}, VPA new: {}".format(
                vpa_old, vpa_new
            )
        )

    if not StructureMatcher().fit(structure, new_structure):
        raise ValueError("Standardizing cell failed! Old structure doesn't match new.")

    return new_structure
class BadInputSetWarning(UserWarning):
    """
    Warning class for bad but legal inputs.
    """
def batch_write_input(
    structures,
    vasp_input_set=MPRelaxSet,
    output_dir=".",
    make_dir_if_not_present=True,
    subfolder=None,
    sanitize=False,
    include_cif=False,
    potcar_spec=False,
    zip_output=False,
    **kwargs
):
    """
    Batch write vasp input for a sequence of structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        structures ([Structure]): Sequence of Structures.
        vasp_input_set (VaspInputSet): VaspInputSet class that creates
            vasp input files from structures. Note that a class should be
            supplied. Defaults to MPRelaxSet.
        output_dir (str): Directory to output files. Defaults to current
            directory ".".
        make_dir_if_not_present (bool): Create the directory if not present.
            Defaults to True.
        subfolder (callable): Function to create subdirectory name from
            structure. Defaults to simply "formula_count".
        sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized output
            are generally easier for viewing and certain forms of analysis.
            Defaults to False.
        include_cif (bool): Whether to output a CIF as well. CIF files are
            generally better supported in visualization programs.
        potcar_spec (bool): Instead of writing the POTCAR, write a "POTCAR.spec".
            This is intended to help sharing an input set with people who might
            not have a license to specific Potcar files. Given a "POTCAR.spec",
            the specific POTCAR file can be re-generated using pymatgen with the
            "generate_potcar" function in the pymatgen CLI.
        zip_output (bool): If True, output will be zipped into a file with the
            same name as the InputSet (e.g., MPStaticSet.zip)
        **kwargs: Additional kwargs are passed to the vasp_input_set class
            in addition to structure.
    """
    root = Path(output_dir)
    for idx, structure in enumerate(structures):
        # Destination is either the user-supplied subfolder name or
        # "<formula-without-spaces>_<index>".
        if subfolder is not None:
            dest = root / subfolder(structure)
        else:
            formula = re.sub(r"\s+", "", structure.formula)
            dest = root / "{}_{}".format(formula, idx)

        if sanitize:
            structure = structure.copy(sanitize=True)

        vasp_input_set(structure, **kwargs).write_input(
            str(dest),
            make_dir_if_not_present=make_dir_if_not_present,
            include_cif=include_cif,
            potcar_spec=potcar_spec,
            zip_output=zip_output,
        )
# Minimal placeholder structure (1 Å cubic cell, a single iodine site with a
# z-oriented magmom) used by from_prev_calc-style constructors, which replace
# the structure with the one read from the previous calculation directory.
_dummy_structure = Structure(
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    ["I"],
    [[0, 0, 0]],
    site_properties={"magmom": [[0, 0, 1]]},
)
| mbkumar/pymatgen | pymatgen/io/vasp/sets.py | Python | mit | 110,153 | [
"BoltzTrap",
"VASP",
"pymatgen"
] | f1126956886ac1c38f7fa8c880d6b8664dd4f69067d84b80acbfb267e370226e |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.