Dataset schema:
  text: string (12 to 1.05M chars)
  repo_name: string (5 to 86 chars)
  path: string (4 to 191 chars)
  language: string (1 distinct value)
  license: string (15 distinct values)
  size: int32 (12 to 1.05M)
  keyword: list (1 to 23 items)
  text_hash: string (64 chars)
'''Points (dots, vectors) for PyMOLProbity plugin.''' from __future__ import absolute_import import copy import logging import re from chempy import cpv #from pymol import cmd from pymol import cgo from . import colors # from settings import mpgetq from . import utils logger = logging.getLogger(__name__) ############################################################################### # # CGO UTILS # ############################################################################### def _cgo_color(color): '''Return a CGO list specifying a color.''' r, g, b = colors.get_color_rgb(color) return [cgo.COLOR, r, g, b] def _cgo_sphere(pos, radius): '''Return a CGO list specifying a sphere.''' x, y, z = pos return [cgo.SPHERE, x, y, z, radius] def _perp_vec(vec): '''Return a vector orthogonal to `vec`.''' if abs(vec[0]) > 1e-6: return [-(vec[1] + vec[2]) / vec[0], 1., 1.] if abs(vec[1]) > 1e-6: return [1., -(vec[0] + vec[2]) / vec[1], 1.] return [1., 1., -(vec[0] + vec[1]) / vec[2]] def _cgo_quad(pos, normal, radius): '''Return a CGO list specifying a quad.''' v1 = cpv.normalize(_perp_vec(normal)) v2 = cpv.cross_product(normal, v1) v1 = cpv.scale(v1, radius) v2 = cpv.scale(v2, radius) obj = [ cgo.BEGIN, cgo.TRIANGLE_STRIP, cgo.NORMAL] obj.extend(normal) obj.append(cgo.VERTEX) obj.extend(cpv.add(pos, v1)) obj.append(cgo.VERTEX) obj.extend(cpv.add(pos, v2)) obj.append(cgo.VERTEX) obj.extend(cpv.sub(pos, v2)) obj.append(cgo.VERTEX) obj.extend(cpv.sub(pos, v1)) obj.append(cgo.END) return obj def _cgo_cylinder(pos0, pos1, radius, rgb0, rgb1): '''Return a CGO list specifying a cylinder.''' cgolist = [cgo.CYLINDER] cgolist.extend(pos0) # start cgolist.extend(pos1) # end cgolist.append(radius) cgolist.extend(rgb0) # start color cgolist.extend(rgb1) # end color return cgolist ############################################################################## # # ATOM INFO STRINGS # ############################################################################### # KEY: N=name, A=alt, R=resn, I=resi, X=ins code, C=chain, O=occ, b=Bfac # Simple dotlist atom: 15-char total, with 2-char chain ID # e.g. "NNNNARRRIIIIXCC" BASIC_ATOM_RE = re.compile( r"^([\w\? ]{4})" # group 1: atom name (e.g. " CA ") r"([\w ])" # group 2: alternate conformation ID (e.g. "A") r"([\w]{3})" # group 3: residue name (e.g. "ARG") r"([\d ]{4}[\w ])" # group 4: residue number + insertion code r"([\w ]{1,4})" # group 5: chain ID (1-, 2-, or 4-char) ) def process_basic_atom_string(atom_str): m = BASIC_ATOM_RE.match(atom_str) if m: name, alt, resn, resi, chain = [g.strip().upper() for g in m.groups()] # Assemble into dict atom_dict = { 'chain': chain, 'resn': resn, 'resi': resi, 'name': name, 'alt': alt, } return atom_dict else: return None # Bonds vectorlist with occupancy (optional), B-factor, and input file name # e.g. "NNNNARRRCCIIIIX OOOOBbbbbbb filename" BONDS_VECTORLIST_ATOM_RE = re.compile( r"^([\w\? ]{4})" # group 1: atom name (e.g. " CA ") r"([\w ])" # group 2: alternate conformation ID (e.g. "A") r"([\w]{3})" # group 3: residue name (e.g. "ARG") r"([\w ]{2})" # group 4: chain ID r"([\d ]{4}[\w ]) " # group 5: residue number + ins code r"([\d\.]{4}|) ?" # group 6: 4-char occupancy (optional) + space? 
r"B([\d\.]{4,6}) " # group 7: 4- to 6-char B-factor + space r".+$" # input file name ) def process_bonds_vectorlist_atom(atom_str): m = BONDS_VECTORLIST_ATOM_RE.match(atom_str) if m: (name, alt, resn, chain, resi, occ, b) = [g.strip().upper() for g in m.groups()] # Convert occupancy and B-factor to floats if not occ: occ = 1.00 else: occ = float(occ) if not b: b = 0.00 else: b = float(b) # Assemble into dict atom_dict = { 'chain': chain, 'resn': resn, 'resi': resi, 'name': name, 'alt': alt, 'occ': occ, 'b': b, } return atom_dict else: return None ############################################################################### # # DOTS # ############################################################################### class Dot(object): """Python representation of a Molprobity-style kinemage dot.""" # def set_draw(self): # self.draw = 1 # def unset_draw(self): # self.draw = 0 # def toggle_draw(self): # if self.draw: # self.draw = 0 # else: # self.draw = 1 def get_cgo(self, dot_mode=0, dot_radius=0.03): """Generate a CGO list for a dot.""" cgolist = [] # COLOR cgolist.extend(_cgo_color(self.color)) if dot_mode == 0: # spheres logger.debug("Adding dot to cgolist...") cgolist.extend(_cgo_sphere(self.coords, dot_radius)) logger.debug("Finished adding dot to cgolist.") if dot_mode == 1: # quads logger.debug("Adding quad to cgolist...") normal = cpv.normalize(cpv.sub(self.coords, self.atom['coords'])) cgolist.extend(_cgo_quad(self.coords, normal, dot_radius * 1.5)) logger.debug("Finished adding quad to cgolist.") return cgolist def __init__(self, atom=None, color=None, pointmaster=None, coords=None, draw=1): # Atom self.atom = atom # Dot info self.color = colors.get_pymol_color(color) self.pm = pointmaster self.coords = coords self.draw = draw # Dotlist info self.dotlist_name = None self.dotlist_color = None self.master = None # # Bind to PyMOL atom # #self.atom_selection = None # #self.atom_id = None # #if self.atom is not None: # #self.atom_selection = self._get_atom_selection() # #self.atom_id = cmd.id_atom(self.atom_selection) # #print self.atom_selection, self.atom_id DOTLIST_HEADER_RE = re.compile( r"dotlist " # dotlist keyword + space r"{([^}]*)} " # atom info section + space r"color=([^\s]*) " # color r"master={([^}]*)}" # master ) def _parse_dotlist_header(line): """Parse the header line of a kinemage `@dotlist` keyword. Header lines are in the following format: dotlist {x} color=white master={vdw contact} dotlist {x} color=sky master={vdw contact} dotlist {x} color=red master={H-bonds} Where "x" is an arbitrary name for the dotlist (currently hard-coded as "x" in the Probe source), the color is the default color of the dots, and master is the type of interaction depicted by the dots. """ m = DOTLIST_HEADER_RE.match(line) # name, color, master return m.group(1), m.group(2), utils.slugify(m.group(3)) DOTLIST_BODY_RE = re.compile( r"{([^}]*)}" # atom info string r"(\w*)\s*" # color + optional space(s) r"'(\w)' " # pointmaster r"([0-9.,\-]*)" # coordinates ) def _parse_dotlist_body(lines): """Parse the non-header lines of a kinemage `@dotlist` keyword. Body lines are in the following format[*]: { CA SER 26 A}blue 'O' 61.716,59.833,8.961 { OE1 GLU 31 A}blue 'S' 61.865,58.936,17.234 { OE2 GLU 31 A}blue 'S' 61.108,60.399,15.044 { H? 
HOH 293 A}greentint 'O' 57.884,59.181,7.525 [**] { O HOH 435 A}greentint 'O' 56.838,61.938,21.538 { O HOH 450 A}greentint 'O' 55.912,56.611,17.956 Or generally: {AAAABCCCDDDDEFF}colorname 'G' X,Y,Z where: AAAA = atom name B = alt conf CCC = residue name (3-letter) DDDD = residue number (typically 3 digits) E = insertion code FF = chain (typically a single letter) G = pointmaster code (e.g. ScSc, McSc, McMc, Hets) * Note that the text within the braces is not space-delimited, but is arranged in fixed-width columns. ** Also, 'H?' for water hydrogens is problematic and simply stripped away. """ active_atom = None dots = [] # Parse lines to generate Dots for i, l in enumerate(lines): m = DOTLIST_BODY_RE.match(l) # Atom selection in kinemages is only written explicitly the first # time for a given set of dots. Afterward, it inherits from the # previous line via a single double-quote character ("). if m.group(1) == '"': atom = active_atom else: # e.g.: " O HOH 450 A" logger.debug('m.group(1) match is: %s' % m.group(1)) atom_sel = m.group(1) # TODO: Check this formatting in probe documentation name = atom_sel[0:4].strip().replace('?','') alt = atom_sel[4:5].strip().upper() resn = atom_sel[5:8].strip().upper() resi = atom_sel[8:13].strip().upper() chain = atom_sel[13:15].strip().upper() logger.debug('Generating atom for dot %i...' % i) # TODO don't create duplicate atoms (track in MPObject) atom = {'name': name, 'alt': alt, 'resn': resn, 'resi': resi, 'chain': chain} logger.debug('Finished generating atom for dot %i.' % i) active_atom = atom color = m.group(2) pointmaster = m.group(3) coords = [float(c) for c in m.group(4).split(',')] # Create the Dot dot = Dot(atom, color, pointmaster, coords) dots.append(dot) return dots # #def _get_dot_atom_selection(dot): # #assert type(dot) is Dot # #obj = "%s" % dot.dotlist.result.obj # #if dot.atom['chain']: # #sele = "%s and chain %s" % (sele, dot.chain) # #if dot.atom['resn']: # #sele = "%s and resn %s" % (sele, dot.resn) # #if dot.atom['resi']: # #sele = "%s and resi %s" % (sele, dot.resi) # ## Hack: Probe gives HOH hydrogens atom names of 'H?', which, even when the # ## '?' is stripped, doesn't work with PyMOL, which numbers them 'H1' and # ## 'H2'. # #if dot.atom['name'] and not dot.atom['resn'] == 'HOH': # hack # #sele = "%s and name %s" % (sele, dot.name) # #return sele def process_dotlist(lines, context): '''Process a list of dotlist lines and return a list of Dots. Given a list of lines from a Kinemage file comprising a dotlist, parse the first line as the header, and the remaining lines as the body. Create a Dot() instance for each line and return a list of Dots. ''' logger.debug("Parsing dotlist header...") name, color, master = _parse_dotlist_header(lines[0]) logger.debug("Parsing dotlist body...") dots = _parse_dotlist_body(lines[1:]) logger.debug("Adding Dotlist info...") # Add Dotlist info to each dot. 
for d in dots: # From dotlist header d.dotlist_name = name d.dotlist_color = color d.master = master # From context d.kinemage = context['kinemage'] d.group = context['group'] d.subgroup = context['subgroup'] d.animate = context['animate'] logger.debug("Finished adding Dotlist info.") return dots # ############################################################################### # # # # VECTORS # # # ############################################################################### class Vector(object): """Python representation of a Molprobity-style kinemage vector.""" # def set_draw(self): # self.draw = 1 # def unset_draw(self): # self.draw = 0 # def toggle_draw(self): # if self.draw: # self.draw = 0 # else: # self.draw = 1 def get_cgo(self, radius=0.03): """Generate a CGO list for a vector.""" cgolist = [] # Set colors rgb0 = colors.get_color_rgb(self.color[0]) rgb1 = colors.get_color_rgb(self.color[1]) if True: # cylinders logger.debug("Adding vector to cgolist...") # Cylinder cgolist.extend(_cgo_cylinder(self.coords[0], self.coords[1], radius, rgb0, rgb1)) # Caps cgolist.extend(_cgo_color(self.color[0])) cgolist.extend(_cgo_sphere(self.coords[0], radius)) cgolist.extend(_cgo_color(self.color[1])) cgolist.extend(_cgo_sphere(self.coords[1], radius)) logger.debug("Finished adding vector to cgolist.") return cgolist def macro(self, i): a = copy.copy(self.atom[i]) if a is None: return None if a['alt']: a['alt'] = '`{}'.format(a['alt']) else: a['alt'] = '' return '{chain}/{resn}`{resi}/{name}{alt}'.format(**a) def sel(self, i): a = copy.copy(self.atom[i]) if a is None: return None c = 'chain {chain} and '.format(**a) if a['chain'] else '' i = 'resi {resi} and '.format(**a) if a['resi'] else '' n = 'name {name} and '.format(**a) if a['name'] else '' alt = 'alt {alt} and '.format(**a) if a['alt'] else '' sel = '{}{}{}{}'.format(c, i, n, alt) return sel[:-5] # strip last " and " def __init__(self, atom0=None, color0=None, pointmaster0=None, coords0=None, atom1=None, color1=None, pointmaster1=None, coords1=None, draw=1): # Atom self.atom = [atom0, atom1] # Vector info c0 = colors.get_pymol_color(color0) c1 = colors.get_pymol_color(color1) self.color = [c0, c1] self.pm = [pointmaster0, pointmaster1] self.coords = [coords0, coords1] self.draw = draw # Vectorlist info self.vectorlist_name = None self.vectorlist_color = None self.master = None # # Bind to PyMOL atom # #self.atom_selection = None # #self.atom_id = None # #if self.atom is not None: # #self.atom_selection = self._get_atom_selection() # #self.atom_id = cmd.id_atom(self.atom_selection) # #print self.atom_selection, self.atom_id def __str__(self): vectorlist_info = '[{},{}]'.format(self.vectorlist_name, self.master) return '{}: {}--{}'.format(vectorlist_info, self.macro(0), self.macro(1)) VECTORLIST_HEADER_RE = re.compile( r"vectorlist " r"{([^}]*)} " # group 1: name r"color= *([^\s]*) *" # group 2: color r"(\w+ )*" # group 3: other text (e.g. nobutton) r"master= *{([^}]*)}" # group 4: master ) def _parse_vectorlist_header(line): """Parse the header line of a kinemage `@vectorlist` keyword. Header lines are in the following format: vectorlist {x} color=white master={small overlap} Where "x" is an arbitrary name for the list (currently hard-coded as "x" in the Probe source), the color is the default color of the vectors, and master is the type of interaction depicted by the vectors. 
""" logger.debug('parsing line: "{}"'.format(line)) NAME = 1 COLOR = 2 OTHER = 3 MASTER = 4 m = VECTORLIST_HEADER_RE.match(line) logger.debug('vectorlist header: {}'.format(m.groups())) # name, color, master return m.group(NAME), m.group(COLOR), utils.slugify(m.group(MASTER)) # Probe v2.16 (20-May-13) format VECTORLIST_CLASH_RE = re.compile( r"{([^}]*)}" # group 1: atom description r"(\w*) " # group 2: color r"([A-Z] )*" # group 3: optional L or P character followed by space r" *" # allow extra space before pointmaster r"'(\w)' " # group 4: pointmaster r"([0-9.,\-]*)" # group 5: coordinates as a single string ) def _parse_clash_vectorlist_body(lines): """Parse the non-header lines of a vectorlist containing clash spikes. Body lines are in the following format: { CB SER 26 A}yellowtint P 'O' 57.581,59.168,8.642 {"}yellowtint 'O' 57.589,59.163,8.646 Or generally: {AAAA BBB CCCD E}colorname 'F' X,Y,Z (x 2) where: AAAA = atom name BBB = residue name (3-letter) CCC = residue number D = insertion code ??? TODO: check this E = chain F = pointmaster code (e.g. ScSc, McSc, McMc, Hets) * Note that the text within the braces is not space-delimited, but is arranged in fixed-width columns. Also, 'H?' for water hydrogens is problematic. # TODO """ ATOM = 1 COLOR = 2 LP = 3 PM = 4 COORDS = 5 active_atom = None vectors = [] # Parse lines to generate Vectors for i, l in enumerate(lines): matches = VECTORLIST_CLASH_RE.finditer(l) v = [] for m in matches: logger.debug('match: {}'.format(m.group(0))) logger.debug('clash vectorlist body line: {}'.format(m.groups())) logger.debug('beginning match...') # Atom selection in kinemages is only written explicitly the first # time for a given list of points. Afterward, it inherits from the # previous point via a single double-quote character ("). atom_sel = m.group(ATOM) if atom_sel == '"': #logger.debug('using active atom...') atom = active_atom else: # TODO don't create duplicate atoms (track in MPObject) logger.debug('Generating atom for vector %i point...' % i) atom = process_basic_atom_string(atom_sel) logger.debug('Finished generating atom for vector %i point.' % i) active_atom = atom color = m.group(COLOR) pointmaster = m.group(PM) coords = [float(c) for c in m.group(COORDS).split(',')] v.append({ 'atom': atom, 'color': color, 'pm': pointmaster, 'coords': coords}) # Create the Vector vector = Vector(v[0]['atom'], v[0]['color'], v[0]['pm'], v[0]['coords'], v[1]['atom'], v[1]['color'], v[1]['pm'], v[1]['coords']) vectors.append(vector) return vectors VECTORLIST_BONDS_RE = re.compile( r"{([^}]*)}" # group 1: atom description r" ?" # optional space r"([LP]) " # group 2: single L or P character (TODO: what is this?) r"(?:'(\w)' )*" # group 3: pointmaster (optional) r"(?:(\w+) )*" # group 4: other non-quoted word, e.g. 
'ghost' (optional) r"([\d,\.\- ]+)" # group 5: coordinates as a single string ) def _parse_bonds_vectorlist_body(lines): '''Parse vectorlist lines that describe bonds.''' # TODO: merge this with _parse_clashes_vectorlist_body() ATOM = 1 LP = 2 # not used PM = 3 OTHER = 4 # not used COORDS = 5 # active_atom = None vectors = [] for i, l in enumerate(lines): matches = tuple(VECTORLIST_BONDS_RE.finditer(l)) # Set up new vector points list v = [] # If a continuation point, reuse the second point from the previous vector if len(matches) == 1: try: prev_v = vectors[-1] v.append({ 'atom': prev_v.atom[1], 'color': prev_v.color[1], 'pm': prev_v.pm[1], 'coords': prev_v.coords[1]}) except IndexError: logger.error('only 1 point given, but no previous points to use') raise elif len(matches) < 1 or len(matches) > 2: # <1 or >2 shouldn't happen raise ValueError('malformed line: {}'.format(l)) for j, m in enumerate(matches): logger.debug('match {} of {}: {}'.format(j+1, len(matches), m.group(0))) logger.debug('bond vectorlist body line: {}'.format(m.groups())) logger.debug('beginning match...') # # Atom selection in kinemages is only written explicitly the first # # time for a given set of dots. Afterward, it inherits from the # # previous line via a single double-quote character ("). atom_sel = m.group(ATOM) if atom_sel == '"': logger.debug('using active atom...') atom = active_atom else: try: # TODO don't create duplicate atoms (track in MPObject) msg = 'Generating atom for vector {} point {}...'.format(i, j) logger.debug(msg) atom = process_bonds_vectorlist_atom(atom_sel) except: msg = 'Atom info string `{}` could not be parsed. Skipping.' logger.error(msg.format(atom_sel)) logger.debug('Finished generating atom for vector %i point %i.' % (i, j)) # active_atom = atom color = None pointmaster = m.group(PM) coords = [float(c) for c in m.group(COORDS).split(',')] v.append({ 'atom': atom, 'color': color, 'pm': pointmaster, 'coords': coords}) # logger.debug('coords0: {}'.format(v[0]['coords'])) # logger.debug('coords1: {}'.format(v[1]['coords'])) # Create the Vector vector = Vector(v[0]['atom'], v[0]['color'], v[0]['pm'], v[0]['coords'], v[1]['atom'], v[1]['color'], v[1]['pm'], v[1]['coords']) vectors.append(vector) return vectors # #def _get_dot_atom_selection(dot): # #assert type(dot) is Dot # #obj = "%s" % dot.dotlist.result.obj # #if dot.atom['chain']: # #sele = "%s and chain %s" % (sele, dot.chain) # #if dot.atom['resn']: # #sele = "%s and resn %s" % (sele, dot.resn) # #if dot.atom['resi']: # #sele = "%s and resi %s" % (sele, dot.resi) # ## Hack: Probe gives HOH hydrogens atom names of 'H?', which, even when the # ## '?' is stripped, doesn't work with PyMOL, which numbers them 'H1' and # ## 'H2'. # #if dot.atom['name'] and not dot.atom['resn'] == 'HOH': # hack # #sele = "%s and name %s" % (sele, dot.name) # #return sele def process_vectorlist(lines, context): """Process a list of dotlist lines and return a list of Vectors. Given a list of lines from a Kinemage file comprising a dotlist, parse the first line as the header, and the remaining lines as the body. Create a Dot() instance for each line and return a list of Dots. """ logger.debug("Parsing vectorlist header...") name, color, master = _parse_vectorlist_header(lines[0]) logger.debug("Parsing vectorlist body...") if name == 'x': vectors = _parse_clash_vectorlist_body(lines[1:]) else: vectors = _parse_bonds_vectorlist_body(lines[1:]) logger.debug("Adding vectorlist info...") # Add Dotlist info to each dot. 
for v in vectors: # From vectorlist header v.vectorlist_name = name v.vectorlist_color = color v.master = master # From context v.kinemage = context['kinemage'] v.group = context['group'] v.subgroup = context['subgroup'] v.animate = context['animate'] logger.debug("Finished adding Vectorlist info.") return vectors
jaredsampson/pymolprobity
pymolprobity/points.py
Python
mit
24,345
[ "ChemPy", "PyMOL" ]
bd7afc8fe9dd59a94a5157b8cbcd47b81c3ae8f28fa0b8a268034ab50a8d2ac5
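A minimal usage sketch for the dotlist parsing in the pymolprobity/points.py record above (illustrative only, not from the repository): it assumes the package is importable inside a PyMOL environment (it needs chempy and pymol), and the two kinemage lines are invented to match the fixed-width format documented in _parse_dotlist_body.

from pymolprobity import points

# One @dotlist header line followed by one body line, in the format the
# docstrings above describe.
kin_lines = [
    "dotlist {x} color=sky master={vdw contact}",
    "{ CA  SER  26  A}blue  'O' 61.716,59.833,8.961",
]
# The context keys mirror what process_dotlist() copies onto each Dot.
context = {'kinemage': 1, 'group': 'dots', 'subgroup': None, 'animate': False}
dots = points.process_dotlist(kin_lines, context)
print(dots[0].coords)   # [61.716, 59.833, 8.961]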
#!/usr/bin/python ######################################################################## # 31 Oct 2014 # Patrick Lombard, Centre for Stem Stem Research # Core Bioinformatics Group # University of Cambridge # All right reserved. ######################################################################## import subprocess import sys, re, os import ConfigParser import itertools import argparse from collections import defaultdict import pkg_resources import HTSeq import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from multiprocessing import Pool, Manager from pynoncode import web_templates def read_custom_input(ifile): trans = {} with open(ifile) as f: for line in f: line = line.rstrip() word = line.split("\t") trans[word[0]] = 1 return trans def read_input(idir, paired, pval, padj=None): trans = {} frags = {} with open(idir + "/diff_transcripts.tsv") as f: header = next(f) for line in f: line = line.rstrip() word = line.split("\t") if padj: if word[6] == "NA": pass elif float(word[6]) <= padj: trans[word[0]] = (word[2], word[5], word[6]) else: if word[5] == "NA": pass elif float(word[5]) <= pval: trans[word[0]] = (word[2], word[5], word[6]) #LFC Pvalue, Padj with open(idir + "/diff_fragments.tsv") as f: header = next(f) for line in f: line = line.rstrip() word = line.split("\t") if not paired: if padj: if word[6] == "NA": pass elif float(word[6]) <= padj: frags[word[0]] = (word[2], word[5], word[6]) else: if word[5] == "NA": pass elif float(word[5]) <= float(pval): frags[word[0]] = (word[2], word[5], word[6]) else: pairs = word[0].split("|") if padj: if word[6] == "NA": pass elif float(word[6]) <= padj: frags[pairs[0], pairs[1]] = (word[2], word[5], word[6]) else: if word[5] == "NA": pass elif float(word[5]) <= pval: frags[pairs[0], pairs[1]] = (word[2], word[5], word[6]) #LFC. Pvalue, padj return trans, frags def find_frag_transcripts(conditions, frags, paired): transcripts = {} for idir in conditions: with open(idir + "/fragment_counts.txt") as f: for line in f: line = line.rstrip() word = line.split("\t") if not paired: #This is adding every occurence, need to just find unique positions! if word[3] in frags: if word[6] not in transcripts: transcripts[word[6]] = {} transcripts[word[6]][word[0], word[1], word[2]] = (word[3], frags[word[3]])#Position of diff fragment. Make this list if more than one are involved??? else: transcripts[word[6]][word[0], word[1], word[2]] = (word[3], frags[word[3]]) else: if int(word[7]) == 1: next_line = next(f).rstrip() next_word = next_line.split("\t") if int(next_word[7]) == 2: if (word[3], next_word[3]) in frags: if word[6] not in transcripts: transcripts[word[6]] = {} transcripts[word[6]][word[0], word[1], word[2], next_word[1], word[2]] = (word[3], next_word[3], frags[word[3], next_word[3]]) #Contains both reads positions else: transcripts[word[6]][word[0], word[1], word[2], next_word[1], word[2]] = (word[3], next_word[3], frags[word[3], next_word[3]]) return transcripts #Could add region in plots! 
def read_directories_for_transcripts(conditions, transcript_coords, paired): #Counts the incidence of transcripts in fragments file transcript_arrays = {} #Initialise this dictionary for trans in transcript_coords: transcript_arrays[trans] = {} for idir in conditions: length = int(transcript_coords[trans][2]) - int(transcript_coords[trans][1]) transcript_arrays[trans][idir] = np.zeros(length, dtype='f') for idir in conditions: with open(idir + "/fragment_counts.txt") as f: for line in f: line = line.rstrip() word = line.split("\t") if not paired: if word[6] in transcript_coords: start = int(word[1]) - int(transcript_coords[word[6]][1]) end = int(word[2]) - int(transcript_coords[word[6]][1]) if start < 0: start = 0 if end > int(transcript_coords[word[6]][2]): end = int(transcript_coords[word[6]][2]) transcript_arrays[word[6]][idir][start:end] += float(word[4])#Add the count of that fragment to the transcript range it covers else: #I only need count once per pair! But I need to add this over all regions per pair!! if int(word[7]) == 1: #First pair if word[6] in transcript_coords: #Make sure transcript is important next_line = next(f).rstrip() next_word = next_line.split("\t") if int(next_word[7]) == 2: #Make sure they are properly paired p1_start = int(word[1]) - int(transcript_coords[word[6]][1]) p1_end = int(word[2]) - int(transcript_coords[word[6]][1]) if p1_start < 0: p1_start = 0 if p1_end > int(transcript_coords[word[6]][2]): p1_end = int(transcript_coords[word[6]][2]) transcript_arrays[word[6]][idir][p1_start:p1_end] += float(word[4]) #This is first pair p2_start = int(next_word[1]) - int(transcript_coords[word[6]][1]) #Second pair p2_end = int(next_word[2]) - int(transcript_coords[word[6]][1]) if p2_start < 0: p2_start = 0 if p2_end > int(transcript_coords[word[6]][2]): p2_end = int(transcript_coords[word[6]][2]) transcript_arrays[word[6]][idir][p2_start:p2_end] += float(word[4]) #Must add same count to every pair return transcript_arrays def invert_dict_nonunique(d): newdict = {} for k, v in d.iteritems(): newdict.setdefault(v, []).append(k) return newdict def average_arrays(conditions, transcript_arrays, transcript_coords): #Reverse the conditions dictionary and then average counts per conditions inv_conditions = invert_dict_nonunique(conditions) inv_array = {} for transcript in sorted(transcript_arrays): #Naming is confusing, transcript here is an array length = int(transcript_coords[transcript][2]) - int(transcript_coords[transcript][1]) inv_array[transcript] = {} for cond in inv_conditions: inv_array[transcript][cond] = np.zeros(length, dtype='f') count = 1 for sample in inv_conditions[cond]: inv_array[transcript][cond] += transcript_arrays[transcript][sample] count += 1 inv_array[transcript][cond] /= count return inv_array def plot_trans_arrays(conditions, transcript_arrays, outputdir, custom=False): #Plot sererately per transcript for transcript in sorted(transcript_arrays): c = 1 for sample in sorted(transcript_arrays[transcript]): c +=1 length_of_transcript = len(transcript_arrays[transcript][sample]) base_label = np.array(xrange(length_of_transcript)) plt.plot(base_label, transcript_arrays[transcript][sample], label="{}".format(sample)) if c <= 4: #Control size of legend plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0., prop={'size':8}) else: plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0., prop={'size':5}) plt.ylabel('Read Count') if custom: plt.savefig(outputdir+'/{}.png'.format(transcript)) else: 
plt.savefig(outputdir+'/plots/{}.png'.format(transcript)) plt.close() def plot_frag_arrays(conditions, transcript_arrays, outputdir, transcript_coords, transcripts_dict, paired): #Plot sererately per transcript for transcript in sorted(transcripts_dict): #Key is transcript, values are dict of positions and then fragments a = 1 for frag_pos in sorted(transcripts_dict[transcript]): c = 1 for sample in sorted(transcript_arrays[transcript]): length_of_transcript = len(transcript_arrays[transcript][sample]) base_label = np.array(xrange(length_of_transcript)) c += 1 #Count number of samples for legend size plt.plot(base_label, transcript_arrays[transcript][sample], label="{}".format(sample)) #Same as transcripts start_pos = int(frag_pos[1]) - int(transcript_coords[transcript][1]) end_pos = int(frag_pos[2]) - int(transcript_coords[transcript][1]) plt.axvspan(start_pos, end_pos, color='red', alpha=0.2) if paired: start_pos = int(frag_pos[3]) - int(transcript_coords[transcript][1]) end_pos = int(frag_pos[4]) - int(transcript_coords[transcript][1]) plt.axvspan(start_pos, end_pos, color='red', alpha=0.2) #Plot labels if c <= 4: #Control size of legend plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0., prop={'size':5}) else: plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0., prop={'size':7}) plt.ylabel('Read Count') plt.savefig(outputdir+'/plots/{}_{}.png'.format(transcript, a)) plt.close() a += 1 #To reduce memory usage, just store interesting transcripts def preprocess_gtf(gtf, transcripts): gtffile = HTSeq.GFF_Reader( gtf ) exons = defaultdict(list) for feature in gtffile: if feature.type == "exon": if feature.attr["transcript_id"] in transcripts: exons[feature.attr["transcript_id"]].append(feature) #Just a list of exons for each transcript transcript_coords = {} for trans in sorted(exons): list_of_exons = exons[trans] if len(list_of_exons) == 1: #Don't care about exon numbers, just get start and end. Strand is unimportant transcript_coords[trans] = (exons[trans][0].iv.chrom, exons[trans][0].iv.start, exons[trans][0].iv.end, exons[trans][0].iv.strand) else: #Strand is important, need to becareful here. exon_count = 1 for exon in list_of_exons: if exon.iv.strand == "+": if exon.attr["exon_number"] == "1": chrom = exon.iv.chrom start = exon.iv.start strand = exon.iv.strand else: if exon.attr["exon_number"] > exon_count: end = exon.iv.end else: #Reverse start and end for negative strands if exon.attr["exon_number"] == "1": chrom = exon.iv.chrom end = exon.iv.end strand = exon.iv.strand else: if exon.attr["exon_number"] > exon_count: start = exon.iv.start exon_number = exon.attr["exon_number"] transcript_coords[trans] = (chrom, start, end, strand) return transcript_coords def ConfigSectionMap(section, Config): dict1 = {} options = Config.options(section) for option in options: try: dict1[option] = Config.get(section, option) if dict1[option] == -1: DebugPrint("skip: %s" % option) except: print("exception on %s!" 
% option) dict1[option] = None return dict1 def write_reports(transcripts, fragments, outdir, path_to_stuff, paired): FNULL = open(os.devnull, 'w') command = "unzip -o {0}/bootstrap-3.3.0-dist.zip -d {1}".format(path_to_stuff, outdir) #Move css and other stuff to output directory subprocess.call(command.split(), stdout=FNULL) html = web_templates.create_transcript_html(transcripts) #Get HTML text output = open(outdir+"/transcripts.html", "w") #Write it out output.write(html) output.close() output = open(outdir + "/transcript_summary.tsv", "w") output.write("Transcript\tP-Value\tLFC\n"), for trans in sorted(transcripts): output.write("{}\t{}\t{}\n".format(trans, transcripts[trans][1], transcripts[trans][0])), output.close() html = web_templates.create_fragment_html(fragments, paired) #Get HTML text output = open(outdir+"/fragments.html", "w") #Write it out output.write(html) output.close() output = open(outdir + "/fragment_summary.tsv", "w") if paired: output.write("Chromosome\tStart\tEnd\tRead1\tChromosome\tStart\tEnd\tRead2\tP Value\tLog Fold Change\tMapped Transcript\n"), else: output.write("Chromosome\tStart\tEnd\tRead1\tP Value\tLog Fold Change\tMapped Transcript\n"), for trans in sorted(fragments): for frag_pos in sorted(fragments[trans]): if paired: output.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(frag_pos[0], frag_pos[1], frag_pos[2], fragments[trans][frag_pos][0], frag_pos[0], frag_pos[3], frag_pos[4], fragments[trans][frag_pos][1], fragments[trans][frag_pos][2][1], fragments[trans][frag_pos][2][0], trans)) else: output.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(frag_pos[0], frag_pos[1], frag_pos[2], fragments[trans][frag_pos][0], fragments[trans][frag_pos][1][1], fragments[trans][frag_pos][1][0], trans)) output.close() def create_counts_dict(counts): data = {} with open(counts) as f: header = next(f) head = header.split("\t") for line in f: line = line.rstrip() word = line.split("\t") for i in range(1, len(head)): if word[0] in data: data[word[0]] = {} data[word[0]][head[i]] = word[i] else: data[word[0]][head[i]] = word[i] return data def main(): parser = argparse.ArgumentParser(description='Plots transcripts from pynoncode processed samples\n') parser.add_argument('-c','--config', help='Config file, similar as the one supplied to pynon_diff.py. Please see documentation for more details', required=True) parser.add_argument('-i','--input', help='pynon_diff.py processed directory', required=False) parser.add_argument('-u','--custom', help='Can provide list of transcripts for plotting instead of pynon_diff.py directory', required=False) parser.add_argument('-n','--genome', help='Samples genome akigned to, options are hg19/mm10', required=True) parser.add_argument('-g','--gtf', help='GTF for annotation. 
If not supplied, will use the packages GTF') parser.add_argument('-p', help='Use if samples are paired end', action="store_true", required=False) parser.add_argument('-a', help='Will average samples according to conditions', action="store_true", required=False) parser.add_argument('-v','--pval', help='Pvalue cutoff for significance, default=0.1', default=0.1, required=False) parser.add_argument('-d','--padj', help='Use padjusted instead of pvalue for selection', required=False) parser.add_argument('-o','--outdir', help='Output directory', required=True) args = vars(parser.parse_args()) Config = ConfigParser.ConfigParser() Config.optionxform = str Config.read(args["config"]) conditions = ConfigSectionMap("Conditions", Config) if os.path.isdir(args["outdir"]): print "Output directory already exists, may overwrite existing files" else: os.mkdir(args["outdir"]) os.mkdir("{}/plots".format(args["outdir"])) if args["gtf"]: gtf = args["gtf"] else: gtf = pkg_resources.resource_filename('pynoncode', 'data/{}_ncRNA.gtf'.format(args["genome"])) path_to_stuff = pkg_resources.resource_filename('pynoncode', 'data/') #Used for web templates css features if args["custom"]: transcripts = read_custom_input(args["input"]) transcript_coords = preprocess_gtf(gtf, transcripts) #Annotation of transcripts transcript_arrays = read_directories_for_transcripts(conditions, transcript_coords, args["p"]) #Dict of numpy array containing counts of transcripts if args["a"]: #Average over conditions by reversing numpy array dict averaged_array = average_arrays(conditions, transcript_arrays, transcript_coords) plot_trans_arrays(conditions, averaged_array, args["outdir"], True) else: plot_trans_arrays(conditions, transcript_arrays, args["outdir"], True) else: transcripts, fragments = read_input(args["input"], args["p"], float(args["pval"]), float(args["padj"])) #Now contains LFC, Pvalue and padj transcript_coords = preprocess_gtf(gtf, transcripts) #Annotation of transcripts transcript_arrays = read_directories_for_transcripts(conditions, transcript_coords, args["p"]) #Dict of numpy array containing counts of transcripts if args["a"]: #Average over conditions by reversing numpy array dict averaged_array = average_arrays(conditions, transcript_arrays, transcript_coords) plot_trans_arrays(conditions, averaged_array, args["outdir"]) else: plot_trans_arrays(conditions, transcript_arrays, args["outdir"]) transcripts2 = find_frag_transcripts(conditions, fragments, args["p"]) #Now contains coordinates of fragment, LFC and pvalue transcript_coords2 = preprocess_gtf(gtf, transcripts2) #Annotation of transcripts transcript_arrays2 = read_directories_for_transcripts(conditions, transcript_coords2, args["p"]) #Dict of numpy array containing counts of transcripts if args["a"]: #Average over conditions by reversing numpy array dict averaged_array2 = average_arrays(conditions, transcript_arrays2, transcript_coords2) plot_frag_arrays(conditions, averaged_array2, args["outdir"], transcript_coords2, transcripts2, args["p"]) else: plot_frag_arrays(conditions, transcript_arrays2, args["outdir"], transcript_coords2, transcripts2, args["p"]) #Creating web report write_reports(transcripts, transcripts2, args["outdir"], path_to_stuff, args["p"])
pdl30/pynoncode
pynoncode/plot.py
Python
gpl-2.0
16,666
[ "HTSeq" ]
dfd8e57793e72758161e9b281d5844e947cb64d48848d077cbb2fd283ca267e8
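A hypothetical illustration of how the [Conditions] config section is loaded by the plotting script above (the file name and sample values are invented): each key is a pynoncode sample directory and each value is its condition label, which invert_dict_nonunique() later groups on when averaging with -a.

import ConfigParser   # Python 2, as used by the script above

Config = ConfigParser.ConfigParser()
Config.optionxform = str                  # keep sample-directory names case-sensitive
Config.read("conditions.ini")             # hypothetical config file
conditions = ConfigSectionMap("Conditions", Config)   # helper defined in the script
# conditions might then look like:
#   {'sample1_dir': 'WT', 'sample2_dir': 'WT', 'sample3_dir': 'KO'}
# i.e. sample directories mapped to condition labels.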
import random
import threading
import time
import unittest

from traits.api import Enum, HasStrictTraits
from traits.util.async_trait_wait import wait_for_condition


class TrafficLights(HasStrictTraits):
    colour = Enum('Green', 'Amber', 'Red', 'RedAndAmber')

    _next_colour = {
        'Green': 'Amber',
        'Amber': 'Red',
        'Red': 'RedAndAmber',
        'RedAndAmber': 'Green',
    }

    def make_random_changes(self, change_count):
        for _ in xrange(change_count):
            time.sleep(random.uniform(0.1, 0.3))
            self.colour = self._next_colour[self.colour]


class TestAsyncTraitWait(unittest.TestCase):
    def test_wait_for_condition_success(self):
        lights = TrafficLights(colour='Green')
        t = threading.Thread(target=lights.make_random_changes, args=(2,))
        t.start()
        wait_for_condition(
            condition=lambda l: l.colour == 'Red',
            obj=lights,
            trait='colour',
        )
        self.assertEqual(lights.colour, 'Red')
        t.join()

    def test_wait_for_condition_failure(self):
        lights = TrafficLights(colour='Green')
        t = threading.Thread(target=lights.make_random_changes, args=(2,))
        t.start()
        self.assertRaises(
            RuntimeError,
            wait_for_condition,
            condition=lambda l: l.colour == 'RedAndAmber',
            obj=lights,
            trait='colour',
            timeout=5.0,
        )
        t.join()

    def test_traits_handler_cleaned_up(self):
        # An older version of wait_for_condition failed to clean up
        # the trait handler, leading to possible evaluation of the
        # condition after the 'wait_for_condition' call had returned.
        self.lights = TrafficLights(colour='Green')
        t = threading.Thread(target=self.lights.make_random_changes,
                             args=(3,))
        t.start()
        wait_for_condition(
            condition=lambda l: self.lights.colour == 'Red',
            obj=self.lights,
            trait='colour',
        )
        del self.lights
        # If the condition gets evaluated again past this point, we'll
        # see an AttributeError from the failed self.lights lookup.
        # assertSucceeds!
        t.join()


if __name__ == '__main__':
    unittest.main()
burnpanck/traits
traits/util/tests/test_async_trait_wait.py
Python
bsd-3-clause
2,297
[ "Amber" ]
e7492efd21313d9d9cc8e2a966984afbbe6db1207a545cb76b5669c31ec3fe57
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Memsurfer(PythonPackage):
    """MemSurfer is a tool to compute and analyze membrane surfaces found
    in a wide variety of large-scale molecular simulations."""

    homepage = "https://github.com/LLNL/MemSurfer"
    git = "https://github.com/LLNL/MemSurfer.git"

    maintainers = ['bhatiaharsh']

    version('1.0', tag='v1.0', submodules=True)
    version('master', branch='master', submodules=True)
    version('develop', branch='develop', submodules=True)

    variant('osmesa', default=False, description='Enable OSMesa support (for VTK)')

    extends('python')
    depends_on('python@3.7:', type=('build', 'run'))
    depends_on('cmake@3.14:', type='build')
    depends_on('swig@3.0.12', type='build')
    depends_on('py-cython', type='build')
    depends_on('py-numpy', type=('build', 'run'))

    depends_on('eigen@3.3.7')
    depends_on('cgal@4.13 +shared~core~demos~imageio')

    # vtk needs to know whether to build with mesa or opengl
    vtk_conf = '~ffmpeg~mpi+opengl2~qt~xdmf+python'
    depends_on('vtk@8.1.2 ' + vtk_conf + ' ~osmesa', when='~osmesa')
    depends_on('vtk@8.1.2 ' + vtk_conf + ' +osmesa', when='+osmesa')

    # needed only to resolve the conflict between the default and netcdf's spec
    depends_on('hdf5 +hl')

    # memsurfer's setup needs path to these deps to build extension modules
    def setup_build_environment(self, env):
        env.set('VTK_ROOT', self.spec['vtk'].prefix)
        env.set('CGAL_ROOT', self.spec['cgal'].prefix)
        env.set('BOOST_ROOT', self.spec['boost'].prefix)
        env.set('EIGEN_ROOT', self.spec['eigen'].prefix)
iulian787/spack
var/spack/repos/builtin/packages/memsurfer/package.py
Python
lgpl-2.1
1,848
[ "NetCDF", "VTK" ]
509dd57371d7e388bbedf586e6879853ca3088e2f3bee9d2078db1933792389c
__all__ = [ 'BadRequestException', 'BadStateException', 'CsrfException', 'DropboxOAuth2Flow', 'DropboxOAuth2FlowNoRedirect', 'NotApprovedException', 'OAuth2FlowNoRedirectResult', 'OAuth2FlowResult', 'ProviderException', ] import base64 import os import six import urllib from .session import ( API_HOST, WEB_HOST, pinned_session, ) if six.PY3: url_path_quote = urllib.parse.quote # pylint: disable=no-member,useless-suppression url_encode = urllib.parse.urlencode # pylint: disable=no-member,useless-suppression else: url_path_quote = urllib.quote # pylint: disable=no-member,useless-suppression url_encode = urllib.urlencode # pylint: disable=no-member,useless-suppression class OAuth2FlowNoRedirectResult(object): """ Authorization information for an OAuth2Flow performed with no redirect. """ def __init__(self, access_token, account_id, user_id): """ Args: access_token (str): Token to be used to authenticate later requests. account_id (str): The Dropbox user's account ID. user_id (str): Deprecated (use account_id instead). """ self.access_token = access_token self.account_id = account_id self.user_id = user_id def __repr__(self): return 'OAuth2FlowNoRedirectResult(%r, %r, %r)' % ( self.access_token, self.account_id, self.user_id, ) class OAuth2FlowResult(OAuth2FlowNoRedirectResult): """ Authorization information for an OAuth2Flow with redirect. """ def __init__(self, access_token, account_id, user_id, url_state): """ Same as OAuth2FlowNoRedirectResult but with url_state. Args: url_state (str): The url state that was set by :meth:`DropboxOAuth2Flow.start`. """ super(OAuth2FlowResult, self).__init__( access_token, account_id, user_id) self.url_state = url_state @classmethod def from_no_redirect_result(cls, result, url_state): assert isinstance(result, OAuth2FlowNoRedirectResult) return cls( result.access_token, result.account_id, result.user_id, url_state) def __repr__(self): return 'OAuth2FlowResult(%r, %r, %r, %r)' % ( self.access_token, self.account_id, self.user_id, self.url_state, ) class DropboxOAuth2FlowBase(object): def __init__(self, consumer_key, consumer_secret, locale=None): self.consumer_key = consumer_key self.consumer_secret = consumer_secret self.locale = locale self.requests_session = pinned_session() def _get_authorize_url(self, redirect_uri, state): params = dict(response_type='code', client_id=self.consumer_key) if redirect_uri is not None: params['redirect_uri'] = redirect_uri if state is not None: params['state'] = state return self.build_url('/oauth2/authorize', params, WEB_HOST) def _finish(self, code, redirect_uri): url = self.build_url('/oauth2/token') params = {'grant_type': 'authorization_code', 'code': code, 'client_id': self.consumer_key, 'client_secret': self.consumer_secret, } if self.locale is not None: params['locale'] = self.locale if redirect_uri is not None: params['redirect_uri'] = redirect_uri resp = self.requests_session.post(url, data=params) resp.raise_for_status() d = resp.json() if 'team_id' in d: account_id = d['team_id'] else: account_id = d['account_id'] access_token = d['access_token'] uid = d['uid'] return OAuth2FlowNoRedirectResult( access_token, account_id, uid) def build_path(self, target, params=None): """Build the path component for an API URL. This method urlencodes the parameters, adds them to the end of the target url, and puts a marker for the API version in front. :param str target: A target url (e.g. '/files') to build upon. :param dict params: Optional dictionary of parameters (name to value). 
:return: The path and parameters components of an API URL. :rtype: str """ if six.PY2 and isinstance(target, six.text_type): target = target.encode('utf8') target_path = url_path_quote(target) params = params or {} params = params.copy() if self.locale: params['locale'] = self.locale if params: query_string = _params_to_urlencoded(params) return "%s?%s" % (target_path, query_string) else: return target_path def build_url(self, target, params=None, host=API_HOST): """Build an API URL. This method adds scheme and hostname to the path returned from build_path. :param str target: A target url (e.g. '/files') to build upon. :param dict params: Optional dictionary of parameters (name to value). :return: The full API URL. :rtype: str """ return "https://%s%s" % (host, self.build_path(target, params)) class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase): """ OAuth 2 authorization helper for apps that can't provide a redirect URI (such as the command-line example apps). Example:: from dropbox import DropboxOAuth2FlowNoRedirect auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET) authorize_url = auth_flow.start() print "1. Go to: " + authorize_url print "2. Click \\"Allow\\" (you might have to log in first)." print "3. Copy the authorization code." auth_code = raw_input("Enter the authorization code here: ").strip() try: oauth_result = auth_flow.finish(auth_code) except Exception, e: print('Error: %s' % (e,)) return dbx = Dropbox(oauth_result.access_token) """ def __init__(self, consumer_key, consumer_secret, locale=None): # noqa: E501; pylint: disable=useless-super-delegation """ Construct an instance. Parameters :param str consumer_key: Your API app's "app key". :param str consumer_secret: Your API app's "app secret". :param str locale: The locale of the user of your application. For example "en" or "en_US". Some API calls return localized data and error messages; this setting tells the server which locale to use. By default, the server uses "en_US". """ # pylint: disable=useless-super-delegation super(DropboxOAuth2FlowNoRedirect, self).__init__( consumer_key, consumer_secret, locale, ) def start(self): """ Starts the OAuth 2 authorization process. :return: The URL for a page on Dropbox's website. This page will let the user "approve" your app, which gives your app permission to access the user's Dropbox account. Tell the user to visit this URL and approve your app. """ return self._get_authorize_url(None, None) def finish(self, code): """ If the user approves your app, they will be presented with an "authorization code". Have the user copy/paste that authorization code into your app and then call this method to get an access token. :param str code: The authorization code shown to the user when they approved your app. :rtype: OAuth2FlowNoRedirectResult :raises: The same exceptions as :meth:`DropboxOAuth2Flow.finish()`. """ return self._finish(code, None) class DropboxOAuth2Flow(DropboxOAuth2FlowBase): """ OAuth 2 authorization helper. Use this for web apps. OAuth 2 has a two-step authorization process. The first step is having the user authorize your app. The second involves getting an OAuth 2 access token from Dropbox. 
Example:: from dropbox import DropboxOAuth2Flow def get_dropbox_auth_flow(web_app_session): redirect_uri = "https://my-web-server.org/dropbox-auth-finish" return DropboxOAuth2Flow( APP_KEY, APP_SECRET, redirect_uri, web_app_session, "dropbox-auth-csrf-token") # URL handler for /dropbox-auth-start def dropbox_auth_start(web_app_session, request): authorize_url = get_dropbox_auth_flow(web_app_session).start() redirect_to(authorize_url) # URL handler for /dropbox-auth-finish def dropbox_auth_finish(web_app_session, request): try: oauth_result = \\ get_dropbox_auth_flow(web_app_session).finish( request.query_params) except BadRequestException, e: http_status(400) except BadStateException, e: # Start the auth flow again. redirect_to("/dropbox-auth-start") except CsrfException, e: http_status(403) except NotApprovedException, e: flash('Not approved? Why not?') return redirect_to("/home") except ProviderException, e: logger.log("Auth error: %s" % (e,)) http_status(403) """ def __init__(self, consumer_key, consumer_secret, redirect_uri, session, csrf_token_session_key, locale=None): """ Construct an instance. :param str consumer_key: Your API app's "app key". :param str consumer_secret: Your API app's "app secret". :param str redirect_uri: The URI that the Dropbox server will redirect the user to after the user finishes authorizing your app. This URI must be HTTPS-based and pre-registered with the Dropbox servers, though localhost URIs are allowed without pre-registration and can be either HTTP or HTTPS. :param dict session: A dict-like object that represents the current user's web session (will be used to save the CSRF token). :param str csrf_token_session_key: The key to use when storing the CSRF token in the session (for example: "dropbox-auth-csrf-token"). :param str locale: The locale of the user of your application. For example "en" or "en_US". Some API calls return localized data and error messages; this setting tells the server which locale to use. By default, the server uses "en_US". """ super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret, locale) self.redirect_uri = redirect_uri self.session = session self.csrf_token_session_key = csrf_token_session_key def start(self, url_state=None): """ Starts the OAuth 2 authorization process. This function builds an "authorization URL". You should redirect your user's browser to this URL, which will give them an opportunity to grant your app access to their Dropbox account. When the user completes this process, they will be automatically redirected to the ``redirect_uri`` you passed in to the constructor. This function will also save a CSRF token to ``session[csrf_token_session_key]`` (as provided to the constructor). This CSRF token will be checked on :meth:`finish()` to prevent request forgery. :param str url_state: Any data that you would like to keep in the URL through the authorization process. This exact value will be returned to you by :meth:`finish()`. :return: The URL for a page on Dropbox's website. This page will let the user "approve" your app, which gives your app permission to access the user's Dropbox account. Tell the user to visit this URL and approve your app. 
""" csrf_token = base64.urlsafe_b64encode(os.urandom(16)).decode('ascii') state = csrf_token if url_state is not None: state += "|" + url_state self.session[self.csrf_token_session_key] = csrf_token return self._get_authorize_url(self.redirect_uri, state) def finish(self, query_params): """ Call this after the user has visited the authorize URL (see :meth:`start()`), approved your app and was redirected to your redirect URI. :param dict query_params: The query parameters on the GET request to your redirect URI. :rtype: OAuth2FlowResult :raises: :class:`BadRequestException` If the redirect URL was missing parameters or if the given parameters were not valid. :raises: :class:`BadStateException` If there's no CSRF token in the session. :raises: :class:`CsrfException` If the ``state`` query parameter doesn't contain the CSRF token from the user's session. :raises: :class:`NotApprovedException` If the user chose not to approve your app. :raises: :class:`ProviderException` If Dropbox redirected to your redirect URI with some unexpected error identifier and error message. """ # Check well-formedness of request. state = query_params.get('state') if state is None: raise BadRequestException("Missing query parameter 'state'.") error = query_params.get('error') error_description = query_params.get('error_description') code = query_params.get('code') if error is not None and code is not None: raise BadRequestException( "Query parameters 'code' and 'error' are both set; " "only one must be set.") if error is None and code is None: raise BadRequestException( "Neither query parameter 'code' or 'error' is set.") # Check CSRF token if self.csrf_token_session_key not in self.session: raise BadStateException('Missing CSRF token in session.') csrf_token_from_session = self.session[self.csrf_token_session_key] if len(csrf_token_from_session) <= 20: raise AssertionError('CSRF token unexpectedly short: %r' % csrf_token_from_session) split_pos = state.find('|') if split_pos < 0: given_csrf_token = state url_state = None else: given_csrf_token = state[0:split_pos] url_state = state[split_pos + 1:] if not _safe_equals(csrf_token_from_session, given_csrf_token): raise CsrfException('expected %r, got %r' % (csrf_token_from_session, given_csrf_token)) del self.session[self.csrf_token_session_key] # Check for error identifier if error is not None: if error == 'access_denied': # The user clicked "Deny" if error_description is None: raise NotApprovedException( 'No additional description from Dropbox') else: raise NotApprovedException( 'Additional description from Dropbox: %s' % error_description) else: # All other errors full_message = error if error_description is not None: full_message += ": " + error_description raise ProviderException(full_message) # If everything went ok, make the network call to get an access token. no_redirect_result = self._finish(code, self.redirect_uri) return OAuth2FlowResult.from_no_redirect_result( no_redirect_result, url_state) class BadRequestException(Exception): """ Thrown if the redirect URL was missing parameters or if the given parameters were not valid. The recommended action is to show an HTTP 400 error page. """ pass class BadStateException(Exception): """ Thrown if all the parameters are correct, but there's no CSRF token in the session. This probably means that the session expired. The recommended action is to redirect the user's browser to try the approval process again. 
""" pass class CsrfException(Exception): """ Thrown if the given 'state' parameter doesn't contain the CSRF token from the user's session. This is blocked to prevent CSRF attacks. The recommended action is to respond with an HTTP 403 error page. """ pass class NotApprovedException(Exception): """ The user chose not to approve your app. """ pass class ProviderException(Exception): """ Dropbox redirected to your redirect URI with some unexpected error identifier and error message. The recommended action is to log the error, tell the user something went wrong, and let them try again. """ pass def _safe_equals(a, b): if len(a) != len(b): return False res = 0 for ca, cb in zip(a, b): res |= ord(ca) ^ ord(cb) return res == 0 def _params_to_urlencoded(params): """ Returns a application/x-www-form-urlencoded ``str`` representing the key/value pairs in ``params``. Keys are values are ``str()``'d before calling ``urllib.urlencode``, with the exception of unicode objects which are utf8-encoded. """ def encode(o): if isinstance(o, six.binary_type): return o else: if isinstance(o, six.text_type): return o.encode('utf-8') else: return str(o).encode('utf-8') utf8_params = {encode(k): encode(v) for k, v in six.iteritems(params)} return url_encode(utf8_params)
mrquim/mrquimrepo
script.xbmcbackup/resources/lib/dropbox/oauth.py
Python
gpl-2.0
18,272
[ "VisIt" ]
36d380bd9046a0cd7483c2165be730eb9150fa88d193d124d6845785ae9559ae
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
#
# DEPRECATED by v2019 redesign
# Adapter pattern for the X10 controller.
#
# This class presents a consistent interface to the rest of the server application.
# The actual controller driver to be used is injected by the server start up code.
# Note that this adapter is treated as a singleton. The app only supports a singleton
# controller at a time and that controller is represented by this adapter.
# As a result, most of the methods/properties are class or static level.
#
# Add methods for X10 controller access. Use the AtHomeX10 app's driver as a model
# of what methods are needed.
#

import logging

logger = logging.getLogger("server")


class X10ControllerAdapter:
    # Injection point for driver to be used to access X10 controller
    # This is a singleton instance of the driver to be used for all access
    Driver = None

    #************************************************************************
    # Constructor
    def __init__(self):
        pass

    #************************************************************************
    # Inject driver into the adapter
    # The main app must create an instance of a driver that implements the
    # X10ControllerInterface and call this method to inject the instance.
    @classmethod
    def InjectDriver(cls, driver):
        cls.Driver = driver
        logger.info("X10ControllerAdapter has been injected with driver: %s", str(driver))

    #************************************************************************
    # Open the injected driver
    @classmethod
    def Open(cls, driver):
        cls.Driver = driver
        logger.info("X10ControllerAdapter has been injected with driver: %s", str(driver))
        return cls.Driver.open()

    #************************************************************************
    # Close the singleton copy of the controller driver
    @classmethod
    def Close(cls):
        result = cls.Driver.close()
        logger.info("X10ControllerAdapter has been closed")
        return result

    @classmethod
    def GetLastErrorCode(cls):
        return cls.Driver.last_error_code

    @classmethod
    def GetLastError(cls):
        return cls.Driver.last_error

    #************************************************************************
    # Turn a device on
    # house_device_code = Ex. 'A1'
    # dim_amount 0 <= v <= 22
    @classmethod
    def DeviceOn(cls, house_device_code, dim_amount):
        logger.info("Device on: {} {}".format(house_device_code, dim_amount))
        return cls.Driver.device_on(house_device_code, dim_amount)

    #************************************************************************
    # Turn a device off
    # house_device_code = Ex. 'A1'
    # dim_amount 0 <= v <= 22
    @classmethod
    def DeviceOff(cls, house_device_code, dim_amount):
        logger.info("Device off: {} {}".format(house_device_code, dim_amount))
        return cls.Driver.device_off(house_device_code, dim_amount)

    #************************************************************************
    # Dim a lamp module
    # house_device_code = Ex. 'A1'
    # dim_amount 0 <= v <= 22
    @classmethod
    def DeviceDim(cls, house_device_code, dim_amount):
        logger.info("Device dim: {0} {1}".format(house_device_code, dim_amount))
        return cls.Driver.device_dim(house_device_code, dim_amount)

    #************************************************************************
    # Bright(en) a lamp module
    # house_device_code = Ex. 'A1'
    # dim_amount 0 <= v <= 22
    @classmethod
    def DeviceBright(cls, house_device_code, bright_amount):
        logger.info("Device bright: {0} {1}".format(house_device_code, bright_amount))
        return cls.Driver.device_bright(house_device_code, bright_amount)

    #************************************************************************
    # Turn all units off
    @classmethod
    def DeviceAllUnitsOff(cls, house_code):
        logger.info("Device all units off for house code: {0}".format(house_code))
        return cls.Driver.device_all_units_off(house_code)

    #************************************************************************
    # Turn all lights off
    @classmethod
    def DeviceAllLightsOff(cls, house_code):
        logger.info("Device all lights off")
        return cls.Driver.device_all_lights_off(house_code)

    #************************************************************************
    # Turn all lights on
    @classmethod
    def DeviceAllLightsOn(cls, house_code):
        logger.info("Device all lights on")
        return cls.Driver.device_all_lights_on(house_code)
dhocker/athomepowerlineserver
drivers/X10ControllerAdapter.py
Python
gpl-3.0
4,887
[ "xTB" ]
d883360244c933f68027b4d48ada23be49e6bb0ca7009032284d4e1d2d1010c0
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Test of Policy Engine For Neutron""" import mock from oslo_policy import fixture as op_fixture from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import importutils import neutron from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions from neutron import context from neutron import manager from neutron import policy from neutron.tests import base class PolicyFileTestCase(base.BaseTestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.Context('fake', 'fake', is_admin=False) self.target = {'tenant_id': 'fake'} def test_modified_policy_reloads(self): tmpfilename = self.get_temp_file_path('policy') action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ""}""") policy.refresh(policy_file=tmpfilename) policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": "!"}""") policy.refresh(policy_file=tmpfilename) self.target = {'tenant_id': 'fake_tenant'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) class PolicyTestCase(base.BaseTestCase): def setUp(self): super(PolicyTestCase, self).setUp() # NOTE(vish): preload rules to circumvent reloading from file rules = { "true": '@', "example:allowed": '@', "example:denied": '!', "example:get_http": "http:http://www.example.com", "example:my_file": "role:compute_admin or tenant_id:%(tenant_id)s", "example:early_and_fail": "! 
and @", "example:early_or_success": "@ or !", "example:lowercase_admin": "role:admin or role:sysadmin", "example:uppercase_admin": "role:ADMIN or role:sysadmin", } policy.refresh() # NOTE(vish): then overload underlying rules policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.Context('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_check_bad_action_noraise(self): action = "example:denied" result = policy.check(self.context, action, self.target) self.assertEqual(result, False) def test_check_non_existent_action(self): action = "example:idonotexist" result_1 = policy.check(self.context, action, self.target) self.assertFalse(result_1) result_2 = policy.check(self.context, action, self.target, might_not_exist=True) self.assertTrue(result_2) def test_enforce_good_action(self): action = "example:allowed" result = policy.enforce(self.context, action, self.target) self.assertTrue(result) def test_enforce_http_true(self): self.useFixture(op_fixture.HttpCheckFixture()) action = "example:get_http" target = {} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_http_false(self): self.useFixture(op_fixture.HttpCheckFixture(False)) action = "example:get_http" target = {} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def test_templatized_enforcement(self): target_mine = {'tenant_id': 'fake'} target_not_mine = {'tenant_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince) we mix case in the Admin role here to ensure # case is ignored admin_context = context.Context('admin', 'fake', roles=['AdMiN']) policy.enforce(admin_context, lowercase_action, self.target) policy.enforce(admin_context, uppercase_action, self.target) class DefaultPolicyTestCase(base.BaseTestCase): def setUp(self): super(DefaultPolicyTestCase, self).setUp() tmpfilename = self.get_temp_file_path('policy.json') self.rules = { "default": '', "example:exist": '!', } with open(tmpfilename, "w") as policyfile: jsonutils.dump(self.rules, policyfile) policy.refresh(policy_file=tmpfilename) self.context = context.Context('fake', 'fake') def test_policy_called(self): self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): policy.enforce(self.context, "example:noexist", {}) FAKE_RESOURCE_NAME = 'fake_resource' FAKE_SPECIAL_RESOURCE_NAME = 'fake_policy' FAKE_RESOURCES = {"%ss" % FAKE_RESOURCE_NAME: {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 
'validate': {'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}, # special plural name "%s" % FAKE_SPECIAL_RESOURCE_NAME.replace('y', 'ies'): {'attr': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'enforce_policy': True, 'validate': {'type:dict': {'sub_attr_1': {'type:string': None}, 'sub_attr_2': {'type:string': None}}} }}} class NeutronPolicyTestCase(base.BaseTestCase): def fakepolicyinit(self, **kwargs): enf = policy._ENFORCER enf.set_rules(oslo_policy.Rules(self.rules)) def setUp(self): super(NeutronPolicyTestCase, self).setUp() policy.refresh() # Add Fake resources to RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES) self._set_rules() def remove_fake_resource(): del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] self.patcher = mock.patch.object(neutron.policy, 'init', new=self.fakepolicyinit) self.patcher.start() self.addCleanup(remove_fake_resource) self.context = context.Context('fake', 'fake', roles=['user']) plugin_klass = importutils.import_class( "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") self.manager_patcher = mock.patch('neutron.manager.NeutronManager') fake_manager = self.manager_patcher.start() fake_manager_instance = fake_manager.return_value fake_manager_instance.plugin = plugin_klass() def _set_rules(self, **kwargs): rules_dict = { "context_is_admin": "role:admin", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or " "tenant_id:%(network:tenant_id)s", "admin_or_owner": ("rule:context_is_admin or " "tenant_id:%(tenant_id)s"), "admin_only": "rule:context_is_admin", "regular_user": "role:user", "shared": "field:networks:shared=True", "external": "field:networks:router:external=True", "network_device": "field:port:device_owner=~^network:", "default": '@', "create_network": "rule:admin_or_owner", "create_network:shared": "rule:admin_only", "update_network": '@', "update_network:shared": "rule:admin_only", "get_network": "rule:admin_or_owner or rule:shared or " "rule:external or rule:context_is_advsvc", "create_subnet": "rule:admin_or_network_owner", "create_port:mac": "rule:admin_or_network_owner or " "rule:context_is_advsvc", "create_port:device_owner": "not rule:network_device", "update_port": "rule:admin_or_owner or rule:context_is_advsvc", "get_port": "rule:admin_or_owner or rule:context_is_advsvc", "delete_port": "rule:admin_or_owner or rule:context_is_advsvc", "create_fake_resource": "rule:admin_or_owner", "create_fake_resource:attr": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_1": "rule:admin_or_owner", "create_fake_resource:attr:sub_attr_2": "rule:admin_only", "create_fake_policy:": "rule:admin_or_owner", "get_firewall_policy": "rule:admin_or_owner or " "rule:shared", "get_firewall_rule": "rule:admin_or_owner or " "rule:shared", "insert_rule": "rule:admin_or_owner", "remove_rule": "rule:admin_or_owner", } rules_dict.update(**kwargs) self.rules = oslo_policy.Rules.from_dict(rules_dict) def test_firewall_policy_insert_rule_with_admin_context(self): action = "insert_rule" target = {} result = policy.check(context.get_admin_context(), action, target) self.assertTrue(result) def test_firewall_policy_insert_rule_with_owner(self): action = "insert_rule" target = {"tenant_id": "own_tenant"} user_context = context.Context('', "own_tenant", roles=['user']) result = policy.check(user_context, action, target) self.assertTrue(result) def test_firewall_policy_remove_rule_without_admin_or_owner(self): action = 
"remove_rule" target = {"firewall_rule_id": "rule_id", "tenant_id": "tenantA"} user_context = context.Context('', "another_tenant", roles=['user']) result = policy.check(user_context, action, target) self.assertFalse(result) def _test_action_on_attr(self, context, action, obj, attr, value, exception=None, **kwargs): action = "%s_%s" % (action, obj) target = {'tenant_id': 'the_owner', attr: value} if kwargs: target.update(kwargs) if exception: self.assertRaises(exception, policy.enforce, context, action, target) else: result = policy.enforce(context, action, target) self.assertTrue(result) def _test_nonadmin_action_on_attr(self, action, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user']) self._test_action_on_attr(user_context, action, "network", attr, value, exception, **kwargs) def _test_advsvc_action_on_attr(self, action, obj, attr, value, exception=None, **kwargs): user_context = context.Context('', "user", roles=['user', 'advsvc']) self._test_action_on_attr(user_context, action, obj, attr, value, exception, **kwargs) def test_nonadmin_write_on_private_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_private_fails(self): self._test_nonadmin_action_on_attr('get', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_write_on_shared_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', True, oslo_policy.PolicyNotAuthorized) def test_create_port_device_owner_regex(self): blocked_values = ('network:', 'network:abdef', 'network:dhcp', 'network:router_interface') for val in blocked_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val, oslo_policy.PolicyNotAuthorized ) ok_values = ('network', 'networks', 'my_network:test', 'my_network:') for val in ok_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val ) def test_advsvc_get_network_works(self): self._test_advsvc_action_on_attr('get', 'network', 'shared', False) def test_advsvc_create_network_fails(self): self._test_advsvc_action_on_attr('create', 'network', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_advsvc_create_port_works(self): self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False) def test_advsvc_get_port_works(self): self._test_advsvc_action_on_attr('get', 'port', 'shared', False) def test_advsvc_update_port_works(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_advsvc_action_on_attr('update', 'port', 'shared', True, **kwargs) def test_advsvc_delete_port_works(self): self._test_advsvc_action_on_attr('delete', 'port', 'shared', False) def test_advsvc_create_subnet_fails(self): self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False, oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_shared_succeeds(self): self._test_nonadmin_action_on_attr('get', 'shared', True) def test_check_is_admin_with_admin_context_succeeds(self): admin_context = context.get_admin_context() # explicitly set roles as this test verifies user credentials # with the policy engine admin_context.roles = ['admin'] self.assertTrue(policy.check_is_admin(admin_context)) def test_check_is_admin_with_user_context_fails(self): self.assertFalse(policy.check_is_admin(self.context)) def test_check_is_admin_with_no_admin_policy_fails(self): del self.rules[policy.ADMIN_CTX_POLICY] admin_context = context.get_admin_context() self.assertFalse(policy.check_is_admin(admin_context)) def 
test_check_is_advsvc_with_admin_context_fails(self): admin_context = context.get_admin_context() self.assertFalse(policy.check_is_advsvc(admin_context)) def test_check_is_advsvc_with_svc_context_succeeds(self): svc_context = context.Context('', 'svc', roles=['advsvc']) self.assertTrue(policy.check_is_advsvc(svc_context)) def test_check_is_advsvc_with_no_advsvc_policy_fails(self): del self.rules[policy.ADVSVC_CTX_POLICY] svc_context = context.Context('', 'svc', roles=['advsvc']) self.assertFalse(policy.check_is_advsvc(svc_context)) def test_check_is_advsvc_with_user_context_fails(self): self.assertFalse(policy.check_is_advsvc(self.context)) def _test_enforce_adminonly_attribute(self, action, **kwargs): admin_context = context.get_admin_context() target = {'shared': True} if kwargs: target.update(kwargs) result = policy.enforce(admin_context, action, target) self.assertTrue(result) def test_enforce_adminonly_attribute_create(self): self._test_enforce_adminonly_attribute('create_network') def test_enforce_adminonly_attribute_update(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_enforce_adminonly_attribute('update_network', **kwargs) def test_reset_adminonly_attr_to_default_fails(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_nonadmin_action_on_attr('update', 'shared', False, oslo_policy.PolicyNotAuthorized, **kwargs) def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def _test_build_subattribute_match_rule(self, validate_value): bk = FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = ( validate_value) action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} self.assertFalse(policy._build_subattr_match_rule( 'attr', FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr'], action, target)) FAKE_RESOURCES['%ss' % FAKE_RESOURCE_NAME]['attr']['validate'] = bk def test_build_subattribute_match_rule_empty_dict_validator(self): self._test_build_subattribute_match_rule({}) def test_build_subattribute_match_rule_wrong_validation_info(self): self._test_build_subattribute_match_rule( {'type:dict': 'wrong_stuff'}) def test_build_match_rule_special_pluralized(self): action = "create_" + FAKE_SPECIAL_RESOURCE_NAME pluralized = "create_fake_policies" target = {} result = policy._build_match_rule(action, target, pluralized) self.assertEqual("rule:" + action, str(result)) def test_build_match_rule_normal_pluralized_when_create(self): action = "create_" + FAKE_RESOURCE_NAME target = {} result = policy._build_match_rule(action, target, None) self.assertEqual("rule:" + action, str(result)) def test_enforce_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x'}} result = policy.enforce(self.context, action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} result = policy.enforce(context.get_admin_context(), action, target, None) self.assertTrue(result) def test_enforce_admin_only_subattribute_nonadminctx_returns_403(self): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} 
self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target, None) def test_enforce_regularuser_on_read(self): action = "get_network" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_firewall_policy_shared(self): action = "get_firewall_policy" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_firewall_rule_shared(self): action = "get_firewall_rule" target = {'shared': True, 'tenant_id': 'somebody_else'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_tenant_id_check(self): # Trigger a policy with rule admin_or_owner action = "create_network" target = {'tenant_id': 'fake'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_tenant_id_check_parent_resource(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake'} action = "create_port:mac" with mock.patch.object(manager.NeutronManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_enforce_plugin_failure(self): def fakegetnetwork(*args, **kwargs): raise NotImplementedError('Blast!') # the policy check and plugin method we use in this test are irrelevant # so long that we verify that, if *f* blows up, the behavior of the # policy engine to propagate the exception is preserved action = "create_port:mac" with mock.patch.object(manager.NeutronManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} self.assertRaises(NotImplementedError, policy.enforce, self.context, action, target) def test_enforce_tenant_id_check_parent_resource_bw_compatibility(self): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake'} self._set_rules( admin_or_network_owner="role:admin or " "tenant_id:%(network_tenant_id)s") action = "create_port:mac" with mock.patch.object(manager.NeutronManager.get_instance().plugin, 'get_network', new=fakegetnetwork): target = {'network_id': 'whatever'} result = policy.enforce(self.context, action, target) self.assertTrue(result) def test_tenant_id_check_no_target_field_raises(self): # Try and add a bad rule self.assertRaises( exceptions.PolicyInitError, oslo_policy.Rules.from_dict, {'test_policy': 'tenant_id:(wrong_stuff)'}) def _test_enforce_tenant_id_raises(self, bad_rule): self._set_rules(admin_or_owner=bad_rule) # Trigger a policy with rule admin_or_owner action = "create_network" target = {'tenant_id': 'fake'} self.fakepolicyinit() self.assertRaises(exceptions.PolicyCheckError, policy.enforce, self.context, action, target) def test_enforce_tenant_id_check_malformed_target_field_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(malformed_field)s') def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') def test_process_rules(self): action = "create_" + FAKE_RESOURCE_NAME # Construct RuleChecks for an action, attribute and subattribute match_rule = oslo_policy.RuleCheck('rule', action) attr_rule = oslo_policy.RuleCheck( 'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME)) sub_attr_rules = [oslo_policy.RuleCheck( 'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))] # Build an AndCheck from the given RuleChecks # 
Make the checks nested to better check the recursion sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules) attr_rule = oslo_policy.AndCheck( [attr_rule, sub_attr_rules]) match_rule = oslo_policy.AndCheck([match_rule, attr_rule]) # Assert that the rules are correctly extracted from the match_rule rules = policy._process_rules_list([], match_rule) self.assertEqual(['create_fake_resource', 'create_fake_resource:fake_resources', 'create_fake_resource:attr:sub_attr_1'], rules) @mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True) @mock.patch.object(policy.LOG, 'debug') def test_log_rule_list(self, mock_debug, mock_is_e): policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_')) self.assertTrue(mock_is_e.called) self.assertTrue(mock_debug.called)
yanheven/neutron
neutron/tests/unit/test_policy.py
Python
apache-2.0
26,323
[ "BLAST" ]
388d7e82d9d3aed253196a52b4c61b7802ba60f0c4c0176df86bb20f5fb7c796
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import unittest as ut
import importlib_wrapper
import numpy as np

tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
    "@TUTORIALS_DIR@/ferrofluid/ferrofluid_part1.py",
    EQUIL_STEPS=200, EQUIL_ROUNDS=10)


@skipIfMissingFeatures
class Tutorial(ut.TestCase):
    system = tutorial.system

    def test(self):
        self.assertEqual(
            int(np.sum(tutorial.n_clusters)), len(tutorial.cluster_sizes))
        for i in range(8):
            self.assertLess(
                tutorial.size_dist[0][i + 1], tutorial.size_dist[0][i])


if __name__ == "__main__":
    ut.main()
fweik/espresso
testsuite/scripts/tutorials/test_ferrofluid_1.py
Python
gpl-3.0
1,332
[ "ESPResSo" ]
87220fafa47da492171107851052bb5450eb7e15e90742ed69f8456b0646d8dc
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on July 31 09:51:20 2014 @author: Mathieu Garon @email: mathieugaron91@gmail.com """ import roslib; roslib.load_manifest('picam_tracker_py') import rospy from sensor_msgs.msg import Image import std_srvs.srv import cv2 import picamera import wiringpi2 as gpio import os import io import numpy as np import threading import time import image_processor as proc import sharedGlobals as sg import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt class picam_tester: def __init__(self): try: self.picam = picamera.PiCamera() except: rospy.logfatal("Check if the Picam is free or installed") rospy.Service('test_camera',std_srvs.srv.Empty,self.test_cb) rospy.Service('gaussian_background_modeling',std_srvs.srv.Empty,self.gauss_BG_model_cb) rospy.Service('correlation',std_srvs.srv.Empty,self.correlation_cb) self._init_picamera() self._init_led() #datas: self.mu = [] self.sig = [] self._flash_led(nflash=4) rospy.loginfo("Picam_tester ready...") rospy.spin() def __del__(self): self.picam.close() def correlation_cb(self,req): rospy.loginfo("Correlation") w=1296 h=730 d=50 template_signal =np.array([1,1,0,0,1,1,0,0,1,1,1,1,0,0,0,0]) with sg.VAR_LOCK: sg.CORR_DATA = proc.rectangleBuffers(25,len(template_signal),[[250,200,200,200]]) #[[500,200,300,300]]) corr_thread = proc.TimeCorrelation(template_signal) video_fps = self._process_video(proc.Correlation,w,h,d,processors = 3) self._get_chunk_time() with sg.VAR_LOCK: corr_thread.terminated = True corr_thread.join() self._empty_pool() self._reset_globals() #self._save_video(filename='correlation.avi',fps=video_fps) rospy.loginfo("Save Plot") plt.plot(sg.PLOT) plt.plot(sg.PLOT2) plt.savefig('/home/CameraNetwork/July/plot.jpg') cv2.imwrite('/home/CameraNetwork/July/image.jpg',sg.PICTURE) rospy.loginfo("Ending service.") return [] def gauss_BG_model_cb(self,req): rospy.loginfo("Background Gaussian Modeling") self._process_video(proc.GrayFrameCapture) with sg.VAR_LOCK: self._empty_pool() self._reset_globals() Matrix = sg.VIDEO_MATRIX[0:15].astype(np.uint16) d,h,w = Matrix.shape rospy.loginfo("Starting modelisation") self.mu = sum(Matrix)/d std = Matrix-self.mu self.sig = np.sqrt(sum(std*std)/d) rospy.loginfo("normfit done...") sg.MU = self.mu.astype(np.float32) sg.SIG = self.sig.astype(np.float32) #substract part: rospy.loginfo("Starting background substraction...") video_fps = self._process_video(proc.BackgroundSubstraction,depth=30,processors=2) self._get_chunk_time() with sg.VAR_LOCK: self._empty_pool() self._reset_globals() self._save_video(filename='bg_substraction.avi',fps=video_fps) rospy.loginfo("Ending service.") return [] def test_cb(self,req): rospy.loginfo("Begin Tests!") video_fps = self._process_video(proc.TestImageProcessor) totalTime = 0 self._get_chunk_time() with sg.VAR_LOCK: self._empty_pool(); self._reset_globals() self.save_video(fps=video_fps) rospy.loginfo("Ending service.") return [] def _process_video(self,procClass,width=1296,heigth=730,depth=50, processors=4): gpio.digitalWrite(self.led,True) with sg.VAR_LOCK: #yuv : convert width and height to fit with yuv format sg._WIDTH = (width+31)//32*32 sg._HEIGTH = (heigth+15)//16*16 sg._DEPTH = depth sg.VIDEO_MATRIX = np.zeros([sg._DEPTH + 1,sg._HEIGTH,sg._WIDTH],np.uint8) sg.POOL = [procClass() for i in range(processors)] self.picam.resolution = (width,heigth) self.picam.framerate = 90 rospy.sleep(1) startTime = rospy.get_rostime() self.picam.capture_sequence(proc.streams(),'yuv',use_video_port=True) 
gpio.digitalWrite(self.led,False) deltaTime = rospy.get_rostime() - startTime fps = depth/deltaTime.to_sec() rospy.loginfo("Capture : " + str(fps) + " fps.") return fps def _save_video(self,filename='test.avi',fps=20): rospy.loginfo("Saving Video...") filename = '/home/CameraNetwork/July/' + filename video =cv2.VideoWriter(filename,cv2.cv.CV_FOURCC('M','J','P','G'),fps, (sg._WIDTH,sg._HEIGTH),isColor = False) for i in sg.VIDEO_MATRIX: video.write(i) video.release() def _empty_pool(self): rospy.loginfo("Terminating Threads...") while sg.POOL: processor = sg.POOL.pop() processor.terminated = True processor.join() def _get_chunk_time(self): totalTime = 0 for i in sg.TIME_LIST: totalTime += i.to_sec() rospy.loginfo("the chunk takes " + str(totalTime/len(sg.TIME_LIST)) + " sec") def _reset_globals(self): sg.POOL = [] sg.FRAME_COUNT = 0 sg.DONE = False sg.TIME_LIST = [] #proc.VIDEO_MATRIX = np.zeros([_DEPTH + 1,_HEIGTH,_WIDTH],np.uint8) def _init_picamera(self): self.picam.exposure_mode = 'fixedfps' self.picam.awb_mode = 'off' self.picam.awb_gains = 1.4 #self.picam.resolution = (1296,972) #self.picam.framerate = 40 def _init_led(self): self.led = 5 os.system("gpio export " + str(self.led) + " out") if gpio.wiringPiSetupSys() != 0: rospy.logfatal("Unable to setup gpio") gpio.digitalWrite(self.led,False) def _flash_led(self, nflash=1, delay=0.1): for n in range(nflash): gpio.digitalWrite(self.led,True) rospy.sleep(delay) gpio.digitalWrite(self.led,False) rospy.sleep(delay) if __name__ == "__main__": rospy.init_node('picam_tester') server = picam_tester();
iamblusky/Led_Tracking
src/picam_tracker_py/scripts/picam_timer.py
Python
bsd-2-clause
6,348
[ "Gaussian" ]
7416afeae7bedd17c9e5aaea54c6981f63156b56fccc7607b0252207384bb3d4
from jobman import DD, expand, flatten

import pynet.layer as layer
from pynet.model import *
from pynet.layer import *
from pynet.datasets.mnist import Mnist, Mnist_Blocks
import pynet.datasets.spec as spec
import pynet.datasets.mnist as mnist
import pynet.datasets.transfactor as tf
import pynet.datasets.mapping as mapping
import pynet.learning_method as learning_methods
from pynet.learning_rule import LearningRule
from pynet.log import Log
from pynet.train_object import TrainObject
from pynet.cost import Cost
import pynet.datasets.preprocessor as preproc
import pynet.datasets.dataset_noise as noisy
import pynet.layer_noise as layer_noise

import cPickle
import os

from hps.models.model import NeuralNet

import theano
from theano.sandbox.cuda.var import CudaNdarraySharedVariable

floatX = theano.config.floatX


class RecSys(NeuralNet):

    def __init__(self, state):
        self.state = state

    def run(self):
        dataset = self.build_dataset()
        learning_rule = self.build_learning_rule()
        model = self.build_model(dataset)
        learn_method = self.build_learning_method()
        database = self.build_database(dataset, learning_rule, learn_method, model)
        log = self.build_log(database)

        train_obj = TrainObject(log=log,
                                dataset=dataset,
                                learning_rule=learning_rule,
                                learning_method=learn_method,
                                model=model)

        train_obj.run()

        # log.info("fine tuning")
        # for layer in train_obj.model.layers:
        #     layer.dropout_below = None
        #     layer.noise = None
        # train_obj.setup()
        # train_obj.run()

    def build_layer(self, dataset, layer_name):
        output_noise = None if layer_name.layer_noise.type is None else \
            getattr(layer_noise, layer_name.layer_noise.type)()

        if layer_name.layer_noise.type in ['BlackOut', 'MaskOut', 'BatchOut']:
            output_noise.ratio = layer_name.layer_noise.ratio
        elif layer_name.layer_noise.type == 'Gaussian':
            output_noise.std = layer_name.layer_noise.std
            output_noise.mean = layer_name.layer_noise.mean

        output = getattr(layer, layer_name.type)(dim=layer_name.dim,
                                                 name=layer_name.name,
                                                 dropout_below=layer_name.dropout_below,
                                                 noise=output_noise)
        return output

    def build_model(self, dataset):
        model = MLP(input_dim=dataset.feature_size(),
                    rand_seed=self.state.model.rand_seed)
        hidden1 = self.build_layer(dataset, self.state.hidden1)
        hidden2 = self.build_layer(dataset, self.state.hidden2)
        output = self.build_layer(dataset, self.state.output)
        model.add_layer(hidden1)
        model.add_layer(hidden2)
        model.add_layer(output)
        return model
hycis/Pynet
hps/models/RecSys.py
Python
apache-2.0
3,023
[ "Gaussian" ]
629f7d291c14e4b8928e68644ba6f720e77d69d12b521fbf1ecc700a024e2472
import os import warnings import numpy as np from .. import DataArray from ..core import indexing from ..core.utils import is_scalar from .common import BackendArray from .file_manager import CachingFileManager from .locks import SerializableLock # TODO: should this be GDAL_LOCK instead? RASTERIO_LOCK = SerializableLock() _ERROR_MSG = ( "The kind of indexing operation you are trying to do is not " "valid on rasterio files. Try to load your data with ds.load()" "first." ) class RasterioArrayWrapper(BackendArray): """A wrapper around rasterio dataset objects""" def __init__(self, manager, lock, vrt_params=None): from rasterio.vrt import WarpedVRT self.manager = manager self.lock = lock # cannot save riods as an attribute: this would break pickleability riods = manager.acquire() if vrt_params is not None: riods = WarpedVRT(riods, **vrt_params) self.vrt_params = vrt_params self._shape = (riods.count, riods.height, riods.width) dtypes = riods.dtypes if not np.all(np.asarray(dtypes) == dtypes[0]): raise ValueError("All bands should have the same dtype") self._dtype = np.dtype(dtypes[0]) @property def dtype(self): return self._dtype @property def shape(self): return self._shape def _get_indexer(self, key): """ Get indexer for rasterio array. Parameter --------- key: tuple of int Returns ------- band_key: an indexer for the 1st dimension window: two tuples. Each consists of (start, stop). squeeze_axis: axes to be squeezed np_ind: indexer for loaded numpy array See also -------- indexing.decompose_indexer """ assert len(key) == 3, "rasterio datasets should always be 3D" # bands cannot be windowed but they can be listed band_key = key[0] np_inds = [] # bands (axis=0) cannot be windowed but they can be listed if isinstance(band_key, slice): start, stop, step = band_key.indices(self.shape[0]) band_key = np.arange(start, stop, step) # be sure we give out a list band_key = (np.asarray(band_key) + 1).tolist() if isinstance(band_key, list): # if band_key is not a scalar np_inds.append(slice(None)) # but other dims can only be windowed window = [] squeeze_axis = [] for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])): if isinstance(k, slice): # step is always positive. 
see indexing.decompose_indexer start, stop, step = k.indices(n) np_inds.append(slice(None, None, step)) elif is_scalar(k): # windowed operations will always return an array # we will have to squeeze it later squeeze_axis.append(-(2 - i)) start = k stop = k + 1 else: start, stop = np.min(k), np.max(k) + 1 np_inds.append(k - start) window.append((start, stop)) if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray): # do outer-style indexing np_inds[-2:] = np.ix_(*np_inds[-2:]) return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds) def _getitem(self, key): from rasterio.vrt import WarpedVRT band_key, window, squeeze_axis, np_inds = self._get_indexer(key) if not band_key or any(start == stop for (start, stop) in window): # no need to do IO shape = (len(band_key),) + tuple(stop - start for (start, stop) in window) out = np.zeros(shape, dtype=self.dtype) else: with self.lock: riods = self.manager.acquire(needs_lock=False) if self.vrt_params is not None: riods = WarpedVRT(riods, **self.vrt_params) out = riods.read(band_key, window=window) if squeeze_axis: out = np.squeeze(out, axis=squeeze_axis) return out[np_inds] def __getitem__(self, key): return indexing.explicit_indexing_adapter( key, self.shape, indexing.IndexingSupport.OUTER, self._getitem ) def _parse_envi(meta): """Parse ENVI metadata into Python data structures. See the link for information on the ENVI header file format: http://www.harrisgeospatial.com/docs/enviheaderfiles.html Parameters ---------- meta : dict Dictionary of keys and str values to parse, as returned by the rasterio tags(ns='ENVI') call. Returns ------- parsed_meta : dict Dictionary containing the original keys and the parsed values """ def parsevec(s): return np.fromstring(s.strip("{}"), dtype="float", sep=",") def default(s): return s.strip("{}") parse = {"wavelength": parsevec, "fwhm": parsevec} parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()} return parsed_meta def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None): """Open a file with rasterio (experimental). This should work with any file that rasterio can open (most often: geoTIFF). The x and y coordinates are generated automatically from the file's geoinformation, shifted to the center of each pixel (see `"PixelIsArea" Raster Space <http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_ for more information). You can generate 2D coordinates from the file's attributes with:: from affine import Affine da = xr.open_rasterio('path_to_file.tif') transform = Affine.from_gdal(*da.attrs['transform']) nx, ny = da.sizes['x'], da.sizes['y'] x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform Parameters ---------- filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT Path to the file to open. Or already open rasterio dataset. parse_coordinates : bool, optional Whether to parse the x and y coordinates out of the file's ``transform`` attribute or not. The default is to automatically parse the coordinates only if they are rectilinear (1D). It can be useful to set ``parse_coordinates=False`` if your files are very large or if you don't need the coordinates. chunks : int, tuple or dict, optional Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new DataArray into a dask array. 
cache : bool, optional If True, cache data loaded from the underlying datastore in memory as NumPy arrays when accessed to avoid reading from the underlying data- store multiple times. Defaults to True unless you specify the `chunks` argument to use dask, in which case it defaults to False. lock : False, True or threading.Lock, optional If chunks is provided, this argument is passed on to :py:func:`dask.array.from_array`. By default, a global lock is used to avoid issues with concurrent access to the same file when using dask's multithreaded backend. Returns ------- data : DataArray The newly created DataArray. """ import rasterio from rasterio.vrt import WarpedVRT vrt_params = None if isinstance(filename, rasterio.io.DatasetReader): filename = filename.name elif isinstance(filename, rasterio.vrt.WarpedVRT): vrt = filename filename = vrt.src_dataset.name vrt_params = dict( crs=vrt.crs.to_string(), resampling=vrt.resampling, src_nodata=vrt.src_nodata, dst_nodata=vrt.dst_nodata, tolerance=vrt.tolerance, transform=vrt.transform, width=vrt.width, height=vrt.height, warp_extras=vrt.warp_extras, ) if lock is None: lock = RASTERIO_LOCK manager = CachingFileManager(rasterio.open, filename, lock=lock, mode="r") riods = manager.acquire() if vrt_params is not None: riods = WarpedVRT(riods, **vrt_params) if cache is None: cache = chunks is None coords = {} # Get bands if riods.count < 1: raise ValueError("Unknown dims") coords["band"] = np.asarray(riods.indexes) # Get coordinates if riods.transform.is_rectilinear: # 1d coordinates parse = True if parse_coordinates is None else parse_coordinates if parse: nx, ny = riods.width, riods.height # xarray coordinates are pixel centered x, _ = riods.transform * (np.arange(nx) + 0.5, np.zeros(nx) + 0.5) _, y = riods.transform * (np.zeros(ny) + 0.5, np.arange(ny) + 0.5) coords["y"] = y coords["x"] = x else: # 2d coordinates parse = False if (parse_coordinates is None) else parse_coordinates if parse: warnings.warn( "The file coordinates' transformation isn't " "rectilinear: xarray won't parse the coordinates " "in this case. Set `parse_coordinates=False` to " "suppress this warning.", RuntimeWarning, stacklevel=3, ) # Attributes attrs = {} # Affine transformation matrix (always available) # This describes coefficients mapping pixel coordinates to CRS # For serialization store as tuple of 6 floats, the last row being # always (0, 0, 1) per definition (see # https://github.com/sgillies/affine) attrs["transform"] = tuple(riods.transform)[:6] if hasattr(riods, "crs") and riods.crs: # CRS is a dict-like object specific to rasterio # If CRS is not None, we convert it back to a PROJ4 string using # rasterio itself try: attrs["crs"] = riods.crs.to_proj4() except AttributeError: attrs["crs"] = riods.crs.to_string() if hasattr(riods, "res"): # (width, height) tuple of pixels in units of CRS attrs["res"] = riods.res if hasattr(riods, "is_tiled"): # Is the TIF tiled? 
(bool) # We cast it to an int for netCDF compatibility attrs["is_tiled"] = np.uint8(riods.is_tiled) if hasattr(riods, "nodatavals"): # The nodata values for the raster bands attrs["nodatavals"] = tuple( np.nan if nodataval is None else nodataval for nodataval in riods.nodatavals ) if hasattr(riods, "scales"): # The scale values for the raster bands attrs["scales"] = riods.scales if hasattr(riods, "offsets"): # The offset values for the raster bands attrs["offsets"] = riods.offsets if hasattr(riods, "descriptions") and any(riods.descriptions): # Descriptions for each dataset band attrs["descriptions"] = riods.descriptions if hasattr(riods, "units") and any(riods.units): # A list of units string for each dataset band attrs["units"] = riods.units # Parse extra metadata from tags, if supported parsers = {"ENVI": _parse_envi, "GTiff": lambda m: m} driver = riods.driver if driver in parsers: if driver == "GTiff": meta = parsers[driver](riods.tags()) else: meta = parsers[driver](riods.tags(ns=driver)) for k, v in meta.items(): # Add values as coordinates if they match the band count, # as attributes otherwise if isinstance(v, (list, np.ndarray)) and len(v) == riods.count: coords[k] = ("band", np.asarray(v)) else: attrs[k] = v data = indexing.LazilyOuterIndexedArray( RasterioArrayWrapper(manager, lock, vrt_params) ) # this lets you write arrays loaded with rasterio data = indexing.CopyOnWriteArray(data) if cache and chunks is None: data = indexing.MemoryCachedArray(data) result = DataArray(data=data, dims=("band", "y", "x"), coords=coords, attrs=attrs) if chunks is not None: from dask.base import tokenize # augment the token with the file modification time try: mtime = os.path.getmtime(filename) except OSError: # the filename is probably an s3 bucket rather than a regular file mtime = None token = tokenize(filename, mtime, chunks) name_prefix = "open_rasterio-%s" % token result = result.chunk(chunks, name_prefix=name_prefix, token=token) # Make the file closeable result._file_obj = manager return result
jhamman/xarray
xarray/backends/rasterio_.py
Python
apache-2.0
12,938
[ "NetCDF" ]
817bc51d27b007fc96a7793826c2a3adba364381a717c51a8a60c0cf544cbc15
# -*- coding: UTF-8 -*-
# Copyright (C) 2016 Sylvain Taverne <taverne.sylvain@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Import from standard library
from subprocess import Popen

# Import from itools
from itools.fs.vfs import Folder
from itools.uri import get_uri_name, Path


class GulpBuilder(object):
    """Run "gulp build" in project's repository
    & add generated files $ ui/{SKINS}/*
    into the project MANIFEST file.
    That allow to avoid commit compiled JS/CSS files into GIT.
    """

    def __init__(self, worktree, manifest):
        self.worktree = worktree
        self.manifest = manifest
        self.vfs = Folder('.')
        if self.vfs.is_folder('ui/'):
            self.dist_folders = tuple(['ui/{0}'.format(x)
                                       for x in Folder('ui/').get_names()])

    def run(self):
        npm_done = self.launch_npm_install()
        gulp_done = self.launch_gulp_build()
        # Add DIST files into manifest
        if npm_done or gulp_done:
            for path in self.vfs.traverse('ui/'):
                relative_path = self.vfs.get_relative_path(path)
                if (relative_path and
                        relative_path.startswith(self.dist_folders) and
                        self.vfs.is_file(path)):
                    self.manifest.add(relative_path)

    def launch_npm_install(self):
        done = False
        for path in self.manifest:
            filename = get_uri_name(path)
            if filename == 'package.json':
                print '***'*25
                print '*** Run $ npm install on ', path
                print '***'*25
                path = str(Path(path)[:-1]) + '/'
                p = Popen(['npm', 'install'], cwd=path)
                p.wait()
                done = True
        return done

    def launch_gulp_build(self):
        done = False
        for path in self.manifest:
            filename = get_uri_name(path)
            if filename == 'package.json':
                print '***'*25
                print '*** Run $ gulp build on ', path
                print '***'*25
                path = str(Path(path)[:-1]) + '/'
                p = Popen(['gulp', 'build'], cwd=path)
                p.wait()
                done = True
        return done
nicolasderam/itools
itools/pkg/build_gulp.py
Python
gpl-3.0
2,833
[ "GULP" ]
aeca73d821aa0ca78307db38d075d33b77705a9d9e385987a562b9bdd3d7b308
#!/usr/bin/env python
"""Tests for Genetic Algorithm Repair code.

This tests classes which are designed for repairing organisms after
mutation and crossover.
"""
# standard library
import unittest

# biopython
from Bio.Alphabet import Alphabet
from Bio.Seq import MutableSeq

# local stuff
from Bio.NeuralNetwork.Gene.Schema import Schema
from Bio.GA.Organism import Organism
from Bio.GA.Repair.Stabilizing import AmbiguousRepair


class TestAlphabet(Alphabet):
    """Simple test alphabet.
    """
    alphabet_matches = {"1": "1",
                        "2": "2",
                        "3": "3",
                        "*": "123"}

    letters = ["1", "2", "3", "*"]


def test_fitness(genome):
    """Simple class for calculating fitnesses.
    """
    return 1


class AmbiguousRepairTest(unittest.TestCase):
    """Test for the ability to repair too many ambiguous genes in a genome.
    """
    def setUp(self):
        alphabet = TestAlphabet()
        test_genome = MutableSeq("11*22*33*", alphabet)
        self.organism = Organism(test_genome, test_fitness)

        self.ambig_info = Schema(alphabet.alphabet_matches)

    def test_single_repair(self):
        """Test repair of a single ambiguous position in a genome.
        """
        repairer = AmbiguousRepair(self.ambig_info, 2)

        for repair_attempt in range(5):
            new_org = repairer.repair(self.organism)
            new_genome_seq = new_org.genome.toseq()

            assert new_genome_seq.count("*") == 2, \
                "Did not repair genome, got %s" % str(new_genome_seq)

    def test_multiple_repair(self):
        """Test repair of multiple ambiguous positions in a genome.
        """
        repairer = AmbiguousRepair(self.ambig_info, 0)

        for repair_attempt in range(5):
            new_org = repairer.repair(self.organism)
            new_genome_seq = new_org.genome.toseq()

            assert new_genome_seq.count("*") == 0, \
                "Did not repair genome, got %s" % str(new_genome_seq)


if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
updownlife/multipleK
dependencies/biopython-1.65/Tests/test_GARepair.py
Python
gpl-2.0
2,135
[ "Biopython" ]
50658461bf764ad2a86f2e7f574e8d5b6914d4a03790b56b7dadd58bffdf961c
import sys import time as tm import numpy as np import ConfigParser def getlist(option, sep=',', chars=None): """Return a list from a ConfigParser option. By default, split on a comma and strip whitespaces.""" return [float(chunk.strip(chars)) for chunk in option.split(sep)] class dnf: def __init__(self, configFileName): config = ConfigParser.RawConfigParser() config.read(configFileName) self.n = config.getint('Model', 'numNeurons') self.m, self.k = self.n//6, 5*self.n//6 self.x_inf = config.getfloat('Model', 'x_inf') self.x_sup = config.getfloat('Model', 'x_sup') self.tau1 = config.getfloat('Model', 'tau1') self.tau2 = config.getfloat('Model', 'tau2') self.axonalVelGS = config.getfloat('Model', 'axonalvelocityGS') self.axonalVelSG = config.getfloat('Model', 'axonalvelocitySG') self.axonalVelGG = config.getfloat('Model', 'axonalvelocityGG') self.K = getlist(config.get('Model', 'SynapticStrength')) self.S = getlist(config.get('Model', 'SynapticVariance')) self.Wcx = config.getfloat('Model', 'synapticstrengthCx') self.Wstr = config.getfloat('Model', 'synapticstrengthStr') self.switch = config.getint('DBS', 'switch') self.Kc = config.getfloat('DBS', 'Kc') self.tau = config.getint('DBS', 'tau') self.l = (self.x_sup - self.x_inf) self.dx = self.l/float(self.n) self.mean = self.l/2 self.norm() self.printParams() def printParams(self): print 'Model Parameters' print '----------------' print "Number of neurons: {}".format(self.n) print "Domain Omega: [{}, {}]".format(self.x_inf, self.x_sup) print "Length of domain: {}".format(self.l) print "Synaptic decay times: {} and {}".format(self.tau1, self.tau2) print "Axonal transmission velocity G->S: {}".format(self.axonalVelGS) print "Axonal transmission velocity S->G: {}".format(self.axonalVelSG) print "Axonal transmission velocity G->G: {}".format(self.axonalVelGG) print "Synaptic stregnths: {}, {}, {}".format(self.K[0], self.K[1], self.K[2]) print "Synaptic variances: {}, {}, {}".format(self.S[0], self.S[1], self.S[2]) print 'DBS Parameters' print '--------------' print "DBS Gain (Kc): {}".format(self.Kc) print "DBS control signal time constant: {}".format(self.tau) print "-------------------------------------------------------------" def S1(self, x): """ Sigmoid function of population #1 """ # return 1.0/(1.0 + np.exp(-x)) - 0.5 return 0.3/(1 + np.exp(-4.*x/0.3) * 283./17.) def S2(self, x): """ Sigmoid function of population #2 """ # return 1.0/(1.0 + np.exp(-x)) - 0.5 return 0.4/(1 + np.exp(-4.*x/0.4) * 325./75.) 
def G1(self, x): """ Centered at zero, sigmoid function """ return self.S1(x) - self.S1(0) def G2(self, x): """ Centered at zero, sigmoid function """ return self.S2(x) - self.S2(0) def gaussian(self, x, sigma=1.0): ''' Gaussian function ''' return (1.0/(np.sqrt(2*np.pi)*sigma))*np.exp(-.5*(x/sigma)**2) # return np.exp(-0.5*(x/sigma)**2) def g(self, x, sigma): return np.exp(-.5*(x/sigma)**2) def build_distances(self, nodes, mean, x_inf, x_sup): """ Computes all the possible distances between units """ X, Y = np.meshgrid(np.linspace(x_inf, x_sup, nodes), np.linspace(x_inf, x_sup, nodes)) D = abs((X-mean) - (Y-mean)) return D def norm(self): """ Computes the norms """ N = 5000 dx = self.l/float(N) d = self.build_distances(N, 0.0, 0.0, 1.0) norm = np.zeros((len(self.K), )) for i in range(len(self.K)): tmp = (self.K[i] * self.g(d, self.S[i]))**2 norm[i] = tmp.sum() * dx * dx print "Norm W22: {}".format(norm[2]) def build_kernels(self): """ Build the synaptic connectivity matrices """ n = self.n # Compute all the possible distances dist = [self.build_distances(n, 0.917, 0.0, 1.0), self.build_distances(n, 0.083, 0.0, 1.0), self.build_distances(n, 0.912, 0.83, 1.0)] # Create a temporary vector containing gaussians g = np.empty((len(self.K), n, n)) for j in range(len(self.K)): for i in range(n): # g[j, i] = self.K[j] * self.gaussian(dist[i], self.S[j]) g[j, i] = self.K[j] * self.g(dist[j][i], self.S[j]) g[j, self.m:self.k] = 0.0 # GPe to STN connections W12 = np.zeros((n, n)) W12[:self.m, self.k:] = g[0, self.k:, self.k:] # STN to GPe connections W21 = np.zeros((n, n)) W21[self.k:, :self.m] = g[1, :self.m, :self.m] # GPe to GPe connections W22 = np.zeros((n, n)) W22[self.k:, self.k:] = g[2, self.k:, self.k:] np.fill_diagonal(W22, 0.0) return W12, W21, W22, dist def optogenetics_failure(self, x, percent): idx = np.random.choice(np.arange(0, x.shape[0], 1), percent, replace=False) x[idx] = 0.0 return sum(1 for i in x.flatten() if i == 0) def initial_conditions(self, time): """ Set the initial conditions """ n = self.n self.X1 = np.zeros((time, n)) self.X2 = np.zeros((time, n)) def run(self, tf, dt, percentage): np.random.seed(62) """ Run a simulation """ n, m, k = self.n, self.m, self.k # Total simulation time simTime = int(tf/dt) # Returns the three synaptic connections kernels W12, W21, W22, delays = self.build_kernels() # Compute delays by dividing distances by axonal velocity delays12 = np.floor(delays[0]/self.axonalVelGS) delays21 = np.floor(delays[1]/self.axonalVelSG) delays22 = np.floor(delays[2]/self.axonalVelGG) maxDelay = int(max(delays12[0].max(), delays21[0].max(), delays22[0].max())) # Set the initial conditions and the history self.initial_conditions(simTime) # Initialize the cortical and striatal inputs Cx = 0.026 * self.Wcx Str = 0.002 * self.Wstr # Presynaptic activities pre12, pre21, pre22 = np.empty((m,)), np.empty((m,)), np.empty((m,)) # DBS signals # A is a gaussian that defines spatialy the stimulation zone x = np.linspace(self.x_inf, self.x_sup, n) tmp = self.g(x-0.5, .09) A = np.zeros((m, )) A = tmp[(n-10)//2:(n+10)//2] zeros = self.optogenetics_failure(A, percentage) # Xref is the reference signal Xref = np.zeros((m, )) # Simulation U, U_ = np.zeros((m, )), np.zeros((simTime, m)) t0 = tm.time() for i in range(maxDelay, simTime): if i*dt > 500: U_[i] = self.Kc * (self.X1[i-1, :m] - Xref) U = self.switch * A * U_[i] # Take into account the history of rate for each neuron according # to its axonal delay for idxi, ii in enumerate(range(m)): mysum = 0.0 for jj in range(k, n): 
mysum += (W12[ii, jj] * self.X2[i-delays12[ii, jj], jj])*self.dx pre12[idxi] = mysum for idxi, ii in enumerate(range(k, n)): mysum = 0.0 for jj in range(0, m): mysum += (W21[ii, jj] * self.X1[i-delays21[ii, jj], jj])*self.dx pre21[idxi] = mysum for idxi, ii in enumerate(range(k, n)): mysum = 0.0 for jj in range(k, n): mysum += (W22[ii, jj] * self.X2[i-delays22[ii, jj], jj])*self.dx pre22[idxi] = mysum # Forward Euler step self.X1[i, :m] = (self.X1[i-1, :m] + (-self.X1[i-1, :m] + self.S1(-pre12 + Cx - U)) * dt/self.tau1) self.X2[i, k:] = (self.X2[i-1, k:] + (-self.X2[i-1, k:] + self.S2(pre21 - pre22 - Str))*dt/self.tau2) t1 = tm.time() print "Simulation time: {} sec".format(t1-t0) return U_, zeros if __name__ == '__main__': if len(sys.argv) == 3: config = ConfigParser.RawConfigParser() config.read(sys.argv[1]) tf = config.getfloat('Time', 'tf') dt = config.getfloat('Time', 'dt') sim = dnf(sys.argv[1]) damagePercent = [0, 3, 5, 7, 10] for i in damagePercent: _, zeros = sim.run(tf, dt, i) np.save(sys.argv[2]+"solution"+str(i)+'_'+"12", sim.X1) else: print "Parameters file {} does not exist!".format(sys.argv[1])
gdetor/neuralfieldDBSmodel
src/protocolEfficiency.py
Python
bsd-3-clause
9,182
[ "Gaussian", "NEURON" ]
ca325e937129ce7a0caee81e76e56f0eb82c6777ccffd9e389c4a2837fe6f78c
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from time import time from argparse import ArgumentParser from bigdl.orca import init_orca_context, stop_orca_context from bigdl.friesian.feature import FeatureTable LABEL_COL = 0 INT_COLS = ["_c{}".format(i) for i in list(range(1, 14))] CAT_COLS = ["_c{}".format(i) for i in list(range(14, 40))] conf = {"spark.network.timeout": "10000000", "spark.sql.broadcastTimeout": "7200", "spark.sql.shuffle.partitions": "2000", "spark.locality.wait": "0s", "spark.sql.hive.filesourcePartitionFileCacheSize": "4096000000", "spark.sql.crossJoin.enabled": "true", "spark.serializer": "org.apache.spark.serializer.KryoSerializer", "spark.kryo.unsafe": "true", "spark.kryoserializer.buffer.max": "1024m", "spark.task.cpus": "1", "spark.executor.heartbeatInterval": "200s", "spark.driver.maxResultSize": "40G", # Default replication is 3. You may adjust according your cluster settings. "spark.hadoop.dfs.replication": "1"} def _parse_args(): parser = ArgumentParser() parser.add_argument("--cluster_mode", type=str, default="local", help="The cluster mode, such as local, yarn, standalone or spark-submit.") parser.add_argument("--master", type=str, default=None, help="The master url, only used when the cluster_mode is standalone.") parser.add_argument("--cores", type=int, default=48, help="The number of cores to use on each node.") parser.add_argument("--memory", type=str, default="240g", help="The amount of memory to allocate on each node.") parser.add_argument("--num_nodes", type=int, default=8, help="The number of nodes to use in the cluster.") parser.add_argument("--driver_cores", type=int, default=4, help="The number of cores to use for the driver.") parser.add_argument("--driver_memory", type=str, default="36g", help="The amount of memory to allocate for the driver.") parser.add_argument("--days", type=str, default="0-23", help="The day range for data preprocessing, such as 0-23, 0-1.") parser.add_argument("--frequency_limit", type=int, default=15, help="Categories with frequency below this value will be " "omitted from encoding.") parser.add_argument("--input_folder", type=str, required=True, help="The path to the folder of parquet files, " "either a local path or an HDFS path.") parser.add_argument("--output_folder", type=str, help="The path to save the preprocessed data and " "the generated string indices to parquet files. 
" "HDFS path is recommended.") args = parser.parse_args() start, end = args.days.split("-") args.day_range = list(range(int(start), int(end) + 1)) args.days = len(args.day_range) return args def preprocess_and_save(data_tbl, models, mode, save_path): data_tbl = data_tbl.encode_string(CAT_COLS, models) \ .fillna(0, INT_COLS + CAT_COLS).log(INT_COLS) data_tbl = data_tbl.ordinal_shuffle_partition() if save_path: if mode == "train": save_path = os.path.join(save_path, "saved_data") elif mode == "test": save_path = os.path.join(save_path, "saved_data_test") else: raise ValueError("mode should be either train or test") print("Saving {} data files to {}".format(mode, save_path)) data_tbl.write_parquet(save_path) else: data_tbl.compute() return data_tbl if __name__ == "__main__": args = _parse_args() if args.cluster_mode == "local": init_orca_context("local", cores=args.cores, memory=args.memory) elif args.cluster_mode == "standalone": init_orca_context("standalone", master=args.master, cores=args.cores, num_nodes=args.num_nodes, memory=args.memory, driver_cores=args.driver_cores, driver_memory=args.driver_memory, conf=conf) elif args.cluster_mode == "yarn": init_orca_context("yarn-client", cores=args.cores, num_nodes=args.num_nodes, memory=args.memory, driver_cores=args.driver_cores, driver_memory=args.driver_memory, conf=conf) elif args.cluster_mode == "spark-submit": init_orca_context("spark-submit") else: raise ValueError( "cluster_mode should be one of 'local', 'yarn', 'standalone' and 'spark-submit'" ", but got " + args.cluster_mode) time_start = time() paths = [os.path.join(args.input_folder, "day_%d.parquet" % i) for i in args.day_range] tbl = FeatureTable.read_parquet(paths) idx_list = tbl.gen_string_idx(CAT_COLS, freq_limit=args.frequency_limit) if args.days == 24: # Full Criteo dataset # Exclude the last path day_23.parquet since the first half of day_23 is separated for test. train_data = FeatureTable.read_parquet(paths[:-1]) train_preprocessed = preprocess_and_save(train_data, idx_list, "train", args.output_folder) test_data = FeatureTable.read_parquet( os.path.join(args.input_folder, "day_23_test.parquet")) test_preprocessed = preprocess_and_save(test_data, idx_list, "test", args.output_folder) else: train_data = FeatureTable.read_parquet(paths) train_preprocessed = preprocess_and_save(train_data, idx_list, "train", args.output_folder) time_end = time() print("Total preprocessing time: ", time_end - time_start) train_preprocessed.show(5) if args.output_folder: for idx in idx_list: idx.write_parquet(args.output_folder) print("Finished") stop_orca_context()
intel-analytics/BigDL
python/friesian/example/dlrm/dlrm_preprocessing.py
Python
apache-2.0
6,611
[ "ORCA" ]
8ceda40053839978db43d171345b2ff1f995412c45d0f045765874a9bf894e2d
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
import os


class Orca(Package):
    """An ab initio, DFT and semiempirical SCF-MO package

    Note: Orca is licensed software. You will need to create an account
    on the Orca homepage and download Orca yourself. Spack will search
    your current directory for the download file. Alternatively, add this
    file to a mirror so that Spack can find it. For instructions on how to
    set up a mirror, see http://spack.readthedocs.io/en/latest/mirrors.html"""

    homepage = "https://cec.mpg.de"
    url = "file://{0}/orca_4_0_1_2_linux_x86-64_openmpi202.tar.zst".format(os.getcwd())

    version('4.2.1', sha256='9bbb3bfdca8220b417ee898b27b2885508d8c82799adfa63dde9e72eab49a6b2', expand=False)
    version('4.2.0', sha256='55a5ca5aaad03396ac5ada2f14b61ffa735fdc2d98355e272465e07a6749d399', expand=False)
    version('4.0.1.2', sha256='cea442aa99ec0d7ffde65014932196b62343f7a6191b4bfc438bfb38c03942f7', expand=False)

    depends_on('zstd', type='build')

    # Map Orca version with the required OpenMPI version
    openmpi_versions = {
        '4.0.1.2': '2.0.2',
        '4.2.0': '3.1.4',
        '4.2.1': '3.1.4'
    }
    for orca_version, openmpi_version in openmpi_versions.items():
        depends_on('openmpi@{0}'.format(openmpi_version),
                   type='run', when='@{0}'.format(orca_version))

    def url_for_version(self, version):
        out = "file://{0}/orca_{1}_linux_x86-64_openmpi{2}.tar.zst"
        return out.format(os.getcwd(), version.underscored,
                          self.openmpi_versions[version.string])

    def install(self, spec, prefix):
        # we have to extract the archive ourself
        # fortunately it's just full of a bunch of binaries
        vername = os.path.basename(self.stage.archive_file).split('.')[0]

        zstd = which('zstd')
        zstd('-d', self.stage.archive_file, '-o', vername + '.tar')

        tar = which('tar')
        tar('-xvf', vername + '.tar')

        # there are READMEs in there but they don't hurt anyone
        mkdirp(prefix.bin)
        install_tree(vername, prefix.bin)

        # Check "mpirun" usability when building against OpenMPI
        # with Slurm scheduler and add a "mpirun" wrapper that
        # calls "srun" if need be
        if '^openmpi ~legacylaunchers schedulers=slurm' in self.spec:
            mpirun_srun = join_path(os.path.dirname(__file__),
                                    "mpirun_srun.sh")
            install(mpirun_srun, prefix.bin.mpirun)
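# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream package): because Orca is
# licensed software, the tarball has to be fetched manually first. A typical
# session, assuming the archive name produced by url_for_version() above,
# might look like the following shell commands (illustrative only):
#
#   # download orca_4_2_1_linux_x86-64_openmpi... from the Orca portal into $PWD
#   cd /path/containing/the/downloaded/tarball   # hypothetical path
#   spack install orca@4.2.1
#
# Spack then resolves the file:// URL relative to the current directory, and
# the version-to-OpenMPI mapping in openmpi_versions adds the matching
# openmpi run dependency (3.1.4 for Orca 4.2.1).
# ---------------------------------------------------------------------------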
iulian787/spack
var/spack/repos/builtin/packages/orca/package.py
Python
lgpl-2.1
2,751
[ "ORCA" ]
223deba023954c96334eba60bd277e459c6af56feec0d3b685b24f011264ffcd
import tensorflow as tf import numpy as np from ops import * from utils import * import input_data # from scipy.misc import imsave as ims class Draw(): def __init__(self): self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) self.n_samples = self.mnist.train.num_examples self.img_size = 28 self.attention_n = 5 self.n_hidden = 256 self.n_z = 10 self.sequence_length = 10 self.batch_size = 64 self.share_parameters = False self.images = tf.placeholder(tf.float32, [None, 784]) self.e = tf.random_normal((self.batch_size, self.n_z), mean=0, stddev=1) # Qsampler noise self.lstm_enc = tf.nn.rnn_cell.LSTMCell(self.n_hidden, state_is_tuple=True) # encoder Op self.lstm_dec = tf.nn.rnn_cell.LSTMCell(self.n_hidden, state_is_tuple=True) # decoder Op self.cs = [0] * self.sequence_length self.mu, self.logsigma, self.sigma = [0] * self.sequence_length, [0] * self.sequence_length, [0] * self.sequence_length h_dec_prev = tf.zeros((self.batch_size, self.n_hidden)) enc_state = self.lstm_enc.zero_state(self.batch_size, tf.float32) dec_state = self.lstm_dec.zero_state(self.batch_size, tf.float32) x = self.images for t in range(self.sequence_length): # error image + original image c_prev = tf.zeros((self.batch_size, self.img_size**2)) if t == 0 else self.cs[t-1] x_hat = x - tf.sigmoid(c_prev) # read the image r = self.read_basic(x,x_hat,h_dec_prev) print r.get_shape() # r = self.read_attention(x,x_hat,h_dec_prev) # encode it to guass distrib self.mu[t], self.logsigma[t], self.sigma[t], enc_state = self.encode(enc_state, tf.concat(1, [r, h_dec_prev])) # sample from the distrib to get z z = self.sampleQ(self.mu[t],self.sigma[t]) print z.get_shape() # retrieve the hidden layer of RNN h_dec, dec_state = self.decode_layer(dec_state, z) print h_dec.get_shape() # map from hidden layer -> image portion, and then write it. 
self.cs[t] = c_prev + self.write_basic(h_dec) # self.cs[t] = c_prev + self.write_attention(h_dec) h_dec_prev = h_dec self.share_parameters = True # from now on, share variables # the final timestep self.generated_images = tf.nn.sigmoid(self.cs[-1]) self.generation_loss = tf.reduce_mean(-tf.reduce_sum(self.images * tf.log(1e-10 + self.generated_images) + (1-self.images) * tf.log(1e-10 + 1 - self.generated_images),1)) kl_terms = [0]*self.sequence_length for t in xrange(self.sequence_length): mu2 = tf.square(self.mu[t]) sigma2 = tf.square(self.sigma[t]) logsigma = self.logsigma[t] kl_terms[t] = 0.5 * tf.reduce_sum(mu2 + sigma2 - 2*logsigma, 1) - self.sequence_length*0.5 self.latent_loss = tf.reduce_mean(tf.add_n(kl_terms)) self.cost = self.generation_loss + self.latent_loss optimizer = tf.train.AdamOptimizer(1e-3, beta1=0.5) grads = optimizer.compute_gradients(self.cost) for i,(g,v) in enumerate(grads): if g is not None: grads[i] = (tf.clip_by_norm(g,5),v) self.train_op = optimizer.apply_gradients(grads) self.sess = tf.Session() self.sess.run(tf.initialize_all_variables()) def train(self): for i in xrange(15000): xtrain, _ = self.mnist.train.next_batch(self.batch_size) cs, gen_loss, lat_loss, _ = self.sess.run([self.cs, self.generation_loss, self.latent_loss, self.train_op], feed_dict={self.images: xtrain}) print "iter %d genloss %f latloss %f" % (i, gen_loss, lat_loss) if i % 500 == 0: cs = 1.0/(1.0+np.exp(-np.array(cs))) # x_recons=sigmoid(canvas) for cs_iter in xrange(10): results = cs[cs_iter] results_square = np.reshape(results, [-1, 28, 28]) print results_square.shape ims("results/"+str(i)+"-step-"+str(cs_iter)+".jpg",merge(results_square,[8,8])) # given a hidden decoder layer: # locate where to put attention filters def attn_window(self, scope, h_dec): with tf.variable_scope(scope, reuse=self.share_parameters): parameters = dense(h_dec, self.n_hidden, 5) # gx_, gy_: center of 2d gaussian on a scale of -1 to 1 gx_, gy_, log_sigma2, log_delta, log_gamma = tf.split(1,5,parameters) # move gx/gy to be a scale of -imgsize to +imgsize gx = (self.img_size+1)/2 * (gx_ + 1) gy = (self.img_size+1)/2 * (gy_ + 1) sigma2 = tf.exp(log_sigma2) # stride/delta: how far apart these patches will be delta = (self.img_size - 1) / ((self.attention_n-1) * tf.exp(log_delta)) # returns [Fx, Fy, gamma] return self.filterbank(gx,gy,sigma2,delta) + (tf.exp(log_gamma),) # Given a center, distance, and spread # Construct [attention_n x attention_n] patches of gaussian filters # represented by Fx = horizontal gaussian, Fy = vertical guassian def filterbank(self, gx, gy, sigma2, delta): # 1 x N, look like [[0,1,2,3,4]] grid_i = tf.reshape(tf.cast(tf.range(self.attention_n), tf.float32),[1, -1]) # centers for the individual patches mu_x = gx + (grid_i - self.attention_n/2 - 0.5) * delta mu_y = gy + (grid_i - self.attention_n/2 - 0.5) * delta mu_x = tf.reshape(mu_x, [-1, self.attention_n, 1]) mu_y = tf.reshape(mu_y, [-1, self.attention_n, 1]) # 1 x 1 x imgsize, looks like [[[0,1,2,3,4,...,27]]] im = tf.reshape(tf.cast(tf.range(self.img_size), tf.float32), [1, 1, -1]) # list of gaussian curves for x and y sigma2 = tf.reshape(sigma2, [-1, 1, 1]) Fx = tf.exp(-tf.square((im - mu_x) / (2*sigma2))) Fy = tf.exp(-tf.square((im - mu_x) / (2*sigma2))) # normalize so area-under-curve = 1 Fx = Fx / tf.maximum(tf.reduce_sum(Fx,2,keep_dims=True),1e-8) Fy = Fy / tf.maximum(tf.reduce_sum(Fy,2,keep_dims=True),1e-8) return Fx, Fy # the read() operation without attention def read_basic(self, x, x_hat, h_dec_prev): return 
tf.concat(1,[x,x_hat]) def read_attention(self, x, x_hat, h_dec_prev): Fx, Fy, gamma = self.attn_window("read", h_dec_prev) # we have the parameters for a patch of gaussian filters. apply them. def filter_img(img, Fx, Fy, gamma): Fxt = tf.transpose(Fx, perm=[0,2,1]) img = tf.reshape(img, [-1, self.img_size, self.img_size]) # Apply the gaussian patches: # keep in mind: horiz = imgsize = verts (they are all the image size) # keep in mind: attn = height/length of attention patches # allfilters = [attn, vert] * [imgsize,imgsize] * [horiz, attn] # we have batches, so the full batch_matmul equation looks like: # [1, 1, vert] * [batchsize,imgsize,imgsize] * [1, horiz, 1] glimpse = tf.batch_matmul(Fy, tf.batch_matmul(img, Fxt)) glimpse = tf.reshape(glimpse, [-1, self.attention_n**2]) # finally scale this glimpse w/ the gamma parameter return glimpse * tf.reshape(gamma, [-1, 1]) x = filter_img(x, Fx, Fy, gamma) x_hat = filter_img(x_hat, Fx, Fy, gamma) return tf.concat(1, [x, x_hat]) # encode an attention patch def encode(self, prev_state, image): # update the RNN with image with tf.variable_scope("encoder",reuse=self.share_parameters): hidden_layer, next_state = self.lstm_enc(image, prev_state) # map the RNN hidden state to latent variables with tf.variable_scope("mu", reuse=self.share_parameters): mu = dense(hidden_layer, self.n_hidden, self.n_z) with tf.variable_scope("sigma", reuse=self.share_parameters): logsigma = dense(hidden_layer, self.n_hidden, self.n_z) sigma = tf.exp(logsigma) return mu, logsigma, sigma, next_state def sampleQ(self, mu, sigma): return mu + sigma*self.e def decode_layer(self, prev_state, latent): # update decoder RNN with latent var with tf.variable_scope("decoder", reuse=self.share_parameters): hidden_layer, next_state = self.lstm_dec(latent, prev_state) return hidden_layer, next_state def write_basic(self, hidden_layer): # map RNN hidden state to image with tf.variable_scope("write", reuse=self.share_parameters): decoded_image_portion = dense(hidden_layer, self.n_hidden, self.img_size**2) return decoded_image_portion def write_attention(self, hidden_layer): with tf.variable_scope("writeW", reuse=self.share_parameters): w = dense(hidden_layer, self.n_hidden, self.attention_n**2) w = tf.reshape(w, [self.batch_size, self.attention_n, self.attention_n]) Fx, Fy, gamma = self.attn_window("write", hidden_layer) Fyt = tf.transpose(Fy, perm=[0,2,1]) # [vert, attn_n] * [attn_n, attn_n] * [attn_n, horiz] wr = tf.batch_matmul(Fyt, tf.batch_matmul(w, Fx)) wr = tf.reshape(wr, [self.batch_size, self.img_size**2]) return wr * tf.reshape(1.0/gamma, [-1, 1]) model = Draw() model.train()
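# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): filterbank() above
# builds an attention_n x img_size grid of Gaussian weights per axis. The
# commented NumPy sketch below mirrors that computation for a single example
# so the shapes are easier to follow; note that the TF code computes Fy from
# mu_x, which looks like a typo, so the sketch uses mu_y for the vertical
# filters. Parameter values here (gx, gy, sigma2, delta) are made up.
#
# import numpy as np
#
# attention_n, img_size = 5, 28
# gx, gy, sigma2, delta = 14.0, 14.0, 1.0, 4.0       # hypothetical attention window
# grid = np.arange(attention_n, dtype=np.float32)    # [0, 1, 2, 3, 4]
# mu_x = gx + (grid - attention_n / 2 - 0.5) * delta  # patch centres, x axis
# mu_y = gy + (grid - attention_n / 2 - 0.5) * delta  # patch centres, y axis
# im = np.arange(img_size, dtype=np.float32)
# # One unnormalised Gaussian per patch centre, evaluated at every pixel,
# # using the same expression as the TF code: exp(-((i - mu) / (2*sigma2))^2).
# Fx = np.exp(-np.square((im[None, :] - mu_x[:, None]) / (2 * sigma2)))
# Fy = np.exp(-np.square((im[None, :] - mu_y[:, None]) / (2 * sigma2)))
# # Normalise each row so the filter weights sum to one.
# Fx /= np.maximum(Fx.sum(axis=1, keepdims=True), 1e-8)
# Fy /= np.maximum(Fy.sum(axis=1, keepdims=True), 1e-8)
# # Fx and Fy both have shape (attention_n, img_size); a read glimpse is
# # Fy @ image @ Fx.T, giving an (attention_n, attention_n) patch.
# ---------------------------------------------------------------------------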
HaydenFaulkner/phd
tensorflow_code/external_libraries/draw-kvfrans/main.py
Python
mit
9,552
[ "Gaussian" ]
f19033d43b49e51fc230bf03179a1fe1226290f85a75b5d8aebfcb60410784aa
__RCSID__ = "$Id $" # We disable the no-member error because # they are constructed by SQLAlchemy for all # the objects mapped to a table. # pylint: disable=no-member import datetime import errno # # from DIRAC from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.DataManagementSystem.Client.FTS3Operation import FTS3Operation, FTS3TransferOperation, FTS3StagingOperation from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File from DIRAC.DataManagementSystem.Client.FTS3Job import FTS3Job from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters from sqlalchemy.orm.exc import NoResultFound from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.sql.expression import and_ from sqlalchemy.orm import relationship, sessionmaker, mapper from sqlalchemy.sql import update from sqlalchemy import create_engine, Table, Column, MetaData, ForeignKey, \ Integer, String, DateTime, Enum, BigInteger, SmallInteger, Float metadata = MetaData() fts3FileTable = Table( 'Files', metadata, Column( 'fileID', Integer, primary_key = True ), Column( 'operationID', Integer, ForeignKey( 'Operations.operationID', ondelete = 'CASCADE' ), nullable = False ), Column( 'attempt', Integer, server_default = '0' ), Column( 'lastUpdate', DateTime ), Column( 'rmsFileID', Integer, server_default = '0' ), Column( 'lfn', String( 1024 ) ), Column( 'checksum', String( 255 ) ), Column( 'size', BigInteger ), Column( 'targetSE', String( 255 ), nullable = False ), Column( 'error', String( 1024 ) ), Column( 'status', Enum( *FTS3File.ALL_STATES ), server_default = FTS3File.INIT_STATE, index = True ), mysql_engine = 'InnoDB', ) mapper( FTS3File, fts3FileTable ) fts3JobTable = Table( 'Jobs', metadata, Column( 'jobID', Integer, primary_key = True ), Column( 'operationID', Integer, ForeignKey( 'Operations.operationID', ondelete = 'CASCADE' ), nullable = False ), Column( 'submitTime', DateTime ), Column( 'lastUpdate', DateTime ), Column( 'lastMonitor', DateTime ), Column( 'completeness', Float ), Column( 'username', String( 255 ) ), # Could be fetched from Operation, but bad for perf Column( 'userGroup', String( 255 ) ), # Could be fetched from Operation, but bad for perf Column( 'ftsGUID', String( 255 ) ), Column( 'ftsServer', String( 255 ) ), Column( 'error', String( 1024 ) ), Column( 'status', Enum( *FTS3Job.ALL_STATES ), server_default = FTS3Job.INIT_STATE, index = True ), Column( 'assignment', String( 255 ), server_default = None ), mysql_engine = 'InnoDB', ) mapper( FTS3Job, fts3JobTable ) fts3OperationTable = Table( 'Operations', metadata, Column( 'operationID', Integer, primary_key = True ), Column( 'username', String( 255 ) ), Column( 'userGroup', String( 255 ) ), Column( 'rmsReqID', Integer, server_default = '-1' ), # -1 because with 0 we get any request Column( 'rmsOpID', Integer, server_default = '0' ), Column( 'sourceSEs', String( 255 ) ), Column( 'activity', String( 255 ) ), Column( 'priority', SmallInteger ), Column( 'creationTime', DateTime ), Column( 'lastUpdate', DateTime ), Column( 'status', Enum( *FTS3Operation.ALL_STATES ), server_default = FTS3Operation.INIT_STATE, index = True ), Column( 'error', String( 1024 ) ), Column( 'type', String( 255 ) ), Column( 'assignment', String( 255 ), server_default = None ), mysql_engine = 'InnoDB', ) fts3Operation_mapper = mapper( FTS3Operation, fts3OperationTable, properties = {'ftsFiles':relationship( FTS3File, lazy = 'joined', # Immediately load the entirety of the object innerjoin = True, # Use inner join instead of left outer join cascade = 'all, delete-orphan', # 
if a File is removed from the list, remove it from the DB passive_deletes = True, # used together with cascade='all, delete-orphan' ), 'ftsJobs':relationship( FTS3Job, lazy = 'joined', # Immediately load the entirety of the object cascade = 'all, delete-orphan', # if a File is removed from the list, remove it from the DB passive_deletes = True, # used together with cascade='all, delete-orphan' ), }, polymorphic_on = 'type', polymorphic_identity = 'Abs' ) mapper( FTS3TransferOperation, fts3OperationTable, inherits = fts3Operation_mapper, polymorphic_identity = 'Transfer' ) mapper( FTS3StagingOperation, fts3OperationTable, inherits = fts3Operation_mapper, polymorphic_identity = 'Staging' ) ######################################################################## class FTS3DB( object ): """ .. class:: RequestDB db holding requests """ def __getDBConnectionInfo( self, fullname ): """ Collect from the CS all the info needed to connect to the DB. This should be in a base class eventually """ result = getDBParameters( fullname ) if not result[ 'OK' ]: raise Exception( 'Cannot get database parameters: %s' % result[ 'Message' ] ) dbParameters = result[ 'Value' ] self.dbHost = dbParameters[ 'Host' ] self.dbPort = dbParameters[ 'Port' ] self.dbUser = dbParameters[ 'User' ] self.dbPass = dbParameters[ 'Password' ] self.dbName = dbParameters[ 'DBName' ] def __init__( self ): """c'tor :param self: self reference """ self.log = gLogger.getSubLogger( 'FTS3DB' ) # Initialize the connection info self.__getDBConnectionInfo( 'DataManagement/FTS3DB' ) runDebug = ( gLogger.getLevel() == 'DEBUG' ) self.engine = create_engine( 'mysql://%s:%s@%s:%s/%s' % ( self.dbUser, self.dbPass, self.dbHost, self.dbPort, self.dbName ), echo = runDebug ) metadata.bind = self.engine self.dbSession = sessionmaker( bind = self.engine ) def createTables( self ): """ create tables """ try: metadata.create_all( self.engine ) except SQLAlchemyError as e: return S_ERROR( e ) return S_OK() def persistOperation( self, operation ): """ update or insert request into db Also release the assignment tag :param operation: FTS3Operation instance """ session = self.dbSession( expire_on_commit = False ) # set the assignment to NULL # so that another agent can work on the request operation.assignment = None try: # Merge it in case it already is in the DB operation = session.merge( operation ) session.add( operation ) session.commit() session.expunge_all() return S_OK( operation.operationID ) except SQLAlchemyError as e: session.rollback() self.log.exception( "persistOperation: unexpected exception", lException = e ) return S_ERROR( "persistOperation: unexpected exception %s" % e ) finally: session.close() def getOperation( self, operationID ): """ read request This does not set the assignment flag :param operationID: ID of the FTS3Operation """ # expire_on_commit is set to False so that we can still use the object after we close the session session = self.dbSession( expire_on_commit = False ) try: operation = session.query( FTS3Operation )\ .filter( getattr( FTS3Operation, 'operationID' ) == operationID )\ .one() session.commit() ################################### session.expunge_all() return S_OK( operation ) except NoResultFound as e: # We use the ENOENT error, even if not really a file error :) return S_ERROR( errno.ENOENT, "No FTS3Operation with id %s" % operationID ) except SQLAlchemyError as e: return S_ERROR( "getOperation: unexpected exception : %s" % e ) finally: session.close() def getActiveJobs( self, limit = 20, lastMonitor = None, 
jobAssignmentTag = "Assigned" ): """ Get the FTSJobs that are not in a final state, and are not assigned for monitoring or has its operation being treated By assigning the job to the DB: * it cannot be monitored by another agent * the operation to which it belongs cannot be treated :param limit: max number of Jobs to retrieve :param lastMonitor: jobs monitored earlier than the given date :param jobAssignmentTag: if not None, block the Job for other queries, and use it as a prefix for the value in the operation table :returns: list of FTS3Jobs """ session = self.dbSession( expire_on_commit = False ) try: # the tild sign is for "not" ftsJobsQuery = session.query( FTS3Job )\ .join( FTS3Operation )\ .filter( ~FTS3Job.status.in_( FTS3Job.FINAL_STATES ) )\ .filter( FTS3Job.assignment == None )\ .filter( FTS3Operation.assignment == None )\ if lastMonitor: ftsJobsQuery = ftsJobsQuery.filter( FTS3Job.lastMonitor < lastMonitor ) if jobAssignmentTag: ftsJobsQuery = ftsJobsQuery.with_for_update() ftsJobsQuery = ftsJobsQuery.order_by( FTS3Job.lastMonitor.desc() ) ftsJobsQuery = ftsJobsQuery.limit( limit ) ftsJobs = ftsJobsQuery.all() if jobAssignmentTag: jobAssignmentTag += "_%s" % datetime.datetime.utcnow().strftime( '%Y-%m-%d %H:%M:%S' ) jobIds = [job.jobID for job in ftsJobs] if jobIds: session.execute( update( FTS3Job )\ .where( FTS3Job.jobID.in_( jobIds ) )\ .values( { 'assignment' : jobAssignmentTag} ) ) session.commit() session.expunge_all() return S_OK( ftsJobs ) except SQLAlchemyError as e: session.rollback() return S_ERROR( "getAllActiveJobs: unexpected exception : %s" % e ) finally: session.close() def updateFileStatus( self, fileStatusDict ): """Update the file ftsStatus and error The update is only done if the file is not in a final state :param fileStatusDict : { fileID : { status , error } } """ session = self.dbSession() try: for fileID, valueDict in fileStatusDict.iteritems(): updateDict = {FTS3File.status : valueDict['status']} # We only update error if it is specified if 'error' in valueDict: newError = valueDict['error'] # Replace empty string with None if not newError: newError = None updateDict[FTS3File.error] = newError session.execute( update( FTS3File )\ .where( and_( FTS3File.fileID == fileID, ~ FTS3File.status.in_( FTS3File.FINAL_STATES ) )\ )\ .values( updateDict ) ) session.commit() return S_OK() except SQLAlchemyError as e: session.rollback() self.log.exception( "updateFileFtsStatus: unexpected exception", lException = e ) return S_ERROR( "updateFileFtsStatus: unexpected exception %s" % e ) finally: session.close() def updateJobStatus( self, jobStatusDict ): """ Update the job Status and error The update is only done if the job is not in a final state The assignment flag is released :param jobStatusDict : { jobID : { status , error, completeness } } """ session = self.dbSession() try: for jobID, valueDict in jobStatusDict.iteritems(): updateDict = {FTS3Job.status : valueDict['status']} # We only update error if it is specified if 'error' in valueDict: newError = valueDict['error'] # Replace empty string with None if not newError: newError = None updateDict[FTS3Job.error] = newError if 'completeness' in valueDict: updateDict[FTS3Job.completeness] = valueDict['completeness'] updateDict[FTS3Job.assignment] = None session.execute( update( FTS3Job )\ .where( and_( FTS3Job.jobID == jobID, ~ FTS3Job.status.in_( FTS3Job.FINAL_STATES ) ) )\ .values( updateDict ) ) session.commit() return S_OK() except SQLAlchemyError as e: session.rollback() self.log.exception( "updateJobStatus: 
unexpected exception", lException = e ) return S_ERROR( "updateJobStatus: unexpected exception %s" % e ) finally: session.close() def getNonFinishedOperations( self, limit = 20, operationAssignmentTag = "Assigned" ): """ Get all the non assigned FTS3Operations that are not yet finished, so either Active or Processed. An operation won't be picked if it is already assigned, or one of its job is. :param limit: max number of operations to retrieve :param operationAssignmentTag: if not None, block the operations for other queries, and use it as a prefix for the value in the operation table :return: list of Operations """ session = self.dbSession( expire_on_commit = False ) try: ftsOperations = [] # We need to do the select in two times because the join clause that makes the limit difficult operationIDsQuery = session.query( FTS3Operation.operationID )\ .outerjoin( FTS3Job )\ .filter( FTS3Operation.status.in_( ['Active', 'Processed'] ) )\ .filter( FTS3Operation.assignment == None )\ .filter( FTS3Job.assignment == None )\ .limit( limit ) # Block the Operations for other requests if operationAssignmentTag: operationIDsQuery = operationIDsQuery.with_for_update() operationIDs = operationIDsQuery.all() operationIDs = [oidTuple[0] for oidTuple in operationIDs] if operationIDs: # Fetch the operation object for these IDs ftsOperations = session.query( FTS3Operation )\ .filter( FTS3Operation.operationID.in_( operationIDs ) )\ .all() if operationAssignmentTag: operationAssignmentTag += "_%s" % datetime.datetime.utcnow().strftime( '%Y-%m-%d %H:%M:%S' ) session.execute( update( FTS3Operation )\ .where( FTS3Operation.operationID.in_( operationIDs ) )\ .values( { 'assignment' : operationAssignmentTag} ) ) session.commit() session.expunge_all() return S_OK( ftsOperations ) except SQLAlchemyError as e: session.rollback() return S_ERROR( "getAllProcessedOperations: unexpected exception : %s" % e ) finally: session.close()
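# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): typical agent-side
# interaction with FTS3DB as defined above. How an FTS3Operation instance is
# constructed is not shown in this module, so the "operation" variable below
# is assumed to come from the FTS3Operation client class.
#
# db = FTS3DB()
# res = db.createTables()                      # returns S_OK/S_ERROR
# res = db.persistOperation(operation)         # insert/update and release assignment
# if res['OK']:
#     opID = res['Value']
#     res = db.getOperation(opID)              # read back, without assigning it
# # Monitoring loop: grab unassigned, non-final jobs and push status updates.
# res = db.getActiveJobs(limit=10)
# if res['OK']:
#     jobStatusDict = dict((job.jobID, {'status': job.status}) for job in res['Value'])
#     db.updateJobStatus(jobStatusDict)
# ---------------------------------------------------------------------------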
Andrew-McNab-UK/DIRAC
DataManagementSystem/DB/FTS3DB.py
Python
gpl-3.0
16,779
[ "DIRAC" ]
787407bcb7970999908c5df1e90e1cce1b8e9298457c68b7489a6cfc37db7c7c
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Busco(PythonPackage):
    """Assesses genome assembly and annotation completeness with Benchmarking
    Universal Single-Copy Orthologs"""

    homepage = "http://busco.ezlab.org/"
    url = "https://gitlab.com/api/v4/projects/ezlab%2Fbusco/repository/archive.tar.gz?sha=2.0.1"

    # TODO: check the installation procedure for version 3.0.2
    # and uncomment the following line
    # version('3.0.2', '31d80042bb7e96422843fa43d0acbd21')

    # There is no tag for version 3.0.1
    version('3.0.1', git='https://gitlab.com/ezlab/busco.git',
            commit='078252e00399550d7b0e8941cd4d986c8e868a83')
    version('2.0.1', '4dbcc8a0c18fa8f8312c103eb2fbb4e2')

    depends_on('python', type=('build', 'run'))
    depends_on('blast-plus')
    depends_on('hmmer')
    depends_on('augustus')

    def build(self, spec, prefix):
        if self.spec.satisfies('@2.0.1'):
            pass

    def install(self, spec, prefix):
        if self.spec.satisfies('@3.0.1'):
            with working_dir('scripts'):
                mkdirp(prefix.bin)
                install('generate_plot.py', prefix.bin)
                install('run_BUSCO.py', prefix.bin)
            install_tree('config', prefix.config)
            args = self.install_args(spec, prefix)
            self.setup_py('install', *args)
        if self.spec.satisfies('@2.0.1'):
            mkdirp(prefix.bin)
            install('BUSCO.py', prefix.bin)
            install('BUSCO_plot.py', prefix.bin)
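# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream package): installing the
# git-based 3.0.1 release pulls in the run-time tool chain declared above
# (python, blast-plus, hmmer, augustus) and places run_BUSCO.py and
# generate_plot.py into the package's bin directory. Illustrative commands:
#
#   spack install busco@3.0.1
#   spack load busco          # or use the generated module file
#   run_BUSCO.py --help
# ---------------------------------------------------------------------------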
tmerrick1/spack
var/spack/repos/builtin/packages/busco/package.py
Python
lgpl-2.1
2,734
[ "BLAST" ]
a8254877606f321acc0c89aac6f5f60609f2031cfe74ea58376c44674eb6057c
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL <http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This was written to work with FSL version 4.1.4. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from __future__ import print_function from builtins import range import os from glob import glob import warnings from shutil import rmtree import numpy as np from nibabel import load from ... import LooseVersion from .base import (FSLCommand, FSLCommandInputSpec, Info) from ..base import (load_template, File, traits, isdefined, TraitedSpec, BaseInterface, Directory, InputMultiPath, OutputMultiPath, BaseInterfaceInputSpec) from ...utils.filemanip import (list_to_filename, filename_to_list) from ...utils.misc import human_order_sorted warn = warnings.warn class Level1DesignInputSpec(BaseInterfaceInputSpec): interscan_interval = traits.Float(mandatory=True, desc='Interscan interval (in secs)') session_info = traits.Any(mandatory=True, desc='Session specific information generated by ``modelgen.SpecifyModel``') bases = traits.Either( traits.Dict(traits.Enum( 'dgamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)), traits.Dict(traits.Enum('gamma'), traits.Dict( traits.Enum('derivs'), traits.Bool)), traits.Dict(traits.Enum('none'), traits.Enum(None)), mandatory=True, desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}") model_serial_correlations = traits.Bool( desc="Option to model serial correlations using an \ autoregressive estimator (order 1). Setting this option is only \ useful in the context of the fsf file. If you set this to False, you need to repeat \ this option for FILMGLS by setting autocorr_noestimate to True", mandatory=True) contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum( 'T'), traits.List( traits.Str), traits.List( traits.Float)), traits.Tuple( traits.Str, traits.Enum( 'T'), traits.List( traits.Str), traits.List( traits.Float), traits.List( traits.Float)))))), desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list], [session list])]. if \ session list is None or not provided, all sessions are used. 
For F \ contrasts, the condition list should contain previously defined \ T-contrasts.") class Level1DesignOutputSpec(TraitedSpec): fsf_files = OutputMultiPath(File(exists=True), desc='FSL feat specification files') ev_files = OutputMultiPath(traits.List(File(exists=True)), desc='condition information files') class Level1Design(BaseInterface): """Generate FEAT specific files Examples -------- >>> level1design = Level1Design() >>> level1design.inputs.interscan_interval = 2.5 >>> level1design.inputs.bases = {'dgamma':{'derivs': False}} >>> level1design.inputs.session_info = 'session_info.npz' >>> level1design.run() # doctest: +SKIP """ input_spec = Level1DesignInputSpec output_spec = Level1DesignOutputSpec def _create_ev_file(self, evfname, evinfo): f = open(evfname, 'wt') for i in evinfo: if len(i) == 3: f.write('%f %f %f\n' % (i[0], i[1], i[2])) else: f.write('%f\n' % i[0]) f.close() def _create_ev_files( self, cwd, runinfo, runidx, usetd, contrasts, do_tempfilter, basis_key): """Creates EV files from condition and regressor information. Parameters: ----------- runinfo : dict Generated by `SpecifyModel` and contains information about events and other regressors. runidx : int Index to run number usetd : int Whether or not to use temporal derivatives for conditions contrasts : list of lists Information on contrasts to be evaluated """ conds = {} evname = [] if basis_key == "dgamma": basis_key = "hrf" ev_template = load_template('feat_ev_'+basis_key+'.tcl') ev_none = load_template('feat_ev_none.tcl') ev_ortho = load_template('feat_ev_ortho.tcl') ev_txt = '' # generate sections for conditions and other nuisance # regressors num_evs = [0, 0] for field in ['cond', 'regress']: for i, cond in enumerate(runinfo[field]): name = cond['name'] evname.append(name) evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx, len(evname))) evinfo = [] num_evs[0] += 1 num_evs[1] += 1 if field == 'cond': for j, onset in enumerate(cond['onset']): try: amplitudes = cond['amplitudes'] if len(amplitudes) > 1: amp = amplitudes[j] else: amp = amplitudes[0] except KeyError: amp = 1 if len(cond['duration']) > 1: evinfo.insert(j, [onset, cond['duration'][j], amp]) else: evinfo.insert(j, [onset, cond['duration'][0], amp]) if basis_key == "none": ev_txt += ev_template.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, cond_file=evfname) else: ev_txt += ev_template.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, temporalderiv=usetd, cond_file=evfname) if usetd: evname.append(name + 'TD') num_evs[1] += 1 elif field == 'regress': evinfo = [[j] for j in cond['val']] ev_txt += ev_none.substitute(ev_num=num_evs[0], ev_name=name, tempfilt_yn=do_tempfilter, cond_file=evfname) ev_txt += "\n" conds[name] = evfname self._create_ev_file(evfname, evinfo) # add ev orthogonalization for i in range(1, num_evs[0] + 1): for j in range(0, num_evs[0] + 1): ev_txt += ev_ortho.substitute(c0=i, c1=j) ev_txt += "\n" # add contrast info to fsf file if isdefined(contrasts): contrast_header = load_template('feat_contrast_header.tcl') contrast_prolog = load_template('feat_contrast_prolog.tcl') contrast_element = load_template('feat_contrast_element.tcl') contrast_ftest_element = load_template( 'feat_contrast_ftest_element.tcl') contrastmask_header = load_template('feat_contrastmask_header.tcl') contrastmask_footer = load_template('feat_contrastmask_footer.tcl') contrastmask_element = load_template( 'feat_contrastmask_element.tcl') # add t/f contrast info ev_txt += contrast_header.substitute() 
con_names = [] for j, con in enumerate(contrasts): con_names.append(con[0]) con_map = {} ftest_idx = [] ttest_idx = [] for j, con in enumerate(contrasts): if con[1] == 'F': ftest_idx.append(j) for c in con[2]: if c[0] not in list(con_map.keys()): con_map[c[0]] = [] con_map[c[0]].append(j) else: ttest_idx.append(j) for ctype in ['real', 'orig']: for j, con in enumerate(contrasts): if con[1] == 'F': continue tidx = ttest_idx.index(j) + 1 ev_txt += contrast_prolog.substitute(cnum=tidx, ctype=ctype, cname=con[0]) count = 0 for c in range(1, len(evname) + 1): if evname[c - 1].endswith('TD') and ctype == 'orig': continue count = count + 1 if evname[c - 1] in con[2]: val = con[3][con[2].index(evname[c - 1])] else: val = 0.0 ev_txt += contrast_element.substitute(cnum=tidx, element=count, ctype=ctype, val=val) ev_txt += "\n" for fconidx in ftest_idx: fval=0 if con[0] in con_map.keys() and fconidx in con_map[con[0]]: fval=1 ev_txt += contrast_ftest_element.substitute( cnum=ftest_idx.index(fconidx) + 1, element=tidx, ctype=ctype, val=fval) ev_txt += "\n" # add contrast mask info ev_txt += contrastmask_header.substitute() for j, _ in enumerate(contrasts): for k, _ in enumerate(contrasts): if j != k: ev_txt += contrastmask_element.substitute(c1=j + 1, c2=k + 1) ev_txt += contrastmask_footer.substitute() return num_evs, ev_txt def _format_session_info(self, session_info): if isinstance(session_info, dict): session_info = [session_info] return session_info def _get_func_files(self, session_info): """Returns functional files in the order of runs """ func_files = [] for i, info in enumerate(session_info): func_files.insert(i, info['scans']) return func_files def _run_interface(self, runtime): cwd = os.getcwd() fsf_header = load_template('feat_header_l1.tcl') fsf_postscript = load_template('feat_nongui.tcl') prewhiten = 0 if isdefined(self.inputs.model_serial_correlations): prewhiten = int(self.inputs.model_serial_correlations) usetd = 0 basis_key = list(self.inputs.bases.keys())[0] if basis_key in ['dgamma', 'gamma']: usetd = int(self.inputs.bases[basis_key]['derivs']) session_info = self._format_session_info(self.inputs.session_info) func_files = self._get_func_files(session_info) n_tcon = 0 n_fcon = 0 if isdefined(self.inputs.contrasts): for i, c in enumerate(self.inputs.contrasts): if c[1] == 'T': n_tcon += 1 elif c[1] == 'F': n_fcon += 1 for i, info in enumerate(session_info): do_tempfilter = 1 if info['hpf'] == np.inf: do_tempfilter = 0 num_evs, cond_txt = self._create_ev_files(cwd, info, i, usetd, self.inputs.contrasts, do_tempfilter, basis_key) nim = load(func_files[i]) (_, _, _, timepoints) = nim.shape fsf_txt = fsf_header.substitute(run_num=i, interscan_interval=self.inputs.interscan_interval, num_vols=timepoints, prewhiten=prewhiten, num_evs=num_evs[0], num_evs_real=num_evs[1], num_tcon=n_tcon, num_fcon=n_fcon, high_pass_filter_cutoff=info[ 'hpf'], temphp_yn=do_tempfilter, func_file=func_files[i]) fsf_txt += cond_txt fsf_txt += fsf_postscript.substitute(overwrite=1) f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w') f.write(fsf_txt) f.close() return runtime def _list_outputs(self): outputs = self.output_spec().get() cwd = os.getcwd() outputs['fsf_files'] = [] outputs['ev_files'] = [] usetd = 0 basis_key = list(self.inputs.bases.keys())[0] if basis_key in ['dgamma', 'gamma']: usetd = int(self.inputs.bases[basis_key]['derivs']) for runno, runinfo in enumerate(self._format_session_info(self.inputs.session_info)): outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno)) 
outputs['ev_files'].insert(runno, []) evname = [] for field in ['cond', 'regress']: for i, cond in enumerate(runinfo[field]): name = cond['name'] evname.append(name) evfname = os.path.join( cwd, 'ev_%s_%d_%d.txt' % (name, runno, len(evname))) if field == 'cond': if usetd: evname.append(name + 'TD') outputs['ev_files'][runno].append( os.path.join(cwd, evfname)) return outputs class FEATInputSpec(FSLCommandInputSpec): fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0, desc="File specifying the feat design spec file") class FEATOutputSpec(TraitedSpec): feat_dir = Directory(exists=True) class FEAT(FSLCommand): """Uses FSL feat to calculate first level stats """ _cmd = 'feat' input_spec = FEATInputSpec output_spec = FEATOutputSpec def _list_outputs(self): outputs = self._outputs().get() is_ica = False outputs['feat_dir'] = None with open(self.inputs.fsf_file, 'rt') as fp: text = fp.read() if "set fmri(inmelodic) 1" in text: is_ica = True for line in text.split('\n'): if line.find("set fmri(outputdir)") > -1: try: outputdir_spec = line.split('"')[-2] if os.path.exists(outputdir_spec): outputs['feat_dir'] = outputdir_spec except: pass if not outputs['feat_dir']: if is_ica: outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*ica'))[0] else: outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*feat'))[0] print('Outputs from FEATmodel:', outputs) return outputs class FEATModelInputSpec(FSLCommandInputSpec): fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0, desc="File specifying the feat design spec file", copyfile=False) ev_files = traits.List(File(exists=True), mandatory=True, argstr="%s", desc="Event spec files generated by level1design", position=1, copyfile=False) class FEATModelOutpuSpec(TraitedSpec): design_file = File( exists=True, desc='Mat file containing ascii matrix for design') design_image = File( exists=True, desc='Graphical representation of design matrix') design_cov = File( exists=True, desc='Graphical representation of design covariance') con_file = File( exists=True, desc='Contrast file containing contrast vectors') fcon_file = File(desc='Contrast file containing contrast vectors') class FEATModel(FSLCommand): """Uses FSL feat_model to generate design.mat files """ _cmd = 'feat_model' input_spec = FEATModelInputSpec output_spec = FEATModelOutpuSpec def _format_arg(self, name, trait_spec, value): if name == 'fsf_file': return super(FEATModel, self)._format_arg(name, trait_spec, self._get_design_root(value)) elif name == 'ev_files': return '' else: return super(FEATModel, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) return fname.split('.')[0] def _list_outputs(self): # TODO: figure out file names and get rid off the globs outputs = self._outputs().get() root = self._get_design_root(list_to_filename(self.inputs.fsf_file)) design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root)) assert len(design_file) == 1, 'No mat file generated by FEAT Model' outputs['design_file'] = design_file[0] design_image = glob(os.path.join(os.getcwd(), '%s.png' % root)) assert len( design_image) == 1, 'No design image generated by FEAT Model' outputs['design_image'] = design_image[0] design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root)) assert len( design_cov) == 1, 'No covariance image generated by FEAT Model' outputs['design_cov'] = design_cov[0] con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root)) assert len(con_file) == 1, 'No con file generated by FEAT Model' 
outputs['con_file'] = con_file[0] fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root)) if fcon_file: assert len(fcon_file) == 1, 'No fts file generated by FEAT Model' outputs['fcon_file'] = fcon_file[0] return outputs class FILMGLSInputSpec(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, position=-3, argstr='%s', desc='input data file') design_file = File(exists=True, position=-2, argstr='%s', desc='design matrix file') threshold = traits.Range(default=1000., low=0.0, argstr='%f', position=-1, usedefault=True, desc='threshold') smooth_autocorr = traits.Bool(argstr='-sa', desc='Smooth auto corr estimates') mask_size = traits.Int(argstr='-ms %d', desc="susan mask size") brightness_threshold = traits.Range(low=0, argstr='-epith %d', desc='susan brightness threshold, otherwise it is estimated') full_data = traits.Bool(argstr='-v', desc='output full data') _estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'] autocorr_estimate_only = traits.Bool(argstr='-ac', xor=_estimate_xor, desc='perform autocorrelation estimatation only') fit_armodel = traits.Bool(argstr='-ar', xor=_estimate_xor, desc='fits autoregressive model - default is to use tukey with M=sqrt(numvols)') tukey_window = traits.Int(argstr='-tukey %d', xor=_estimate_xor, desc='tukey window size to estimate autocorr') multitaper_product = traits.Int(argstr='-mt %d', xor=_estimate_xor, desc='multitapering with slepian tapers and num is the time-bandwidth product') use_pava = traits.Bool( argstr='-pava', desc='estimates autocorr using PAVA') autocorr_noestimate = traits.Bool(argstr='-noest', xor=_estimate_xor, desc='do not estimate autocorrs') output_pwdata = traits.Bool(argstr='-output_pwdata', desc='output prewhitened data and average design matrix') results_dir = Directory('results', argstr='-rn %s', usedefault=True, desc='directory to store results in') class FILMGLSInputSpec505(FSLCommandInputSpec): in_file = File(exists=True, mandatory=True, position=-3, argstr='--in=%s', desc='input data file') design_file = File(exists=True, position=-2, argstr='--pd=%s', desc='design matrix file') threshold = traits.Range(default=1000., low=0.0, argstr='--thr=%f', position=-1, usedefault=True, desc='threshold') smooth_autocorr = traits.Bool(argstr='--sa', desc='Smooth auto corr estimates') mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size") brightness_threshold = traits.Range(low=0, argstr='--epith=%d', desc=('susan brightness threshold, ' 'otherwise it is estimated')) full_data = traits.Bool(argstr='-v', desc='output full data') _estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'] autocorr_estimate_only = traits.Bool(argstr='--ac', xor=_estimate_xor, desc=('perform autocorrelation ' 'estimation only')) fit_armodel = traits.Bool(argstr='--ar', xor=_estimate_xor, desc=('fits autoregressive model - default is to ' 'use tukey with M=sqrt(numvols)')) tukey_window = traits.Int(argstr='--tukey=%d', xor=_estimate_xor, desc='tukey window size to estimate autocorr') multitaper_product = traits.Int(argstr='--mt=%d', xor=_estimate_xor, desc=('multitapering with slepian tapers ' 'and num is the time-bandwidth ' 'product')) use_pava = traits.Bool(argstr='--pava', desc='estimates autocorr using PAVA') autocorr_noestimate = traits.Bool(argstr='--noest', xor=_estimate_xor, desc='do not estimate autocorrs') output_pwdata = traits.Bool(argstr='--outputPWdata', desc=('output 
prewhitened data and average ' 'design matrix')) results_dir = Directory('results', argstr='--rn=%s', usedefault=True, desc='directory to store results in') class FILMGLSInputSpec507(FILMGLSInputSpec505): threshold = traits.Float(default=-1000., argstr='--thr=%f', position=-1, usedefault=True, desc='threshold') tcon_file = File(exists=True, argstr='--con=%s', desc='contrast file containing T-contrasts') fcon_file = File(exists=True, argstr='--fcon=%s', desc='contrast file containing F-contrasts') mode = traits.Enum('volumetric', 'surface', argstr="--mode=%s", desc="Type of analysis to be done") surface = File(exists=True, argstr="--in2=%s", desc=("input surface for autocorr smoothing in " "surface-based analyses")) class FILMGLSOutputSpec(TraitedSpec): param_estimates = OutputMultiPath(File(exists=True), desc='Parameter estimates for each column of the design matrix') residual4d = File(exists=True, desc='Model fit residual mean-squared error for each time point') dof_file = File(exists=True, desc='degrees of freedom') sigmasquareds = File( exists=True, desc='summary of residuals, See Woolrich, et. al., 2001') results_dir = Directory(exists=True, desc='directory storing model estimation output') corrections = File(exists=True, desc='statistical corrections used within FILM modelling') thresholdac = File(exists=True, desc='The FILM autocorrelation parameters') logfile = File(exists=True, desc='FILM run logfile') class FILMGLSOutputSpec507(TraitedSpec): param_estimates = OutputMultiPath(File(exists=True), desc='Parameter estimates for each column of the design matrix') residual4d = File(exists=True, desc='Model fit residual mean-squared error for each time point') dof_file = File(exists=True, desc='degrees of freedom') sigmasquareds = File( exists=True, desc='summary of residuals, See Woolrich, et. 
al., 2001') results_dir = Directory(exists=True, desc='directory storing model estimation output') thresholdac = File(exists=True, desc='The FILM autocorrelation parameters') logfile = File(exists=True, desc='FILM run logfile') copes = OutputMultiPath(File(exists=True), desc='Contrast estimates for each contrast') varcopes = OutputMultiPath(File(exists=True), desc='Variance estimates for each contrast') zstats = OutputMultiPath(File(exists=True), desc='z-stat file for each contrast') tstats = OutputMultiPath(File(exists=True), desc='t-stat file for each contrast') fstats = OutputMultiPath(File(exists=True), desc='f-stat file for each contrast') zfstats = OutputMultiPath(File(exists=True), desc='z-stat file for each F contrast') class FILMGLS(FSLCommand): """Use FSL film_gls command to fit a design matrix to voxel timeseries Examples -------- Initialize with no options, assigning them when calling run: >>> from nipype.interfaces import fsl >>> fgls = fsl.FILMGLS() >>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP Assign options through the ``inputs`` attribute: >>> fgls = fsl.FILMGLS() >>> fgls.inputs.in_file = 'functional.nii' >>> fgls.inputs.design_file = 'design.mat' >>> fgls.inputs.threshold = 10 >>> fgls.inputs.results_dir = 'stats' >>> res = fgls.run() #doctest: +SKIP Specify options when creating an instance: >>> fgls = fsl.FILMGLS(in_file='functional.nii', \ design_file='design.mat', \ threshold=10, results_dir='stats') >>> res = fgls.run() #doctest: +SKIP """ _cmd = 'film_gls' if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'): input_spec = FILMGLSInputSpec507 elif Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.4'): input_spec = FILMGLSInputSpec505 else: input_spec = FILMGLSInputSpec if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'): output_spec = FILMGLSOutputSpec507 else: output_spec = FILMGLSOutputSpec def _get_pe_files(self, cwd): files = None if isdefined(self.inputs.design_file): fp = open(self.inputs.design_file, 'rt') for line in fp.readlines(): if line.startswith('/NumWaves'): numpes = int(line.split()[-1]) files = [] for i in range(numpes): files.append(self._gen_fname('pe%d.nii' % (i + 1), cwd=cwd)) break fp.close() return files def _get_numcons(self): numtcons = 0 numfcons = 0 if isdefined(self.inputs.tcon_file): fp = open(self.inputs.tcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numtcons = int(line.split()[-1]) break fp.close() if isdefined(self.inputs.fcon_file): fp = open(self.inputs.fcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numfcons = int(line.split()[-1]) break fp.close() return numtcons, numfcons def _list_outputs(self): outputs = self._outputs().get() cwd = os.getcwd() results_dir = os.path.join(cwd, self.inputs.results_dir) outputs['results_dir'] = results_dir pe_files = self._get_pe_files(results_dir) if pe_files: outputs['param_estimates'] = pe_files outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir) outputs['dof_file'] = os.path.join(results_dir, 'dof') outputs['sigmasquareds'] = self._gen_fname('sigmasquareds.nii', cwd=results_dir) outputs['thresholdac'] = self._gen_fname('threshac1.nii', cwd=results_dir) if Info.version() and LooseVersion(Info.version()) < LooseVersion('5.0.7'): outputs['corrections'] = self._gen_fname('corrections.nii', cwd=results_dir) outputs['logfile'] = self._gen_fname('logfile', change_ext=False, cwd=results_dir) if Info.version() and 
LooseVersion(Info.version()) > LooseVersion('5.0.6'): pth = results_dir numtcons, numfcons = self._get_numcons() base_contrast = 1 copes = [] varcopes = [] zstats = [] tstats = [] neffs = [] for i in range(numtcons): copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i), cwd=pth)) varcopes.append( self._gen_fname('varcope%d.nii' % (base_contrast + i), cwd=pth)) zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i), cwd=pth)) tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i), cwd=pth)) if copes: outputs['copes'] = copes outputs['varcopes'] = varcopes outputs['zstats'] = zstats outputs['tstats'] = tstats fstats = [] zfstats = [] for i in range(numfcons): fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i), cwd=pth)) zfstats.append( self._gen_fname('zfstat%d.nii' % (base_contrast + i), cwd=pth)) if fstats: outputs['fstats'] = fstats outputs['zfstats'] = zfstats return outputs class FEATRegisterInputSpec(BaseInterfaceInputSpec): feat_dirs = InputMultiPath( Directory(exists=True), desc="Lower level feat dirs", mandatory=True) reg_image = File( exists=True, desc="image to register to (will be treated as standard)", mandatory=True) reg_dof = traits.Int( 12, desc="registration degrees of freedom", usedefault=True) class FEATRegisterOutputSpec(TraitedSpec): fsf_file = File(exists=True, desc="FSL feat specification file") class FEATRegister(BaseInterface): """Register feat directories to a specific standard """ input_spec = FEATRegisterInputSpec output_spec = FEATRegisterOutputSpec def _run_interface(self, runtime): fsf_header = load_template('featreg_header.tcl') fsf_footer = load_template('feat_nongui.tcl') fsf_dirs = load_template('feat_fe_featdirs.tcl') num_runs = len(self.inputs.feat_dirs) fsf_txt = fsf_header.substitute(num_runs=num_runs, regimage=self.inputs.reg_image, regdof=self.inputs.reg_dof) for i, rundir in enumerate(filename_to_list(self.inputs.feat_dirs)): fsf_txt += fsf_dirs.substitute(runno=i + 1, rundir=os.path.abspath(rundir)) fsf_txt += fsf_footer.substitute() f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt') f.write(fsf_txt) f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() outputs['fsf_file'] = os.path.abspath( os.path.join(os.getcwd(), 'register.fsf')) return outputs class FLAMEOInputSpec(FSLCommandInputSpec): cope_file = File(exists=True, argstr='--copefile=%s', mandatory=True, desc='cope regressor data file') var_cope_file = File(exists=True, argstr='--varcopefile=%s', desc='varcope weightings data file') dof_var_cope_file = File(exists=True, argstr='--dofvarcopefile=%s', desc='dof data file for varcope data') mask_file = File(exists=True, argstr='--maskfile=%s', mandatory=True, desc='mask file') design_file = File(exists=True, argstr='--designfile=%s', mandatory=True, desc='design matrix file') t_con_file = File( exists=True, argstr='--tcontrastsfile=%s', mandatory=True, desc='ascii matrix specifying t-contrasts') f_con_file = File(exists=True, argstr='--fcontrastsfile=%s', desc='ascii matrix specifying f-contrasts') cov_split_file = File( exists=True, argstr='--covsplitfile=%s', mandatory=True, desc='ascii matrix specifying the groups the covariance is split into') run_mode = traits.Enum( 'fe', 'ols', 'flame1', 'flame12', argstr='--runmode=%s', mandatory=True, desc='inference to perform') n_jumps = traits.Int( argstr='--njumps=%d', desc='number of jumps made by mcmc') burnin = traits.Int(argstr='--burnin=%d', desc='number of jumps at start of mcmc to be discarded') sample_every = 
traits.Int(argstr='--sampleevery=%d', desc='number of jumps for each sample') fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit') infer_outliers = traits.Bool(argstr='--inferoutliers', desc='infer outliers - not for fe') no_pe_outputs = traits.Bool(argstr='--nopeoutput', desc='do not output pe files') sigma_dofs = traits.Int(argstr='--sigma_dofs=%d', desc='sigma (in mm) to use for Gaussian smoothing the DOFs in FLAME 2. Default is 1mm, -1 indicates no smoothing') outlier_iter = traits.Int(argstr='--ioni=%d', desc='Number of max iterations to use when inferring outliers. Default is 12.') log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds # no support for ven, vef class FLAMEOOutputSpec(TraitedSpec): pes = OutputMultiPath(File(exists=True), desc=("Parameter estimates for each column of the " "design matrix for each voxel")) res4d = OutputMultiPath(File(exists=True), desc=("Model fit residual mean-squared error for " "each time point")) copes = OutputMultiPath(File(exists=True), desc="Contrast estimates for each contrast") var_copes = OutputMultiPath(File(exists=True), desc="Variance estimates for each contrast") zstats = OutputMultiPath(File(exists=True), desc="z-stat file for each contrast") tstats = OutputMultiPath(File(exists=True), desc="t-stat file for each contrast") zfstats = OutputMultiPath(File(exists=True), desc="z stat file for each f contrast") fstats = OutputMultiPath(File(exists=True), desc="f-stat file for each contrast") mrefvars = OutputMultiPath(File(exists=True), desc=("mean random effect variances for each " "contrast")) tdof = OutputMultiPath(File(exists=True), desc="temporal dof file for each contrast") weights = OutputMultiPath(File(exists=True), desc="weights file for each contrast") stats_dir = Directory(File(exists=True), desc="directory storing model estimation output") class FLAMEO(FSLCommand): """Use FSL flameo command to perform higher level model fits Examples -------- Initialize FLAMEO with no options, assigning them when calling run: >>> from nipype.interfaces import fsl >>> import os >>> flameo = fsl.FLAMEO(cope_file='cope.nii.gz', \ var_cope_file='varcope.nii.gz', \ cov_split_file='cov_split.mat', \ design_file='design.mat', \ t_con_file='design.con', \ mask_file='mask.nii', \ run_mode='fe') >>> flameo.cmdline 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' """ _cmd = 'flameo' input_spec = FLAMEOInputSpec output_spec = FLAMEOOutputSpec # ohinds: 2010-04-06 def _run_interface(self, runtime): log_dir = self.inputs.log_dir cwd = os.getcwd() if os.access(os.path.join(cwd, log_dir), os.F_OK): rmtree(os.path.join(cwd, log_dir)) return super(FLAMEO, self)._run_interface(runtime) # ohinds: 2010-04-06 # made these compatible with flameo def _list_outputs(self): outputs = self._outputs().get() pth = os.path.join(os.getcwd(), self.inputs.log_dir) pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*'))) assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate' outputs['pes'] = pes res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*'))) assert len(res4d) == 1, 'No residual volume generated by FSL Estimate' outputs['res4d'] = res4d[0] copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*'))) assert len(copes) >= 1, 'No cope volumes generated by FSL CEstimate' outputs['copes'] = copes var_copes = human_order_sorted( glob(os.path.join(pth, 'varcope[0-9]*.*'))) assert 
len( var_copes) >= 1, 'No varcope volumes generated by FSL CEstimate' outputs['var_copes'] = var_copes zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*'))) assert len(zstats) >= 1, 'No zstat volumes generated by FSL CEstimate' outputs['zstats'] = zstats if isdefined(self.inputs.f_con_file): zfstats = human_order_sorted( glob(os.path.join(pth, 'zfstat[0-9]*.*'))) assert len( zfstats) >= 1, 'No zfstat volumes generated by FSL CEstimate' outputs['zfstats'] = zfstats fstats = human_order_sorted( glob(os.path.join(pth, 'fstat[0-9]*.*'))) assert len( fstats) >= 1, 'No fstat volumes generated by FSL CEstimate' outputs['fstats'] = fstats tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*'))) assert len(tstats) >= 1, 'No tstat volumes generated by FSL CEstimate' outputs['tstats'] = tstats mrefs = human_order_sorted( glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*'))) assert len( mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO' outputs['mrefvars'] = mrefs tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*'))) assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO' outputs['tdof'] = tdof weights = human_order_sorted( glob(os.path.join(pth, 'weights[0-9]*.*'))) assert len(weights) >= 1, 'No weight volumes generated by FLAMEO' outputs['weights'] = weights outputs['stats_dir'] = pth return outputs class ContrastMgrInputSpec(FSLCommandInputSpec): tcon_file = File(exists=True, mandatory=True, argstr='%s', position=-1, desc='contrast file containing T-contrasts') fcon_file = File(exists=True, argstr='-f %s', desc='contrast file containing F-contrasts') param_estimates = InputMultiPath(File(exists=True), argstr='', copyfile=False, mandatory=True, desc='Parameter estimates for each column of the design matrix') corrections = File(exists=True, copyfile=False, mandatory=True, desc='statistical corrections used within FILM modelling') dof_file = File(exists=True, argstr='', copyfile=False, mandatory=True, desc='degrees of freedom') sigmasquareds = File(exists=True, argstr='', position=-2, copyfile=False, mandatory=True, desc='summary of residuals, See Woolrich, et. al., 2001') contrast_num = traits.Range(low=1, argstr='-cope', desc='contrast number to start labeling copes from') suffix = traits.Str(argstr='-suffix %s', desc='suffix to put on the end of the cope filename before the contrast number, default is nothing') class ContrastMgrOutputSpec(TraitedSpec): copes = OutputMultiPath(File(exists=True), desc='Contrast estimates for each contrast') varcopes = OutputMultiPath(File(exists=True), desc='Variance estimates for each contrast') zstats = OutputMultiPath(File(exists=True), desc='z-stat file for each contrast') tstats = OutputMultiPath(File(exists=True), desc='t-stat file for each contrast') fstats = OutputMultiPath(File(exists=True), desc='f-stat file for each contrast') zfstats = OutputMultiPath(File(exists=True), desc='z-stat file for each F contrast') neffs = OutputMultiPath(File(exists=True), desc='neff file ?? for each contrast') class ContrastMgr(FSLCommand): """Use FSL contrast_mgr command to evaluate contrasts In interface mode this file assumes that all the required inputs are in the same location. """ _cmd = 'contrast_mgr' input_spec = ContrastMgrInputSpec output_spec = ContrastMgrOutputSpec def _run_interface(self, runtime): # The returncode is meaningless in ContrastMgr. So check the output # in stderr and if it's set, then update the returncode # accordingly. 
runtime = super(ContrastMgr, self)._run_interface(runtime) if runtime.stderr: self.raise_exception(runtime) return runtime def _format_arg(self, name, trait_spec, value): if name in ['param_estimates', 'corrections', 'dof_file']: return '' elif name in ['sigmasquareds']: path, _ = os.path.split(value) return path else: return super(ContrastMgr, self)._format_arg(name, trait_spec, value) def _get_design_root(self, infile): _, fname = os.path.split(infile) return fname.split('.')[0] def _get_numcons(self): numtcons = 0 numfcons = 0 if isdefined(self.inputs.tcon_file): fp = open(self.inputs.tcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numtcons = int(line.split()[-1]) break fp.close() if isdefined(self.inputs.fcon_file): fp = open(self.inputs.fcon_file, 'rt') for line in fp.readlines(): if line.startswith('/NumContrasts'): numfcons = int(line.split()[-1]) break fp.close() return numtcons, numfcons def _list_outputs(self): outputs = self._outputs().get() pth, _ = os.path.split(self.inputs.sigmasquareds) numtcons, numfcons = self._get_numcons() base_contrast = 1 if isdefined(self.inputs.contrast_num): base_contrast = self.inputs.contrast_num copes = [] varcopes = [] zstats = [] tstats = [] neffs = [] for i in range(numtcons): copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i), cwd=pth)) varcopes.append( self._gen_fname('varcope%d.nii' % (base_contrast + i), cwd=pth)) zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i), cwd=pth)) tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i), cwd=pth)) neffs.append(self._gen_fname('neff%d.nii' % (base_contrast + i), cwd=pth)) if copes: outputs['copes'] = copes outputs['varcopes'] = varcopes outputs['zstats'] = zstats outputs['tstats'] = tstats outputs['neffs'] = neffs fstats = [] zfstats = [] for i in range(numfcons): fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i), cwd=pth)) zfstats.append( self._gen_fname('zfstat%d.nii' % (base_contrast + i), cwd=pth)) if fstats: outputs['fstats'] = fstats outputs['zfstats'] = zfstats return outputs class L2ModelInputSpec(BaseInterfaceInputSpec): num_copes = traits.Range(low=1, mandatory=True, desc='number of copes to be combined') class L2ModelOutputSpec(TraitedSpec): design_mat = File(exists=True, desc='design matrix file') design_con = File(exists=True, desc='design contrast file') design_grp = File(exists=True, desc='design group file') class L2Model(BaseInterface): """Generate subject specific second level model Examples -------- >>> from nipype.interfaces.fsl import L2Model >>> model = L2Model(num_copes=3) # 3 sessions """ input_spec = L2ModelInputSpec output_spec = L2ModelOutputSpec def _run_interface(self, runtime): cwd = os.getcwd() mat_txt = ['/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), '/PPheights 1', '', '/Matrix'] for i in range(self.inputs.num_copes): mat_txt += ['1'] mat_txt = '\n'.join(mat_txt) con_txt = ['/ContrastName1 group mean', '/NumWaves 1', '/NumContrasts 1', '/PPheights 1', '/RequiredEffect 100', # XX where does this # number come from '', '/Matrix', '1'] con_txt = '\n'.join(con_txt) grp_txt = ['/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), '', '/Matrix'] for i in range(self.inputs.num_copes): grp_txt += ['1'] grp_txt = '\n'.join(grp_txt) txt = {'design.mat': mat_txt, 'design.con': con_txt, 'design.grp': grp_txt} # write design files for i, name in enumerate(['design.mat', 'design.con', 'design.grp']): f = open(os.path.join(cwd, name), 'wt') f.write(txt[name]) 
f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() for field in list(outputs.keys()): outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) return outputs class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec): contrasts = traits.List( traits.Either(traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), traits.List(traits.Float)), traits.Tuple(traits.Str, traits.Enum('F'), traits.List(traits.Tuple(traits.Str, traits.Enum('T'), traits.List( traits.Str), traits.List( traits.Float)), ))), mandatory=True, desc="List of contrasts with each contrast being a list of the form - \ [('name', 'stat', [condition list], [weight list])]. if \ session list is None or not provided, all sessions are used. For F \ contrasts, the condition list should contain previously defined \ T-contrasts without any weight list.") regressors = traits.Dict(traits.Str, traits.List(traits.Float), mandatory=True, desc='dictionary containing named lists of regressors') groups = traits.List(traits.Int, desc='list of group identifiers (defaults to single group)') class MultipleRegressDesignOutputSpec(TraitedSpec): design_mat = File(exists=True, desc='design matrix file') design_con = File(exists=True, desc='design t-contrast file') design_fts = File(exists=True, desc='design f-contrast file') design_grp = File(exists=True, desc='design group file') class MultipleRegressDesign(BaseInterface): """Generate multiple regression design .. note:: FSL does not demean columns for higher level analysis. Please see `FSL documentation <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#higher>`_ for more details on model specification for higher level analysis. Examples -------- >>> from nipype.interfaces.fsl import MultipleRegressDesign >>> model = MultipleRegressDesign() >>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]] >>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3]) >>> model.run() # doctest: +SKIP """ input_spec = MultipleRegressDesignInputSpec output_spec = MultipleRegressDesignOutputSpec def _run_interface(self, runtime): cwd = os.getcwd() regs = sorted(self.inputs.regressors.keys()) nwaves = len(regs) npoints = len(self.inputs.regressors[regs[0]]) ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T']) nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) # write mat file mat_txt = ['/NumWaves %d' % nwaves, '/NumPoints %d' % npoints] ppheights = [] for reg in regs: maxreg = np.max(self.inputs.regressors[reg]) minreg = np.min(self.inputs.regressors[reg]) if np.sign(maxreg) == np.sign(minreg): regheight = max([abs(minreg), abs(maxreg)]) else: regheight = abs(maxreg - minreg) ppheights.append('%e' % regheight) mat_txt += ['/PPheights ' + ' '.join(ppheights)] mat_txt += ['', '/Matrix'] for cidx in range(npoints): mat_txt.append(' '.join( ['%e' % self.inputs.regressors[key][cidx] for key in regs])) mat_txt = '\n'.join(mat_txt) + '\n' # write t-con file con_txt = [] counter = 0 tconmap = {} for conidx, con in enumerate(self.inputs.contrasts): if con[1] == 'T': tconmap[conidx] = counter counter += 1 con_txt += ['/ContrastName%d %s' % (counter, con[0])] con_txt += ['/NumWaves %d' % nwaves, '/NumContrasts %d' % ntcons, '/PPheights %s' % ' '.join( ['%e' % 1 for i in range(counter)]), '/RequiredEffect %s' % ' '.join( ['%.3f' % 100 for i in range(counter)]), '', '/Matrix'] for idx in sorted(tconmap.keys()): convals = np.zeros((nwaves, 1)) for regidx, reg in enumerate(self.inputs.contrasts[idx][2]): convals[regs.index(reg) ] = 
self.inputs.contrasts[idx][3][regidx] con_txt.append(' '.join(['%e' % val for val in convals])) con_txt = '\n'.join(con_txt) + '\n' # write f-con file fcon_txt = '' if nfcons: fcon_txt = ['/NumWaves %d' % ntcons, '/NumContrasts %d' % nfcons, '', '/Matrix'] for conidx, con in enumerate(self.inputs.contrasts): if con[1] == 'F': convals = np.zeros((ntcons, 1)) for tcon in con[2]: convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1 fcon_txt.append(' '.join(['%d' % val for val in convals])) fcon_txt = '\n'.join(fcon_txt) fcon_txt += '\n' # write group file grp_txt = ['/NumWaves 1', '/NumPoints %d' % npoints, '', '/Matrix'] for i in range(npoints): if isdefined(self.inputs.groups): grp_txt += ['%d' % self.inputs.groups[i]] else: grp_txt += ['1'] grp_txt = '\n'.join(grp_txt) + '\n' txt = {'design.mat': mat_txt, 'design.con': con_txt, 'design.fts': fcon_txt, 'design.grp': grp_txt} # write design files for key, val in list(txt.items()): if ('fts' in key) and (nfcons == 0): continue filename = key.replace('_', '.') f = open(os.path.join(cwd, filename), 'wt') f.write(val) f.close() return runtime def _list_outputs(self): outputs = self._outputs().get() nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) for field in list(outputs.keys()): if ('fts' in field) and (nfcons == 0): continue outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) return outputs class SMMInputSpec(FSLCommandInputSpec): spatial_data_file = File( exists=True, position=0, argstr='--sdf="%s"', mandatory=True, desc="statistics spatial map", copyfile=False) mask = File(exists=True, position=1, argstr='--mask="%s"', mandatory=True, desc="mask file", copyfile=False) no_deactivation_class = traits.Bool(position=2, argstr="--zfstatmode", desc="enforces no deactivation class") class SMMOutputSpec(TraitedSpec): null_p_map = File(exists=True) activation_p_map = File(exists=True) deactivation_p_map = File(exists=True) class SMM(FSLCommand): ''' Spatial Mixture Modelling. For more detail on the spatial mixture modelling see Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. 
''' _cmd = 'mm --ld=logdir' input_spec = SMMInputSpec output_spec = SMMOutputSpec def _list_outputs(self): outputs = self._outputs().get() # TODO get the true logdir from the stdout outputs['null_p_map'] = self._gen_fname(basename="w1_mean", cwd="logdir") outputs['activation_p_map'] = self._gen_fname( basename="w2_mean", cwd="logdir") if not isdefined(self.inputs.no_deactivation_class) or not self.inputs.no_deactivation_class: outputs['deactivation_p_map'] = self._gen_fname( basename="w3_mean", cwd="logdir") return outputs class MELODICInputSpec(FSLCommandInputSpec): in_files = InputMultiPath( File(exists=True), argstr="-i %s", mandatory=True, position=0, desc="input file names (either single file name or a list)", sep=",") out_dir = Directory( argstr="-o %s", desc="output directory name", genfile=True) mask = File(exists=True, argstr="-m %s", desc="file name of mask for thresholding") no_mask = traits.Bool(argstr="--nomask", desc="switch off masking") update_mask = traits.Bool( argstr="--update_mask", desc="switch off mask updating") no_bet = traits.Bool(argstr="--nobet", desc="switch off BET") bg_threshold = traits.Float( argstr="--bgthreshold=%f", desc="brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected)") dim = traits.Int(argstr="-d %d", desc="dimensionality reduction into #num dimensions" "(default: automatic estimation)") dim_est = traits.Str(argstr="--dimest=%s", desc="use specific dim. estimation technique:" " lap, bic, mdl, aic, mean (default: lap)") sep_whiten = traits.Bool( argstr="--sep_whiten", desc="switch on separate whitening") sep_vn = traits.Bool( argstr="--sep_vn", desc="switch off joined variance normalization") num_ICs = traits.Int( argstr="-n %d", desc="number of IC's to extract (for deflation approach)") approach = traits.Str(argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), " " 3D: tica (default), concat") non_linearity = traits.Str( argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4") var_norm = traits.Bool( argstr="--vn", desc="switch off variance normalization") pbsc = traits.Bool( argstr="--pbsc", desc="switch off conversion to percent BOLD signal change") cov_weight = traits.Float(argstr="--covarweight=%f", desc="voxel-wise weights for the covariance " "matrix (e.g. 
segmentation information)") epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change") epsilonS = traits.Float( argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA") maxit = traits.Int(argstr="--maxit=%d", desc="maximum number of iterations before restart") max_restart = traits.Int( argstr="--maxrestart=%d", desc="maximum number of restarts") mm_thresh = traits.Float( argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference") no_mm = traits.Bool( argstr="--no_mm", desc="switch off mixture modelling on IC maps") ICs = File(exists=True, argstr="--ICs=%s", desc="filename of the IC components file for mixture modelling") mix = File(exists=True, argstr="--mix=%s", desc="mixing matrix for mixture modelling / filtering") smode = File(exists=True, argstr="--smode=%s", desc="matrix of session modes for report generation") rem_cmp = traits.List( traits.Int, argstr="-f %d", desc="component numbers to remove") report = traits.Bool(argstr="--report", desc="generate Melodic web report") bg_image = File(exists=True, argstr="--bgimage=%s", desc="specify background image for report" " (default: mean image)") tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds") log_power = traits.Bool( argstr="--logPower", desc="calculate log of power for frequency spectrum") t_des = File(exists=True, argstr="--Tdes=%s", desc="design matrix across time-domain") t_con = File(exists=True, argstr="--Tcon=%s", desc="t-contrast matrix across time-domain") s_des = File(exists=True, argstr="--Sdes=%s", desc="design matrix across subject-domain") s_con = File(exists=True, argstr="--Scon=%s", desc="t-contrast matrix across subject-domain") out_all = traits.Bool(argstr="--Oall", desc="output everything") out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix") out_stats = traits.Bool( argstr="--Ostats", desc="output thresholded maps and probability maps") out_pca = traits.Bool(argstr="--Opca", desc="output PCA results") out_white = traits.Bool( argstr="--Owhite", desc="output whitening/dewhitening matrices") out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs") out_mean = traits.Bool(argstr="--Omean", desc="output mean volume") report_maps = traits.Str(argstr="--report_maps=%s", desc="control string for spatial map images (see slicer)") remove_deriv = traits.Bool(argstr="--remove_deriv", desc="removes every second entry in paradigm" " file (EV derivatives)") class MELODICOutputSpec(TraitedSpec): out_dir = Directory(exists=True) report_dir = Directory(exists=True) class MELODIC(FSLCommand): """Multivariate Exploratory Linear Optimised Decomposition into Independent Components Examples -------- >>> melodic_setup = MELODIC() >>> melodic_setup.inputs.approach = 'tica' >>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii'] >>> melodic_setup.inputs.no_bet = True >>> melodic_setup.inputs.bg_threshold = 10 >>> melodic_setup.inputs.tr_sec = 1.5 >>> melodic_setup.inputs.mm_thresh = 0.5 >>> melodic_setup.inputs.out_stats = True >>> melodic_setup.inputs.t_des = 'timeDesign.mat' >>> melodic_setup.inputs.t_con = 'timeDesign.con' >>> melodic_setup.inputs.s_des = 'subjectDesign.mat' >>> melodic_setup.inputs.s_con = 'subjectDesign.con' >>> melodic_setup.inputs.out_dir = 'groupICA.out' >>> melodic_setup.cmdline 'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con --Sdes=subjectDesign.mat 
--Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000' >>> melodic_setup.run() # doctest: +SKIP """ input_spec = MELODICInputSpec output_spec = MELODICOutputSpec _cmd = 'melodic' def _list_outputs(self): outputs = self.output_spec().get() outputs['out_dir'] = self.inputs.out_dir if not isdefined(outputs['out_dir']): outputs['out_dir'] = self._gen_filename("out_dir") if isdefined(self.inputs.report) and self.inputs.report: outputs['report_dir'] = os.path.join( self._gen_filename("out_dir"), "report") return outputs def _gen_filename(self, name): if name == "out_dir": return os.getcwd() class SmoothEstimateInputSpec(FSLCommandInputSpec): dof = traits.Int(argstr='--dof=%d', mandatory=True, xor=['zstat_file'], desc='number of degrees of freedom') mask_file = File(argstr='--mask=%s', exists=True, mandatory=True, desc='brain mask volume') residual_fit_file = File(argstr='--res=%s', exists=True, requires=['dof'], desc='residual-fit image file') zstat_file = File(argstr='--zstat=%s', exists=True, xor=['dof'], desc='zstat image file') class SmoothEstimateOutputSpec(TraitedSpec): dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))') volume = traits.Int(desc='number of voxels in mask') resels = traits.Float(desc='number of resels') class SmoothEstimate(FSLCommand): """ Estimates the smoothness of an image Examples -------- >>> est = SmoothEstimate() >>> est.inputs.zstat_file = 'zstat1.nii.gz' >>> est.inputs.mask_file = 'mask.nii' >>> est.cmdline 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' """ input_spec = SmoothEstimateInputSpec output_spec = SmoothEstimateOutputSpec _cmd = 'smoothest' def aggregate_outputs(self, runtime=None, needed_outputs=None): outputs = self._outputs() stdout = runtime.stdout.split('\n') outputs.dlh = float(stdout[0].split()[1]) outputs.volume = int(stdout[1].split()[1]) outputs.resels = float(stdout[2].split()[1]) return outputs class ClusterInputSpec(FSLCommandInputSpec): in_file = File(argstr='--in=%s', mandatory=True, exists=True, desc='input volume') threshold = traits.Float(argstr='--thresh=%.10f', mandatory=True, desc='threshold for input volume') out_index_file = traits.Either(traits.Bool, File, argstr='--oindex=%s', desc='output of cluster index (in size order)', hash_files=False) out_threshold_file = traits.Either(traits.Bool, File, argstr='--othresh=%s', desc='thresholded image', hash_files=False) out_localmax_txt_file = traits.Either(traits.Bool, File, argstr='--olmax=%s', desc='local maxima text file', hash_files=False) out_localmax_vol_file = traits.Either(traits.Bool, File, argstr='--olmaxim=%s', desc='output of local maxima volume', hash_files=False) out_size_file = traits.Either(traits.Bool, File, argstr='--osize=%s', desc='filename for output of size image', hash_files=False) out_max_file = traits.Either(traits.Bool, File, argstr='--omax=%s', desc='filename for output of max image', hash_files=False) out_mean_file = traits.Either(traits.Bool, File, argstr='--omean=%s', desc='filename for output of mean image', hash_files=False) out_pval_file = traits.Either(traits.Bool, File, argstr='--opvals=%s', desc='filename for image output of log pvals', hash_files=False) pthreshold = traits.Float(argstr='--pthresh=%.10f', requires=['dlh', 'volume'], desc='p-threshold for clusters') peak_distance = traits.Float(argstr='--peakdist=%.10f', desc='minimum distance between local maxima/minima, in mm (default 0)') cope_file = traits.File(argstr='--cope=%s', desc='cope volume') volume = traits.Int(argstr='--volume=%d', desc='number of voxels in the mask') 
dlh = traits.Float(argstr='--dlh=%.10f', desc='smoothness estimate = sqrt(det(Lambda))') fractional = traits.Bool('--fractional', desc='interprets the threshold as a fraction of the robust range') connectivity = traits.Int(argstr='--connectivity=%d', desc='the connectivity of voxels (default 26)') use_mm = traits.Bool('--mm', desc='use mm, not voxel, coordinates') find_min = traits.Bool('--min', desc='find minima instead of maxima') no_table = traits.Bool( '--no_table', desc='suppresses printing of the table info') minclustersize = traits.Bool(argstr='--minclustersize', desc='prints out minimum significant cluster size') xfm_file = File(argstr='--xfm=%s', desc='filename for Linear: input->standard-space transform. Non-linear: input->highres transform') std_space_file = File(argstr='--stdvol=%s', desc='filename for standard-space volume') num_maxima = traits.Int(argstr='--num=%d', desc='no of local maxima to report') warpfield_file = File(argstr='--warpvol=%s', desc='file contining warpfield') class ClusterOutputSpec(TraitedSpec): index_file = File(desc='output of cluster index (in size order)') threshold_file = File(desc='thresholded image') localmax_txt_file = File(desc='local maxima text file') localmax_vol_file = File(desc='output of local maxima volume') size_file = File(desc='filename for output of size image') max_file = File(desc='filename for output of max image') mean_file = File(desc='filename for output of mean image') pval_file = File(desc='filename for image output of log pvals') class Cluster(FSLCommand): """ Uses FSL cluster to perform clustering on statistical output Examples -------- >>> cl = Cluster() >>> cl.inputs.threshold = 2.3 >>> cl.inputs.in_file = 'zstat1.nii.gz' >>> cl.inputs.out_localmax_txt_file = 'stats.txt' >>> cl.cmdline 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000' """ input_spec = ClusterInputSpec output_spec = ClusterOutputSpec _cmd = 'cluster' filemap = {'out_index_file': 'index', 'out_threshold_file': 'threshold', 'out_localmax_txt_file': 'localmax.txt', 'out_localmax_vol_file': 'localmax', 'out_size_file': 'size', 'out_max_file': 'max', 'out_mean_file': 'mean', 'out_pval_file': 'pval'} def _list_outputs(self): outputs = self.output_spec().get() for key, suffix in list(self.filemap.items()): outkey = key[4:] inval = getattr(self.inputs, key) if isdefined(inval): if isinstance(inval, bool): if inval: change_ext = True if suffix.endswith('.txt'): change_ext = False outputs[outkey] = self._gen_fname(self.inputs.in_file, suffix='_' + suffix, change_ext=change_ext) else: outputs[outkey] = os.path.abspath(inval) return outputs def _format_arg(self, name, spec, value): if name in list(self.filemap.keys()): if isinstance(value, bool): fname = self._list_outputs()[name[4:]] else: fname = value return spec.argstr % fname return super(Cluster, self)._format_arg(name, spec, value) class RandomiseInputSpec(FSLCommandInputSpec): in_file = File(exists=True, desc='4D input file', argstr='-i %s', position=0, mandatory=True) base_name = traits.Str( 'tbss_', desc='the rootname that all generated files will have', argstr='-o "%s"', position=1, usedefault=True) design_mat = File( exists=True, desc='design matrix file', argstr='-d %s', position=2) tcon = File( exists=True, desc='t contrasts file', argstr='-t %s', position=3) fcon = File(exists=True, desc='f contrasts file', argstr='-f %s') mask = File(exists=True, desc='mask image', argstr='-m %s') x_block_labels = File( exists=True, desc='exchangeability block labels file', argstr='-e %s') demean = 
traits.Bool( desc='demean data temporally before model fitting', argstr='-D') one_sample_group_mean = traits.Bool( desc='perform 1-sample group-mean test instead of generic permutation test', argstr='-1') show_total_perms = traits.Bool( desc='print out how many unique permutations would be generated and exit', argstr='-q') show_info_parallel_mode = traits.Bool( desc='print out information required for parallel mode and exit', argstr='-Q') vox_p_values = traits.Bool( desc='output voxelwise (corrected and uncorrected) p-value images', argstr='-x') tfce = traits.Bool( desc='carry out Threshold-Free Cluster Enhancement', argstr='-T') tfce2D = traits.Bool( desc='carry out Threshold-Free Cluster Enhancement with 2D optimisation', argstr='--T2') f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only') raw_stats_imgs = traits.Bool( desc='output raw ( unpermuted ) statistic images', argstr='-R') p_vec_n_dist_files = traits.Bool( desc='output permutation vector and null distribution text files', argstr='-P') num_perm = traits.Int( argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)') seed = traits.Int( argstr='--seed=%d', desc='specific integer seed for random number generator') var_smooth = traits.Int( argstr='-v %d', desc='use variance smoothing (std is in mm)') c_thresh = traits.Float( argstr='-c %.2f', desc='carry out cluster-based thresholding') cm_thresh = traits.Float( argstr='-C %.2f', desc='carry out cluster-mass-based thresholding') f_c_thresh = traits.Float( argstr='-F %.2f', desc='carry out f cluster thresholding') f_cm_thresh = traits.Float( argstr='-S %.2f', desc='carry out f cluster-mass thresholding') tfce_H = traits.Float( argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)') tfce_E = traits.Float( argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)') tfce_C = traits.Float( argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)') class RandomiseOutputSpec(TraitedSpec): tstat_files = traits.List( File(exists=True), desc='t contrast raw statistic') fstat_files = traits.List( File(exists=True), desc='f contrast raw statistic') t_p_files = traits.List( File(exists=True), desc='f contrast uncorrected p values files') f_p_files = traits.List( File(exists=True), desc='f contrast uncorrected p values files') t_corrected_p_files = traits.List( File(exists=True), desc='t contrast FWE (Family-wise error) corrected p values files') f_corrected_p_files = traits.List( File(exists=True), desc='f contrast FWE (Family-wise error) corrected p values files') class Randomise(FSLCommand): """FSL Randomise: feeds the 4D projected FA data into GLM modelling and thresholding in order to find voxels which correlate with your model Example ------- >>> import nipype.interfaces.fsl as fsl >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') >>> rand.cmdline 'randomise -i allFA.nii -o "tbss_" -d design.mat -t design.con -m mask.nii' """ _cmd = 'randomise' input_spec = RandomiseInputSpec output_spec = RandomiseOutputSpec def _list_outputs(self): outputs = self.output_spec().get() outputs['tstat_files'] = glob(self._gen_fname( '%s_tstat*.nii' % self.inputs.base_name)) outputs['fstat_files'] = glob(self._gen_fname( '%s_fstat*.nii' % self.inputs.base_name)) prefix = False if self.inputs.tfce or self.inputs.tfce2D: prefix = 'tfce' elif self.inputs.vox_p_values: prefix = 'vox' elif self.inputs.c_thresh or self.inputs.f_c_thresh: prefix = 'clustere' elif 
self.inputs.cm_thresh or self.inputs.f_cm_thresh: prefix = 'clusterm' if prefix: outputs['t_p_files'] = glob(self._gen_fname( '%s_%s_p_tstat*' % (self.inputs.base_name, prefix))) outputs['t_corrected_p_files'] = glob(self._gen_fname( '%s_%s_corrp_tstat*.nii' % (self.inputs.base_name, prefix))) outputs['f_p_files'] = glob(self._gen_fname( '%s_%s_p_fstat*.nii' % (self.inputs.base_name, prefix))) outputs['f_corrected_p_files'] = glob(self._gen_fname( '%s_%s_corrp_fstat*.nii' % (self.inputs.base_name, prefix))) return outputs class GLMInputSpec(FSLCommandInputSpec): in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1, desc='input file name (text matrix or 3D/4D image file)') out_file = File(name_template="%s_glm", argstr='-o %s', position=3, desc=('filename for GLM parameter estimates' + ' (GLM betas)'), name_source="in_file", keep_extension=True) design = File(exists=True, argstr='-d %s', mandatory=True, position=2, desc=('file name of the GLM design matrix (text time' + ' courses for temporal regression or an image' + ' file for spatial regression)')) contrasts = File(exists=True, argstr='-c %s', desc=('matrix of t-statics' + ' contrasts')) mask = File(exists=True, argstr='-m %s', desc=('mask image file name if' + ' input is image')) dof = traits.Int(argstr='--dof=%d', desc=('set degrees of freedom' + ' explicitly')) des_norm = traits.Bool(argstr='--des_norm', desc=('switch on normalization of the design' + ' matrix columns to unit std deviation')) dat_norm = traits.Bool(argstr='--dat_norm', desc=('switch on normalization' ' of the data time' + ' series to unit std' + ' deviation')) var_norm = traits.Bool(argstr='--vn', desc=('perform MELODIC variance-' + 'normalisation on data')) demean = traits.Bool(argstr='--demean', desc=('switch on demeaining of ' + ' design and data')) out_cope = File(argstr='--out_cope=%s', desc='output file name for COPE (either as txt or image') out_z_name = File(argstr='--out_z=%s', desc='output file name for Z-stats (either as txt or image') out_t_name = File(argstr='--out_t=%s', desc='output file name for t-stats (either as txt or image') out_p_name = File(argstr='--out_p=%s', desc=('output file name for p-values of Z-stats (either as' + ' text file or image)')) out_f_name = File(argstr='--out_f=%s', desc='output file name for F-value of full model fit') out_pf_name = File(argstr='--out_pf=%s', desc='output file name for p-value for full model fit') out_res_name = File(argstr='--out_res=%s', desc='output file name for residuals') out_varcb_name = File(argstr='--out_varcb=%s', desc='output file name for variance of COPEs') out_sigsq_name = File(argstr='--out_sigsq=%s', desc=('output file name for residual noise variance' + ' sigma-square')) out_data_name = File(argstr='--out_data=%s', desc='output file name for pre-processed data') out_vnscales_name = File(argstr='--out_vnscales=%s', desc=('output file name for scaling factors for variance' + ' normalisation')) class GLMOutputSpec(TraitedSpec): out_file = File(exists=True, desc=('file name of GLM parameters' ' (if generated)')) out_cope = OutputMultiPath(File(exists=True), desc=('output file name for COPEs (either as ' 'text file or image)')) out_z = OutputMultiPath(File(exists=True), desc=('output file name for COPEs (either as text ' 'file or image)')) out_t = OutputMultiPath(File(exists=True), desc=('output file name for t-stats (either as ' 'text file or image)')) out_p = OutputMultiPath(File(exists=True), desc=('output file name for p-values of Z-stats ' '(either as text file or image)')) 
out_f = OutputMultiPath(File(exists=True), desc=('output file name for F-value of full model ' 'fit')) out_pf = OutputMultiPath(File(exists=True), desc=('output file name for p-value for full ' 'model fit')) out_res = OutputMultiPath(File(exists=True), desc='output file name for residuals') out_varcb = OutputMultiPath(File(exists=True), desc='output file name for variance of COPEs') out_sigsq = OutputMultiPath(File(exists=True), desc=('output file name for residual noise ' 'variance sigma-square')) out_data = OutputMultiPath(File(exists=True), desc='output file for preprocessed data') out_vnscales = OutputMultiPath(File(exists=True), desc=('output file name for scaling factors ' 'for variance normalisation')) class GLM(FSLCommand): """ FSL GLM: Example ------- >>> import nipype.interfaces.fsl as fsl >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') >>> glm.cmdline 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' """ _cmd = 'fsl_glm' input_spec = GLMInputSpec output_spec = GLMOutputSpec def _list_outputs(self): outputs = super(GLM, self)._list_outputs() if isdefined(self.inputs.out_cope): outputs['out_cope'] = os.path.abspath(self.inputs.out_cope) if isdefined(self.inputs.out_z_name): outputs['out_z'] = os.path.abspath(self.inputs.out_z_name) if isdefined(self.inputs.out_t_name): outputs['out_t'] = os.path.abspath(self.inputs.out_t_name) if isdefined(self.inputs.out_p_name): outputs['out_p'] = os.path.abspath(self.inputs.out_p_name) if isdefined(self.inputs.out_f_name): outputs['out_f'] = os.path.abspath(self.inputs.out_f_name) if isdefined(self.inputs.out_pf_name): outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name) if isdefined(self.inputs.out_res_name): outputs['out_res'] = os.path.abspath(self.inputs.out_res_name) if isdefined(self.inputs.out_varcb_name): outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name) if isdefined(self.inputs.out_sigsq_name): outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name) if isdefined(self.inputs.out_data_name): outputs['out_data'] = os.path.abspath(self.inputs.out_data_name) if isdefined(self.inputs.out_vnscales_name): outputs['out_vnscales'] = os.path.abspath( self.inputs.out_vnscales_name) return outputs
FCP-INDI/nipype
nipype/interfaces/fsl/model.py
Python
bsd-3-clause
88,336
[ "Gaussian" ]
48a325834f3bfd087f0048bcae2a8b9feabd580956ce923256e7ce0b634fcd25
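The FLAMEO, L2Model and related interfaces in the file above are normally chained for a second-level analysis. As a minimal sketch (the cope/varcope/mask file names below are placeholders, not files referenced in this repository), L2Model's generated design files can be wired into FLAMEO roughly as follows:

from nipype.interfaces import fsl

# Build a one-sample group-mean design for three lower-level copes.
model = fsl.L2Model(num_copes=3)
design = model.run()  # writes design.mat, design.con and design.grp

# Feed the generated design files into FLAMEO (fixed-effects run mode).
flameo = fsl.FLAMEO(cope_file='cope.nii.gz',            # placeholder merged cope image
                    var_cope_file='varcope.nii.gz',      # placeholder merged varcope image
                    mask_file='mask.nii',                # placeholder brain mask
                    design_file=design.outputs.design_mat,
                    t_con_file=design.outputs.design_con,
                    cov_split_file=design.outputs.design_grp,
                    run_mode='fe')
stats = flameo.run()  # outputs collected from the stats_dir as in _list_outputs above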
""" An example creating a structured grid data set from numpy arrays using TVTK and visualizing it using mlab. In this example, we create a structured-grid data set: we describe data, both scalar and vector, lying on a structured-grid, ie a grid where each vertex has 6 neighboors. For this we directly create a StructuredGrid tvtk object, rather than using the mlab.pipeline source functions, as it gives us more control. To visualize the resulting dataset, we apply several modules, using the mlab.pipeline interface (see :ref:`controlling-the-pipeline-with-mlab-scripts`) """ # Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in> # Copyright (c) 2008, Prabhu Ramachandran. # License: BSD Style. from numpy import mgrid, empty, sin, pi from tvtk.api import tvtk from mayavi import mlab # Generate some points. x, y, z = mgrid[1:6:11j, 0:4:13j, 0:3:6j] base = x[..., 0] + y[..., 0] # Some interesting z values. for i in range(z.shape[2]): z[..., i] = base * 0.25 * i # The actual points. pts = empty(z.shape + (3,), dtype=float) pts[..., 0] = x pts[..., 1] = y pts[..., 2] = z # Simple scalars. scalars = x * x + y * y + z * z # Some vectors vectors = empty(z.shape + (3,), dtype=float) vectors[..., 0] = (4 - y * 2) vectors[..., 1] = (x * 3 - 12) vectors[..., 2] = sin(z * pi) # We reorder the points, scalars and vectors so this is as per VTK's # requirement of x first, y next and z last. pts = pts.transpose(2, 1, 0, 3).copy() pts.shape = pts.size / 3, 3 scalars = scalars.T.copy() vectors = vectors.transpose(2, 1, 0, 3).copy() vectors.shape = vectors.size / 3, 3 # Create the dataset. sg = tvtk.StructuredGrid(dimensions=x.shape, points=pts) sg.point_data.scalars = scalars.ravel() sg.point_data.scalars.name = 'temperature' sg.point_data.vectors = vectors sg.point_data.vectors.name = 'velocity' # Thats it! # Now visualize the data. d = mlab.pipeline.add_dataset(sg) gx = mlab.pipeline.grid_plane(d) gy = mlab.pipeline.grid_plane(d) gy.grid_plane.axis = 'y' gz = mlab.pipeline.grid_plane(d) gz.grid_plane.axis = 'z' iso = mlab.pipeline.iso_surface(d) iso.contour.maximum_contour = 75.0 vec = mlab.pipeline.vectors(d) vec.glyph.mask_input_points = True vec.glyph.glyph.scale_factor = 1.5 mlab.show()
dmsurti/mayavi
examples/mayavi/mlab/simple_structured_grid.py
Python
bsd-3-clause
2,224
[ "Mayavi", "VTK" ]
d48aa65e8f89cd639dc30d0a2352c4f2b59b1f543c99022eaa334a51eebe65f4
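The structured-grid script above stops at grid planes, an iso-surface and vectors; further modules can be attached to the same source. A minimal sketch, assuming the dataset handle d created in that script, might add a scalar cut plane and an outline:

from mayavi import mlab

# d is the mlab.pipeline data source built from the StructuredGrid above.
cut = mlab.pipeline.scalar_cut_plane(d)   # draggable plane through the 'temperature' scalars
mlab.outline(d)                           # bounding box around the grid
mlab.show()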
#----------------------------------------------------------------------------- # Copyright (c) 2013-2014, Allen B. Riddell # # This file is licensed under Version 3.0 of the GNU General Public # License. See LICENSE for a text of the license. #----------------------------------------------------------------------------- NAME = 'horizont' DESCRIPTION = 'Topic models' LONG_DESCRIPTION = open('README.rst').read() MAINTAINER = 'Allen B. Riddell' MAINTAINER_EMAIL = 'abr@ariddell.org' URL = 'https://github.com/ariddell/horizont' LICENSE = 'GPLv3' CLASSIFIERS = [ 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Operating System :: OS Independent', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis' ] import ast import codecs import os import sys from setuptools import setup, find_packages # before distutils import from distutils.command.sdist import sdist from distutils.extension import Extension REQUIRES = ['numpy', 'scipy', 'scikit-learn'] PY2 = sys.version_info[0] == 2 if PY2: REQUIRES += ['futures'] # VersionFinder from from django-compressor class VersionFinder(ast.NodeVisitor): def __init__(self): self.version = None def visit_Assign(self, node): if node.targets[0].id == '__version__': self.version = node.value.s def read(*parts): filename = os.path.join(os.path.dirname(__file__), *parts) with codecs.open(filename, encoding='utf-8') as fp: return fp.read() def find_version(*parts): finder = VersionFinder() finder.visit(ast.parse(read(*parts))) return finder.version try: from Cython.Build import cythonize cython = True except ImportError: cython = False class CheckSDist(sdist): """Custom sdist that ensures Cython has compiled all pyx files to c.""" def initialize_options(self): sdist.initialize_options(self) self._pyxfiles = [] for root, dirs, files in os.walk('horizont'): for f in files: if f.endswith('.pyx'): self._pyxfiles.append(os.path.join(root, f)) def run(self): if 'cython' in cmdclass: self.run_command('cython') else: for pyxfile in self._pyxfiles: cfile = pyxfile[:-3] + 'c' msg = "C-source file '%s' not found." % (cfile) + \ " Run 'setup.py cython' before sdist." assert os.path.isfile(cfile), msg sdist.run(self) cmdclass = {'sdist': CheckSDist} ########################################################################### # Cython extensions to compile ########################################################################### random_sources = ["horizont/RNG/GRNG.cpp", "horizont/RNG/RNG.cpp", "horizont/BayesLogit/Code/C/PolyaGamma.cpp", "horizont/BayesLogit/Code/C/PolyaGammaAlt.cpp", "horizont/BayesLogit/Code/C/PolyaGammaSP.cpp", "horizont/BayesLogit/Code/C/InvertY.cpp"] include_gsl_dir = os.environ.get('GSL_INC_DIR', "/usr/include/") lib_gsl_dir = os.environ.get('GSL_LIB_DIR', "/usr/lib/") random_include_dirs = ["horizont/BayesLogit/Code/C", "horizont/RNG", include_gsl_dir] random_library_dirs = [lib_gsl_dir] random_libraries = ['gsl', 'gslcblas'] # FIXME: this could be simplified, c.f. pandas # The build will not fail if GSL cannot be found, but extensions requiring GSL # will not work. 
if cython: extensions = [Extension("horizont._lda", ["horizont/_lda.pyx"]), Extension("horizont._random", ["horizont/_random.pyx"] + random_sources, include_dirs=random_include_dirs, library_dirs=random_library_dirs, libraries=random_libraries, optional=True), Extension("horizont._utils", ["horizont/_utils.pyx"])] extensions = cythonize(extensions) else: extensions = [Extension("horizont._lda", ["horizont/_lda.c"]), Extension("horizont._random", ["horizont/_random.cpp"] + random_sources, include_dirs=random_include_dirs, library_dirs=random_library_dirs, libraries=random_libraries, optional=True), Extension("horizont._utils", ["horizont/_utils.c"])] import numpy include_dirs = [numpy.get_include()] # package data package_data_pats = ['*.hpp', '*.pxd', '*.pyx', 'tests/*.dat', 'tests/*.ldac'] # get every file under horizont/BayesLogit/Code/C/", "horizont/RNG/" package_data_pats += sum( [[os.path.join(path.replace('horizont/', ''), fn) for fn in files] for path, dirs, files in os.walk('horizont/BayesLogit/Code/C/')], []) package_data_pats += sum( [[os.path.join(path.replace('horizont/', ''), fn) for fn in files] for path, dirs, files in os.walk('horizont/RNG/')], []) ########################################################################### # Setup proper ########################################################################### setup(install_requires=REQUIRES, name=NAME, version=find_version("horizont", "__init__.py"), maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, packages=find_packages(), description=DESCRIPTION, long_description=LONG_DESCRIPTION, license=LICENSE, url=URL, classifiers=CLASSIFIERS, ext_modules=extensions, include_dirs=include_dirs, package_data={'horizont' : package_data_pats}, platforms='any')
ariddell/horizont
setup.py
Python
gpl-3.0
6,222
[ "VisIt" ]
6a045ec9cc349f0443a996d1420f9a183538255256ac9b2d45640c50efcd3e78
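The setup.py above locates GSL through the GSL_INC_DIR and GSL_LIB_DIR environment variables before compiling its Cython extensions. A small sketch of driving an in-place build against a non-default GSL prefix (the /opt/gsl paths are illustrative only):

import os
import subprocess

# Point the build at a custom GSL installation, then compile the extensions
# in place; the _random extension is marked optional=True above, so a missing
# GSL only disables that extension rather than failing the whole build.
env = dict(os.environ, GSL_INC_DIR='/opt/gsl/include', GSL_LIB_DIR='/opt/gsl/lib')
subprocess.check_call(['python', 'setup.py', 'build_ext', '--inplace'], env=env)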
# -*- coding: utf-8 -*- # # This file is part of Invenio-Client. # Copyright (C) 2014 CERN. # # Invenio-Client is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio-Client is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. # # In applying this licence, CERN does not waive the privileges and immunities # granted to it by virtue of its status as an Intergovernmental Organization # or submit itself to any jurisdiction. """Tool to connect to remote Invenio servers using Invenio APIs. Example of use: .. code-block:: python from invenio_client import InvenioConnector demo = InvenioConnector("http://demo.inveniosoftware.org") results = demo.search("higgs") for record in results: print record["245__a"][0] print record["520__b"][0] for author in record["100__"]: print author["a"][0], author["u"][0] FIXME: - implement cache expiration - exceptions handling - parsing of ``<!-- Search-Engine-Total-Number-Of-Results: N -->`` - better checking of input parameters """ from __future__ import print_function import os import re import requests import json import splinter import sys import tempfile import time import xml.sax from requests.exceptions import (ConnectionError, InvalidSchema, InvalidURL, MissingSchema, RequestException) from urlparse import urlparse from ._compat import binary_type from .version import __version__ CFG_USER_AGENT = "invenio_connector" class InvenioConnectorError(Exception): """General connector error.""" def __init__(self, value): """Set the internal "value" attribute.""" super(InvenioConnectorError, self).__init__() self.value = value def __str__(self): """Return oneself as a string based on self.value.""" return str(self.value) class InvenioConnectorAuthError(InvenioConnectorError): """Failed authentication during remote connections.""" class InvenioConnectorServerError(InvenioConnectorError): """Problem with connecting to Invenio server.""" class InvenioConnector(object): """Create an connector to a server running Invenio.""" def __init__(self, url, user="", password="", login_method="Local", insecure_login=False): """ Initialize a new instance of the server at given URL. If the server happens to be running on the local machine, the access will be done directly using the Python APIs. In that case you can choose from which base path to import the necessary file specifying the local_import_path parameter. :param url: the url to which this instance will be connected. Defaults to CFG_SITE_URL, if available. :type url: string :param user: the optional username for interacting with the Invenio instance in an authenticated way. :type user: string :param password: the corresponding password. :type password: string :param login_method: the name of the login method the Invenio instance is expecting for this user (in case there is more than one). 
:type login_method: string """ assert url is not None self.server_url = url self._validate_server_url() self.cached_queries = {} self.cached_records = {} self.cached_baskets = {} self.user = user self.password = password self.login_method = login_method self.browser = None self.cookies = {} if self.user: if not insecure_login and \ not self.server_url.startswith('https://'): raise InvenioConnectorAuthError( "You have to use a secure URL (HTTPS) to login") self._init_browser() self._check_credentials() def _init_browser(self): """Overide in appropriate way to prepare a logged in browser.""" self.browser = splinter.Browser('phantomjs') self.browser.visit(self.server_url + "/youraccount/login") try: self.browser.fill('nickname', self.user) self.browser.fill('password', self.password) except: self.browser.fill('p_un', self.user) self.browser.fill('p_pw', self.password) self.browser.fill('login_method', self.login_method) self.browser.find_by_css('input[type=submit]').click() def _check_credentials(self): if not len(self.browser.cookies.all()): raise InvenioConnectorAuthError( "It was not possible to successfully login with " "the provided credentials") self.cookies = self.browser.cookies.all() def search(self, read_cache=True, ssl_verify=True, recid=None, **kwparams): """ Returns records corresponding to the given search query. See docstring of invenio.legacy.search_engine.perform_request_search() for an overview of available parameters. """ parse_results = False of = kwparams.get('of', "") if of == "": parse_results = True of = "xm" kwparams['of'] = of params = kwparams cache_key = (json.dumps(params), parse_results) if cache_key not in self.cached_queries or \ not read_cache: if recid: results = requests.get(self.server_url + '/record/' + recid, params=params, cookies=self.cookies, stream=True, verify=ssl_verify, allow_redirects=True) if results.history: new_recid = urlparse(results.url).path.split('/')[-1] raise InvenioConnectorServerError('The record has been' 'merged with recid ' + new_recid) else: results = requests.get(self.server_url + "/search", params=params, cookies=self.cookies, stream=True, verify=ssl_verify) if 'youraccount/login' in results.url: # Current user not able to search collection raise InvenioConnectorAuthError( "You are trying to search a restricted collection. " "Please authenticate yourself.\n") else: return self.cached_queries[cache_key] if parse_results: # FIXME: we should not try to parse if results is string parsed_records = self._parse_results(results.raw, self.cached_records) self.cached_queries[cache_key] = parsed_records return parsed_records else: # pylint: disable=E1103 # The whole point of the following code is to make sure we can # handle two types of variable. try: res = results.content except AttributeError: res = results # pylint: enable=E1103 if of == "id": try: if isinstance(res, binary_type): # Transform to list res = [int(recid.strip()) for record_id in res.decode('utf-8').strip("[]").split(",") if record_id.strip() != ""] res.reverse() except (ValueError, AttributeError): res = [] self.cached_queries[cache_key] = res return res def search_with_retry(self, sleeptime=3.0, retrycount=3, **params): """Perform a search given a dictionary of ``search(...)`` parameters. It accounts for server timeouts as necessary and will retry some number of times. 
:param sleeptime: number of seconds to sleep between retries :param retrycount: number of times to retry given search :param params: search parameters :return: records in given format """ results = [] count = 0 while count < retrycount: try: results = self.search(**params) break except requests.exceptions.Timeout: sys.stderr.write("Timeout while searching...Retrying\n") time.sleep(sleeptime) count += 1 else: sys.stderr.write( "Aborting search after %d attempts.\n" % (retrycount,)) return results def search_similar_records(self, recid): """Return the records similar to the given one.""" return self.search(p="recid:" + str(recid), rm="wrd") def search_records_cited_by(self, recid): """Return records cited by the given one.""" return self.search(p="recid:" + str(recid), rm="citation") def get_records_from_basket(self, bskid, group_basket=False, read_cache=True): """ Returns the records from the (public) basket with given bskid """ if bskid not in self.cached_baskets or not read_cache: if self.user: if group_basket: group_basket = '&category=G' else: group_basket = '' results = requests.get( self.server_url + "/yourbaskets/display?of=xm&bskid=" + str(bskid) + group_basket, cookies=self.cookies, stream=True) else: results = requests.get( self.server_url + "/yourbaskets/display_public?of=xm&bskid=" + str(bskid), stream=True) else: return self.cached_baskets[bskid] parsed_records = self._parse_results(results.raw, self.cached_records) self.cached_baskets[bskid] = parsed_records return parsed_records def get_record(self, recid, read_cache=True): """Return the record with given recid.""" if recid in self.cached_records or not read_cache: return self.cached_records[recid] else: return self.search(recid=str(recid)) def upload_marcxml(self, marcxml, mode): """Upload a record to the server. :param marcxml: the XML to upload. :param mode: the mode to use for the upload. - "-i" insert new records - "-r" replace existing records - "-c" correct fields of records - "-a" append fields to records - "-ir" insert record or replace if it exists """ if mode not in ["-i", "-r", "-c", "-a", "-ir"]: raise NameError("Incorrect mode " + str(mode)) return requests.post(self.server_url + "/batchuploader/robotupload", data={'file': marcxml, 'mode': mode}, headers={'User-Agent': CFG_USER_AGENT}) def _parse_results(self, results, cached_records): """ Parses the given results (in MARCXML format). 
The given "cached_records" list is a pool of already existing parsed records (in order to avoid keeping several times the same records in memory) """ parser = xml.sax.make_parser() handler = RecordsHandler(cached_records) parser.setContentHandler(handler) parser.parse(results) return handler.records def _validate_server_url(self): """Validates self.server_url""" try: request = requests.head(self.server_url) if request.status_code >= 400: raise InvenioConnectorServerError( "Unexpected status code '%d' accessing URL: %s" % (request.status_code, self.server_url)) except (InvalidSchema, MissingSchema) as err: raise InvenioConnectorServerError( "Bad schema, expecting http:// or https://:\n %s" % (err,)) except ConnectionError as err: raise InvenioConnectorServerError( "Couldn't establish connection to '%s':\n %s" % (self.server_url, err)) except InvalidURL as err: raise InvenioConnectorServerError( "Invalid URL '%s':\n %s" % (self.server_url, err)) except RequestException as err: raise InvenioConnectorServerError( "Unknown error connecting to '%s':\n %s" % (self.server_url, err)) class Record(dict): """Represent an Invenio record.""" def __init__(self, recid=None, marcxml=None, server_url=None): self.recid = recid self.marcxml = "" if marcxml is not None: self.marcxml = marcxml self.server_url = server_url def __setitem__(self, item, value): tag, ind1, ind2, subcode = decompose_code(item) if subcode is not None: super(Record, self).__setitem__( tag + ind1 + ind2, [{subcode: [value]}]) else: super(Record, self).__setitem__(tag + ind1 + ind2, value) def __getitem__(self, item): tag, ind1, ind2, subcode = decompose_code(item) datafields = dict.__getitem__(self, tag + ind1 + ind2) if subcode is not None: subfields = [] for datafield in datafields: if subcode in datafield: subfields.extend(datafield[subcode]) return subfields else: return datafields def __repr__(self): return "Record(" + dict.__repr__(self) + ")" def __str__(self): return self.marcxml def export(self, of="marcxml"): """ Returns the record in chosen format """ return self.marcxml def url(self): """ Returns the URL to this record. Returns None if not known """ if self.server_url is not None and \ self.recid is not None: return '/'.join( [self.server_url, CFG_SITE_RECORD, str(self.recid)]) else: return None class RecordsHandler(xml.sax.handler.ContentHandler): "MARCXML Parser" def __init__(self, records): """Initialize MARCXML Parser. 
:param records: dictionary with an already existing cache of records """ self.cached_records = records self.records = [] self.in_record = False self.in_controlfield = False self.in_datafield = False self.in_subfield = False self.cur_tag = None self.cur_subfield = None self.cur_controlfield = None self.cur_datafield = None self.cur_record = None self.recid = 0 self.buffer = "" self.counts = 0 def startElement(self, name, attributes): if name == "record": self.cur_record = Record() self.in_record = True elif name == "controlfield": tag = attributes["tag"] self.cur_datafield = "" self.cur_tag = tag self.cur_controlfield = [] if tag not in self.cur_record: self.cur_record[tag] = self.cur_controlfield self.in_controlfield = True elif name == "datafield": tag = attributes["tag"] self.cur_tag = tag ind1 = attributes["ind1"] if ind1 == " ": ind1 = "_" ind2 = attributes["ind2"] if ind2 == " ": ind2 = "_" if tag + ind1 + ind2 not in self.cur_record: self.cur_record[tag + ind1 + ind2] = [] self.cur_datafield = {} self.cur_record[tag + ind1 + ind2].append(self.cur_datafield) self.in_datafield = True elif name == "subfield": subcode = attributes["code"] if subcode not in self.cur_datafield: self.cur_subfield = [] self.cur_datafield[subcode] = self.cur_subfield else: self.cur_subfield = self.cur_datafield[subcode] self.in_subfield = True def characters(self, data): if self.in_subfield: self.buffer += data elif self.in_controlfield: self.buffer += data elif "Search-Engine-Total-Number-Of-Results:" in data: print(data) match_obj = re.search("\d+", data) if match_obj: print(int(match_obj.group())) self.counts = int(match_obj.group()) def endElement(self, name): if name == "record": self.in_record = False elif name == "controlfield": if self.cur_tag == "001": self.recid = int(self.buffer) if self.recid in self.cached_records: # Record has already been parsed, no need to add pass else: # Add record to the global cache self.cached_records[self.recid] = self.cur_record # Add record to the ordered list of results self.records.append(self.cached_records[self.recid]) self.cur_controlfield.append(self.buffer) self.in_controlfield = False self.buffer = "" elif name == "datafield": self.in_datafield = False elif name == "subfield": self.in_subfield = False self.cur_subfield.append(self.buffer) self.buffer = "" def decompose_code(code): """Decompose a MARC "code" into tag, ind1, ind2, subcode.""" code = "%-6s" % code ind1 = code[3:4] if ind1 == " ": ind1 = "_" ind2 = code[4:5] if ind2 == " ": ind2 = "_" subcode = code[5:6] if subcode == " ": subcode = None return (code[0:3], ind1, ind2, subcode)
inveniosoftware/invenio-client
invenio_client/connector.py
Python
gpl-2.0
18,756
[ "VisIt" ]
d05b6c412f30249d39ec161941078f6c12c955f72ab157f4717f5c5316c20627
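The InvenioConnector class above exposes search(), search_with_retry() and record helpers; a minimal usage sketch against the demo server named in its module docstring (the query terms are arbitrary):

from invenio_client import InvenioConnector

# Anonymous connection to a public instance; authenticated use requires an
# https:// URL per the constructor check above.
demo = InvenioConnector("http://demo.inveniosoftware.org")
record_ids = demo.search_with_retry(p="higgs", of="id", retrycount=2)
print(record_ids[:5])   # plain record IDs, as parsed by the of="id" branch above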
# Character.py # Aaron Taylor # Moose Abumeeiz # # This is the class for the main character it handles all controls and # functions that they have from pygame import * from random import randint from math import * from const import GRATIO from UIHeart import * from Tear import * from Fire import * from Coin import * from Key import * from Pickup import * from Heart import * from Bomb import * from Item import * from Pill import * from Trapdoor import * from Banner import * class Character: """The main class for Isaac""" hurtDistance = .6 def __init__(self, variant, xy, keys, textures, sounds, fonts): self.variant = variant self.x, self.y = xy self.textures = textures["character"][variant] # Record import sounds and textures self.tearTextures = textures["tears"] self.tearSounds = sounds["tear"] self.heartTextures = textures["hearts"] self.sounds = sounds["hurt"] # Setup starting info self.dead = False self.isFlying = False self.pill = None self.lastHurt = -1 # Tears + hearts self.tears = [] self.hearts = [UIHeart(0, 2, textures["hearts"]) for i in range(3)] # Head, shoulders knees and toes, knees and toes! self.heads = [self.textures.subsurface(Rect((i*64)*2, 0, 64, 64)) for i in range(3)] self.heads.append(transform.flip(self.heads[1], True, False)) self.tearHeads = [self.textures.subsurface(Rect(64+(i*64)*2, 0, 64, 64)) for i in range(3)] self.tearHeads.append(transform.flip(self.tearHeads[1], True, False)) self.feet = [ [self.textures.subsurface(Rect((i*64), 64, 64, 64)) for i in range(8)], [self.textures.subsurface(Rect((i*64), 64*2, 64, 64)) for i in range(8)], [], [] ] # The rect for the characters body self.bodyRect = Rect(self.x-16, self.y-16, 32, 32) # The things he can pickup self.pickups = [Pickup(i, textures["pickups"], fonts["pickups"]) for i in range(3)] # Keys, Bombs, Coins # Walkign forward and backwards have the same animation for frame in self.feet[0]: self.feet[2].append(frame) # Allow for reversed feet for frame in self.feet[1]: self.feet[3].append(transform.flip(frame, True, False)) # Setup head and body self.head = self.heads[0] self.body = self.feet[0][0] # Used for holding arms in the air and gettting hurt self.specialFrames = [self.textures.subsurface(i*128, 272+128, 128, 128) for i in range(1, 3)] self.specialFrame = 0 self.lastPickup = 0 # Animation setup self.interval = .06 self.lastAnimate = 0 self.walking = False self.eyesOpen = True self.walkIndex = 0 self.lastTear = 0 # Velocity self.xVel = 0 self.yVel = 0 # Direction self.up = False self.down = False self.left = False self.right = False self.lastKey = 0 self.lastTearKey = 0 # Last pressed key self.lastKeys = [] self.lastTearKeys = [] # Keys self.moveKeys = keys[0] self.tearKeys = keys[1] # Stats self.speed = 2 self.shotRate = 1 self.damage = 2 self.range = 2 self.shotSpeed = 1 self.luck = 1 # The items isaac has picked up self.items = [] def heal(self, ammount, variant): # Heal character if not self.dead: starting = -1 heartCount = len(self.hearts) for i in range(heartCount): # Advance to the correct heart type if self.hearts[i].variant == variant: starting = i break # Catch heart overflow if starting == -1: return 2 # Track the leftover ammount for heart leftover = self.hearts[starting].add(ammount) # Loop and add hearts for i in range(starting, heartCount): if leftover <= 0: break else: heart = self.hearts[i] leftover = heart.add(leftover) # Heart overflow if leftover != 0 and variant != 0 and (i == heartCount-1 or self.hearts[i+1].variant != variant): return leftover return True def clearTears(self): 
self.tears = [] def hurt(self, ammount, enemyX, enemyY, time): if time-self.lastHurt < 1: return self.sounds[randint(0,1)].play() # Play random hurt sound leftover = self.hearts[-1].damage(1) # Hurt the last heart for i in range(len(self.hearts)-1, -1, -1): if type(leftover) == bool and leftover: # If the heart should be removed del self.hearts[i] break else: if leftover <= 0: break # There is no longer a need to take health away else: if i == 0 and self.hearts[i].health == 1: # Check number of hearts self.dead = True leftover = self.hearts[i].damage(leftover) # damage # Character push back if enemyX != None and enemyY != None: # Push the character away from where they were hurt dx, dy = ((enemyX-self.x)*-1, (enemyY-self.y)*-1) angle = atan2(dy, dx) # Reverse it! pConst = 2 # Add the direction to the X and Y velocity self.xVel += pConst*cos(angle) self.yVel += pConst*sin(angle) self.lastHurt = time # Set character to hurt look self.specialFrame = 2 # Check if character should die if self.hearts[0].health == 0: self.die() def usePill(self): if self.pill != None: # Ensure the character has a pill self.pill.use(self) # Pass in the character to check for PHD st = self.pill.stats # The pills statss types = ["Speed", "Tears", "Damage", "Range", "Shot Speed", "Luck"] # The types of pills if sum(st) == -1: # Its a negative pill self.game.banners.append(Banner(types[st.index(-1)] + " Down", self.game.textures)) else: # Its a positive pill self.game.banners.append(Banner(types[st.index(1)] + " Up", self.game.textures)) # Add all the stats self.speed += st[0] self.shotRate += st[1] self.damage += st[2] self.range += st[3] self.shotSpeed += st[4] self.luck += st[5] # Destroy pill self.pill = None def die(self): self.dead = True def updateVel(self): # Update the X and Y velocity if self.up: self.yVel += -0.15 if self.down: self.yVel += 0.15 if self.left: self.xVel += -0.15 if self.right: self.xVel += 0.15 # Ensure you cant click 2 oppisite directions at the same time if (not self.left) and (not self.right) or (self.left and self.right): self.xVel *= 0.85 if (not self.up) and (not self.down) or (self.up and self.down): self.yVel *= 0.85 # Cap the maximum velocity if abs(self.xVel) > 1: self.xVel = 1 if self.xVel > 0 else -1 if abs(self.yVel) > 1: self.yVel = 1 if self.yVel > 0 else -1 # Reset head texture if self.xVel == 0 and self.yVel == 0: self.head = self.heads[0] def moving(self, key, value, joystick): # Find correct key try: index = self.moveKeys.index(key) except: try: index = self.tearKeys.index(key) if value: self.lastTearKey = index self.lastTearKeys.append(index) else: self.lastTearKeys.remove(index) except: pass return # Set direction if index == 0: self.down = value elif index == 1: self.right = value elif index == 2: self.up = value elif index == 3: self.left = value # Set the last key down and add to down keys if value: self.lastKey = index self.lastKeys.append(index) else: try: # Attempt to remove an up key self.lastKeys.remove(index) except: pass # Reset head if len(self.lastKeys) > 0: self.head = self.heads[self.lastKeys[-1]] else: self.head = self.heads[0] if len(self.lastTearKeys) > 0: self.head = self.heads[self.lastTearKeys[-1]] def step(self, time): xVel = round(abs(self.xVel), 1) yVel = round(abs(self.yVel), 1) if xVel > 0.1 or yVel > 0.1: # The character will be still (creating a sliding effect) if all([not self.down, not self.right, not self.up, not self.left]): # No keys are down self.head = self.heads[0] # Reset body if len(self.lastKeys) > 0: self.body = 
self.feet[self.lastKeys[-1]][int(self.walkIndex)] else: self.body = self.feet[self.lastKey][int(self.walkIndex)] self.walkIndex += 1 if self.walkIndex >= len(self.feet[0]): # Reset foot frame self.walkIndex = 0 else: self.walkIndex = 0 self.head = self.heads[0] self.body = self.feet[0][0] # When you remove a key, set position to latest key down if len(self.lastKeys) > 0 and (xVel > 0.1 or yVel > 0.1): self.head = self.heads[self.lastKeys[-1]] self.body = self.feet[self.lastKeys[-1]][int(self.walkIndex)] else: self.head = self.heads[0] # Fix head if len(self.lastTearKeys) > 0: self.head = self.heads[self.lastTearKeys[-1]] def render(self, surface, time, bounds, obsticals, doors): move = [0,0] # Which direction on the map to move # Move feet when necesarry if time-self.lastAnimate >= self.interval: self.lastAnimate = time self.step(time) # Allow for Arm lift and Hurt animation if self.specialFrame == 2 and time-self.lastHurt >= 0.22: self.specialFrame = 0 elif self.specialFrame == 1 and time-self.lastPickup >= 0.5: self.specialFrame = 0 # Spawn a tear in the correct direction if self.lastTearKey in self.lastTearKeys and time-self.lastTear >= (8-self.shotRate)/18: self.tears.append(Tear([(0, 1), (1, 0), (0, -1), (-1, 0)][self.lastTearKey], (self.x, self.y-20), (self.xVel*1.5, self.yVel*1.5), self.shotSpeed, self.damage, self.range, True, self.tearTextures, self.tearSounds)) self.lastTear = time elif time-self.lastTear <= 0.1: try: # Set the head to favor the tear self.head = self.tearHeads[self.heads.index(self.head)] except: pass else: try: self.head = self.heads[self.tearHeads.index(self.head)] except: pass sizeModifier = 2.5 # Tear speed to grid ratio if sum(map(int, [self.left, self.right, self.up, self.down])) > 1: sizeModifier /= 1.414213 # So there is no benefit to going diagonal (sqrt(2)) # Delta x and y dx = self.xVel * sizeModifier * (self.speed//2+1) dy = self.yVel * sizeModifier * (self.speed//2+1) # Ensure the tear is within the level bounds inBoundsX = bounds.collidepoint(self.x+dx, self.y) inBoundsY = bounds.collidepoint(self.x, self.y+dy) rockColX = False rockColY = False for ob in obsticals: # Collide with ob try: if ob.destroyed: continue except: pass # Check collission with anything in the level rcx = ob.bounds.collidepoint(self.x+dx, self.y) rcy = ob.bounds.collidepoint(self.x, self.y+dy) if rcx: rockColX = rcx if rcy: rockColY = rcy # Bash out every possible tear collide if rcx or rcy: if type(ob) == Fire: self.hurt(1, None, None, time) elif type(ob) == Coin: self.pickups[0].add(ob.worth) ob.pickup() elif type(ob) == Key: if self.pickups[0].use(ob.price): self.pickups[2].add(1) ob.pickup() elif type(ob) == Bomb and not ob.shouldExplode: if self.pickups[0].use(ob.price): self.pickups[1].add(1) ob.pickup() elif type(ob) == Heart: if self.pickups[0].use(ob.price): amm = self.heal(ob.health, ob.variant) if amm == 0: self.hearts.append(UIHeart(ob.variant, ob.health, self.heartTextures)) elif type(amm) == int: self.hearts.append(UIHeart(ob.variant, amm, self.heartTextures)) ob.pickup() if ob.variant == 1: # Sould heart self.specialFrame = 1 self.lastPickup = time elif type(ob) == Pill: if self.pickups[0].use(ob.price): self.pill = ob ob.pickup() elif type(ob) == PHD: if self.pickups[0].use(ob.price): self.items.append(ob) ob.pickup() elif type(ob) == Trapdoor: self.game.floorIndex += 1 self.game.currentRoom = (0,0) self.game.setup() self.game.updateFloor() if not ob.collideable and not rockColX and not rockColY: # Object not collideable rockColX = rockColY = False # Moves x and 
y mx = [0, 1, 0, -1] my = [-1, 0, 1, 0] # Render doors for i in range(len(doors)): door = doors[i] # Dont allow walking through closed doors if not door.isOpen: continue # Door collision dcx = door.rect.collidepoint(self.x+dx, self.y) dcy = door.rect.collidepoint(self.x, self.y+dy) # If youre in a locked room with 1 exit, unlock the doors if len(doors) == 1 and door.locked: door.locked = False # Unlocking doors if door.locked and self.pickups[2].score > 0 and (dcx or dcy): door.locked = False self.pickups[2].score -= 1 continue # Stop you from walking through locked doors if door.locked: continue # Door collission x and y if dcx: self.x += dx if dcy: self.y += dy side = door.side # Try to walk throught the door if not dcx or not dcy: if sum(map(int, [ mx[side] < 0 and door.rect.x-(self.x+dx) > 0, mx[side] > 0 and door.rect.x+door.rect.w-(self.x+dx) < 0, my[side] > 0 and door.rect.y-(self.y+dy) > 0, my[side] < 0 and door.rect.y+door.rect.h-(self.y+dy) < 0, ])) == 1: move[0] = mx[side] move[1] = my[side] break # Move character self.x += dx if inBoundsX and (not rockColX or self.isFlying) else 0 self.y += dy if inBoundsY and (not rockColY or self.isFlying) else 0 # Update characters body rect self.bodyRect = Rect(self.x-16, self.y, 32, 16) # Move body rect # Update velocity self.updateVel() # Draw characters special frame if self.specialFrame == 0: surface.blit(self.body, (self.x-32, self.y-32)) surface.blit(self.head, (self.x-32, self.y-32-20)) else: surface.blit(self.specialFrames[self.specialFrame-1], (self.x-64, self.y-72)) # Render tears for tear in self.tears[:]: if not tear.render(surface, time, bounds, obsticals): self.tears.remove(tear) for i in range(len(self.hearts)): self.hearts[i].render(surface, i) for p in self.pickups: p.render(surface) if self.pill != None: surface.blit(self.pill.texture, (WIDTH-80, HEIGHT-60)) for item in self.items: item.renderCorner(surface) return move
ExPHAT/binding-of-isaac
Character.py
Python
mit
13,914
[ "MOOSE" ]
7da81593bc80f9abc7db9d713a111a01472e9b99c56b84ecabf2e98ba2556afe
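The hurt() method in the Character.py entry above derives its knock-back impulse from atan2 of the reversed enemy-to-character vector; the following standalone sketch restates that vector math in isolation (function and constant names here are illustrative, not taken from that repository).

from math import atan2, cos, sin

def knockback(char_x, char_y, enemy_x, enemy_y, p_const=2.0):
    """Reverse the enemy-to-character vector and scale it by a push constant,
    mirroring the push-back step in Character.hurt()."""
    dx, dy = -(enemy_x - char_x), -(enemy_y - char_y)
    angle = atan2(dy, dx)
    return p_const * cos(angle), p_const * sin(angle)

# Enemy directly to the left of the character -> impulse points to the right.
print(knockback(100.0, 100.0, 90.0, 100.0))  # (2.0, 0.0)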
import os
import numpy as np
from ase import Atom, Atoms
from ase.lattice import bulk
from ase.units import Hartree, Bohr
from gpaw import GPAW, FermiDirac
from gpaw.response.bse import BSE
from ase.dft.kpoints import monkhorst_pack
from gpaw.mpi import rank

GS = 1
bse = 1
check = 1

if GS:
    kpts = (4, 4, 4)
    a = 5.431  # From PRB 73,045112 (2006)
    atoms = bulk('Si', 'diamond', a=a)
    calc = GPAW(h=0.2,
                kpts=kpts,
                occupations=FermiDirac(0.001),
                nbands=12,
                convergence={'bands': -4})
    atoms.set_calculator(calc)
    atoms.get_potential_energy()
    calc.write('Si.gpw', 'all')

if bse:
    eshift = 0.8
    bse = BSE('Si.gpw',
              w=np.linspace(0, 10, 201),
              q=np.array([0.0001, 0, 0.0]),
              optical_limit=True,
              ecut=50.,
              nc=np.array([4, 6]),
              nv=np.array([2, 4]),
              eshift=eshift,
              nbands=8)
    bse.get_dielectric_function('Si_bse.dat')

if rank == 0 and os.path.isfile('phi_qaGp'):
    os.remove('phi_qaGp')

if check:
    d = np.loadtxt('Si_bse.dat')
    Nw1 = 64
    Nw2 = 77
    if d[Nw1, 2] > d[Nw1 - 1, 2] and d[Nw1, 2] > d[Nw1 + 1, 2] \
       and d[Nw2, 2] > d[Nw2 - 1, 2] and d[Nw2, 2] > d[Nw2 + 1, 2]:
        pass
    else:
        raise ValueError('Absorption peak not correct ! ')
    if np.abs(d[Nw1, 2] - 53.3382894891) > 1. \
       or np.abs(d[Nw2, 2] - 62.7667801949) > 2.:
        print d[Nw1, 2], d[Nw2, 2]
        raise ValueError('Please check spectrum strength ! ')
robwarm/gpaw-symm
gpaw/test/bse_silicon.py
Python
gpl-3.0
1,576
[ "ASE", "GPAW" ]
0085673b9554f56d010ea9a19b92d77c511f2a36ad02bfeddac94a60b3a4d956
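A minimal peak-finding sketch for the spectrum written by the bse_silicon.py entry above; it assumes the run has already produced 'Si_bse.dat' with the same three-column layout (frequency grid, real part, imaginary part) that the entry's hard-coded index checks rely on, and the helper name is made up for illustration.

import numpy as np

def local_maxima(y):
    """Indices i where y[i] exceeds both neighbours."""
    return [i for i in range(1, len(y) - 1) if y[i] > y[i - 1] and y[i] > y[i + 1]]

d = np.loadtxt('Si_bse.dat')  # produced by bse.get_dielectric_function above
omega, im_eps = d[:, 0], d[:, 2]
peaks = sorted(local_maxima(im_eps), key=lambda i: im_eps[i], reverse=True)[:2]
for i in sorted(peaks):
    print('peak near w=%.2f with strength %.2f' % (omega[i], im_eps[i]))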
"""Module level constants specifying available data""" from datetime import datetime as dt MIN_SEASON = 2008 """Oldest season currently supported""" MAX_SEASON = dt.today().year if dt.today().month < 10 else dt.today().year + 1 """Year of the most recent season. Seasons are denoted by the year in which they end, i.e. 2013-2014 is denoted 2014""" GAME_CT_DICT = { 2008: 1230, 2009: 1230, 2010: 1230, 2011: 1230, 2012: 1230, 2013: 720, 2014: 1230, 2015: 1230, 2016: 1230 } MISS_REG_GAMES = [ "20030001", "20030002", "20030003", "20030004", "20030005", "20030006", "20030007", "20030008", "20030009", "20030010", "20030011", "20030012", "20030013", "20030014", "20030015", "20030016", "20030017", "20030018", "20030019", "20030020", "20030021", "20030022", "20030023", "20030024", "20030025", "20030026", "20030027", "20030028", "20030029", "20030030", "20030031", "20030032", "20030033", "20030034", "20030035", "20030036", "20030037", "20030038", "20030039", "20030040", "20030041", "20030042", "20030043", "20030044", "20030045", "20030046", "20030047", "20030048", "20030049", "20030050", "20030051", "20030052", "20030053", "20030054", "20030055", "20030056", "20030057", "20030058", "20030059", "20030060", "20030061", "20030062", "20030063", "20030064", "20030065", "20030066", "20030067", "20030068", "20030069", "20030070", "20030071", "20030072", "20030073", "20030074", "20030075", "20030076", "20030077", "20030078", "20030079", "20030080", "20030081", "20030082", "20030083", "20030084", "20030085", "20030086", "20030087", "20030088", "20030089", "20030090", "20030091", "20030092", "20030093", "20030094", "20030095", "20030096", "20030097", "20030098", "20030099", "20030100", "20030101", "20030102", "20030103", "20030104", "20030105", "20030106", "20030107", "20030108", "20030109", "20030110", "20030111", "20030112", "20030113", "20030114", "20030115", "20030116", "20030117", "20030118", "20030119", "20030120", "20030121", "20030122", "20030123", "20030124", "20030125", "20030126", "20030127", "20030134", "20030135", "20030582", "20030598", "20030872", "20040010", "20040251", "20040453", "20040456", "20040482", "20040802", "20060018", "20060140", "20060298", "20060458", "20060974", "20071024", "20090259", "20090409", "20091077", "20100081", "20100827", "20100836", "20100857", "20100863", "20100874", "20110429", "20120259" ] """Regular season games without RTSS data on NHL.com. Format:: YYYYNNNN where YYYY is the four digit year and NNNN the four digit game number. """ MISS_PLAYOFF_GAMES = [ "20040134" "20060233" ] """Playoff games without RTSS data on NHL.com. Format:: YYYYNNNN where YYYY is the four digit year and NNNN the four digit game number. 
""" TEAMS_BY_ABBR = { 'ANA': 'Anaheim Ducks', # (1993-94 - present) 'ARI': 'Arizona Coyotes', # (2014-15 - present) 'ATF': 'Atlanta Flames', # (1972-73 - 1979-80) 'ARL': 'Atlanta Trashers', # (1999-00 - 2010-11) 'BOS': 'Boston Bruins', # (1925-26 - present) 'BKN': 'Brooklyn Americans', # (1941-1942) 'BUF': 'Buffalo Sabres', # (1970-71 - present) 'CGY': 'Calgary Flames', # (1980-81 - present) 'CLF': 'California Golden Seals', # (1970-71 - 1975-76) 'CAR': 'Carolina Hurricanes', # (1997-98 - present) 'CHI': 'Chicago Blackhawks', # (1926-27 - present) 'CLE': 'Cleveland Barons', # (1976-77 - 1977-78) 'COR': 'Colorado Rockies', # (1976-77 - 1981-82) 'COL': 'Colorado Avalanche', # (1995-96 - present) 'CBJ': 'Columbus Blue Jackets', # (2000-01 - present) 'DAL': 'Dallas Stars', # (1993-94 - present) 'DTC': 'Detroit Cougars', # (1926-27 - 1929-30) 'DTF': 'Detroit Falcons', # (1930-31 - 1931-32) 'DET': 'Detroit Red Wings', # (1932-33 - present) 'EDM': 'Edmonton Oilers', # (1979-80 - present) 'FLA': 'Florida Panthers', # (1993-94 - present) 'HAM': 'Hamilton Tigers', # (1920-21 - 1924-25) 'HAR': 'Hartford Whalers', # (1979-80 - 1996-97) 'KC': 'Kansas City Scouts', # (1974-75 - 1975-76) 'LA': 'Los Angeles Kings', # (1967-68 - present) 'MIN': 'Minnesota Wild', # (2000-01 - present) 'MNS': 'Minnesota North Stars', # (1967-68 - 1992-93) 'MTL': 'Montreal Canadiens', # (1917-18 - present) 'MTM': 'Montreal Maroons', # (1924-25 - 1937-38) 'MTW': 'Montreal Wanderers', # (1917-18) 'NSH': 'Nashville Predators', # (1998-99 - present) 'NJ': 'New Jersey Devils', # (1982-83 - present) 'NYA': 'New York Americans', # (1925-26 - 1940-41) 'NYI': 'New York Islanders', # (1972-73 - present) 'NYR': 'New York Rangers', # (1926-27 - present) 'OAK': 'Oakland Seals', # (1967-68 - 1969-70) 'PHI': 'Philadelphia Flyers', # (1967-68 - present) 'PHQ': 'Philadelphia Quakers', # (1930-31) 'PHO': 'Phoenix Coyotes', # (1996-97 - 2013-14) 'PIT': 'Pittsburgh Penguins', # (1967-68 - present) 'PIP': 'Pittsburgh Pirates', # (1925-26 - 1929-30) 'OTT': 'Ottawa Senators', # (1992-93 - present) 'OTS': 'Ottawa Senators (orig)', # (1917-18 - 1930-31, 1932-33 - 1933-34) 'QUE': 'Quebec Nordiques', # (1979-80 - 1994-95) 'QUB': 'Quebec Bulldogs', # (1919-20) 'STL': 'St Louis Blues', # (1967-68 - present) 'STE': 'St Louis Eagles', # (1934-35) 'SJ': 'San Jose Sharks', # (1991-92 - present) 'TB': 'Tampa Bay Lightning', # (1992-93 - present) 'TOR': 'Toronto Maple Leafs', # (1926-27 - present) 'TRA': 'Toronto Arenas', # (1917-18 - 1918-19) 'TRS': 'Toronto St Pats', # (1919-20 - 1925-26) 'VAN': 'Vancouver Canucks', # (1970-71 - present) 'WSH': 'Washington Capitals', # (1974-75 - present) 'WIJ': 'Winnipeg Jets (orig)', # (1st) (1979-80 - 1995-96) 'WPG': 'Winnipeg Jets', # (2011-12 - present) }
robhowley/nhlscrapi
nhlscrapi/constants.py
Python
apache-2.0
5,910
[ "COLUMBUS" ]
303554d5f60d8514ecc10312b54c2b963e5fdaf2016846b1234cfbef1130bdd4
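A small usage illustration of the YYYYNNNN game-id convention documented in the nhlscrapi constants entry above; the helper name is hypothetical and not part of the library.

def game_id(season_year, game_number):
    """Compose the YYYYNNNN id described in the MISS_REG_GAMES docstring:
    four-digit year followed by a zero-padded four-digit game number."""
    return '%04d%04d' % (season_year, game_number)

assert game_id(2003, 1) == '20030001'
assert game_id(2012, 259) == '20120259'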
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, request, redirect, flash, url_for
from flask.ext.login import login_required, current_user

from octopus.extensions import nav, db
from octopus.user.forms import EditUserProfile
from octopus.models import User
from octopus.utils import flash_errors

blueprint = Blueprint("user", __name__, url_prefix='/user',
                      static_folder="../static")

nav.Bar('user', [
    nav.Item('<i class="fa fa-user fa-lg"></i>', '',
             html_attrs=str("data-placement='bottom',\
                            title='Users'"
                            ),
             items=[
                 nav.Item('My Profile', 'user.view'),
                 nav.Item('All Users', 'user.all_users')
             ])
])


@blueprint.route("/all_users")
@login_required
def all_users():
    users = db.session.query(User.id.label("ID"),
                             User.username.label("Username"),
                             User.first_name.label("First Name"),
                             User.last_name.label("Last Name"),
                             User.email.label("Email")
                             ).order_by(User.id.desc())
    view_url = {'func': lambda x: url_for('user.view', user_id=getattr(x, 'ID'))}
    return render_template("user/members.html", users=users, view_url=view_url)


@blueprint.route('/query')
@login_required
def query():
    return render_template('public/home.html')


@blueprint.route("/view", methods=['GET', 'POST'])
@blueprint.route("/view/<user_id>", methods=['GET', 'POST'])
@login_required
def view(user_id=None):
    if user_id is None:
        user = current_user
        user_id = current_user.id
    else:
        user = User.query.filter_by(id=user_id).first_or_404()
    form = EditUserProfile(user_id, request.form)
    if request.method == 'POST':
        if form.validate_on_submit():
            form.commit_updates()
            flash("User Profile Edits Saved", category='success')
            return redirect(url_for('user.view', user_id=user_id))
        else:
            flash_errors(form)
    return render_template("user/profile.html", user=user, form=form)
quaintm/octopus
octopus/user/views.py
Python
bsd-3-clause
2,064
[ "Octopus" ]
ad95e5202cda1fd0dd71786f251f0cb85ce131e67250b56234c8aa5143e22dbb
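A minimal sketch of wiring a blueprint like the one in the octopus views entry above into a Flask application; the stand-in blueprint and factory below are assumptions for illustration only, since the real module also requires the project's extensions (db, nav, login) to be initialised first.

from flask import Blueprint, Flask

# Stand-in blueprint mirroring the url_prefix pattern of octopus.user.views.
demo = Blueprint('user', __name__, url_prefix='/user')

@demo.route('/query')
def query():
    return 'ok'

def create_app():
    app = Flask(__name__)
    app.register_blueprint(demo)
    return app

if __name__ == '__main__':
    create_app().run(debug=True)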
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import mock import packaging.version import grpc from grpc.experimental import aio import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.aiplatform_v1.services.endpoint_service import ( EndpointServiceAsyncClient, ) from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient from google.cloud.aiplatform_v1.services.endpoint_service import pagers from google.cloud.aiplatform_v1.services.endpoint_service import transports from google.cloud.aiplatform_v1.services.endpoint_service.transports.base import ( _GOOGLE_AUTH_VERSION, ) from google.cloud.aiplatform_v1.types import accelerator_type from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import endpoint from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint from google.cloud.aiplatform_v1.types import endpoint_service from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import operation as gca_operation from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth # TODO(busunkim): Once google-auth >= 1.25.0 is required transitively # through google-api-core: # - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), reason="This test requires google-auth < 1.25.0", ) requires_google_auth_gte_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), reason="This test requires google-auth >= 1.25.0", ) def client_cert_source_callback(): return b"cert bytes", b"key bytes" # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client): return ( "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT ) def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" sandbox_endpoint = "example.sandbox.googleapis.com" sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" assert EndpointServiceClient._get_default_mtls_endpoint(None) is None assert ( EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert ( EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) @pytest.mark.parametrize( "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} client = client_class.from_service_account_info(info) assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "aiplatform.googleapis.com:443" @pytest.mark.parametrize( "transport_class,transport_name", [ (transports.EndpointServiceGrpcTransport, "grpc"), (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), ], ) def test_endpoint_service_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) with mock.patch.object( service_account.Credentials, "with_always_use_jwt_access", create=True ) as use_jwt: creds = service_account.Credentials(None, None, None) transport = transport_class(credentials=creds, always_use_jwt_access=False) use_jwt.assert_not_called() @pytest.mark.parametrize( "client_class", [EndpointServiceClient, EndpointServiceAsyncClient,] ) def test_endpoint_service_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds assert isinstance(client, client_class) assert client.transport._host == "aiplatform.googleapis.com:443" def test_endpoint_service_client_get_transport_class(): transport = EndpointServiceClient.get_transport_class() available_transports = [ transports.EndpointServiceGrpcTransport, ] assert transport in available_transports transport = EndpointServiceClient.get_transport_class("grpc") assert transport == transports.EndpointServiceGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (EndpointServiceClient, 
transports.EndpointServiceGrpcTransport, "grpc"), ( EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) @mock.patch.object( EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient), ) @mock.patch.object( EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient), ) def test_endpoint_service_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() # Check the case api_endpoint is provided. options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "always". with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): client = client_class() # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): client = client_class() # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name,use_client_cert_env", [ ( EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true", ), ( EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true", ), ( EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false", ), ( EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ], ) @mock.patch.object( EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient), ) @mock.patch.object( EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_endpoint_service_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. # Check the case client_cert_source is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): options = client_options.ClientOptions( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None expected_host = client.DEFAULT_ENDPOINT else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=True, ): with mock.patch( "google.auth.transport.mtls.default_client_cert_source", return_value=client_cert_source_callback, ): if use_client_cert_env == "false": expected_host = client.DEFAULT_ENDPOINT expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT expected_client_cert_source = client_cert_source_callback patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=expected_host, scopes=None, client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( "google.auth.transport.mtls.has_default_client_cert_source", return_value=False, ): patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), ( EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) def test_endpoint_service_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), ( EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", ), ], ) def test_endpoint_service_client_client_options_credentials_file( client_class, transport_class, transport_name ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_endpoint_service_client_client_options_from_dict(): with mock.patch( "google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None client = EndpointServiceClient( client_options={"api_endpoint": "squid.clam.whelk"} ) grpc_transport.assert_called_once_with( credentials=None, credentials_file=None, host="squid.clam.whelk", scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, ) def test_create_endpoint( transport: str = "grpc", request_type=endpoint_service.CreateEndpointRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.create_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.CreateEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_create_endpoint_from_dict(): test_create_endpoint(request_type=dict) def test_create_endpoint_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: client.create_endpoint() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.CreateEndpointRequest() @pytest.mark.asyncio async def test_create_endpoint_async( transport: str = "grpc_asyncio", request_type=endpoint_service.CreateEndpointRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.create_endpoint(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.CreateEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_create_endpoint_async_from_dict(): await test_create_endpoint_async(request_type=dict) def test_create_endpoint_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.CreateEndpointRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.create_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_create_endpoint_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.CreateEndpointRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.create_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_create_endpoint_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_endpoint( parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == "parent_value" assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value") def test_create_endpoint_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.create_endpoint( endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), ) @pytest.mark.asyncio async def test_create_endpoint_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_endpoint( parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == "parent_value" assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value") @pytest.mark.asyncio async def test_create_endpoint_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.create_endpoint( endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), ) def test_get_endpoint( transport: str = "grpc", request_type=endpoint_service.GetEndpointRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint.Endpoint( name="name_value", display_name="display_name_value", description="description_value", etag="etag_value", ) response = client.get_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.GetEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, endpoint.Endpoint) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" def test_get_endpoint_from_dict(): test_get_endpoint(request_type=dict) def test_get_endpoint_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: client.get_endpoint() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.GetEndpointRequest() @pytest.mark.asyncio async def test_get_endpoint_async( transport: str = "grpc_asyncio", request_type=endpoint_service.GetEndpointRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( endpoint.Endpoint( name="name_value", display_name="display_name_value", description="description_value", etag="etag_value", ) ) response = await client.get_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.GetEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, endpoint.Endpoint) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" @pytest.mark.asyncio async def test_get_endpoint_async_from_dict(): await test_get_endpoint_async(request_type=dict) def test_get_endpoint_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.GetEndpointRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: call.return_value = endpoint.Endpoint() client.get_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_get_endpoint_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.GetEndpointRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) await client.get_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_get_endpoint_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint.Endpoint() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_endpoint(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" def test_get_endpoint_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_endpoint( endpoint_service.GetEndpointRequest(), name="name_value", ) @pytest.mark.asyncio async def test_get_endpoint_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint.Endpoint() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_endpoint(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_endpoint_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_endpoint( endpoint_service.GetEndpointRequest(), name="name_value", ) def test_list_endpoints( transport: str = "grpc", request_type=endpoint_service.ListEndpointsRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint_service.ListEndpointsResponse( next_page_token="next_page_token_value", ) response = client.list_endpoints(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.ListEndpointsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, pagers.ListEndpointsPager) assert response.next_page_token == "next_page_token_value" def test_list_endpoints_from_dict(): test_list_endpoints(request_type=dict) def test_list_endpoints_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: client.list_endpoints() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.ListEndpointsRequest() @pytest.mark.asyncio async def test_list_endpoints_async( transport: str = "grpc_asyncio", request_type=endpoint_service.ListEndpointsRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( endpoint_service.ListEndpointsResponse( next_page_token="next_page_token_value", ) ) response = await client.list_endpoints(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.ListEndpointsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListEndpointsAsyncPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio async def test_list_endpoints_async_from_dict(): await test_list_endpoints_async(request_type=dict) def test_list_endpoints_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.ListEndpointsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: call.return_value = endpoint_service.ListEndpointsResponse() client.list_endpoints(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] @pytest.mark.asyncio async def test_list_endpoints_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.ListEndpointsRequest() request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( endpoint_service.ListEndpointsResponse() ) await client.list_endpoints(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] def test_list_endpoints_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint_service.ListEndpointsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_endpoints(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].parent == "parent_value" def test_list_endpoints_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.list_endpoints( endpoint_service.ListEndpointsRequest(), parent="parent_value", ) @pytest.mark.asyncio async def test_list_endpoints_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = endpoint_service.ListEndpointsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( endpoint_service.ListEndpointsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.list_endpoints(parent="parent_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_endpoints_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.list_endpoints( endpoint_service.ListEndpointsRequest(), parent="parent_value", ) def test_list_endpoints_pager(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( endpoint_service.ListEndpointsResponse( endpoints=[ endpoint.Endpoint(), endpoint.Endpoint(), endpoint.Endpoint(), ], next_page_token="abc", ), endpoint_service.ListEndpointsResponse( endpoints=[], next_page_token="def", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(),], next_page_token="ghi", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),], ), RuntimeError, ) metadata = () metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_endpoints(request={}) assert pager._metadata == metadata results = [i for i in pager] assert len(results) == 6 assert all(isinstance(i, endpoint.Endpoint) for i in results) def test_list_endpoints_pages(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( endpoint_service.ListEndpointsResponse( endpoints=[ endpoint.Endpoint(), endpoint.Endpoint(), endpoint.Endpoint(), ], next_page_token="abc", ), endpoint_service.ListEndpointsResponse( endpoints=[], next_page_token="def", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(),], next_page_token="ghi", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),], ), RuntimeError, ) pages = list(client.list_endpoints(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio async def test_list_endpoints_async_pager(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( endpoint_service.ListEndpointsResponse( endpoints=[ endpoint.Endpoint(), endpoint.Endpoint(), endpoint.Endpoint(), ], next_page_token="abc", ), endpoint_service.ListEndpointsResponse( endpoints=[], next_page_token="def", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(),], next_page_token="ghi", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),], ), RuntimeError, ) async_pager = await client.list_endpoints(request={},) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: responses.append(response) assert len(responses) == 6 assert all(isinstance(i, endpoint.Endpoint) for i in responses) @pytest.mark.asyncio async def test_list_endpoints_async_pages(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( endpoint_service.ListEndpointsResponse( endpoints=[ endpoint.Endpoint(), endpoint.Endpoint(), endpoint.Endpoint(), ], next_page_token="abc", ), endpoint_service.ListEndpointsResponse( endpoints=[], next_page_token="def", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(),], next_page_token="ghi", ), endpoint_service.ListEndpointsResponse( endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),], ), RuntimeError, ) pages = [] async for page_ in (await client.list_endpoints(request={})).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token def test_update_endpoint( transport: str = "grpc", request_type=endpoint_service.UpdateEndpointRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_endpoint.Endpoint( name="name_value", display_name="display_name_value", description="description_value", etag="etag_value", ) response = client.update_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UpdateEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, gca_endpoint.Endpoint) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" def test_update_endpoint_from_dict(): test_update_endpoint(request_type=dict) def test_update_endpoint_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: client.update_endpoint() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UpdateEndpointRequest() @pytest.mark.asyncio async def test_update_endpoint_async( transport: str = "grpc_asyncio", request_type=endpoint_service.UpdateEndpointRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_endpoint.Endpoint( name="name_value", display_name="display_name_value", description="description_value", etag="etag_value", ) ) response = await client.update_endpoint(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UpdateEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, gca_endpoint.Endpoint) assert response.name == "name_value" assert response.display_name == "display_name_value" assert response.description == "description_value" assert response.etag == "etag_value" @pytest.mark.asyncio async def test_update_endpoint_async_from_dict(): await test_update_endpoint_async(request_type=dict) def test_update_endpoint_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.UpdateEndpointRequest() request.endpoint.name = "endpoint.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: call.return_value = gca_endpoint.Endpoint() client.update_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[ "metadata" ] @pytest.mark.asyncio async def test_update_endpoint_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.UpdateEndpointRequest() request.endpoint.name = "endpoint.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_endpoint.Endpoint() ) await client.update_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[ "metadata" ] def test_update_endpoint_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_endpoint.Endpoint() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_endpoint( endpoint=gca_endpoint.Endpoint(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value") assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) def test_update_endpoint_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.update_endpoint( endpoint_service.UpdateEndpointRequest(), endpoint=gca_endpoint.Endpoint(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_endpoint_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gca_endpoint.Endpoint() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( gca_endpoint.Endpoint() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.update_endpoint( endpoint=gca_endpoint.Endpoint(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value") assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) @pytest.mark.asyncio async def test_update_endpoint_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.update_endpoint( endpoint_service.UpdateEndpointRequest(), endpoint=gca_endpoint.Endpoint(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) def test_delete_endpoint( transport: str = "grpc", request_type=endpoint_service.DeleteEndpointRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.delete_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeleteEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_delete_endpoint_from_dict(): test_delete_endpoint(request_type=dict) def test_delete_endpoint_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: client.delete_endpoint() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeleteEndpointRequest() @pytest.mark.asyncio async def test_delete_endpoint_async( transport: str = "grpc_asyncio", request_type=endpoint_service.DeleteEndpointRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.delete_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeleteEndpointRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_delete_endpoint_async_from_dict(): await test_delete_endpoint_async(request_type=dict) def test_delete_endpoint_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.DeleteEndpointRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.delete_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] @pytest.mark.asyncio async def test_delete_endpoint_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.DeleteEndpointRequest() request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.delete_endpoint(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] def test_delete_endpoint_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_endpoint(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" def test_delete_endpoint_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.delete_endpoint( endpoint_service.DeleteEndpointRequest(), name="name_value", ) @pytest.mark.asyncio async def test_delete_endpoint_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.delete_endpoint(name="name_value",) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_endpoint_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.delete_endpoint( endpoint_service.DeleteEndpointRequest(), name="name_value", ) def test_deploy_model( transport: str = "grpc", request_type=endpoint_service.DeployModelRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.deploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeployModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_deploy_model_from_dict(): test_deploy_model(request_type=dict) def test_deploy_model_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: client.deploy_model() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeployModelRequest() @pytest.mark.asyncio async def test_deploy_model_async( transport: str = "grpc_asyncio", request_type=endpoint_service.DeployModelRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.deploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.DeployModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_deploy_model_async_from_dict(): await test_deploy_model_async(request_type=dict) def test_deploy_model_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.DeployModelRequest() request.endpoint = "endpoint/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.deploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] @pytest.mark.asyncio async def test_deploy_model_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.DeployModelRequest() request.endpoint = "endpoint/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.deploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] def test_deploy_model_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.deploy_model( endpoint="endpoint_value", deployed_model=gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ), traffic_split={"key_value": 541}, ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].endpoint == "endpoint_value" assert args[0].deployed_model == gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ) assert args[0].traffic_split == {"key_value": 541} def test_deploy_model_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.deploy_model( endpoint_service.DeployModelRequest(), endpoint="endpoint_value", deployed_model=gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ), traffic_split={"key_value": 541}, ) @pytest.mark.asyncio async def test_deploy_model_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.deploy_model( endpoint="endpoint_value", deployed_model=gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ), traffic_split={"key_value": 541}, ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].endpoint == "endpoint_value" assert args[0].deployed_model == gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ) assert args[0].traffic_split == {"key_value": 541} @pytest.mark.asyncio async def test_deploy_model_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.deploy_model( endpoint_service.DeployModelRequest(), endpoint="endpoint_value", deployed_model=gca_endpoint.DeployedModel( dedicated_resources=machine_resources.DedicatedResources( machine_spec=machine_resources.MachineSpec( machine_type="machine_type_value" ) ) ), traffic_split={"key_value": 541}, ) def test_undeploy_model( transport: str = "grpc", request_type=endpoint_service.UndeployModelRequest ): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") response = client.undeploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UndeployModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_undeploy_model_from_dict(): test_undeploy_model(request_type=dict) def test_undeploy_model_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: client.undeploy_model() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UndeployModelRequest() @pytest.mark.asyncio async def test_undeploy_model_async( transport: str = "grpc_asyncio", request_type=endpoint_service.UndeployModelRequest ): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) response = await client.undeploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == endpoint_service.UndeployModelRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_undeploy_model_async_from_dict(): await test_undeploy_model_async(request_type=dict) def test_undeploy_model_field_headers(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.UndeployModelRequest() request.endpoint = "endpoint/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") client.undeploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] @pytest.mark.asyncio async def test_undeploy_model_field_headers_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = endpoint_service.UndeployModelRequest() request.endpoint = "endpoint/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) await client.undeploy_model(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"] def test_undeploy_model_flattened(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.undeploy_model( endpoint="endpoint_value", deployed_model_id="deployed_model_id_value", traffic_split={"key_value": 541}, ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0].endpoint == "endpoint_value" assert args[0].deployed_model_id == "deployed_model_id_value" assert args[0].traffic_split == {"key_value": 541} def test_undeploy_model_flattened_error(): client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.undeploy_model( endpoint_service.UndeployModelRequest(), endpoint="endpoint_value", deployed_model_id="deployed_model_id_value", traffic_split={"key_value": 541}, ) @pytest.mark.asyncio async def test_undeploy_model_flattened_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.undeploy_model( endpoint="endpoint_value", deployed_model_id="deployed_model_id_value", traffic_split={"key_value": 541}, ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0].endpoint == "endpoint_value" assert args[0].deployed_model_id == "deployed_model_id_value" assert args[0].traffic_split == {"key_value": 541} @pytest.mark.asyncio async def test_undeploy_model_flattened_error_async(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.undeploy_model( endpoint_service.UndeployModelRequest(), endpoint="endpoint_value", deployed_model_id="deployed_model_id_value", traffic_split={"key_value": 541}, ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.EndpointServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.EndpointServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = EndpointServiceClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) # It is an error to provide scopes and a transport instance. transport = transports.EndpointServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = EndpointServiceClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.EndpointServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) client = EndpointServiceClient(transport=transport) assert client.transport is transport def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.EndpointServiceGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.EndpointServiceGrpcAsyncIOTransport( credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),) assert isinstance(client.transport, transports.EndpointServiceGrpcTransport,) def test_endpoint_service_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.EndpointServiceTransport( credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) def test_endpoint_service_base_transport(): # Instantiate the base transport. with mock.patch( "google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__" ) as Transport: Transport.return_value = None transport = transports.EndpointServiceTransport( credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly # raise NotImplementedError. methods = ( "create_endpoint", "get_endpoint", "list_endpoints", "update_endpoint", "delete_endpoint", "deploy_model", "undeploy_model", ) for method in methods: with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): transport.operations_client @requires_google_auth_gte_1_25_0 def test_endpoint_service_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.EndpointServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @requires_google_auth_lt_1_25_0 def test_endpoint_service_base_transport_with_credentials_file_old_google_auth(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.EndpointServiceTransport( credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) def test_endpoint_service_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.EndpointServiceTransport() adc.assert_called_once() @requires_google_auth_gte_1_25_0 def test_endpoint_service_auth_adc(): # If no credentials are provided, we should use ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) EndpointServiceClient() adc.assert_called_once_with( scopes=None, default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id=None, ) @requires_google_auth_lt_1_25_0 def test_endpoint_service_auth_adc_old_google_auth(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) EndpointServiceClient() adc.assert_called_once_with( scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id=None, ) @pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) @requires_google_auth_gte_1_25_0 def test_endpoint_service_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], default_scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) @requires_google_auth_lt_1_25_0 def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class(quota_project_id="octopus") adc.assert_called_once_with( scopes=("https://www.googleapis.com/auth/cloud-platform",), quota_project_id="octopus", ) @pytest.mark.parametrize( "transport_class,grpc_helpers", [ (transports.EndpointServiceGrpcTransport, grpc_helpers), (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async), ], ) def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object( google.auth, "default", autospec=True ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: creds = ga_credentials.AnonymousCredentials() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) create_channel.assert_called_with( "aiplatform.googleapis.com:443", credentials=creds, credentials_file=None, quota_project_id="octopus", default_scopes=("https://www.googleapis.com/auth/cloud-platform",), scopes=["1", "2"], default_host="aiplatform.googleapis.com", ssl_credentials=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) @pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(transport_class): cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: mock_ssl_channel_creds = mock.Mock() transport_class( host="squid.clam.whelk", credentials=cred, ssl_channel_credentials=mock_ssl_channel_creds, ) mock_create_channel.assert_called_once_with( "squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls # is used. with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: transport_class( credentials=cred, client_cert_source_for_mtls=client_cert_source_callback, ) expected_cert, expected_key = client_cert_source_callback() mock_ssl_cred.assert_called_once_with( certificate_chain=expected_cert, private_key=expected_key ) def test_endpoint_service_host_no_port(): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com" ), ) assert client.transport._host == "aiplatform.googleapis.com:443" def test_endpoint_service_host_with_port(): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="aiplatform.googleapis.com:8000" ), ) assert client.transport._host == "aiplatform.googleapis.com:8000" def test_endpoint_service_grpc_transport_channel(): channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.EndpointServiceGrpcTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None def test_endpoint_service_grpc_asyncio_transport_channel(): channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.EndpointServiceGrpcAsyncIOTransport( host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" assert transport._ssl_channel_credentials == None # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) def test_endpoint_service_transport_channel_mtls_with_client_cert_source( transport_class, ): with mock.patch( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=client_cert_source_callback, ) adc.assert_called_once() grpc_ssl_channel_cred.assert_called_once_with( certificate_chain=b"cert bytes", private_key=b"key bytes" ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel assert transport._ssl_channel_credentials == mock_ssl_cred # Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are # removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport, ], ) def test_endpoint_service_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel mock_cred = mock.Mock() with pytest.warns(DeprecationWarning): transport = transport_class( host="squid.clam.whelk", credentials=mock_cred, api_mtls_endpoint="mtls.squid.clam.whelk", client_cert_source=None, ) grpc_create_channel.assert_called_once_with( "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) assert transport.grpc_channel == mock_grpc_channel def test_endpoint_service_grpc_lro_client(): client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsClient,) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client def test_endpoint_service_grpc_lro_async_client(): client = EndpointServiceAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) # Ensure that subsequent calls to the property send the exact same object. 
assert transport.operations_client is transport.operations_client def test_endpoint_path(): project = "squid" location = "clam" endpoint = "whelk" expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) actual = EndpointServiceClient.endpoint_path(project, location, endpoint) assert expected == actual def test_parse_endpoint_path(): expected = { "project": "octopus", "location": "oyster", "endpoint": "nudibranch", } path = EndpointServiceClient.endpoint_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_endpoint_path(path) assert expected == actual def test_model_path(): project = "cuttlefish" location = "mussel" model = "winkle" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) actual = EndpointServiceClient.model_path(project, location, model) assert expected == actual def test_parse_model_path(): expected = { "project": "nautilus", "location": "scallop", "model": "abalone", } path = EndpointServiceClient.model_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_model_path(path) assert expected == actual def test_common_billing_account_path(): billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) actual = EndpointServiceClient.common_billing_account_path(billing_account) assert expected == actual def test_parse_common_billing_account_path(): expected = { "billing_account": "clam", } path = EndpointServiceClient.common_billing_account_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_common_billing_account_path(path) assert expected == actual def test_common_folder_path(): folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = EndpointServiceClient.common_folder_path(folder) assert expected == actual def test_parse_common_folder_path(): expected = { "folder": "octopus", } path = EndpointServiceClient.common_folder_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_common_folder_path(path) assert expected == actual def test_common_organization_path(): organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = EndpointServiceClient.common_organization_path(organization) assert expected == actual def test_parse_common_organization_path(): expected = { "organization": "nudibranch", } path = EndpointServiceClient.common_organization_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_common_organization_path(path) assert expected == actual def test_common_project_path(): project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = EndpointServiceClient.common_project_path(project) assert expected == actual def test_parse_common_project_path(): expected = { "project": "mussel", } path = EndpointServiceClient.common_project_path(**expected) # Check that the path construction is reversible. 
actual = EndpointServiceClient.parse_common_project_path(path) assert expected == actual def test_common_location_path(): project = "winkle" location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) actual = EndpointServiceClient.common_location_path(project, location) assert expected == actual def test_parse_common_location_path(): expected = { "project": "scallop", "location": "abalone", } path = EndpointServiceClient.common_location_path(**expected) # Check that the path construction is reversible. actual = EndpointServiceClient.parse_common_location_path(path) assert expected == actual def test_client_withDEFAULT_CLIENT_INFO(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( transports.EndpointServiceTransport, "_prep_wrapped_messages" ) as prep: client = EndpointServiceClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) with mock.patch.object( transports.EndpointServiceTransport, "_prep_wrapped_messages" ) as prep: transport_class = EndpointServiceClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info)
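

# --- Added usage sketch (editor's note; not part of the generated test suite) ---
# The tests above pin down two calling conventions for the generated client:
# pass a fully built request object, or pass flattened keyword arguments
# (supplying both raises ValueError), and list_endpoints returns a pager that
# can be walked item by item or page by page.  The helper below is a minimal,
# hedged illustration of that surface; the project/location values are
# hypothetical and real credentials (e.g. Application Default Credentials)
# are assumed.  It is deliberately not named test_* so pytest will not run it.
def _example_list_endpoints_usage(parent="projects/my-project/locations/us-central1"):
    client = EndpointServiceClient()
    # Flattened-argument form; the pager yields endpoint.Endpoint messages.
    names = [ep.name for ep in client.list_endpoints(parent=parent)]
    # Page-by-page iteration, mirroring test_list_endpoints_pages above.
    tokens = [
        page.raw_page.next_page_token
        for page in client.list_endpoints(parent=parent).pages
    ]
    return names, tokens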
sasha-gitg/python-aiplatform
tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
Python
apache-2.0
109,566
[ "Octopus" ]
26b9002fe4884b3a06faa6cc2ba5060a076eab6702a620436d98ddd4e6cd625f
""" Test topological fingerprints. """ import unittest from rdkit import Chem from deepchem.feat import fingerprints as fp class TestCircularFingerprint(unittest.TestCase): """ Tests for CircularFingerprint. """ def setUp(self): """ Set up tests. """ smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O' self.mol = Chem.MolFromSmiles(smiles) self.engine = fp.CircularFingerprint() def test_circular_fingerprints(self): """ Test CircularFingerprint. """ rval = self.engine([self.mol]) assert rval.shape == (1, self.engine.size) def test_sparse_circular_fingerprints(self): """ Test CircularFingerprint with sparse encoding. """ self.engine = fp.CircularFingerprint(sparse=True) rval = self.engine([self.mol]) assert rval.shape == (1,) assert isinstance(rval[0], dict) assert len(rval[0]) def test_sparse_circular_fingerprints_with_smiles(self): """ Test CircularFingerprint with sparse encoding and SMILES for each fragment. """ self.engine = fp.CircularFingerprint(sparse=True, smiles=True) rval = self.engine([self.mol]) assert rval.shape == (1,) assert isinstance(rval[0], dict) assert len(rval[0]) # check for separate count and SMILES entries for each fragment for fragment_id, value in rval[0].items(): assert 'count' in value assert 'smiles' in value
Agent007/deepchem
deepchem/feat/tests/test_fingerprints.py
Python
mit
1,540
[ "RDKit" ]
81c858ccf0b9328c5317683653c0012b984f0b26c344eb17aa85cb11dd0a0934
""" Creates a class structure for optical elements The key difference between an optical element and a ray matrix is that an optical element can consist of several ray matrices which together act as a single element. The most common example is that of a thick lens which consists of two curved interfaces and an intervening translation. The optical element class contains a position and label for the optical element as well as a list of the ray matrices which make up that element and a list of their positions relative to the element's overall position. """ from pylase import ray_matrix class OpticalElement: """ A class for the elements of an OpticalSystem instance """ def __init__(self, ray_matrices, relative_positions, position, label): """ The constructor An OpticalElement can consist of more than one RayMatrix, the most common example is a thick lens. When defining the element, it is still unnecessary to explicitly call out the translation matrices. * ray_matrices: A list of instances from the RayMatrix class. This should be a list even if there is only 1 RayMatrix * relative_positions: A list of floats which specify the position of the individual pieces relative to the `position` of the entire element. Should be the same length as `ray_matrices` * position: The position along the optical axis of the entire element * label: A string label for the element :param ray_matrices: A list of instances of the RayMatrix class :param relative_positions: A list of floats :param position: :param label: """ # Type assertions assert type(label) is str assert type(ray_matrices) is list for rm in ray_matrices: assert issubclass(type(rm), ray_matrix.RayMatrix) assert len(ray_matrices) == len(relative_positions) assert issubclass(type(position), float) or issubclass(type(position), int) assert type(label) is str # Assignment self.ray_matrices = ray_matrices self.relative_positions = relative_positions self.position = position self.label = label def __repr__(self): return "\'{0}\' @ {1:0.2g}".format(self.label, self.position) class ThinLensEL(OpticalElement): """ Creates an OpticalElement instance for a thin lens """ def __init__(self, z, label, f): """ OpticalElement for a thin lens with focal length `f` :param z: position :param label: string label for the element :param f: focal length :type z: float :type label: str :type f: float """ ray_matrices = [ray_matrix.ThinLensRM(f)] relative_positions = [0] super(ThinLensEL, self).__init__(ray_matrices, relative_positions, z, label) class ThickLensEL(OpticalElement): """ Creates an OpticalElement instance for a thick lens """ def __init__(self, z, label, r1, r2, t, ior_lens, ior_air=1): """ OpticalElement for a thick lens The input and output radii of curvature are specified such that a negative curvature is concave looking along the beam line while a positive curvature is convex. I.E. a negative input curvature will result in a negative lens while a positive input curvature will result in a positive lens. The output curvature, on the other hand, works in the opposite way; a negative output curvature will result in a positive lens while a positive output curvature will result in a negative lens. A flat interface can be specified by setting `r1` or `r2` to `None` Note that the position associated with the lens is the position of the input surface. I.E. the position of the input face is `z` while the position of the output face is `z+t`. The specified thickness of the lens should be the thickness along the optical axis, i.e. the center thickness. 
:param z: The position of the input face along the optical axis :param label: A string label associated with the thick lens :param r1: The input curvature in meters, negative = concave, None = flat :param r2: The output curvature in meters, negative = concave, None = flat :param t: The thickness of the lens in meters :param ior_lens: The index of refraction of the lens material :param ior_air: The index of refraction of the surrounding medium :type z: float :type label: str :type r1: float or NoneType :type r2: float or NoneType :type t: float :type ior_lens: float :type ior_air: float """ ray_matrices = [ray_matrix.InterfaceRM(ior_init=ior_air, ior_fin=ior_lens, roc=r1), ray_matrix.InterfaceRM(ior_init=ior_lens, ior_fin=ior_air, roc=r2)] relative_positions = [0, t] super(ThickLensEL, self).__init__(ray_matrices, relative_positions, z, label) class MirrorEL(OpticalElement): """ Creates an OpticalElement instance for a mirror """ def __init__(self, z, label, roc=None, aoi=None, orientation='sagittal'): """ OpticalElement for a mirror which can optionally be curved and/or tilted This method creates the OpticalElement instance for a curved or flat mirror which can be at normal incidence or at an angle. If the optical axis has a non-zero angle of incidence with the mirror, then it is important to specify if the ray matrix is for the sagittal or tangential rays. For a typical optical system where the optical axis stays in a plane parallel to the surface of the table, then sagittal rays are those in the vertical direction (y axis) and tangential rays are those in the horizontal direction (x axis). :param roc: radius of curvature of the mirror (None for flat) :param aoi: angle of incidence of the optical axis with the mirror in radians :param orientation: orientation of the tilted mirror, either `'sagittal'` or `'tangential'` :type roc: float or None :type aoi: float or None :type orientation: str """ ray_matrices = [ray_matrix.MirrorRM(roc, aoi, orientation)] relative_positions = [0] super(MirrorEL, self).__init__(ray_matrices, relative_positions, z, label) class PrismEL(OpticalElement): """ Creates an OpticalElement instance for a prism """ def __init__(self, z, label, n_air, n_mat, theta1, alpha, s): """ OpticalElement for a prism Prisms have the ability to shape the beam, and can be used as beam shaping devices along one of the axes. This method returns the Optical Element for a standard beam shaping prism. The index of refraction of the air, `n_air` (n_air=1 usually), and the material, `n_mat`, are the first two arguments. The last three arguments are the input angle `theta1`, the opening angle of the prism, `alpha`, and the vertical distance from the apex of the prism, `s`. The matrix is taken from: Kasuya, T., Suzuki, T. & Shimoda, K. A prism anamorphic system for Gaussian beam expander. Appl. Phys. 17, 131–136 (1978). and figure 1 of that paper has a clear definition of the different parameters. :param z: position along the optical axis :param label: a string label for the prism :param n_air: index of refraction of the air (usually 1) :param n_mat: index of refraction of the prism material :param theta1: input angle to the prism wrt the prism face :param alpha: opening angle of the prism (alpha=0 is equivalent to a glass plate) :param s: vertical distance from the apex of the prism. 
:type z: float :type label: str :type n_air: float :type n_mat: float :type theta1: float :type alpha: float :type s: float """ ray_matrices = [ray_matrix.PrismRM(n_air, n_mat, theta1, alpha, s)] relative_positions = [0] super(PrismEL, self).__init__(ray_matrices, relative_positions, z, label) class InterfaceEL(OpticalElement): """ Creates an OpticalElement instance for an interface """ def __init__(self, z, label, ior_init, ior_fin, roc=None, aoi=None, orientation='sagittal'): """ OpticalElement for an interface which can optionally be curved and/or tilted This method creates the OpticalElement instance for a curved or flat interface which can be at normal incidence or at an angle. If the optical axis has a non-zero angle of incidence with the mirror, then it is important to specify if the ray matrix is for the sagittal or tangential rays. For a typical optical system where the optical axis stays in a plane parallel to the surface of the table, then sagittal rays are those in the vertical direction (y axis) and tangential rays are those in the horizontal direction (x axis). :param z: position along the optical axis :param label: string label for the interface :param ior_init: initial index of refraction :param ior_fin: final index of refraction :param roc: radius of curvature of the mirror (None for flat) :param aoi: angle of incidence of the optical axis with the mirror in radians :param orientation: orientation of the tilted mirror, either `'sagittal'` or `'tangential'` :type z: float :type label: str :type ior_init: float :type ior_fin: float :type roc: float or None :type aoi: float or None :type orientation: str """ ray_matrices = [ray_matrix.InterfaceRM(ior_init, ior_fin, roc=roc, aoi=aoi, orientation=orientation)] relative_positions = [0] super(InterfaceEL, self).__init__(ray_matrices, relative_positions, z, label) class NullEL(OpticalElement): """ Crates a null optical element """ def __init__(self, z, label): """ Constructs a null optical element The null element acts as a placeholder, and does not alter the optical characteristics of the system. :param z: position along the optical axis :param label: string label for the interface :type z: float :type label: str """ ray_matrices = [ray_matrix.NullRM()] relative_positions = [0] super(NullEL, self).__init__(ray_matrices, relative_positions, z, label) class _EmptySystemEL(OpticalElement): """ Crates an empty system optical element This element is used when no other elements are in the optical system """ def __init__(self): """ Constructs a null optical element The null element acts as a placeholder, and does not alter the optical characteristics of the system. :param z: position along the optical axis :param label: string label for the interface :type z: float :type label: str """ z = 0 label = 'empty' ray_matrices = [ray_matrix._EmptySystemRM()] relative_positions = [0] super(_EmptySystemEL, self).__init__(ray_matrices, relative_positions, z, label)
chrisark7/pylase
pylase/optical_element.py
Python
gpl-3.0
11,728
[ "Gaussian" ]
6f6ffc379275b5c063ab981637e1f000a1a2e74cf5c456cb8c54b8ebd19bbf72
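To make the optical_element.py API above concrete, here is a short, hypothetical usage sketch. The constructor signatures are taken from the docstrings shown above; the positions, focal length, curvatures, and labels are made-up illustrative values, not anything from the source.

# Illustrative only: signatures follow the docstrings above; the numbers are arbitrary.
import math

from pylase.optical_element import ThinLensEL, ThickLensEL, MirrorEL, NullEL

# A thin lens with a 100 mm focal length placed 0.25 m along the optical axis.
lens = ThinLensEL(z=0.25, label='L1', f=0.1)

# A biconvex thick lens: per the ThickLensEL docstring, a positive input curvature
# and a negative output curvature both act as a positive lens; z is the input face.
thick = ThickLensEL(z=0.50, label='L2', r1=0.05, r2=-0.05, t=0.005, ior_lens=1.5)

# A curved fold mirror hit at 45 degrees; sagittal and tangential rays see different
# effective focal lengths, so the orientation must be given explicitly.
fold = MirrorEL(z=0.80, label='M1', roc=0.2, aoi=math.pi / 4, orientation='tangential')

# A placeholder element that leaves the beam untouched.
marker = NullEL(z=1.00, label='reference plane')

for element in (lens, thick, fold, marker):
    print(element)    # __repr__ prints "'label' @ position"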
# encoding: utf-8 from Tkinter import * from itertools import izip from functools import partial from penview import * class XYPlot(Canvas): "a custom canvas to display xy-plots" def __init__(self, parent, window, width, height): self.canvas_color = "#eef" # ouff # # One most annoying spacing issue arises from the fact that highlightthickness doesn't default to 0 # for canvas widgets (and maybe others). The problem is that the widgets winfo_width() / winfo_height(): # will allways be 2*highlightthickness larger then the widget. This has implications especially when changing # the 'scrollregion' - e.g. if you whish to set it exactly to the size if the widget, you'd probably have to subtract # 2*highlightthickness first. The easier solution is to set highlightthickness to 0. # Thanks to "papageno", for sharing this: http://www.tek-tips.com/viewthread.cfm?qid=1161244&page=26 # Canvas.__init__(self, parent, width=width, height=height, bg=self.canvas_color, highlightthickness=0) # # on self.window = window self.width, self.height = width, height # The original with and height - this is a static variable # It is used by ScrollRegion to initially set the "scrollregion=" of this Canvas # The original _height is _also used later on for all coordinate system translations # When resizing the window, the original XYPlot Canvas is never destroyed. # It is automatically resized by the pack()er (expand=YES, fill=BOTH) and # the original (0,0) coordinates always stay where they are. # This is also where the axes are allways plotted. self.ppd = 100 # pixels per division self.upds = {} # the units per divisions we originally used to plot all self.lines self.lines = {} # this is a dict of dicts to which the keys are an ExperimentView and a values index self.axlines = () # a tuple lst of all axis related lines currently visible on the canvas window.conf.add_ox_listener(window.tk_cb(self.ox_update)) window.conf.add_x_listener(window.tk_cb(self.x_update)) window.conf.add_scale_listener(window.tk_cb(self.scale_update)) def add_line(self, view, index): """ plot a line for the values at index, against view.x_values and keep track of it color is determined by the view scale is taken from self.upds values are taken from the experiment associated with the view """ conf = self.window.conf self.lines[view][index] = \ self.data_line(view.ox.values[conf.x_values], view.ox.values[index], x_upd=self.upds[conf.x_values], y_upd=self.upds[index], fill=view.colors[index]) def remove_line(self, view, index): "delete the line that has been plotted for the values at index and loose track of it" self.delete(self.lines[view][index]) del self.lines[view][index] def ox_update(self, conf): "PVConf.ox_listener callback function" for view in conf.ox_views(): if view not in self.lines: # find added experiments, add our view_listener and call it once to plot the values for i, s in conf.values_upd.iteritems(): # if we haven't plotted this kind of values before, record the scale to use if i not in self.upds: self.upds[i] = s self.lines[view] = {} self.view_update(view) view.add_listener(self.window.tk_cb(self.view_update)) # from http://effbot.org/zone/python-list.htm: # Note that the for-in statement maintains an internal index, which is incremented for each loop iteration. # This means that if you modify the list you’re looping over, the indexes will get out of sync, and you may # end up skipping over items, or process the same item multiple times. 
# To work around this, you can loop over a copy of the list: # Seems to hold true for dicts as well. If you don't get a copy of the keys # using dict.keys() and modify the dict in the loop, a RuntimeError is raised for view in self.lines.keys(): if view.ox not in conf.open_experiments: for index in self.lines[view]: self.delete(self.lines[view][index]) del self.lines[view] def x_update(self, conf): "PVConf.x_listener callback (stub)" self.clear() # if we plot against different x_values, we have to start over for view in conf.ox_views(): self.lines[view] = {} # self.lines holds a dictionary containing all lines we have plotted self.view_update(view) def scale_update(self, conf): "PVConf.scale_listener callback" for i in self.upds: if self.upds[i] != conf.values_upd[i]: self.upds[i] = conf.values_upd[i] if i == conf.x_values: # if the x-scale has changed, we have to redraw every single line self.clear() for view in conf.ox_views(): self.lines[view] = {} self.view_update(view) continue # next - otherwise an y-scale has changed for view in self.lines: # FIXME: seems slow if i in view.y_values: # redraw only those that are visible self.remove_line(view, i) self.add_line(view, i) self._update_bbox() def _update_bbox(self): self.bbox = xmin, ymin, xmax, ymax = self.window.conf.bounding_box(self) # if the bounding box is higher or wider then 1000 times ppd, protect ourselves from performance shame # the tk widget is not very well suited to quickly draw arbitrary pixels on the (off-)screen, at least not on a canvas, # because each call has to be translated to the corresponding tcl script string... if xmax - xmin < 1000 * self.ppd and ymax - ymin < 1000 * self.ppd: self.redraw_axes() # translate the conventional to the canvas cordinate system self.config(scrollregion=(xmin, self.height - ymax, xmax, self.height - ymin)) def view_update(self, view): "ExperimentView.listener callback" for index in range(view.ox.nvalues + 1): # loop over all values (indexes) if index == self.window.conf.x_values: # if the values are used as the x_axis continue # next - otherwise, these are y-values if index not in view.y_values: # if the values should not be displayed if index in self.lines[view]: # but are currently visible self.remove_line(view, index) # hide them continue # next - otherwise these values ARE supposed to be visible if index not in self.lines[view]: # if the values are currently not visible self.add_line(view, index) # display them continue # next - otherwise these values ARE supposed to be and WERE already visible if self.itemcget(self.lines[view][index], "fill") != view.colors[index]: # if the color has changed self.itemconfig(self.lines[view][index], fill=view.colors[index]) # change the color self._update_bbox() def clear(self): "remove all lines from the canvas and empty the self.lines dictionary" for view in self.lines.keys(): for index in self.lines[view]: self.delete(self.lines[view][index]) del self.lines[view] def draw_line(self, points, **kwargs): """ draw a line along a list of point coordinates :parameters: points list of point coordinates in the form: ((x1, y1), (x2, y2)) """ # translate the conventional to the canvas cordinate system # using a generator expression avoids many copy operations return self.create_line(list((x, self.height - y) for x, y in points), **kwargs) def data_line(self, xlist, ylist, x_upd, y_upd, **kwargs): """ plot the points in ylist against the those in xlist scale the coordinate axes by y_upd and x_upd respectively """ xscale = lambda x: x / float(x_upd) * 
self.ppd yscale = lambda y: y / float(y_upd) * self.ppd # using izip and generator expressions avoids unnecessarily copying the data return self.draw_line(izip((xscale(x) for x in xlist), (yscale(y) for y in ylist)), **kwargs) def _draw_axes(self, color, grid_color): """" a generator which draws all lines related to the axes and yields their references draws the axes so they fill the current self.bbox """ xmin, ymin, xmax, ymax = (self.ppd * (v // self.ppd) for v in self.bbox) xmax += self.ppd ymax += self.ppd for x in range(xmin + self.ppd, xmax, self.ppd): yield self.draw_line(((x, ymin), (x, ymax)), width=1, fill=grid_color) yield self.draw_line(((x, -3), (x, 3)), width=1, fill=color) for y in range(ymin + self.ppd, ymax, self.ppd): yield self.draw_line(((xmin, y), (xmax, y)), width=1, fill=grid_color) yield self.draw_line(((-3, y), (3, y)), width=1, fill=color) yield self.draw_line(((xmin, 0), (xmax, 0)), width=1, fill=color) yield self.draw_line(((0, ymin), (0, ymax)), width=1, fill=color) def redraw_axes(self, color="black", grid_color="gray"): "remove and redraw all lines related to the axes on the canvas" map(self.delete, self.axlines) self.axlines = tuple(self._draw_axes(color, grid_color)) _Spinbox = Spinbox class Spinbox(_Spinbox): def set(self, value): self.delete(0, len(self.get())) self.insert(0, value) class PlotControls(Frame): "The frame which holds all the scale adjust spinboxes as well as the x-axe chooser below the plot region" def __init__(self, parent, window): Frame.__init__(self, parent) self.window = window self.labels = {} self.scalers = {} self.xchooser = None self.iscale = False # This variable is used to prevent race conditions when the scale is updated # The (prevetion) mechanism depends on there being no context switch between the two listeners/handlers # This is achieved by the "if current_thread() == self:" test at the top of in PVWindow.tk_do() in window.py # Probably it could be achieved by not wrapping self.scale_update() into window.tk_do() in the first place, # but this everything CAN be wrapped and we don't have to worry window.conf.add_ox_listener(window.tk_cb(self.ox_update)) window.conf.add_x_listener(window.tk_cb(self.x_update)) window.conf.add_scale_listener(window.tk_cb(self.scale_update)) def ox_update(self, conf): "PVConf.ox_listener callback function" if len(conf.open_experiments): self._update_controls(conf) for view in self.window.conf.ox_views(): view.add_listener(self.window.tk_cb(self.view_update)) def x_update(self, conf): "PVConf.x_listener callback function" self._update_controls(conf) def scale_update(self, conf): "PVConf.scale_listener callback function" if self.iscale: return for i in conf.values_upd: self.scalers[i].delete(0, len(self.scalers[i].get())) self.scalers[i].insert(0, conf.values_upd[i]) def view_update(self, view): "ExperimentView.listener callback function" self._update_controls(self.window.conf) def sb_handler(self, i, *event): "scalers spinboxes 'command=' and '<KeyRelease>' event handler" try: scale = float(self.scalers[i].get()) except: return if scale == 0: scale = 0.001 # don't interfere if somebody is typing in a value like "0.5" # (type "3" is KeyRelease, see http://infohost.nmt.edu/tcc/help/pubs/tkinter/events.html#event-types) if not (len(event) > 0 and event[0].type == "3"): self.scalers[i].set(scale) self.iscale = True self.window.conf.set_scale(i, scale) self.iscale = False def sw_handler(self, i, event): "scalers spinboxes scrollwheel event handler" scale = self.window.conf.values_upd[i] inc = 
self.scalers[i].config("increment")[4] adj = {4: inc, 5: -inc}[event.num] # button 4 => up; button 5 => down if scale < 1: # FIXME: there is a lot of room for improvement here adj /= 50 # the adjustment adj should be computed much more dynamically scale += adj if scale <= 0: scale = 0.001 self.scalers[i].set(scale) self.iscale = True self.window.conf.set_scale(i, scale) self.iscale = False def xv_handler(self, v, *ignored): "xchooser OptionMenu event handler (StringVar trace function)" self.window.conf.set_x_values(self.xchooser.vals[v.get()]) self.window.do(PVAction.reset_scale) # private helper function def _update_controls(self, conf): "set up the controls_region" # don't change the pack()ing order in this function # dispose old controls for l in self.labels.values(): l.pack_forget() # FIXME: we should reuse those widgets - shouldn't we ? for s in self.scalers.values(): s.pack_forget() # ...but it might not be worth the effort to code the housekeeping logic if self.xchooser: self.xchooser.pack_forget() # create y-axis controls for i in range(conf.nvalues + 1): if i == conf.x_values: # these are the x-axis values continue # next ## y-axis scaler sb = Spinbox(self, from_=0, to=99999, width=5, command=partial(self.sb_handler, i)) sb.set(conf.values_upd[i]) sb.pack(side=LEFT) self.scalers[i] = sb sb.bind("<Button-4>", partial(self.sw_handler, i)) # concerning windows and mac scrollwheel handling, sb.bind("<Button-5>", partial(self.sw_handler, i)) # see the commment in the ScrollRegion class at the bottom of this file sb.bind("<KeyRelease>", partial(self.sb_handler, i)) ## y-axis units label ul = Label(self, text=conf.units[i]+" / div ") ul.pack(side=LEFT) self.labels[i] = ul # create x-axis controls (starting from the right) ## x-axis units label ## keep the spaces at the end of the label - os/x aqua ui draws that ugly resizeer triangle there (bottom right of the window) self.labels[conf.x_values] = Label(self, text=conf.units[conf.x_values]+" / div ") self.labels[conf.x_values].pack(side=RIGHT) ## x-axis scaler sb = Spinbox(self, from_=0, to=99999, width=5, command=partial(self.sb_handler, conf.x_values)) sb.set(conf.values_upd[conf.x_values]) sb.pack(side=RIGHT) self.scalers[conf.x_values] = sb sb.bind("<Button-4>", partial(self.sw_handler, conf.x_values)) sb.bind("<Button-5>", partial(self.sw_handler, conf.x_values)) sb.bind("<KeyRelease>", partial(self.sb_handler, conf.x_values)) ## x-axis values chooser ### dictionary of possible values and their corresponding ox.values index vals = {} rvals = {} for i in range(min(ox.nvalues for ox in conf.open_experiments) + 1): # loop i=0 (Time) to minimum nvalues of all open experiments + 1 desc = "" for vdesc in [ox.get_desc(i) for ox in conf.open_experiments]: if not desc.startswith(vdesc): desc += " (%s)" % vdesc vals[vdesc] = i rvals[i] = vdesc # the reverse dictionary to look up the default setting string ### set up an OptionMenu with a StringVar traced variable v = StringVar() # create a StringVar and set its default value v.set(rvals[conf.x_values]) # FIRST set() v.trace("w", partial(self.xv_handler, v)) # THEN trace() - keep the order here! self.xchooser = OptionMenu(self, v, *vals.keys()) self.xchooser.pack(side=RIGHT) self.xchooser.vals = vals # keep a reference to the dict created above # helps ! 
# to make make the xyplot canvas initially be resized properly # -> reset_values_upd() setting proper scales -> xyplot bounding box matching canvas size/scroll region self.window.tk.update_idletasks() self.window.main_region.pack() self.window.main_region.add(self.window.data_region) class ScrollRegion(Frame): "A Frame which can wrap another (child) widget and scroll it" def __init__(self, parent): Frame.__init__(self, parent) self.grid_columnconfigure(0, weight=1) self.grid_rowconfigure(0, weight=1) self.xscrollbar = Scrollbar(self, orient=HORIZONTAL) self.yscrollbar = Scrollbar(self, orient=VERTICAL) self.xscrollbar.grid(row=1, column=0, sticky=E+W) self.yscrollbar.grid(row=0, column=1, sticky=N+S) def scroll_child(self, child_widget): self.child_widget = child_widget self.xscrollbar.config(command=child_widget.xview) self.yscrollbar.config(command=child_widget.yview) child_widget.grid(row=0, column=0, sticky=N+S+E+W) child_widget.config(scrollregion=(0, 0, child_widget.width, child_widget.height), xscrollcommand=self.xscrollbar.set, yscrollcommand=self.yscrollbar.set) child_widget.bind("<Button-4>", self.ywheel_handler) child_widget.bind("<Button-5>", self.ywheel_handler) # child_widget.bind("<Button-6>", self.xwheel_handler) # FIXME: fix tkinter ? # child_widget.bind("<Button-7>", self.xwheel_handler) # hmmm... I'm astonished that windows and mac handle mose scrollwheel events quiet child_widget.bind("<Button-1>", self.b1_handler) # differently from linux. *me* of course thinks linux does it best, assigning additional buttons child_widget.bind("<Button1-Motion>", self.b1m_handler) # To implement the scrollwheel handling on windows and mac would take too much time child_widget.bind("<ButtonRelease-1>", self.b1r_handler) # If you want to do it, have a look at the .delta event attribute, described here: # http://infohost.nmt.edu/tcc/help/pubs/tkinter/events.html#event-handlers def b1_handler(self, e): self.config(cursor="fleur") self.mark = e self.child_widget.scan_mark(e.x, e.y) def b1m_handler(self, e): self.child_widget.scan_dragto(self.mark.x + (e.x-self.mark.x)/10, self.mark.y + (e.y-self.mark.y)/10) def b1r_handler(self, e): self.config(cursor="arrow") def ywheel_handler(self, e): self.child_widget.yview_scroll({4: -1, 5: 1 }[e.num], 'units') # button 4 => up; button 5 => down def xwheel_handler(self, e): self.child_widget.xview_scroll({6: -1, 7: 1 }[e.num], 'units') # button 6 => left; button 7 => right # FIXME: correct ???
Pfiver/penview
graph_view.py
Python
gpl-3.0
20,879
[ "FLEUR" ]
a0b40d58db9778c244dc9e95fcf5d5484794d393a16d09dafd74c3f73b5e19a1
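A note on the coordinate handling in XYPlot above: data_line() scales each value by pixels-per-division over units-per-division, and draw_line() flips the y coordinate against the original canvas height because Tk canvas y values grow downward. The standalone sketch below reproduces just that mapping; it is not penview code, and the ppd/height values are illustrative.

# Standalone sketch of the data -> canvas mapping used by XYPlot (illustrative values).
PPD = 100      # pixels per division, as in XYPlot.ppd
HEIGHT = 400   # original canvas height, as in XYPlot.height

def to_canvas(x, y, x_upd, y_upd, ppd=PPD, height=HEIGHT):
    """Map a data point (x, y) to Tk canvas coordinates."""
    cx = x / float(x_upd) * ppd     # xscale in data_line()
    cy = y / float(y_upd) * ppd     # yscale in data_line()
    return cx, height - cy          # y flip done in draw_line()

# One x division (x_upd) maps to PPD pixels; y is measured upward from y = HEIGHT.
print(to_canvas(2.0, 1.0, x_upd=1.0, y_upd=0.5))    # (200.0, 200.0)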
#!/usr/bin/env python import os import sys import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot, vtkGetTempDir gotWarning = False gotError = False def WarningCallback(obj, evt): global gotWarning gotWarning = True VTK_DATA_ROOT = vtkGetDataRoot() VTK_TEMP_DIR = vtkGetTempDir() # Image pipeline image1 = vtk.vtkTIFFReader() image1.SetFileName(VTK_DATA_ROOT + "/Data/beach.tif") # "beach.tif" image contains ORIENTATION tag which is # ORIENTATION_TOPLEFT (row 0 top, col 0 lhs) type. The TIFF # reader parses this tag and sets the internal TIFF image # orientation accordingly. To overwrite this orientation with a vtk # convention of ORIENTATION_BOTLEFT (row 0 bottom, col 0 lhs ), invoke # SetOrientationType method with parameter value of 4. image1.SetOrientationType(4) image1.Update() filename = VTK_TEMP_DIR + "/" + "pngw1.png" testKey = "test key" testValue = "test value" longKey = "0123456789012345678901234567890123456789"\ "0123456789012345678901234567890123456789" longKeyValue = "this also prints a warning" try: # Can we write to the directory? channel = open(filename, "wb") channel.close() writer = vtk.vtkPNGWriter() writer.SetInputConnection(image1.GetOutputPort()) writer.SetFileName(filename) writer.AddText(testKey, testValue); # this is fine writer.AddText(testKey, testValue); observerId = writer.AddObserver(vtk.vtkCommand.WarningEvent, WarningCallback) # this prints a warning and does not add the text chunk writer.AddText("", "this prints a warning") if (not gotWarning): print("Error: expect warning when adding a text chunk with empty key") gotError = True gotWarning = False # this prints a warning and add a text chunk with a truncated key writer.AddText(longKey, longKeyValue) if (not gotWarning): print("Error: expect warning when adding a text chunk "\ "with key length bigger than 79 characters") gotError = True writer.RemoveObserver(observerId) writer.Write() reader = vtk.vtkPNGReader() reader.SetFileName(filename); reader.Update(); if (reader.GetNumberOfTextChunks() != 3): print("Error: Expecting three text chunks in the PNG file but got",\ reader.GetNumberOfTextChunks()) gotError = True beginEnd = [0, 0] reader.GetTextChunks(testKey,beginEnd) # the key starting with 0 comes in first. if (beginEnd[0] != 1 and beginEnd[1] != 3): print("Error: expect \"%s\" at index 1 and 2 but got "\ "them at positions %d and %d" % (testKey, beginEnd[0], beginEnd[1])) gotError = True if (reader.GetTextKey(1) != testKey or reader.GetTextKey(2) != testKey): print("Error: expecting key \"%s\" at index 1 and 2 but got \"%s\"" % \ (testKey, reader.GetTextKey(1))) gotError = True if (reader.GetTextValue(1) != testValue or reader.GetTextValue(2) != testValue): print("Error: expecting value \"%s\" at index 1 and 2 but got \"%s\"" % \ (testValue, reader.GetTextValue(1))) gotError = True if (reader.GetTextKey(0) != longKey[:-1]): print("Error: expecting value \"%s\" at index but got \"%s\"" % \ (longKey[:-1], reader.GetTextKey(0))) gotError = True if (gotError): sys.exit(1) else: sys.exit(0) except IOError: print("Error: Unable to test PNG write/read of text chunks.") sys.exit(1)
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/Image/Testing/Python/TestPNGTextChunks.py
Python
gpl-3.0
3,519
[ "VTK" ]
4017112789883eddf64934663ea5363bf070b9fad3a27b8ee525daa57d586a78
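Condensed from the test above, a minimal PNG text-chunk round trip might look like the sketch below. Only calls that appear in the test are used; the file names and the key/value pair are placeholders.

# Sketch only: same VTK calls as in TestPNGTextChunks.py above; paths and text are placeholders.
import vtk

source = vtk.vtkTIFFReader()
source.SetFileName("input.tif")          # any image source would do here
source.SetOrientationType(4)             # bottom-left row order, as explained above
source.Update()

writer = vtk.vtkPNGWriter()
writer.SetInputConnection(source.GetOutputPort())
writer.SetFileName("snapshot.png")
writer.AddText("Software", "my pipeline")    # keys longer than 79 characters get truncated
writer.Write()

reader = vtk.vtkPNGReader()
reader.SetFileName("snapshot.png")
reader.Update()
for i in range(reader.GetNumberOfTextChunks()):
    print(reader.GetTextKey(i), reader.GetTextValue(i))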
tutorial_tests = """ Let's try a simple generator: >>> def f(): ... yield 1 ... yield 2 >>> for i in f(): ... print i 1 2 >>> g = f() >>> g.next() 1 >>> g.next() 2 "Falling off the end" stops the generator: >>> g.next() Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g StopIteration "return" also stops the generator: >>> def f(): ... yield 1 ... return ... yield 2 # never reached ... >>> g = f() >>> g.next() 1 >>> g.next() Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 3, in f StopIteration >>> g.next() # once stopped, can't be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration "raise StopIteration" stops the generator too: >>> def f(): ... yield 1 ... raise StopIteration ... yield 2 # never reached ... >>> g = f() >>> g.next() 1 >>> g.next() Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> g.next() Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration However, they are not exactly equivalent: >>> def g1(): ... try: ... return ... except: ... yield 1 ... >>> list(g1()) [] >>> def g2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print list(g2()) [42] This may be surprising at first: >>> def g3(): ... try: ... return ... finally: ... yield 1 ... >>> list(g3()) [1] Let's create an alternate range() function implemented as a generator: >>> def yrange(n): ... for i in range(n): ... yield i ... >>> list(yrange(5)) [0, 1, 2, 3, 4] Generators always return to the most recent caller: >>> def creator(): ... r = yrange(5) ... print "creator", r.next() ... return r ... >>> def caller(): ... r = creator() ... for i in r: ... print "caller", i ... >>> caller() creator 0 caller 1 caller 2 caller 3 caller 4 Generators can call other generators: >>> def zrange(n): ... for i in yrange(n): ... yield i ... >>> list(zrange(5)) [0, 1, 2, 3, 4] """ # The examples from PEP 255. pep_tests = """ Specification: Yield Restriction: A generator cannot be resumed while it is actively running: >>> def g(): ... i = me.next() ... yield i >>> me = g() >>> me.next() Traceback (most recent call last): ... File "<string>", line 2, in g ValueError: generator already executing Specification: Return Note that return isn't always equivalent to raising StopIteration: the difference lies in how enclosing try/except constructs are treated. For example, >>> def f1(): ... try: ... return ... except: ... yield 1 >>> print list(f1()) [] because, as in any function, return simply exits, but >>> def f2(): ... try: ... raise StopIteration ... except: ... yield 42 >>> print list(f2()) [42] because StopIteration is captured by a bare "except", as is any exception. Specification: Generators and Exception Propagation >>> def f(): ... return 1//0 >>> def g(): ... yield f() # the zero division exception propagates ... yield 42 # and we'll never get here >>> k = g() >>> k.next() Traceback (most recent call last): File "<stdin>", line 1, in ? File "<stdin>", line 2, in g File "<stdin>", line 2, in f ZeroDivisionError: integer division or modulo by zero >>> k.next() # and the generator cannot be resumed Traceback (most recent call last): File "<stdin>", line 1, in ? StopIteration >>> Specification: Try/Except/Finally >>> def f(): ... try: ... yield 1 ... try: ... yield 2 ... 1//0 ... yield 3 # never get here ... except ZeroDivisionError: ... yield 4 ... yield 5 ... raise ... except: ... yield 6 ... yield 7 # the "raise" above stops this ... except: ... 
yield 8 ... yield 9 ... try: ... x = 12 ... finally: ... yield 10 ... yield 11 >>> print list(f()) [1, 2, 4, 5, 8, 9, 10, 11] >>> Guido's binary tree example. >>> # A binary tree class. >>> class Tree: ... ... def __init__(self, label, left=None, right=None): ... self.label = label ... self.left = left ... self.right = right ... ... def __repr__(self, level=0, indent=" "): ... s = level*indent + repr(self.label) ... if self.left: ... s = s + "\\n" + self.left.__repr__(level+1, indent) ... if self.right: ... s = s + "\\n" + self.right.__repr__(level+1, indent) ... return s ... ... def __iter__(self): ... return inorder(self) >>> # Create a Tree from a list. >>> def tree(list): ... n = len(list) ... if n == 0: ... return [] ... i = n // 2 ... return Tree(list[i], tree(list[:i]), tree(list[i+1:])) >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # A recursive generator that generates Tree labels in in-order. >>> def inorder(t): ... if t: ... for x in inorder(t.left): ... yield x ... yield t.label ... for x in inorder(t.right): ... yield x >>> # Show it off: create a tree. >>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ") >>> # Print the nodes of the tree in in-order. >>> for x in t: ... print x, A B C D E F G H I J K L M N O P Q R S T U V W X Y Z >>> # A non-recursive generator. >>> def inorder(node): ... stack = [] ... while node: ... while node.left: ... stack.append(node) ... node = node.left ... yield node.label ... while not node.right: ... try: ... node = stack.pop() ... except IndexError: ... return ... yield node.label ... node = node.right >>> # Exercise the non-recursive generator. >>> for x in t: ... print x, A B C D E F G H I J K L M N O P Q R S T U V W X Y Z """ # Examples from Iterator-List and Python-Dev and c.l.py. email_tests = """ The difference between yielding None and returning it. >>> def g(): ... for i in range(3): ... yield None ... yield None ... return >>> list(g()) [None, None, None, None] Ensure that explicitly raising StopIteration acts like any other exception in try/except, not like a return. >>> def g(): ... yield 1 ... try: ... raise StopIteration ... except: ... yield 2 ... yield 3 >>> list(g()) [1, 2, 3] Next one was posted to c.l.py. >>> def gcomb(x, k): ... "Generate all combinations of k elements from list x." ... ... if k > len(x): ... return ... if k == 0: ... yield [] ... else: ... first, rest = x[0], x[1:] ... # A combination does or doesn't contain first. ... # If it does, the remainder is a k-1 comb of rest. ... for c in gcomb(rest, k-1): ... c.insert(0, first) ... yield c ... # If it doesn't contain first, it's a k comb of rest. ... for c in gcomb(rest, k): ... yield c >>> seq = range(1, 5) >>> for k in range(len(seq) + 2): ... print "%d-combs of %s:" % (k, seq) ... for c in gcomb(seq, k): ... print " ", c 0-combs of [1, 2, 3, 4]: [] 1-combs of [1, 2, 3, 4]: [1] [2] [3] [4] 2-combs of [1, 2, 3, 4]: [1, 2] [1, 3] [1, 4] [2, 3] [2, 4] [3, 4] 3-combs of [1, 2, 3, 4]: [1, 2, 3] [1, 2, 4] [1, 3, 4] [2, 3, 4] 4-combs of [1, 2, 3, 4]: [1, 2, 3, 4] 5-combs of [1, 2, 3, 4]: From the Iterators list, about the types of these things. >>> def g(): ... yield 1 ... >>> type(g) <type 'function'> >>> i = g() >>> type(i) <type 'generator'> >>> [s for s in dir(i) if not s.startswith('_')] ['close', 'gi_frame', 'gi_running', 'next', 'send', 'throw'] >>> print i.next.__doc__ x.next() -> the next value, or raise StopIteration >>> iter(i) is i True >>> import types >>> isinstance(i, types.GeneratorType) True And more, added later. 
>>> i.gi_running 0 >>> type(i.gi_frame) <type 'frame'> >>> i.gi_running = 42 Traceback (most recent call last): ... TypeError: readonly attribute >>> def g(): ... yield me.gi_running >>> me = g() >>> me.gi_running 0 >>> me.next() 1 >>> me.gi_running 0 A clever union-find implementation from c.l.py, due to David Eppstein. Sent: Friday, June 29, 2001 12:16 PM To: python-list@python.org Subject: Re: PEP 255: Simple Generators >>> class disjointSet: ... def __init__(self, name): ... self.name = name ... self.parent = None ... self.generator = self.generate() ... ... def generate(self): ... while not self.parent: ... yield self ... for x in self.parent.generator: ... yield x ... ... def find(self): ... return self.generator.next() ... ... def union(self, parent): ... if self.parent: ... raise ValueError("Sorry, I'm not a root!") ... self.parent = parent ... ... def __str__(self): ... return self.name >>> names = "ABCDEFGHIJKLM" >>> sets = [disjointSet(name) for name in names] >>> roots = sets[:] >>> import random >>> gen = random.WichmannHill(42) >>> while 1: ... for s in sets: ... print "%s->%s" % (s, s.find()), ... print ... if len(roots) > 1: ... s1 = gen.choice(roots) ... roots.remove(s1) ... s2 = gen.choice(roots) ... s1.union(s2) ... print "merged", s1, "into", s2 ... else: ... break A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M merged D into G A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M merged C into F A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M merged L into A A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M merged H into E A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M merged B into E A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M merged J into G A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M merged E into G A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M merged M into G A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G merged I into K A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G merged K into A A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G merged F into A A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G merged A into G A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G """ # Emacs turd ' # Fun tests (for sufficiently warped notions of "fun"). fun_tests = """ Build up to a recursive Sieve of Eratosthenes generator. >>> def firstn(g, n): ... return [g.next() for i in range(n)] >>> def intsfrom(i): ... while 1: ... yield i ... i += 1 >>> firstn(intsfrom(5), 7) [5, 6, 7, 8, 9, 10, 11] >>> def exclude_multiples(n, ints): ... for i in ints: ... if i % n: ... yield i >>> firstn(exclude_multiples(3, intsfrom(1)), 6) [1, 2, 4, 5, 7, 8] >>> def sieve(ints): ... prime = ints.next() ... yield prime ... not_divisible_by_prime = exclude_multiples(prime, ints) ... for p in sieve(not_divisible_by_prime): ... yield p >>> primes = sieve(intsfrom(2)) >>> firstn(primes, 20) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71] Another famous problem: generate all integers of the form 2**i * 3**j * 5**k in increasing order, where i,j,k >= 0. Trickier than it may look at first! Try writing it without generators, and correctly, and without generating 3 internal results for each result output. >>> def times(n, g): ... for i in g: ... yield n * i >>> firstn(times(10, intsfrom(1)), 10) [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] >>> def merge(g, h): ... 
ng = g.next() ... nh = h.next() ... while 1: ... if ng < nh: ... yield ng ... ng = g.next() ... elif ng > nh: ... yield nh ... nh = h.next() ... else: ... yield ng ... ng = g.next() ... nh = h.next() The following works, but is doing a whale of a lot of redundant work -- it's not clear how to get the internal uses of m235 to share a single generator. Note that me_times2 (etc) each need to see every element in the result sequence. So this is an example where lazy lists are more natural (you can look at the head of a lazy list any number of times). >>> def m235(): ... yield 1 ... me_times2 = times(2, m235()) ... me_times3 = times(3, m235()) ... me_times5 = times(5, m235()) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Don't print "too many" of these -- the implementation above is extremely inefficient: each call of m235() leads to 3 recursive calls, and in turn each of those 3 more, and so on, and so on, until we've descended enough levels to satisfy the print stmts. Very odd: when I printed 5 lines of results below, this managed to screw up Win98's malloc in "the usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting address space, and it *looked* like a very slow leak. >>> result = m235() >>> for i in range(3): ... print firstn(result, 15) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] Heh. Here's one way to get a shared list, complete with an excruciating namespace renaming trick. The *pretty* part is that the times() and merge() functions can be reused as-is, because they only assume their stream arguments are iterable -- a LazyList is the same as a generator to times(). >>> class LazyList: ... def __init__(self, g): ... self.sofar = [] ... self.fetch = g.next ... ... def __getitem__(self, i): ... sofar, fetch = self.sofar, self.fetch ... while i >= len(sofar): ... sofar.append(fetch()) ... return sofar[i] >>> def m235(): ... yield 1 ... # Gack: m235 below actually refers to a LazyList. ... me_times2 = times(2, m235) ... me_times3 = times(3, m235) ... me_times5 = times(5, m235) ... for i in merge(merge(me_times2, ... me_times3), ... me_times5): ... yield i Print as many of these as you like -- *this* implementation is memory- efficient. >>> m235 = LazyList(m235()) >>> for i in range(5): ... print [m235[j] for j in range(15*i, 15*(i+1))] [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] Ye olde Fibonacci generator, LazyList style. >>> def fibgen(a, b): ... ... def sum(g, h): ... while 1: ... yield g.next() + h.next() ... ... def tail(g): ... g.next() # throw first away ... for x in g: ... yield x ... ... yield a ... yield b ... for s in sum(iter(fib), ... tail(iter(fib))): ... 
yield s >>> fib = LazyList(fibgen(1, 2)) >>> firstn(iter(fib), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] Running after your tail with itertools.tee (new in version 2.4) The algorithms "m235" (Hamming) and Fibonacci presented above are both examples of a whole family of FP (functional programming) algorithms where a function produces and returns a list while the production algorithm suppose the list as already produced by recursively calling itself. For these algorithms to work, they must: - produce at least a first element without presupposing the existence of the rest of the list - produce their elements in a lazy manner To work efficiently, the beginning of the list must not be recomputed over and over again. This is ensured in most FP languages as a built-in feature. In python, we have to explicitly maintain a list of already computed results and abandon genuine recursivity. This is what had been attempted above with the LazyList class. One problem with that class is that it keeps a list of all of the generated results and therefore continually grows. This partially defeats the goal of the generator concept, viz. produce the results only as needed instead of producing them all and thereby wasting memory. Thanks to itertools.tee, it is now clear "how to get the internal uses of m235 to share a single generator". >>> from itertools import tee >>> def m235(): ... def _m235(): ... yield 1 ... for n in merge(times(2, m2), ... merge(times(3, m3), ... times(5, m5))): ... yield n ... m1 = _m235() ... m2, m3, m5, mRes = tee(m1, 4) ... return mRes >>> it = m235() >>> for i in range(5): ... print firstn(it, 15) [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24] [25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80] [81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192] [200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384] [400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675] The "tee" function does just what we want. It internally keeps a generated result for as long as it has not been "consumed" from all of the duplicated iterators, whereupon it is deleted. You can therefore print the hamming sequence during hours without increasing memory usage, or very little. The beauty of it is that recursive running-after-their-tail FP algorithms are quite straightforwardly expressed with this Python idiom. Ye olde Fibonacci generator, tee style. >>> def fib(): ... ... def _isum(g, h): ... while 1: ... yield g.next() + h.next() ... ... def _fib(): ... yield 1 ... yield 2 ... fibTail.next() # throw first away ... for res in _isum(fibHead, fibTail): ... yield res ... ... realfib = _fib() ... fibHead, fibTail, fibRes = tee(realfib, 3) ... return fibRes >>> firstn(fib(), 17) [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584] """ # syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0 # hackery. syntax_tests = """ >>> def f(): ... return 22 ... yield 1 Traceback (most recent call last): .. SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3) >>> def f(): ... yield 1 ... return 22 Traceback (most recent call last): .. SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3) "return None" is not the same as "return" in a generator: >>> def f(): ... yield 1 ... return None Traceback (most recent call last): .. 
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3) These are fine: >>> def f(): ... yield 1 ... return >>> def f(): ... try: ... yield 1 ... finally: ... pass >>> def f(): ... try: ... try: ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... pass ... finally: ... pass >>> def f(): ... try: ... try: ... yield 12 ... 1//0 ... except ZeroDivisionError: ... yield 666 ... except: ... try: ... x = 12 ... finally: ... yield 12 ... except: ... return >>> list(f()) [12, 666] >>> def f(): ... yield >>> type(f()) <type 'generator'> >>> def f(): ... if 0: ... yield >>> type(f()) <type 'generator'> >>> def f(): ... if 0: ... yield 1 >>> type(f()) <type 'generator'> >>> def f(): ... if "": ... yield None >>> type(f()) <type 'generator'> >>> def f(): ... return ... try: ... if x==4: ... pass ... elif 0: ... try: ... 1//0 ... except SyntaxError: ... pass ... else: ... if 0: ... while 12: ... x += 1 ... yield 2 # don't blink ... f(a, b, c, d, e) ... else: ... pass ... except: ... x = 1 ... return >>> type(f()) <type 'generator'> >>> def f(): ... if 0: ... def g(): ... yield 1 ... >>> type(f()) <type 'NoneType'> >>> def f(): ... if 0: ... class C: ... def __init__(self): ... yield 1 ... def f(self): ... yield 2 >>> type(f()) <type 'NoneType'> >>> def f(): ... if 0: ... return ... if 0: ... yield 2 >>> type(f()) <type 'generator'> >>> def f(): ... if 0: ... lambda x: x # shouldn't trigger here ... return # or here ... def f(i): ... return 2*i # or here ... if 0: ... return 3 # but *this* sucks (line 8) ... if 0: ... yield 2 # because it's a generator (line 10) Traceback (most recent call last): SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10) This one caused a crash (see SF bug 567538): >>> def f(): ... for i in range(3): ... try: ... continue ... finally: ... yield i ... >>> g = f() >>> print g.next() 0 >>> print g.next() 1 >>> print g.next() 2 >>> print g.next() Traceback (most recent call last): StopIteration """ # conjoin is a simple backtracking generator, named in honor of Icon's # "conjunction" control structure. Pass a list of no-argument functions # that return iterable objects. Easiest to explain by example: assume the # function list [x, y, z] is passed. Then conjoin acts like: # # def g(): # values = [None] * 3 # for values[0] in x(): # for values[1] in y(): # for values[2] in z(): # yield values # # So some 3-lists of values *may* be generated, each time we successfully # get into the innermost loop. If an iterator fails (is exhausted) before # then, it "backtracks" to get the next value from the nearest enclosing # iterator (the one "to the left"), and starts all over again at the next # slot (pumps a fresh iterator). Of course this is most useful when the # iterators have side-effects, so that which values *can* be generated at # each slot depend on the values iterated at previous slots. def conjoin(gs): values = [None] * len(gs) def gen(i, values=values): if i >= len(gs): yield values else: for values[i] in gs[i](): for x in gen(i+1): yield x for x in gen(0): yield x # That works fine, but recursing a level and checking i against len(gs) for # each item produced is inefficient. By doing manual loop unrolling across # generator boundaries, it's possible to eliminate most of that overhead. # This isn't worth the bother *in general* for generators, but conjoin() is # a core building block for some CPU-intensive generator applications. 
def conjoin(gs): n = len(gs) values = [None] * n # Do one loop nest at time recursively, until the # of loop nests # remaining is divisible by 3. def gen(i, values=values): if i >= n: yield values elif (n-i) % 3: ip1 = i+1 for values[i] in gs[i](): for x in gen(ip1): yield x else: for x in _gen3(i): yield x # Do three loop nests at a time, recursing only if at least three more # remain. Don't call directly: this is an internal optimization for # gen's use. def _gen3(i, values=values): assert i < n and (n-i) % 3 == 0 ip1, ip2, ip3 = i+1, i+2, i+3 g, g1, g2 = gs[i : ip3] if ip3 >= n: # These are the last three, so we can yield values directly. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): yield values else: # At least 6 loop nests remain; peel off 3 and recurse for the # rest. for values[i] in g(): for values[ip1] in g1(): for values[ip2] in g2(): for x in _gen3(ip3): yield x for x in gen(0): yield x # And one more approach: For backtracking apps like the Knight's Tour # solver below, the number of backtracking levels can be enormous (one # level per square, for the Knight's Tour, so that e.g. a 100x100 board # needs 10,000 levels). In such cases Python is likely to run out of # stack space due to recursion. So here's a recursion-free version of # conjoin too. # NOTE WELL: This allows large problems to be solved with only trivial # demands on stack space. Without explicitly resumable generators, this is # much harder to achieve. OTOH, this is much slower (up to a factor of 2) # than the fancy unrolled recursive conjoin. def flat_conjoin(gs): # rename to conjoin to run tests with this instead n = len(gs) values = [None] * n iters = [None] * n _StopIteration = StopIteration # make local because caught a *lot* i = 0 while 1: # Descend. try: while i < n: it = iters[i] = gs[i]().next values[i] = it() i += 1 except _StopIteration: pass else: assert i == n yield values # Backtrack until an older iterator can be resumed. i -= 1 while i >= 0: try: values[i] = iters[i]() # Success! Start fresh at next level. i += 1 break except _StopIteration: # Continue backtracking. i -= 1 else: assert i < 0 break # A conjoin-based N-Queens solver. class Queens: def __init__(self, n): self.n = n rangen = range(n) # Assign a unique int to each column and diagonal. # columns: n of those, range(n). # NW-SE diagonals: 2n-1 of these, i-j unique and invariant along # each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0- # based. # NE-SW diagonals: 2n-1 of these, i+j unique and invariant along # each, smallest i+j is 0, largest is 2n-2. # For each square, compute a bit vector of the columns and # diagonals it covers, and for each row compute a function that # generates the possiblities for the columns in that row. self.rowgenerators = [] for i in rangen: rowuses = [(1L << j) | # column ordinal (1L << (n + i-j + n-1)) | # NW-SE ordinal (1L << (n + 2*n-1 + i+j)) # NE-SW ordinal for j in rangen] def rowgen(rowuses=rowuses): for j in rangen: uses = rowuses[j] if uses & self.used == 0: self.used |= uses yield j self.used &= ~uses self.rowgenerators.append(rowgen) # Generate solutions. def solve(self): self.used = 0 for row2col in conjoin(self.rowgenerators): yield row2col def printsolution(self, row2col): n = self.n assert n == len(row2col) sep = "+" + "-+" * n print sep for i in range(n): squares = [" " for j in range(n)] squares[row2col[i]] = "Q" print "|" + "|".join(squares) + "|" print sep # A conjoin-based Knight's Tour solver. 
This is pretty sophisticated # (e.g., when used with flat_conjoin above, and passing hard=1 to the # constructor, a 200x200 Knight's Tour was found quickly -- note that we're # creating 10s of thousands of generators then!), and is lengthy. class Knights: def __init__(self, m, n, hard=0): self.m, self.n = m, n # solve() will set up succs[i] to be a list of square #i's # successors. succs = self.succs = [] # Remove i0 from each of its successor's successor lists, i.e. # successors can't go back to i0 again. Return 0 if we can # detect this makes a solution impossible, else return 1. def remove_from_successors(i0, len=len): # If we remove all exits from a free square, we're dead: # even if we move to it next, we can't leave it again. # If we create a square with one exit, we must visit it next; # else somebody else will have to visit it, and since there's # only one adjacent, there won't be a way to leave it again. # Finelly, if we create more than one free square with a # single exit, we can only move to one of them next, leaving # the other one a dead end. ne0 = ne1 = 0 for i in succs[i0]: s = succs[i] s.remove(i0) e = len(s) if e == 0: ne0 += 1 elif e == 1: ne1 += 1 return ne0 == 0 and ne1 < 2 # Put i0 back in each of its successor's successor lists. def add_to_successors(i0): for i in succs[i0]: succs[i].append(i0) # Generate the first move. def first(): if m < 1 or n < 1: return # Since we're looking for a cycle, it doesn't matter where we # start. Starting in a corner makes the 2nd move easy. corner = self.coords2index(0, 0) remove_from_successors(corner) self.lastij = corner yield corner add_to_successors(corner) # Generate the second moves. def second(): corner = self.coords2index(0, 0) assert self.lastij == corner # i.e., we started in the corner if m < 3 or n < 3: return assert len(succs[corner]) == 2 assert self.coords2index(1, 2) in succs[corner] assert self.coords2index(2, 1) in succs[corner] # Only two choices. Whichever we pick, the other must be the # square picked on move m*n, as it's the only way to get back # to (0, 0). Save its index in self.final so that moves before # the last know it must be kept free. for i, j in (1, 2), (2, 1): this = self.coords2index(i, j) final = self.coords2index(3-i, 3-j) self.final = final remove_from_successors(this) succs[final].append(corner) self.lastij = this yield this succs[final].remove(corner) add_to_successors(this) # Generate moves 3 thru m*n-1. def advance(len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, i)] break candidates.append((e, i)) else: candidates.sort() for e, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate moves 3 thru m*n-1. Alternative version using a # stronger (but more expensive) heuristic to order successors. # Since the # of backtracking levels is m*n, a poor move early on # can take eons to undo. Smallest square board for which this # matters a lot is 52x52. def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len): # If some successor has only one exit, must take it. # Else favor successors with fewer exits. # Break ties via max distance from board centerpoint (favor # corners and edges whenever possible). 
candidates = [] for i in succs[self.lastij]: e = len(succs[i]) assert e > 0, "else remove_from_successors() pruning flawed" if e == 1: candidates = [(e, 0, i)] break i1, j1 = self.index2coords(i) d = (i1 - vmid)**2 + (j1 - hmid)**2 candidates.append((e, -d, i)) else: candidates.sort() for e, d, i in candidates: if i != self.final: if remove_from_successors(i): self.lastij = i yield i add_to_successors(i) # Generate the last move. def last(): assert self.final in succs[self.lastij] yield self.final if m*n < 4: self.squaregenerators = [first] else: self.squaregenerators = [first, second] + \ [hard and advance_hard or advance] * (m*n - 3) + \ [last] def coords2index(self, i, j): assert 0 <= i < self.m assert 0 <= j < self.n return i * self.n + j def index2coords(self, index): assert 0 <= index < self.m * self.n return divmod(index, self.n) def _init_board(self): succs = self.succs del succs[:] m, n = self.m, self.n c2i = self.coords2index offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)] rangen = range(n) for i in range(m): for j in rangen: s = [c2i(i+io, j+jo) for io, jo in offsets if 0 <= i+io < m and 0 <= j+jo < n] succs.append(s) # Generate solutions. def solve(self): self._init_board() for x in conjoin(self.squaregenerators): yield x def printsolution(self, x): m, n = self.m, self.n assert len(x) == m*n w = len(str(m*n)) format = "%" + str(w) + "d" squares = [[None] * n for i in range(m)] k = 1 for i in x: i1, j1 = self.index2coords(i) squares[i1][j1] = format % k k += 1 sep = "+" + ("-" * w + "+") * n print sep for i in range(m): row = squares[i] print "|" + "|".join(row) + "|" print sep conjoin_tests = """ Generate the 3-bit binary numbers in order. This illustrates dumbest- possible use of conjoin, just to generate the full cross-product. >>> for c in conjoin([lambda: iter((0, 1))] * 3): ... print c [0, 0, 0] [0, 0, 1] [0, 1, 0] [0, 1, 1] [1, 0, 0] [1, 0, 1] [1, 1, 0] [1, 1, 1] For efficiency in typical backtracking apps, conjoin() yields the same list object each time. So if you want to save away a full account of its generated sequence, you need to copy its results. >>> def gencopy(iterator): ... for x in iterator: ... yield x[:] >>> for n in range(10): ... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n))) ... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n 0 1 True True 1 2 True True 2 4 True True 3 8 True True 4 16 True True 5 32 True True 6 64 True True 7 128 True True 8 256 True True 9 512 True True And run an 8-queens solver. >>> q = Queens(8) >>> LIMIT = 2 >>> count = 0 >>> for row2col in q.solve(): ... count += 1 ... if count <= LIMIT: ... print "Solution", count ... q.printsolution(row2col) Solution 1 +-+-+-+-+-+-+-+-+ |Q| | | | | | | | +-+-+-+-+-+-+-+-+ | | | | |Q| | | | +-+-+-+-+-+-+-+-+ | | | | | | | |Q| +-+-+-+-+-+-+-+-+ | | | | | |Q| | | +-+-+-+-+-+-+-+-+ | | |Q| | | | | | +-+-+-+-+-+-+-+-+ | | | | | | |Q| | +-+-+-+-+-+-+-+-+ | |Q| | | | | | | +-+-+-+-+-+-+-+-+ | | | |Q| | | | | +-+-+-+-+-+-+-+-+ Solution 2 +-+-+-+-+-+-+-+-+ |Q| | | | | | | | +-+-+-+-+-+-+-+-+ | | | | | |Q| | | +-+-+-+-+-+-+-+-+ | | | | | | | |Q| +-+-+-+-+-+-+-+-+ | | |Q| | | | | | +-+-+-+-+-+-+-+-+ | | | | | | |Q| | +-+-+-+-+-+-+-+-+ | | | |Q| | | | | +-+-+-+-+-+-+-+-+ | |Q| | | | | | | +-+-+-+-+-+-+-+-+ | | | | |Q| | | | +-+-+-+-+-+-+-+-+ >>> print count, "solutions in all." 92 solutions in all. And run a Knight's Tour on a 10x10 board. Note that there are about 20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion. 
>>> k = Knights(10, 10) >>> LIMIT = 2 >>> count = 0 >>> for x in k.solve(): ... count += 1 ... if count <= LIMIT: ... print "Solution", count ... k.printsolution(x) ... else: ... break Solution 1 +---+---+---+---+---+---+---+---+---+---+ | 1| 58| 27| 34| 3| 40| 29| 10| 5| 8| +---+---+---+---+---+---+---+---+---+---+ | 26| 35| 2| 57| 28| 33| 4| 7| 30| 11| +---+---+---+---+---+---+---+---+---+---+ | 59|100| 73| 36| 41| 56| 39| 32| 9| 6| +---+---+---+---+---+---+---+---+---+---+ | 74| 25| 60| 55| 72| 37| 42| 49| 12| 31| +---+---+---+---+---+---+---+---+---+---+ | 61| 86| 99| 76| 63| 52| 47| 38| 43| 50| +---+---+---+---+---+---+---+---+---+---+ | 24| 75| 62| 85| 54| 71| 64| 51| 48| 13| +---+---+---+---+---+---+---+---+---+---+ | 87| 98| 91| 80| 77| 84| 53| 46| 65| 44| +---+---+---+---+---+---+---+---+---+---+ | 90| 23| 88| 95| 70| 79| 68| 83| 14| 17| +---+---+---+---+---+---+---+---+---+---+ | 97| 92| 21| 78| 81| 94| 19| 16| 45| 66| +---+---+---+---+---+---+---+---+---+---+ | 22| 89| 96| 93| 20| 69| 82| 67| 18| 15| +---+---+---+---+---+---+---+---+---+---+ Solution 2 +---+---+---+---+---+---+---+---+---+---+ | 1| 58| 27| 34| 3| 40| 29| 10| 5| 8| +---+---+---+---+---+---+---+---+---+---+ | 26| 35| 2| 57| 28| 33| 4| 7| 30| 11| +---+---+---+---+---+---+---+---+---+---+ | 59|100| 73| 36| 41| 56| 39| 32| 9| 6| +---+---+---+---+---+---+---+---+---+---+ | 74| 25| 60| 55| 72| 37| 42| 49| 12| 31| +---+---+---+---+---+---+---+---+---+---+ | 61| 86| 99| 76| 63| 52| 47| 38| 43| 50| +---+---+---+---+---+---+---+---+---+---+ | 24| 75| 62| 85| 54| 71| 64| 51| 48| 13| +---+---+---+---+---+---+---+---+---+---+ | 87| 98| 89| 80| 77| 84| 53| 46| 65| 44| +---+---+---+---+---+---+---+---+---+---+ | 90| 23| 92| 95| 70| 79| 68| 83| 14| 17| +---+---+---+---+---+---+---+---+---+---+ | 97| 88| 21| 78| 81| 94| 19| 16| 45| 66| +---+---+---+---+---+---+---+---+---+---+ | 22| 91| 96| 93| 20| 69| 82| 67| 18| 15| +---+---+---+---+---+---+---+---+---+---+ """ weakref_tests = """\ Generators are weakly referencable: >>> import weakref >>> def gen(): ... yield 'foo!' ... >>> wr = weakref.ref(gen) >>> wr() is gen True >>> p = weakref.proxy(gen) Generator-iterators are weakly referencable as well: >>> gi = gen() >>> wr = weakref.ref(gi) >>> wr() is gi True >>> p = weakref.proxy(gi) >>> list(p) ['foo!'] """ coroutine_tests = """\ Sending a value into a started generator: >>> def f(): ... print (yield 1) ... yield 2 >>> g = f() >>> g.next() 1 >>> g.send(42) 42 2 Sending a value into a new generator produces a TypeError: >>> f().send("foo") Traceback (most recent call last): ... TypeError: can't send non-None value to a just-started generator Yield by itself yields None: >>> def f(): yield >>> list(f()) [None] An obscene abuse of a yield expression within a generator expression: >>> list((yield 21) for i in range(4)) [21, None, 21, None, 21, None, 21, None] And a more sane, but still weird usage: >>> def f(): list(i for i in [(yield 26)]) >>> type(f()) <type 'generator'> A yield expression with augmented assignment. >>> def coroutine(seq): ... count = 0 ... while count < 200: ... count += yield ... seq.append(count) >>> seq = [] >>> c = coroutine(seq) >>> c.next() >>> print seq [] >>> c.send(10) >>> print seq [10] >>> c.send(10) >>> print seq [10, 20] >>> c.send(10) >>> print seq [10, 20, 30] Check some syntax errors for yield expressions: >>> f=lambda: (yield 1),(yield 2) Traceback (most recent call last): ... 
SyntaxError: 'yield' outside function (<doctest test.test_generators.__test__.coroutine[21]>, line 1) >>> def f(): return lambda x=(yield): 1 Traceback (most recent call last): ... SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1) >>> def f(): x = yield = y Traceback (most recent call last): ... SyntaxError: assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[23]>, line 1) >>> def f(): (yield bar) = y Traceback (most recent call last): ... SyntaxError: can't assign to yield expression (<doctest test.test_generators.__test__.coroutine[24]>, line 1) >>> def f(): (yield bar) += y Traceback (most recent call last): ... SyntaxError: augmented assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[25]>, line 1) Now check some throw() conditions: >>> def f(): ... while True: ... try: ... print (yield) ... except ValueError,v: ... print "caught ValueError (%s)" % (v), >>> import sys >>> g = f() >>> g.next() >>> g.throw(ValueError) # type only caught ValueError () >>> g.throw(ValueError("xyz")) # value only caught ValueError (xyz) >>> g.throw(ValueError, ValueError(1)) # value+matching type caught ValueError (1) >>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped caught ValueError (1) >>> g.throw(ValueError, ValueError(1), None) # explicit None traceback caught ValueError (1) >>> g.throw(ValueError(1), "foo") # bad args Traceback (most recent call last): ... TypeError: instance exception may not have a separate value >>> g.throw(ValueError, "foo", 23) # bad args Traceback (most recent call last): ... TypeError: throw() third argument must be a traceback object >>> def throw(g,exc): ... try: ... raise exc ... except: ... g.throw(*sys.exc_info()) >>> throw(g,ValueError) # do it with traceback included caught ValueError () >>> g.send(1) 1 >>> throw(g,TypeError) # terminate the generator Traceback (most recent call last): ... TypeError >>> print g.gi_frame None >>> g.send(2) Traceback (most recent call last): ... StopIteration >>> g.throw(ValueError,6) # throw on closed generator Traceback (most recent call last): ... ValueError: 6 >>> f().throw(ValueError,7) # throw on just-opened generator Traceback (most recent call last): ... ValueError: 7 >>> f().throw("abc") # throw on just-opened generator Traceback (most recent call last): ... abc Now let's try closing a generator: >>> def f(): ... try: yield ... except GeneratorExit: ... print "exiting" >>> g = f() >>> g.next() >>> g.close() exiting >>> g.close() # should be no-op now >>> f().close() # close on just-opened generator should be fine >>> def f(): yield # an even simpler generator >>> f().close() # close before opening >>> g = f() >>> g.next() >>> g.close() # close normally And finalization: >>> def f(): ... try: yield ... finally: ... print "exiting" >>> g = f() >>> g.next() >>> del g exiting Now let's try some ill-behaved generators: >>> def f(): ... try: yield ... except GeneratorExit: ... yield "foo!" >>> g = f() >>> g.next() >>> g.close() Traceback (most recent call last): ... RuntimeError: generator ignored GeneratorExit >>> g.close() Our ill-behaved code should be invoked during GC: >>> import sys, StringIO >>> old, sys.stderr = sys.stderr, StringIO.StringIO() >>> g = f() >>> g.next() >>> del g >>> sys.stderr.getvalue().startswith( ... "Exception exceptions.RuntimeError: 'generator ignored GeneratorExit' in " ... 
) True >>> sys.stderr = old And errors thrown during closing should propagate: >>> def f(): ... try: yield ... except GeneratorExit: ... raise TypeError("fie!") >>> g = f() >>> g.next() >>> g.close() Traceback (most recent call last): ... TypeError: fie! Ensure that various yield expression constructs make their enclosing function a generator: >>> def f(): x += yield >>> type(f()) <type 'generator'> >>> def f(): x = yield >>> type(f()) <type 'generator'> >>> def f(): lambda x=(yield): 1 >>> type(f()) <type 'generator'> >>> def f(): x=(i for i in (yield) if (yield)) >>> type(f()) <type 'generator'> >>> def f(d): d[(yield "a")] = d[(yield "b")] = 27 >>> data = [1,2] >>> g = f(data) >>> type(g) <type 'generator'> >>> g.send(None) 'a' >>> data [1, 2] >>> g.send(0) 'b' >>> data [27, 2] >>> try: g.send(1) ... except StopIteration: pass >>> data [27, 27] """ refleaks_tests = """ Prior to adding cycle-GC support to itertools.tee, this code would leak references. We add it to the standard suite so the routine refleak-tests would trigger if it starts being uncleanable again. >>> import itertools >>> def leak(): ... class gen: ... def __iter__(self): ... return self ... def next(self): ... return self.item ... g = gen() ... head, tail = itertools.tee(g) ... g.item = head ... return head >>> it = leak() Make sure to also test the involvement of the tee-internal teedataobject, which stores returned items. >>> item = it.next() This test leaked at one point due to generator finalization/destruction. It was copied from Lib/test/leakers/test_generator_cycle.py before the file was removed. >>> def leak(): ... def gen(): ... while True: ... yield g ... g = gen() >>> leak() This test isn't really generator related, but rather exception-in-cleanup related. The coroutine tests (above) just happen to cause an exception in the generator's __del__ (tp_del) method. We can also test for this explicitly, without generators. We do have to redirect stderr to avoid printing warnings and to doublecheck that we actually tested what we wanted to test. >>> import sys, StringIO >>> old = sys.stderr >>> try: ... sys.stderr = StringIO.StringIO() ... class Leaker: ... def __del__(self): ... raise RuntimeError ... ... l = Leaker() ... del l ... err = sys.stderr.getvalue().strip() ... err.startswith( ... "Exception exceptions.RuntimeError: RuntimeError() in <" ... ) ... err.endswith("> ignored") ... len(err.splitlines()) ... finally: ... sys.stderr = old True True 1 These refleak tests should perhaps be in a testfile of their own, test_generators just happened to be the test that drew these out. """ __test__ = {"tut": tutorial_tests, "pep": pep_tests, "email": email_tests, "fun": fun_tests, "syntax": syntax_tests, "conjoin": conjoin_tests, "weakref": weakref_tests, "coroutine": coroutine_tests, "refleaks": refleaks_tests, } # Magic test name that regrtest.py invokes *after* importing this module. # This worms around a bootstrap problem. # Note that doctest and regrtest both look in sys.argv for a "-v" argument, # so this works as expected in both ways of running regrtest. def test_main(verbose=None): from test import test_support, test_generators test_support.run_doctest(test_generators, verbose) # This part isn't needed for regrtest, but for running the test directly. if __name__ == "__main__": test_main(1)
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.5/Lib/test/test_generators.py
Python
mit
49,661
[ "VisIt" ]
0fc1cc0937ad13986c8b7a006db31cd81adf8c29e451bab78246d2394b298a13
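The conjoin() driver exercised by the doctests in the record above is defined earlier in test_generators.py and is not shown in this excerpt. As a reading aid, here is a minimal, unoptimized sketch that satisfies the contract those tests describe (a list of argument-free generator factories, with one shared result list mutated in place); the name conjoin_sketch and the closing assertion are illustrative additions, not part of the original file.

def conjoin_sketch(gs):
    # gs: list of no-argument callables, each returning a fresh iterator.
    # The same list object is yielded (and mutated) at every step, which is
    # why the doctests copy it via gencopy() before storing results.
    values = [None] * len(gs)

    def _gen(i):
        if i >= len(gs):
            yield values
        else:
            for values[i] in gs[i]():
                for x in _gen(i + 1):
                    yield x

    return _gen(0)

# Reproduces the first doctest: the 3-bit binary numbers in order.
out = [bits[:] for bits in conjoin_sketch([lambda: iter((0, 1))] * 3)]
assert out[0] == [0, 0, 0] and out[-1] == [1, 1, 1] and len(out) == 8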
# Author: Samuel Ponc\'e # Date: 30/04/2013 -- 11/09/2014 # Version 1.3 # Classes needed for the temperature.py script # Last devel info: Dynamical coding done import numpy as N from numpy import zeros import itertools as Iter from functools import partial import multiprocessing import netCDF4 as nc import sys import os # Variables tol6 = 1E-6 tol8 = 1E-8 Ha2eV = 27.21138386 kb_HaK = 3.1668154267112283e-06 ########### # CLASSES # ########### class system: natom = None ntypat = None nkpt = None kpt = None Kptns = None EIG = None nband = None acell = None occ = None amu = None rprim = N.empty((3,3)) iqpt = None IFC = None filename = None filefullpath = None def __init__(self,directory=None,filename=None): if filename == None:return if directory == None:directory='.' self.filename = filename self.filefullpath = '%s/%s' %(directory,filename) if self.filefullpath[-4:] == '_DDB': self.DDB_file_open(self.filefullpath) if self.filefullpath[-10:] == '_EIGR2D.nc' or self.filefullpath[-10:] == '_EIGI2D.nc': self.EIG2Dnc_file_open(self.filefullpath) if self.filefullpath[-7:] == '_EIGR2D' or self.filefullpath[-7:] == '_EIGI2D': self.EIG2D_file_open(self.filefullpath) if self.filefullpath[-7:] == '_EIG.nc': self.EIG_file_open(self.filefullpath) if self.filefullpath[-4:] == '_EIG': raise Exception('Please provide a netCDF _EIG.nc file!\n\ This is mandatory for good accuracy.' ) if self.filefullpath[-7:] == '_FAN.nc': self.FANnc_file_open(self.filefullpath) if self.filefullpath[-4:] == '_FAN': self.FAN_file_open(self.filefullpath) if self.filefullpath[-6:] == '_EP.nc': self.EP_file_open(self.filefullpath) # Read _EP.nc file def EP_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' %filefullpath) root = nc.Dataset(filefullpath,'r') self.natom = len(root.dimensions['number_of_atoms']) self.nkpt = len(root.dimensions['number_of_kpoints']) self.nband = len(root.dimensions['max_number_of_states']) self.ntemp = len(root.dimensions['number_of_temperature']) self.nsppol = len(root.dimensions['number_of_spins']) self.nbQ = len(root.dimensions['number_of_qpoints']) self.temp = root.variables['temperature'][:] self.occ = root.variables['occupations'][:,:,:] # number_of_spins, number_of_kpoints, max_number_of_states self.kpt = root.variables['reduced_coordinates_of_kpoints'][:,:] self.eigenvalues = root.variables['eigenvalues'][:,:,:] #number_of_spins, number_of_kpoints, max_number_of_states self.rprimd = root.variables['primitive_vectors'][:,:] self.zpm = root.variables['zero_point_motion'][:,:,:,:,:] # nsppol, number_of_temperature, # number_of_kpoints, max_number_of_states, cplex root.close() # Read _EIG.nc file def EIG_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' %filefullpath) root = nc.Dataset(filefullpath,'r') self.EIG = root.variables['Eigenvalues'][:,:] self.Kptns = root.variables['Kptns'][:,:] NBandK = root.variables['NBandK'][:] self.nband = N.int(NBandK[0,0]) root.close() # Open the Fan.nc file and read it def FANnc_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' 
%filefullpath) root = nc.Dataset(filefullpath,'r') self.natom = len(root.dimensions['number_of_atoms']) self.nkpt = len(root.dimensions['number_of_kpoints']) self.nband = len(root.dimensions['max_number_of_states']) self.nsppol = len(root.dimensions['number_of_spins']) self.occ = root.variables['occupations'][:,:,:] # number_of_spins, number_of_kpoints, max_number_of_states FANtmp = root.variables['second_derivative_eigenenergies_actif'][:,:,:,:,:,:,:] #product_mband_nsppol,number_of_atoms, # number_of_cartesian_directions, number_of_atoms, number_of_cartesian_directions, # number_of_kpoints, product_mband_nsppol*2 FANtmp2 = zeros((self.nkpt,2*self.nband,3,self.natom,3,self.natom,self.nband)) FANtmp2 = N.einsum('ijklmno->nomlkji', FANtmp) FANtmp3 = FANtmp2[:, ::2, ...] # Slice the even numbers FANtmp4 = FANtmp2[:, 1::2, ...] # Slice the odd numbers self.FAN = 1j*FANtmp4 self.FAN += FANtmp3 self.eigenvalues = root.variables['eigenvalues'][:,:,:] #number_of_spins, number_of_kpoints, max_number_of_states self.kpt = root.variables['reduced_coordinates_of_kpoints'][:,:] self.iqpt = root.variables['current_q_point'][:] self.wtq = root.variables['current_q_point_weight'][:] self.rprimd = root.variables['primitive_vectors'][:,:] root.close() # Open the EIG2D.nc file and read it def EIG2Dnc_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' %filefullpath) root = nc.Dataset(filefullpath,'r') self.natom = len(root.dimensions['number_of_atoms']) self.nkpt = len(root.dimensions['number_of_kpoints']) self.nband = len(root.dimensions['max_number_of_states']) self.nsppol = len(root.dimensions['number_of_spins']) self.occ = root.variables['occupations'][:,:,:] # number_of_spins, number_of_kpoints, max_number_of_states EIG2Dtmp = root.variables['second_derivative_eigenenergies'][:,:,:,:,:,:,:] #number_of_atoms, # number_of_cartesian_directions, number_of_atoms, number_of_cartesian_directions, # number_of_kpoints, product_mband_nsppol, cplex EIG2Dtmp2 = zeros((self.nkpt,2*self.nband,3,self.natom,3,self.natom,self.nband)) EIG2Dtmp2 = N.einsum('ijklmno->mnlkjio', EIG2Dtmp) self.EIG2D = 1j*EIG2Dtmp2[...,1] self.EIG2D += EIG2Dtmp2[...,0] self.eigenvalues = root.variables['eigenvalues'][:,:,:] #number_of_spins, number_of_kpoints, max_number_of_states self.kpt = root.variables['reduced_coordinates_of_kpoints'][:,:] self.iqpt = root.variables['current_q_point'][:] self.wtq = root.variables['current_q_point_weight'][:] self.rprimd = root.variables['primitive_vectors'][:,:] root.close() # Open the EIG2D file and read it def EIG2D_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' 
%filefullpath) self.EIG2D = None with open(filefullpath,'r') as EIG2D: Flag = 0 Flagocc = False ikpt = 0 iocc = 0 vv = 1 for line in EIG2D: if line.find('natom') > -1: self.natom = N.int(line.split()[1]) if line.find('nkpt') > -1: self.nkpt = N.int(line.split()[1]) self.kpt = N.empty((self.nkpt,3)) if line.find('nband') > -1: self.nband = N.int(line.split()[1]) # Initialize the EIGR2D or EIGI2D matrix (nkpt,nband,3dir,natom,3dir,natom) self.EIG2D = N.zeros((self.nkpt,self.nband,3,self.natom,3,self.natom),dtype=complex) # Initialize the occupation vector self.occ = N.zeros((self.nband)) if line.find('occ ') > -1: line = line.replace('D','E') self.occ[iocc] = N.int(N.float(line.split()[1])) if self.nband > 1: self.occ[iocc+1] = N.int(N.float(line.split()[2])) if self.nband > 2: self.occ[iocc+2] = N.int(N.float(line.split()[3])) if self.nband > 3: Flagocc = True iocc = 3 continue # Go to the next iteration of the for loop if Flagocc: line = line.replace('D','E') vv +=1 if vv < self.nband/3: self.occ[iocc] = N.int(N.float(line.split()[0])) self.occ[iocc+1] = N.int(N.float(line.split()[1])) self.occ[iocc+2] = N.int(N.float(line.split()[2])) iocc += 3 continue # Go to the next iteration of the for loop elif vv == self.nband/3: Flagocc = False if self.nband%3 > 0: if self.nband%3 == 1: self.occ[iocc] = N.int(N.float(line.split()[0])) if self.nband%3 == 2: self.occ[iocc+1] = N.int(N.float(line.split()[1])) # Read the current Q-point if line.find('qpt') > -1: line = line.replace('D','E') tmp = line.split() self.iqpt = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])] # Read the current K-point if line.find('K-point') > -1: line = line.replace('D','E') tmp = line.split() self.kpt[ikpt,:] = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])] ikpt +=1 ibd = 0 continue # Go to the next iteration of the for loop # Read the current Bands if line.find('Band:') > -1: ibd += 1 Flag = 1 continue # Read the EIG2RD or EIGI2D matrix if Flag == 1: line = line.replace('D','E') tmp = line.split() self.EIG2D[ikpt-1,ibd-1,int(tmp[0])-1,int(tmp[1])-1,int(tmp[2])-1,int(tmp[3])-1] = \ complex(float(tmp[4]),float(tmp[5])) # Open the DDB file and read it def DDB_file_open(self,filefullpath): if not (os.path.isfile(filefullpath)): raise Exception('The file "%s" does not exists!' 
%filefullpath) with open(filefullpath,'r') as DDB: Flag = 0 Flag2 = False Flag3 = False ikpt = 0 for line in DDB: if line.find('natom') > -1: self.natom = N.int(line.split()[1]) if line.find('nkpt') > -1: self.nkpt = N.int(line.split()[1]) self.kpt = zeros((self.nkpt,3)) if line.find('ntypat') > -1: self.ntypat = N.int(line.split()[1]) if line.find('nband') > -1: self.nband = N.int(line.split()[1]) if line.find('acell') > -1: line = line.replace('D','E') tmp = line.split() self.acell = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])] if Flag2: line = line.replace('D','E') for ii in N.arange(3,self.ntypat): self.amu[ii] = N.float(line.split()[ii-3]) Flag2 = False if line.find('amu') > -1: line = line.replace('D','E') self.amu = zeros((self.ntypat)) if self.ntypat > 3: for ii in N.arange(3): self.amu[ii] = N.float(line.split()[ii+1]) Flag2 = True else: for ii in N.arange(self.ntypat): self.amu[ii] = N.float(line.split()[ii+1]) if line.find(' kpt ') > -1: line = line.replace('D','E') tmp = line.split() self.kpt[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])] ikpt = 1 continue if ikpt < self.nkpt and ikpt > 0: line = line.replace('D','E') tmp = line.split() self.kpt[ikpt,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])] ikpt += 1 continue if Flag == 2: line = line.replace('D','E') tmp = line.split() self.rprim[2,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])] Flag = 0 if Flag == 1: line = line.replace('D','E') tmp = line.split() self.rprim[1,0:3] = [float(tmp[0]),float(tmp[1]),float(tmp[2])] Flag = 2 if line.find('rprim') > -1: line = line.replace('D','E') tmp = line.split() self.rprim[0,0:3] = [float(tmp[1]),float(tmp[2]),float(tmp[3])] Flag = 1 if Flag3: line = line.replace('D','E') for ii in N.arange(12,self.natom): self.typat[ii] = N.float(line.split()[ii-12]) Flag3 = False if line.find(' typat') > -1: self.typat = zeros((self.natom)) if self.natom > 12: for ii in N.arange(12): self.typat[ii] = N.float(line.split()[ii+1]) Flag3 = True else: for ii in N.arange(self.natom): self.typat[ii] = N.float(line.split()[ii+1]) # Read the actual d2E/dRdR matrix if Flag == 3: line = line.replace('D','E') tmp = line.split() self.IFC[int(tmp[0])-1,int(tmp[1])-1,int(tmp[2])-1,int(tmp[3])-1] = \ complex(float(tmp[4]),float(tmp[5])) # Read the current Q-point if line.find('qpt') > -1: line = line.replace('D','E') tmp = line.split() self.iqpt = [N.float(tmp[1]),N.float(tmp[2]),N.float(tmp[3])] Flag = 3 self.IFC = zeros((3,self.natom,3,self.natom),dtype=complex) ################################################# # Usefull definition to avoid code duplications # ################################################# def compute_dynmat(DDB): # Retrive the amu for each atom amu = zeros(DDB.natom) for ii in N.arange(DDB.natom): jj = DDB.typat[ii] amu[ii] = DDB.amu[jj-1] # Calcul of gprimd from rprimd rprimd = DDB.rprim*DDB.acell gprimd = N.linalg.inv(N.matrix(rprimd)) # Transform from 2nd-order matrix (non-cartesian coordinates, # masses not included, asr not included ) from DDB to # dynamical matrix, in cartesian coordinates, asr not imposed. IFC_cart = zeros((3,DDB.natom,3,DDB.natom),dtype=complex) for ii in N.arange(DDB.natom): for jj in N.arange(DDB.natom): for dir1 in N.arange(3): for dir2 in N.arange(3): for dir3 in N.arange(3): for dir4 in N.arange(3): IFC_cart[dir1,ii,dir2,jj] += gprimd[dir1,dir3]*DDB.IFC[dir3,ii,dir4,jj] \ *gprimd[dir2,dir4] # Reduce the 4 dimensional IFC_cart matrice to 2 dimensional Dynamical matrice. 
ipert1 = 0 Dyn_mat = zeros((3*DDB.natom,3*DDB.natom),dtype=complex) while ipert1 < 3*DDB.natom: for ii in N.arange(DDB.natom): for dir1 in N.arange(3): ipert2 = 0 while ipert2 < 3*DDB.natom: for jj in N.arange(DDB.natom): for dir2 in N.arange(3): Dyn_mat[ipert1,ipert2] = IFC_cart[dir1,ii,dir2,jj]*(5.4857990965007152E-4)/ \ N.sqrt(amu[ii]*amu[jj]) ipert2 += 1 ipert1 += 1 # Hermitianize the dynamical matrix dynmat = N.matrix(Dyn_mat) dynmat = 0.5*(dynmat + dynmat.transpose().conjugate()) # Solve the eigenvalue problem with linear algebra (Diagonalize the matrix) [eigval,eigvect]=N.linalg.eigh(Dyn_mat) # Orthonormality relation ipert = 0 for ii in N.arange(DDB.natom): for dir1 in N.arange(3): eigvect[ipert] = (eigvect[ipert])*N.sqrt(5.4857990965007152E-4/amu[ii]) ipert += 1 kk = 0 for jj in eigval: if jj < 0.0: print "WARNING: An eigenvalue is negative with value: ",jj," ... but proceed with value 0.0" eigval[kk] = 0.0 kk += 1 else: kk += 1 omega = N.sqrt(eigval) #*5.4857990965007152E-4) # print "omega",omega # The acoustic phonon at Gamma should NOT contribute because they should be zero. # Moreover with the translational invariance the ZPM will be 0 anyway for these # modes but the FAN and DDW will have a non physical value. We should therefore # neglect these values. # if N.allclose(DDB.iqpt,[0.0,0.0,0.0]) == True: # omega[0] = 0.0 # omega[1] = 0.0 # omega[2] = 0.0 return omega,eigvect,gprimd # ----------------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------- def get_reduced_displ(natom,eigvect,omega,gprimd): displ_FAN = zeros((3,3),dtype=complex) displ_DDW = zeros((3,3),dtype=complex) displ_red_FAN2 = zeros((3*natom,natom,natom,3,3),dtype=complex) displ_red_DDW2 = zeros((3*natom,natom,natom,3,3),dtype=complex) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) if omega[imode].real > tol6: for iatom1 in N.arange(natom): for iatom2 in N.arange(natom): for idir1 in N.arange(0,3): for idir2 in N.arange(0,3): displ_FAN[idir1,idir2] = eigvect[3*iatom2+idir2,imode].conj()\ *eigvect[3*iatom1+idir1,imode]/(2.0*omega[imode].real) displ_DDW[idir1,idir2] = (eigvect[3*iatom2+idir2,imode].conj()\ *eigvect[3*iatom2+idir1,imode]+eigvect[3*iatom1+idir2,imode].conj()\ *eigvect[3*iatom1+idir1,imode])/(4.0*omega[imode].real) # Now switch to reduced coordinates in 2 steps (more efficient) tmp_displ_FAN = zeros((3,3),dtype=complex) tmp_displ_DDW = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): tmp_displ_FAN[:,idir1] = tmp_displ_FAN[:,idir1]+displ_FAN[:,idir2]*gprimd[idir2,idir1] tmp_displ_DDW[:,idir1] = tmp_displ_DDW[:,idir1]+displ_DDW[:,idir2]*gprimd[idir2,idir1] displ_red_FAN = zeros((3,3),dtype=complex) displ_red_DDW = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): displ_red_FAN[idir1,:] = displ_red_FAN[idir1,:] + tmp_displ_FAN[idir2,:]*gprimd[idir2,idir1] displ_red_DDW[idir1,:] = displ_red_DDW[idir1,:] + tmp_displ_DDW[idir2,:]*gprimd[idir2,idir1] displ_red_FAN2[imode,iatom1,iatom2,:,:] = displ_red_FAN[:,:] displ_red_DDW2[imode,iatom1,iatom2,:,:] = displ_red_DDW[:,:] return displ_red_FAN2,displ_red_DDW2 # ---------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------------- def make_average(nkpt,nband,degen,total_corr,temp=False): if 
temp: for ikpt in N.arange(nkpt): count = 0 iband = 0 while iband < nband: if iband < nband-2: if ((degen[ikpt,iband] == degen[ikpt,iband+1]) and (degen[ikpt,iband] == degen[ikpt,iband+2])): total_corr[:,:,ikpt,iband] = (total_corr[:,:,ikpt,iband]+total_corr[:,:,ikpt,iband+1]+total_corr[:,:,ikpt,iband+2])/3 total_corr[:,:,ikpt,iband+1] = total_corr[:,:,ikpt,iband] total_corr[:,:,ikpt,iband+2] = total_corr[:,:,ikpt,iband] iband += 3 continue if iband < nband-1: if (degen[ikpt,iband] == degen[ikpt,iband+1]): total_corr[:,:,ikpt,iband] = (total_corr[:,:,ikpt,iband]+total_corr[:,:,ikpt,iband+1])/2 total_corr[:,:,ikpt,iband+1]=total_corr[:,:,ikpt,iband] iband +=2 continue iband += 1 else: for ikpt in N.arange(nkpt): count = 0 iband = 0 while iband < nband: if iband < nband-2: if ((degen[ikpt,iband] == degen[ikpt,iband+1]) and (degen[ikpt,iband] == degen[ikpt,iband+2])): total_corr[:,ikpt,iband] = (total_corr[:,ikpt,iband]+total_corr[:,ikpt,iband+1]+\ total_corr[:,ikpt,iband+2])/3 total_corr[:,ikpt,iband+1] = total_corr[:,ikpt,iband] total_corr[:,ikpt,iband+2] = total_corr[:,ikpt,iband] iband += 3 continue if iband < nband-1: if (degen[ikpt,iband] == degen[ikpt,iband+1]): total_corr[:,ikpt,iband] = (total_corr[:,ikpt,iband]+total_corr[:,ikpt,iband+1])/2 total_corr[:,ikpt,iband+1]=total_corr[:,ikpt,iband] iband +=2 continue iband += 1 return total_corr # --------------------------------------------------------------------------------------------------------- # --------------------------------------------------------------------------------------------------------- def get_bose(natom,omega,temp_info): bose = N.array(zeros((3*natom,len(temp_info)))) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) if omega[imode].real > tol6: tt = 0 for T in temp_info: if T < tol6: bose[imode,tt] = 0.0 else: bose[imode,tt] = 1.0/(N.exp(omega[imode].real/(kb_HaK*T))-1) tt += 1 #print bose[:,0] return bose ##################### # Compute temp. 
dep # ##################### # Compute the static ZPR only def static_zpm(arguments,ddw_save,degen): nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files = arguments DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) # Get reduced displacement (scaled with frequency) displ_red_FAN2,displ_red_DDW2 = get_reduced_displ(EIGR2D.natom,eigvect,omega,gprimd) # Einstein sum make the vector matrix multiplication ont the correct indices fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIGR2D.EIG2D,displ_red_FAN2) ddw_corrQ = N.einsum('ijklmn,olnkm->oij',ddw_save,displ_red_DDW2) fan_corr = N.sum(fan_corrQ,axis=0) ddw_corr = N.sum(ddw_corrQ,axis=0) eigen_corr = (fan_corr[:,:]- ddw_corr[:,:])*wtq total_corr[0,:,:] = eigen_corr[:,:] total_corr[1,:,:] = fan_corr[:,:]*wtq total_corr[2,:,:] = ddw_corr[:,:]*wtq total_corr = make_average(EIGR2D.nkpt,EIGR2D.nband,degen,total_corr) return total_corr ############################################################################################################### # Compute the static ZPR with temperature-dependence def static_zpm_temp(arguments,ddw_save,temp_info,degen): sys.stdout.flush() nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files = arguments DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) fan_corr = zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) ddw_corr = zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) bose = get_bose(EIGR2D.natom,omega,temp_info) # Get reduced displacement (scaled with frequency) displ_red_FAN2,displ_red_DDW2 = get_reduced_displ(EIGR2D.natom,eigvect,omega,gprimd) fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIGR2D.EIG2D,displ_red_FAN2) ddw_corrQ = N.einsum('ijklmn,olnkm->oij',ddw_save,displ_red_DDW2) fan_corr = N.einsum('ijk,il->ljk',fan_corrQ,2*bose+1.0) ddw_corr = 
N.einsum('ijk,il->ljk',ddw_corrQ,2*bose+1.0) eigen_corr = (fan_corr[:,:,:]- ddw_corr[:,:,:])*wtq total_corr[0,:,:,:] = eigen_corr[:,:,:] total_corr[1,:,:,:] = fan_corr[:,:,:]*wtq total_corr[2,:,:,:] = ddw_corr[:,:,:]*wtq make_average(EIGR2D.nkpt,EIGR2D.nband,degen,total_corr,temp=True) return total_corr ######################################################################################################### # Compute the dynamical ZPR only def dynamic_zpm(arguments,ddw_save,ddw_save2,type,smearing,eig0,degen): nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files = arguments FANterm = system(directory='.',filename=FAN_files) FAN = FANterm.FAN DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) fan_corr = zeros((EIGR2D.nkpt,EIGR2D.nband),dtype=complex) ddw_corr = zeros((EIGR2D.nkpt,EIGR2D.nband),dtype=complex) fan_add = zeros((EIGR2D.nkpt,EIGR2D.nband),dtype=complex) ddw_add = zeros((EIGR2D.nkpt,EIGR2D.nband),dtype=complex) # Get reduced displacement (scaled with frequency) displ_red_FAN2,displ_red_DDW2 = get_reduced_displ(EIGR2D.natom,eigvect,omega,gprimd) # Einstein sum make the vector matrix multiplication ont the correct indices # fan_corrQ and ddw_corrQ contains the ZPR on stern space. fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIGR2D.EIG2D,displ_red_FAN2) ddw_corrQ = N.einsum('ijklmn,olnkm->oij',ddw_save,displ_red_DDW2) fan_corr = N.sum(fan_corrQ,axis=0) ddw_corr = N.sum(ddw_corrQ,axis=0) print "Now compute active space ..." 
# Now computa active space fan_addQ = N.einsum('ijklmno,plnkm->ijop',FAN,displ_red_FAN2) ddw_addQ = N.einsum('ijklmno,plnkm->ijop',ddw_save2,displ_red_DDW2) if type == 3: fan_tmp = N.sum(fan_addQ,axis=3) ddw_tmp = N.sum(ddw_addQ,axis=3) delta_E = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eigq.EIG[0,:,:].real,N.ones(EIGR2D.nband)) # ikpt,iband,jband delta_E_ddw = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eig0[0,:,:].real,N.ones(EIGR2D.nband)) div = delta_E/(delta_E**2 +smearing**2) # ikpt,iband,jband fan_add = N.einsum('ijk,ijk->ij',fan_tmp,div) #(ikpt,iband,jband,imode),(ikpt,iband,jband)->ikpt,iband div = delta_E_ddw/(delta_E_ddw**2 +smearing**2) # ikpt,iband,jband ddw_add = N.einsum('ijk,ijk->ij',ddw_tmp,div) #(ikpt,iband,jband),(ikpt,iband,jband)->ikpt,iband if type == 2: ddw_tmp = N.sum(ddw_addQ,axis=3) occtmp = EIGR2D.occ[0,0,:]/2 # jband delta_E = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eigq.EIG[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ijk',N.ones((EIGR2D.nkpt,EIGR2D.nband)),(2*occtmp-1))*smearing*1j # ikpt,iband,jband delta_E_ddw = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ijk',N.ones((EIGR2D.nkpt,EIGR2D.nband)),(2*occtmp-1))*smearing*1j ddw_add = N.einsum('ijk,ijk->ij',ddw_tmp,1.0/delta_E_ddw) omegatmp = omega[:].real # imode num1 = 1.0-occtmp # jband deno1 = N.einsum('ijk,l->ijkl',delta_E,N.ones(3*EIGR2D.natom)) \ - N.einsum('ijk,l->ijkl',N.ones((EIGR2D.nkpt,EIGR2D.nband,EIGR2D.nband)),omegatmp) #ikpt,iband,jband,imode div1 = N.einsum('i,jkil->lijk',num1,1.0/deno1) # (jband)/(ikpt,iband,jband,imode) ==> imode,jband,ikpt,iband deno2 = N.einsum('ijk,l->ijkl',delta_E,N.ones(3*EIGR2D.natom)) \ + N.einsum('ijk,l->ijkl',N.ones((EIGR2D.nkpt,EIGR2D.nband,EIGR2D.nband)),omegatmp) #ikpt,iband,jband,imode div2 = N.einsum('i,jkil->lijk',occtmp,1.0/deno2) # (jband)/(ikpt,iband,jband,imode) ==> imode,jband,ikpt,iband fan_add = N.einsum('ijkl,lkij->ij',fan_addQ,div1+div2) # ikpt,iband,jband,imod # if type == 3: # fan_tmp = N.sum(fan_addQ,axis=3) # ddw_tmp = N.sum(ddw_addQ,axis=3) # if type == 2: # ddw_tmp = N.sum(ddw_addQ,axis=3) # # for ikpt in N.arange(EIGR2D.nkpt): # for iband in N.arange(EIGR2D.nband): # for jband in N.arange(EIGR2D.nband): # if type == 3: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real # fan_add[ikpt,iband] += fan_tmp[ikpt,iband,jband]*(delta_E/(delta_E**2+smearing**2)) # ddw_add[ikpt,iband] += ddw_tmp[ikpt,iband,jband]*(delta_E_ddw/(delta_E_ddw**2+smearing**2)) # if type == 2: # occtmp = EIGR2D.occ[0,0,jband]/2 # electronic occ should be 1 # if occtmp > tol6: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real - smearing*1j # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real - smearing*1j # else: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real + smearing*1j # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real + smearing*1j # # DW is not affected by the dynamical equations # ddw_add[ikpt,iband] += ddw_tmp[ikpt,iband,jband]*(1.0/delta_E_ddw) # for imode in N.arange(3*EIGR2D.natom): # omegatmp = omega[imode].real # fan_add[ikpt,iband] += fan_addQ[ikpt,iband,jband,imode]*(\ # (1.0-occtmp)/(delta_E-omegatmp) + (occtmp)/(delta_E+omegatmp)) # Correction from active space fan_corr += fan_add ddw_corr += ddw_add 
eigen_corr = (fan_corr[:,:] - ddw_corr[:,:])*wtq total_corr[0,:,:] = eigen_corr[:,:] total_corr[1,:,:] = fan_corr[:,:]*wtq total_corr[2,:,:] = ddw_corr[:,:]*wtq total_corr = make_average(EIGR2D.nkpt,EIGR2D.nband,degen,total_corr) return total_corr ######################################################################################################### # Compute the dynamical ZPR with temperature dependence def dynamic_zpm_temp(arguments,ddw_save,ddw_save2,type,temp_info,smearing,eig0,degen): nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files = arguments FANterm = system(directory='.',filename=FAN_files) FAN = FANterm.FAN DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) # Compute the displacement = eigenvectors of the DDB. # Due to metric problem in reduce coordinate we have to work in cartesian # but then go back to reduce because our EIGR2D matrix elements are in reduced coord. fan_corr = zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) ddw_corr = zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) fan_add = N.array(zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex)) ddw_add = N.array(zeros((len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex)) bose = get_bose(EIGR2D.natom,omega,temp_info) # Get reduced displacement (scaled with frequency) displ_red_FAN2,displ_red_DDW2 = get_reduced_displ(EIGR2D.natom,eigvect,omega,gprimd) # Einstein sum make the vector matrix multiplication ont the correct indices fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIGR2D.EIG2D,displ_red_FAN2) ddw_corrQ = N.einsum('ijklmn,olnkm->oij',ddw_save,displ_red_DDW2) fan_corr = N.einsum('ijk,il->ljk',fan_corrQ,2*bose+1.0) ddw_corr = N.einsum('ijk,il->ljk',ddw_corrQ,2*bose+1.0) print "Now compute active space ..." 
# Now compute active space fan_addQ = N.einsum('ijklmno,plnkm->ijop',FAN,displ_red_FAN2) ddw_addQ = N.einsum('ijklmno,plnkm->ijop',ddw_save2,displ_red_DDW2) if type == 2: occtmp = EIGR2D.occ[0,0,:]/2 # jband delta_E_ddw = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ijk',N.ones((EIGR2D.nkpt,EIGR2D.nband)),(2*occtmp-1))*smearing*1j tmp = N.einsum('ijkl,lm->mijk',ddw_addQ,2*bose+1.0) # tmp,ikpt,iband,jband ddw_add = N.einsum('ijkl,jkl->ijk',tmp,1.0/delta_E_ddw) delta_E = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eigq.EIG[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ijk',N.ones((EIGR2D.nkpt,EIGR2D.nband)),(2*occtmp-1))*smearing*1j # ikpt,iband,jband omegatmp = omega[:].real # imode num1 = N.einsum('ij,k->ijk',bose,N.ones(EIGR2D.nband)) +1.0 \ - N.einsum('ij,k->ijk',N.ones((3*EIGR2D.natom,len(temp_info))),occtmp) #imode,tmp,jband deno1 = N.einsum('ijk,l->ijkl',delta_E,N.ones(3*EIGR2D.natom)) \ - N.einsum('ijk,l->ijkl',N.ones((EIGR2D.nkpt,EIGR2D.nband,EIGR2D.nband)),omegatmp) #ikpt,iband,jband,imode div1 = N.einsum('ijk,lmki->ijklm',num1,1.0/deno1) # (imode,tmp,jband)/(ikpt,iband,jband,imode) ==> imode,tmp,jband,ikpt,iband num2 = N.einsum('ij,k->ijk',bose,N.ones(EIGR2D.nband)) \ + N.einsum('ij,k->ijk',N.ones((3*EIGR2D.natom,len(temp_info))),occtmp) #imode,tmp,jband deno2 = N.einsum('ijk,l->ijkl',delta_E,N.ones(3*EIGR2D.natom)) \ + N.einsum('ijk,l->ijkl',N.ones((EIGR2D.nkpt,EIGR2D.nband,EIGR2D.nband)),omegatmp) #ikpt,iband,jband,imode div2 = N.einsum('ijk,lmki->ijklm',num2,1.0/deno2) # (imode,tmp,jband)/(ikpt,iband,jband,imode) ==> imode,tmp,jband,ikpt,iband fan_add = N.einsum('ijkl,lmkij->mij',fan_addQ,div1+div2) # ikpt,iband,jband,imode if type ==3: delta_E = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eigq.EIG[0,:,:].real,N.ones(EIGR2D.nband)) # ikpt,iband,jband delta_E_ddw = N.einsum('ij,k->ijk',eig0[0,:,:].real,N.ones(EIGR2D.nband)) - \ N.einsum('ij,k->ikj',eig0[0,:,:].real,N.ones(EIGR2D.nband)) num = N.einsum('ij,klm->ijklm',2*bose+1.0,delta_E) # imode,tmp,ikpt,iband,jband deno = delta_E**2 +smearing**2 # ikpt,iband,jband div = N.einsum('ijklm,klm->ijklm',num,1.0/deno) # imode,tmp,ikpt,iband,jband fan_add = N.einsum('ijkl,lmijk->mij',fan_addQ,div) #(ikpt,iband,jband,imode),(imode,tmp,ikpt,iband,jband)->tmp,ikpt,iband num = N.einsum('ij,klm->ijklm',2*bose+1.0,delta_E_ddw) # imode,tmp,ikpt,iband,jband deno = delta_E_ddw**2 +smearing**2 # ikpt,iband,jband div = N.einsum('ijklm,klm->ijklm',num,1.0/deno) ddw_add = N.einsum('ijkl,lmijk->mij',ddw_addQ,div) #(ikpt,iband,jband,imode),(imode,tmp,ikpt,iband,jband)->tmp,ikpt,iband # The code above corresponds to the following loops: # for ikpt in N.arange(EIGR2D.nkpt): # for iband in N.arange(EIGR2D.nband): # for jband in N.arange(EIGR2D.nband): # if type == 2: # occtmp = EIGR2D.occ[0,0,jband]/2 # electronic occ should be 1 # if occtmp > tol6: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real - smearing*1j # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real - smearing*1j # else: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real + smearing*1j # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real + smearing*1j # for imode in N.arange(3*EIGR2D.natom): # # DW is not affected by the dynamical equations # ddw_add[:,ikpt,iband] += ddw_addQ[ikpt,iband,jband,imode]*(2*bose[imode,:]+1.0)\ # *(1.0/delta_E_ddw) # omegatmp = 
omega[imode].real # fan_add[:,ikpt,iband] += fan_addQ[ikpt,iband,jband,imode]*(\ # (bose[imode,:]+1.0-occtmp)/(delta_E-omegatmp) \ # + (bose[imode,:]+occtmp)/(delta_E+omegatmp)) # if type == 3: # delta_E = eig0[0,ikpt,iband].real-eigq.EIG[0,ikpt,jband].real # delta_E_ddw = eig0[0,ikpt,iband].real-eig0[0,ikpt,jband].real # for imode in N.arange(3*EIGR2D.natom): # fan_add[:,ikpt,iband] += fan_addQ[ikpt,iband,jband,imode]*(2*bose[imode,:]+1.0)\ # *(delta_E/(delta_E**2+smearing**2)) # ddw_add[:,ikpt,iband] += ddw_addQ[ikpt,iband,jband,imode]*(2*bose[imode,:]+1.0)\ # *(delta_E_ddw/(delta_E_ddw**2+smearing**2)) # fan_corr += fan_add ddw_corr += ddw_add eigen_corr = (fan_corr[:,:,:] - ddw_corr[:,:,:])*wtq total_corr[0,:,:,:] = eigen_corr[:,:,:] total_corr[1,:,:,:] = fan_corr[:,:,:]*wtq total_corr[2,:,:,:] = ddw_corr[:,:,:]*wtq total_corr = make_average(EIGR2D.nkpt,EIGR2D.nband,degen,total_corr,temp=True) return total_corr ######################################################################################################### ############ # LIFETIME # ############ ######################################################################################################## # Compute the static ZPR only with lifetime def static_zpm_lifetime(arguments,degen): nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files = arguments DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) # For efficiency it is beter not to call a function EIG2D = EIGR2D.EIG2D nkpt = EIGR2D.nkpt nband = EIGR2D.nband natom = EIGR2D.natom # Compute the displacement = eigenvectors of the DDB. # Due to metric problem in reduce coordinate we have to work in cartesian # but then go back to reduce because our EIGR2D matrix elements are in reduced coord. 
displ_FAN = zeros((3,3),dtype=complex) broadening = zeros((nkpt,nband),dtype=complex) displ_red_FAN2 = zeros((3*natom,natom,natom,3,3),dtype=complex) displ_red_DDW2 = zeros((3*natom,natom,natom,3,3),dtype=complex) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) if omega[imode].real > tol6: for iatom1 in N.arange(natom): for iatom2 in N.arange(natom): for idir1 in N.arange(0,3): for idir2 in N.arange(0,3): displ_FAN[idir1,idir2] = eigvect[3*iatom2+idir2,imode].conj()\ *eigvect[3*iatom1+idir1,imode]/(2.0*omega[imode].real) # Now switch to reduced coordinates in 2 steps (more efficient) tmp_displ_FAN = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): tmp_displ_FAN[:,idir1] = tmp_displ_FAN[:,idir1]+displ_FAN[:,idir2]*gprimd[idir2,idir1] displ_red_FAN = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): displ_red_FAN[idir1,:] = displ_red_FAN[idir1,:] + tmp_displ_FAN[idir2,:]*gprimd[idir2,idir1] displ_red_FAN2[imode,iatom1,iatom2,:,:] = displ_red_FAN[:,:] # Einstein sum make the vector matrix multiplication ont the correct indices fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIG2D,displ_red_FAN2) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) broadening[:,:] += N.pi*fan_corrQ[imode,:,:] broadening = broadening*wtq if N.any(broadening[:,:].imag > 1E-12): print "WARNING: The real part of the broadening is non zero." print broadening for ikpt in N.arange(nkpt): count = 0 iband = 0 while iband < nband: if iband < nband-2: if ((degen[ikpt,iband] == degen[ikpt,iband+1]) and (degen[ikpt,iband] == degen[ikpt,iband+2])): broadening[ikpt,iband] = (broadening[ikpt,iband]+broadening[ikpt,iband+1]+broadening[ikpt,iband+2])/3 broadening[ikpt,iband+1] = broadening[ikpt,iband] broadening[ikpt,iband+2] = broadening[ikpt,iband] iband += 3 continue if iband < nband-1: if (degen[ikpt,iband] == degen[ikpt,iband+1]): broadening[ikpt,iband] = (broadening[ikpt,iband]+broadening[ikpt,iband+1])/2 broadening[ikpt,iband+1]= broadening[ikpt,iband] iband +=2 continue iband += 1 return broadening ############################################################################################################### # Compute the static ZPR with temperature-dependence with lifetime def static_zpm_temp_lifetime(arguments,ddw_save,temp_info,degen): nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files = arguments DDB = system(directory='.',filename=DDB_files) EIGR2D = system(directory='.',filename=EIGR2D_files) total_corr = zeros((3,len(temp_info),EIGR2D.nkpt,EIGR2D.nband),dtype=complex) eigq = system(directory='.',filename=eigq_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] # Current Q-point calculated print "Q-point: ",nbqpt," with wtq =",wtq," and reduced coord.",EIGR2D.iqpt current = multiprocessing.current_process() file_name = str('PYLOG_')+str(current.pid) if os.path.isfile(file_name) : with open(file_name,'a') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") else: with open(file_name,'w') as F: F.write("Q-point: "+str(nbqpt)+" with wtq ="+str(wtq)+" and reduced coord."+str(EIGR2D.iqpt)+"\n") # Find phonon freq and eigendisplacement from _DDB omega,eigvect,gprimd=compute_dynmat(DDB) # For efficiency it is beter not to call a function EIG2D = EIGR2D.EIG2D nkpt = EIGR2D.nkpt nband = EIGR2D.nband natom = EIGR2D.natom # Compute the displacement = eigenvectors of the DDB. 
# Due to metric problem in reduce coordinate we have to work in cartesian # but then go back to reduce because our EIGR2D matrix elements are in reduced coord. displ_FAN = zeros((3,3),dtype=complex) displ_red_FAN2 = zeros((3*natom,natom,natom,3,3),dtype=complex) broadening = zeros((len(temp_info),nkpt,nband),dtype=complex) bose = get_bose(EIGR2D.natom,omega,temp_info) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) if omega[imode].real > tol6: for iatom1 in N.arange(natom): for iatom2 in N.arange(natom): for idir1 in N.arange(0,3): for idir2 in N.arange(0,3): displ_FAN[idir1,idir2] = eigvect[3*iatom2+idir2,imode].conj()\ *eigvect[3*iatom1+idir1,imode]/(2.0*omega[imode].real) # Now switch to reduced coordinates in 2 steps (more efficient) tmp_displ_FAN = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): tmp_displ_FAN[:,idir1] = tmp_displ_FAN[:,idir1]+displ_FAN[:,idir2]*gprimd[idir2,idir1] displ_red_FAN = zeros((3,3),dtype=complex) for idir1 in N.arange(3): for idir2 in N.arange(3): displ_red_FAN[idir1,:] = displ_red_FAN[idir1,:] + tmp_displ_FAN[idir2,:]*gprimd[idir2,idir1] displ_red_FAN2[imode,iatom1,iatom2,:,:] = displ_red_FAN[:,:] fan_corrQ = N.einsum('ijklmn,olnkm->oij',EIG2D,displ_red_FAN2) for imode in N.arange(3*natom): #Loop on perturbation (6 for 2 atoms) tt = 0 for T in temp_info: broadening[tt,:,:] += N.pi*fan_corrQ[imode,:,:]*(2*bose[imode,tt]+1.0) tt += 1 broadening = broadening*wtq for ikpt in N.arange(nkpt): count = 0 iband = 0 while iband < nband: if iband < nband-2: if ((degen[ikpt,iband] == degen[ikpt,iband+1]) and (degen[ikpt,iband] == degen[ikpt,iband+2])): broadening[:,ikpt,iband] = (broadening[:,ikpt,iband]+broadening[:,ikpt,iband+1]+broadening[:,ikpt,iband+2])/3 broadening[:,ikpt,iband+1] = broadening[:,ikpt,iband] broadening[:,ikpt,iband+2] = broadening[:,ikpt,iband] iband += 3 continue if iband < nband-1: if (degen[ikpt,iband] == degen[ikpt,iband+1]): broadening[:,ikpt,iband] = (broadening[:,ikpt,iband]+broadening[:,ikpt,iband+1])/2 broadening[:,ikpt,iband+1] = broadening[:,ikpt,iband] iband +=2 continue iband += 1 return broadening ######################################################################################################### # Compute total weigth def compute_wtq(arguments,type): if type ==1: nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files = arguments if type == 2 or type == 3: nbqpt,wtq,eigq_files,DDB_files,EIGR2D_files,FAN_files = arguments EIGR2D = system(directory='.',filename=EIGR2D_files) # If the calculation is on a Homogenous q-point mesh # retreve the weight of the q-point if (wtq == 0): wtq = EIGR2D.wtq wtq = wtq[0] return wtq class zpm: total_corr = None def __init__(self,arguments,ddw_save,ddw_save2,nb_cpus,type,temperature,temp_info,smearing,eig0,degen,lifetime): # Parallelize the work over cpus pool = multiprocessing.Pool(processes=nb_cpus) if not lifetime: partial_compute_wtq = partial(compute_wtq,type=type) total = pool.map(partial_compute_wtq,arguments) self.total_wtq = sum(total) # TYPE 1 if (type == 1 and not temperature): if lifetime: partial_static_zpm_lifetime = partial(static_zpm_lifetime,degen=degen) total = pool.map(partial_static_zpm_lifetime,arguments) self.broadening = sum(total) else: partial_static_zpm = partial(static_zpm,ddw_save=ddw_save,degen=degen) total = pool.map(partial_static_zpm,arguments) self.total_corr = sum(total) if (type == 1 and temperature): if lifetime: partial_static_zpm_temp_lifetime = 
partial(static_zpm_temp_lifetime,ddw_save=ddw_save,temp_info=temp_info,degen=degen) total = pool.map(partial_static_zpm_temp_lifetime,arguments) self.broadening = sum(total) else: partial_static_zpm_temp = partial(static_zpm_temp,ddw_save=ddw_save,temp_info=temp_info,degen=degen) total = pool.map(partial_static_zpm_temp,arguments) self.total_corr = sum(total) # TYPE 2 if (type == 2 and not temperature): if lifetime: partial_static_zpm_lifetime = partial(static_zpm_lifetime,degen=degen) total = pool.map(partial_static_zpm_lifetime,arguments) self.broadening = sum(total) else: partial_dynamic_zpm = partial(dynamic_zpm,ddw_save=ddw_save,ddw_save2=ddw_save2,type=type,smearing=smearing,eig0=eig0,degen=degen) total = pool.map(partial_dynamic_zpm,arguments) self.total_corr = sum(total) if (type == 2 and temperature): if lifetime: partial_static_zpm_temp_lifetime = partial(static_zpm_temp_lifetime,ddw_save=ddw_save,temp_info=temp_info,degen=degen) total = pool.map(partial_static_zpm_temp_lifetime,arguments) self.broadening = sum(total) else: partial_dynamic_zpm_temp = partial(dynamic_zpm_temp,ddw_save=ddw_save,ddw_save2=ddw_save2,type=type,temp_info=temp_info,\ smearing=smearing,eig0=eig0,degen=degen) total = pool.map(partial_dynamic_zpm_temp,arguments) self.total_corr = sum(total) # TYPE 3 if (type == 3 and not temperature): if lifetime: partial_static_zpm_lifetime = partial(static_zpm_lifetime,degen=degen) total = pool.map(partial_static_zpm_lifetime,arguments) self.broadening = sum(total) else: partial_dynamic_zpm = partial(dynamic_zpm,ddw_save=ddw_save,ddw_save2=ddw_save2,type=type,smearing=smearing,eig0=eig0,degen=degen) total = pool.map(partial_dynamic_zpm,arguments) self.total_corr = sum(total) if (type == 3 and temperature): if lifetime: partial_static_zpm_temp_lifetime = partial(static_zpm_temp_lifetime,ddw_save=ddw_save,temp_info=temp_info,degen=degen) total = pool.map(partial_static_zpm_temp_lifetime,arguments) self.broadening = sum(total) else: partial_dynamic_zpm_temp = partial(dynamic_zpm_temp,ddw_save=ddw_save,ddw_save2=ddw_save2,type=type,temp_info=temp_info,\ smearing=smearing,eig0=eig0,degen=degen) total = pool.map(partial_dynamic_zpm_temp,arguments) self.total_corr = sum(total) pool.close() pool.join()
jmbeuken/abinit
scripts/deprecated/rf_mods.py
Python
gpl-3.0
50,782
[ "NetCDF" ]
1f15c2eb92b00108328e2feafbc80c2c7c6b5ca1ea5e757dbf62e1f2787a8d33
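rf_mods.py above is a support library for the (also deprecated) temperature.py driver and ships no usage example of its own. The short sketch below shows how its lowest-level pieces fit together for a single q-point, assuming the module is importable as rf_mods and that an Abinit second-derivative database file whose name ends in '_DDB' is present; the filename 'calc_DDB' and the temperature list are placeholders, and the module itself is Python 2 code.

from rf_mods import system, compute_dynmat, get_bose

ddb = system(directory='.', filename='calc_DDB')  # '_DDB' suffix dispatches to DDB_file_open()
omega, eigvect, gprimd = compute_dynmat(ddb)      # phonon frequencies (Ha) and eigendisplacements
temps = [0.0, 100.0, 300.0]                       # temperatures in Kelvin
bose = get_bose(ddb.natom, omega, temps)          # Bose-Einstein factors, shape (3*natom, len(temps))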
#!/usr/bin/env python
from optparse import OptionParser

from ase.io import filetype
from ase.io.aff import print_aff_info
from ase.io.pickletrajectory import print_trajectory_info
from ase.io.bundletrajectory import print_bundletrajectory_info

description = 'Print summary of information from trajectory files.'


def main():
    p = OptionParser(usage='%prog file.traj [file2.traj ...]',
                     description=description)
    opts, args = p.parse_args()
    if len(args) == 0:
        p.error('Incorrect number of arguments')
    for f in args:
        ft = filetype(f)
        print("File '{0}' appears to be of type '{1}'".format(f, ft))
        if ft == 'traj':
            print_aff_info(f)
        elif ft == 'trj':
            print_trajectory_info(f)
        elif ft == 'bundle':
            print_bundletrajectory_info(f)
        else:
            p.error('%s is of type %s; cannot print info about this type of file'
                    % (f, ft))

misdoro/python-ase
ase/cli/info.py
Python
gpl-2.0
957
[ "ASE" ]
dd7002767c77ccd2fe3256c5d3b36887fc0a9bf245202523d23ebc2652183383
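The ase/cli/info.py record above defines only a main() entry point built on OptionParser, so it reads its arguments from sys.argv. Below is a minimal sketch of driving it programmatically, assuming the module is importable from the path given in the record; 'md.traj' is a placeholder trajectory filename, not part of the original file.

import sys
from ase.cli.info import main

sys.argv = ['ase-info', 'md.traj']  # placeholder trajectory file
main()                              # prints the detected file type, then a per-format summary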
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting field 'EphysConceptMap.added_by_old' db.delete_column('neuroelectro_ephysconceptmap', 'added_by_old') # Deleting field 'NeuronEphysDataMap.added_by_old' db.delete_column('neuroelectro_neuronephysdatamap', 'added_by_old') # Deleting field 'NeuronConceptMap.added_by_old' db.delete_column('neuroelectro_neuronconceptmap', 'added_by_old') def backwards(self, orm): # Adding field 'EphysConceptMap.added_by_old' db.add_column('neuroelectro_ephysconceptmap', 'added_by_old', self.gf('django.db.models.fields.CharField')(default='robot', max_length=200), keep_default=False) # Adding field 'NeuronEphysDataMap.added_by_old' db.add_column('neuroelectro_neuronephysdatamap', 'added_by_old', self.gf('django.db.models.fields.CharField')(default='robot', max_length=200), keep_default=False) # Adding field 'NeuronConceptMap.added_by_old' db.add_column('neuroelectro_neuronconceptmap', 'added_by_old', self.gf('django.db.models.fields.CharField')(default='robot', max_length=200), keep_default=False) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 
'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'neuroelectro.article': { 'Meta': {'object_name': 'Article'}, 'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}), 'author_list_str': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}), 'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Author']", 'null': 'True', 'symmetrical': 'False'}), 'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}), 'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': "orm['neuroelectro.MetaData']", 'null': 'True', 'symmetrical': 'False'}), 'pmid': ('django.db.models.fields.IntegerField', [], {}), 'pub_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}), 'suggester': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['neuroelectro.User']", 'null': 'True'}), 'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.articlefulltext': { 'Meta': {'object_name': 'ArticleFullText'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}), 'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'neuroelectro.articlesummary': { 'Meta': {'object_name': 'ArticleSummary'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}), 'data': ('django.db.models.fields.TextField', [], {'default': "''"}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'neuroelectro.author': { 'Meta': {'object_name': 'Author'}, 'first': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'initials': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'last': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'middle': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}) }, 'neuroelectro.brainregion': { 'Meta': {'object_name': 'BrainRegion'}, 'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}), 'color': ('django.db.models.fields.CharField', [], 
{'max_length': '10', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'neuroelectro.datasource': { 'Meta': {'object_name': 'DataSource'}, 'data_table': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataTable']", 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user_submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.UserSubmission']", 'null': 'True'}), 'user_upload': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.UserUpload']", 'null': 'True'}) }, 'neuroelectro.datatable': { 'Meta': {'object_name': 'DataTable'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}), 'needs_expert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}) }, 'neuroelectro.ephysconceptmap': { 'Meta': {'object_name': 'EphysConceptMap'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}), 'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}), 'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'neuroelectro.ephysprop': { 'Meta': {'object_name': 'EphysProp'}, 'definition': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.EphysPropSyn']", 'symmetrical': 'False'}), 'units': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Unit']", 'null': 'True'}) }, 'neuroelectro.ephyspropsummary': { 'Meta': {'object_name': 'EphysPropSummary'}, 'data': ('django.db.models.fields.TextField', [], {'default': "''"}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'ephys_prop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 
'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'num_neurons': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'value_mean_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'value_mean_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'value_sd_articles': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'value_sd_neurons': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'neuroelectro.ephyspropsyn': { 'Meta': {'object_name': 'EphysPropSyn'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'neuroelectro.insituexpt': { 'Meta': {'object_name': 'InSituExpt'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imageseriesid': ('django.db.models.fields.IntegerField', [], {}), 'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}), 'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'neuroelectro.institution': { 'Meta': {'object_name': 'Institution'}, 'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}) }, 'neuroelectro.journal': { 'Meta': {'object_name': 'Journal'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'short_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.mailinglistentry': { 'Meta': {'object_name': 'MailingListEntry'}, 'comments': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}) }, 'neuroelectro.meshterm': { 'Meta': {'object_name': 'MeshTerm'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.metadata': { 'Meta': {'object_name': 'MetaData'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'neuroelectro.neuron': { 'Meta': {'object_name': 'Neuron'}, 'added_by': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'}) }, 'neuroelectro.neuronarticlemap': { 'Meta': {'object_name': 'NeuronArticleMap'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}), 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']", 'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}), 'num_mentions': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'neuroelectro.neuronconceptmap': { 'Meta': {'object_name': 'NeuronConceptMap'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}), 'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}), 'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, 'neuroelectro.neuronephysdatamap': { 'Meta': {'object_name': 'NeuronEphysDataMap'}, 'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']", 'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'dt_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}), 'ephys_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysConceptMap']"}), 'err': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'match_quality': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'metadata': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MetaData']", 'symmetrical': 'False'}), 'n': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'neuron_concept_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.NeuronConceptMap']"}), 'ref_text': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.DataSource']"}), 'times_validated': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'val': ('django.db.models.fields.FloatField', [], {}), 'val_norm': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'neuroelectro.neuronephyssummary': { 'Meta': {'object_name': 'NeuronEphysSummary'}, 'data': ('django.db.models.fields.TextField', [], {'default': "''"}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'ephys_prop': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.EphysProp']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}), 'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'value_mean': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'value_sd': ('django.db.models.fields.FloatField', [], {'null': 'True'}) }, 'neuroelectro.neuronsummary': { 'Meta': {'object_name': 'NeuronSummary'}, 'cluster_xval': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'cluster_yval': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'default': "''"}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'neuron': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Neuron']"}), 'num_articles': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'num_ephysprops': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'num_nedms': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'neuroelectro.neuronsyn': { 'Meta': {'object_name': 'NeuronSyn'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.protein': { 'Meta': {'object_name': 'Protein'}, 'allenid': ('django.db.models.fields.IntegerField', [], {}), 'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}), 'entrezid': ('django.db.models.fields.IntegerField', [], {}), 'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}), 'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'}) }, 'neuroelectro.proteinsyn': { 'Meta': {'object_name': 'ProteinSyn'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.regionexpr': { 'Meta': {'object_name': 'RegionExpr'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}), 'val': ('django.db.models.fields.FloatField', [], {}) }, 'neuroelectro.species': { 'Meta': {'object_name': 'Species'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.substance': { 'Meta': {'object_name': 'Substance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.unit': { 'Meta': {'object_name': 'Unit'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'prefix': ('django.db.models.fields.CharField', [], {'max_length': '1'}) }, 'neuroelectro.user': { 'Meta': {'object_name': 'User', '_ormbases': ['auth.User']}, 'assigned_neurons': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Neuron']", 'null': 'True', 'symmetrical': 'False'}), 'institution': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Institution']", 'null': 'True'}), 'is_curator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'lab_head': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}), 'lab_website_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}), 'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}) }, 'neuroelectro.usersubmission': { 'Meta': {'object_name': 'UserSubmission'}, 'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']"}) }, 'neuroelectro.userupload': { 'Meta': {'object_name': 'UserUpload'}, 'data': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'date_mod': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.FilePathField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.User']"}) } } complete_apps = ['neuroelectro']
neuroelectro/neuroelectro_org
neuroelectro/south_migrations/0052_auto__del_field_ephysconceptmap_added_by_old__del_field_neuronephysdat.py
Python
gpl-2.0
27,484
[ "NEURON" ]
a25655366b89871dc12c9e7cffa0f90ace9ed8c0f8a023bfcddb37cfc9523f42
"""Plots spatially subset model evaluation.""" import os.path import warnings import argparse import numpy import matplotlib matplotlib.use('agg') from matplotlib import pyplot from mpl_toolkits.basemap import Basemap from generalexam.ge_utils import utils as ge_utils from gewittergefahr.gg_utils import general_utils from gewittergefahr.gg_utils import grids from gewittergefahr.gg_utils import projections from gewittergefahr.gg_utils import model_evaluation as model_eval from gewittergefahr.gg_utils import file_system_utils from gewittergefahr.gg_utils import error_checking from gewittergefahr.plotting import plotting_utils from gewittergefahr.plotting import imagemagick_utils SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n' LAMBERT_CONFORMAL_STRING = 'lcc' NUM_PARALLELS = 8 NUM_MERIDIANS = 6 RESOLUTION_STRING = 'l' BORDER_COLOUR = numpy.full(3, 0.) FIGURE_RESOLUTION_DPI = 300 FIGURE_WIDTH_INCHES = 15 FIGURE_HEIGHT_INCHES = 15 NUM_PANEL_ROWS = 3 NUM_PANEL_COLUMNS = 2 CONCAT_FIGURE_SIZE_PX = int(1e7) INPUT_DIR_ARG_NAME = 'input_dir_name' SMOOTHING_RADIUS_ARG_NAME = 'smoothing_radius_grid_cells' SCORE_CMAP_ARG_NAME = 'score_colour_map_name' NUM_EXAMPLES_CMAP_ARG_NAME = 'num_ex_colour_map_name' MAX_PERCENTILE_ARG_NAME = 'max_colour_percentile' OUTPUT_DIR_ARG_NAME = 'output_dir_name' INPUT_DIR_HELP_STRING = ( 'Name of input directory. Evaluation files therein will be found by ' '`model_evaluation.find_file` and read by ' '`model_evaluation.read_evaluation`.' ) SMOOTHING_RADIUS_HELP_STRING = ( 'e-folding radius for Gaussian smoother. If you do not want to smooth, ' 'leave this alone.' ) SCORE_CMAP_HELP_STRING = ( 'Name of colour map for scores (must be accepted by `pyplot.get_cmap`).' ) NUM_EXAMPLES_CMAP_HELP_STRING = ( 'Name of colour map for number of examples (must be accepted by ' '`pyplot.get_cmap`).' ) MAX_PERCENTILE_HELP_STRING = ( 'Used to determine min and max values in each colour map. Max value will ' 'be [q]th percentile over all grid cells, and min value will be [100 - q]th' ' percentile, where q = `{0:s}`.' ).format(MAX_PERCENTILE_ARG_NAME) OUTPUT_DIR_HELP_STRING = ( 'Name of output directory. Figures will be saved here.' ) INPUT_ARG_PARSER = argparse.ArgumentParser() INPUT_ARG_PARSER.add_argument( '--' + INPUT_DIR_ARG_NAME, type=str, required=True, help=INPUT_DIR_HELP_STRING ) INPUT_ARG_PARSER.add_argument( '--' + SMOOTHING_RADIUS_ARG_NAME, type=float, required=False, default=-1, help=SMOOTHING_RADIUS_HELP_STRING ) INPUT_ARG_PARSER.add_argument( '--' + SCORE_CMAP_ARG_NAME, type=str, required=False, default='plasma', help=SCORE_CMAP_HELP_STRING ) INPUT_ARG_PARSER.add_argument( '--' + NUM_EXAMPLES_CMAP_ARG_NAME, type=str, required=False, default='viridis', help=NUM_EXAMPLES_CMAP_HELP_STRING ) INPUT_ARG_PARSER.add_argument( '--' + MAX_PERCENTILE_ARG_NAME, type=float, required=False, default=99., help=MAX_PERCENTILE_HELP_STRING ) INPUT_ARG_PARSER.add_argument( '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True, help=OUTPUT_DIR_HELP_STRING ) def _get_lcc_params(projection_object): """Finds parameters for LCC (Lambert conformal conic) projection. :param projection_object: Instance of `pyproj.Proj`. :return: standard_latitudes_deg: length-2 numpy array of standard latitudes (deg N). :return: central_longitude_deg: Central longitude (deg E). :raises: ValueError: if projection is not LCC. 
""" projection_string = projection_object.srs words = projection_string.split() property_names = [w.split('=')[0][1:] for w in words] property_values = [w.split('=')[1] for w in words] projection_dict = dict(list( zip(property_names, property_values) )) if projection_dict['proj'] != LAMBERT_CONFORMAL_STRING: error_string = 'Grid projection should be "{0:s}", not "{1:s}".'.format( LAMBERT_CONFORMAL_STRING, projection_dict['proj'] ) raise ValueError(error_string) central_longitude_deg = float(projection_dict['lon_0']) standard_latitudes_deg = numpy.array([ float(projection_dict['lat_1']), float(projection_dict['lat_2']) ]) return standard_latitudes_deg, central_longitude_deg def _get_basemap(grid_metadata_dict): """Creates basemap. M = number of rows in grid M = number of columns in grid :param grid_metadata_dict: Dictionary returned by `grids.read_equidistant_metafile`. :return: basemap_object: Basemap handle (instance of `mpl_toolkits.basemap.Basemap`). :return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under Basemap projection (different than pyproj projection). :return: basemap_y_matrix_metres: Same but for y-coordinates. """ x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices( x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY], y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY] ) projection_object = grid_metadata_dict[grids.PROJECTION_KEY] latitude_matrix_deg, longitude_matrix_deg = ( projections.project_xy_to_latlng( x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres, projection_object=projection_object) ) standard_latitudes_deg, central_longitude_deg = _get_lcc_params( projection_object) basemap_object = Basemap( projection='lcc', lat_1=standard_latitudes_deg[0], lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg, rsphere=projections.DEFAULT_EARTH_RADIUS_METRES, ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING, llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0], urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1] ) basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object( longitude_matrix_deg, latitude_matrix_deg) return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres def _plot_one_value( data_matrix, grid_metadata_dict, colour_map_object, min_colour_value, max_colour_value, plot_cbar_min_arrow, plot_cbar_max_arrow, log_scale=False): """Plots one value (score, num examples, or num positive examples). M = number of rows in grid N = number of columns in grid :param data_matrix: M-by-N numpy array of values to plot. :param grid_metadata_dict: Dictionary returned by `grids.read_equidistant_metafile`. :param colour_map_object: See documentation at top of file. :param min_colour_value: Minimum value in colour scheme. :param max_colour_value: Max value in colour scheme. :param plot_cbar_min_arrow: Boolean flag. If True, will plot arrow at bottom of colour bar (to signify that lower values are possible). :param plot_cbar_max_arrow: Boolean flag. If True, will plot arrow at top of colour bar (to signify that higher values are possible). :param log_scale: Boolean flag (True if `data_matrix` contains data in log scale). :return: figure_object: Figure handle (instance of `matplotlib.figure.Figure`). :return: axes_object: Axes handle (instance of `matplotlib.axes._subplots.AxesSubplot`). 
""" figure_object, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = ( _get_basemap(grid_metadata_dict) ) num_grid_rows = data_matrix.shape[0] num_grid_columns = data_matrix.shape[1] x_spacing_metres = ( (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) / (num_grid_columns - 1) ) y_spacing_metres = ( (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) / (num_grid_rows - 1) ) data_matrix_at_edges, edge_x_coords_metres, edge_y_coords_metres = ( grids.xy_field_grid_points_to_edges( field_matrix=data_matrix, x_min_metres=basemap_x_matrix_metres[0, 0], y_min_metres=basemap_y_matrix_metres[0, 0], x_spacing_metres=x_spacing_metres, y_spacing_metres=y_spacing_metres) ) data_matrix_at_edges = numpy.ma.masked_where( numpy.isnan(data_matrix_at_edges), data_matrix_at_edges ) # data_matrix_at_edges[numpy.isnan(data_matrix_at_edges)] = -1 plotting_utils.plot_coastlines( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_countries( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_states_and_provinces( basemap_object=basemap_object, axes_object=axes_object, line_colour=BORDER_COLOUR) plotting_utils.plot_parallels( basemap_object=basemap_object, axes_object=axes_object, num_parallels=NUM_PARALLELS) plotting_utils.plot_meridians( basemap_object=basemap_object, axes_object=axes_object, num_meridians=NUM_MERIDIANS) basemap_object.pcolormesh( edge_x_coords_metres, edge_y_coords_metres, data_matrix_at_edges, cmap=colour_map_object, vmin=min_colour_value, vmax=max_colour_value, shading='flat', edgecolors='None', axes=axes_object, zorder=-1e12) colour_bar_object = plotting_utils.plot_linear_colour_bar( axes_object_or_matrix=axes_object, data_matrix=data_matrix, colour_map_object=colour_map_object, min_value=min_colour_value, max_value=max_colour_value, orientation_string='horizontal', extend_min=plot_cbar_min_arrow, extend_max=plot_cbar_max_arrow, padding=0.05) tick_values = colour_bar_object.get_ticks() if log_scale: tick_strings = [ '{0:d}'.format(int(numpy.round(10 ** v))) for v in tick_values ] elif numpy.nanmax(data_matrix) >= 6: tick_strings = [ '{0:d}'.format(int(numpy.round(v))) for v in tick_values ] else: tick_strings = ['{0:.2f}'.format(v) for v in tick_values] colour_bar_object.set_ticks(tick_values) colour_bar_object.set_ticklabels(tick_strings) return figure_object, axes_object def _run(evaluation_dir_name, smoothing_radius_grid_cells, score_colour_map_name, num_ex_colour_map_name, max_colour_percentile, output_dir_name): """Plots spatially subset model evaluation. This is effectively the main method. :param evaluation_dir_name: See documentation at top of file. :param smoothing_radius_grid_cells: Same. :param score_colour_map_name: Same. :param num_ex_colour_map_name: Same. :param max_colour_percentile: Same. :param output_dir_name: Same. """ if smoothing_radius_grid_cells <= 0: smoothing_radius_grid_cells = None score_colour_map_object = pyplot.get_cmap(score_colour_map_name) num_ex_colour_map_object = pyplot.get_cmap(num_ex_colour_map_name) error_checking.assert_is_geq(max_colour_percentile, 90.) error_checking.assert_is_leq(max_colour_percentile, 100.) 
grid_metafile_name = grids.find_equidistant_metafile( directory_name=evaluation_dir_name, raise_error_if_missing=True ) print('Reading grid metadata from: "{0:s}"...'.format(grid_metafile_name)) grid_metadata_dict = grids.read_equidistant_metafile(grid_metafile_name) print(SEPARATOR_STRING) num_grid_rows = len(grid_metadata_dict[grids.Y_COORDS_KEY]) num_grid_columns = len(grid_metadata_dict[grids.X_COORDS_KEY]) auc_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan) csi_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan) pod_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan) far_matrix = numpy.full((num_grid_rows, num_grid_columns), numpy.nan) num_examples_matrix = numpy.full( (num_grid_rows, num_grid_columns), 0, dtype=int ) num_positive_examples_matrix = numpy.full( (num_grid_rows, num_grid_columns), 0, dtype=int ) for i in range(num_grid_rows): for j in range(num_grid_columns): this_eval_file_name = model_eval.find_file( directory_name=evaluation_dir_name, grid_row=i, grid_column=j, raise_error_if_missing=False) if not os.path.isfile(this_eval_file_name): warning_string = ( 'Cannot find file (this may or may not be a problem). ' 'Expected at: "{0:s}"' ).format(this_eval_file_name) warnings.warn(warning_string) continue print('Reading data from: "{0:s}"...'.format(this_eval_file_name)) this_evaluation_dict = model_eval.read_evaluation( this_eval_file_name) num_examples_matrix[i, j] = len( this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY] ) num_positive_examples_matrix[i, j] = numpy.sum( this_evaluation_dict[model_eval.OBSERVED_LABELS_KEY] ) this_evaluation_table = this_evaluation_dict[ model_eval.EVALUATION_TABLE_KEY] auc_matrix[i, j] = numpy.nanmean( this_evaluation_table[model_eval.AUC_KEY].values ) csi_matrix[i, j] = numpy.nanmean( this_evaluation_table[model_eval.CSI_KEY].values ) pod_matrix[i, j] = numpy.nanmean( this_evaluation_table[model_eval.POD_KEY].values ) far_matrix[i, j] = 1. - numpy.nanmean( this_evaluation_table[model_eval.SUCCESS_RATIO_KEY].values ) print(SEPARATOR_STRING) auc_matrix[num_positive_examples_matrix == 0] = numpy.nan csi_matrix[num_positive_examples_matrix == 0] = numpy.nan pod_matrix[num_positive_examples_matrix == 0] = numpy.nan far_matrix[num_positive_examples_matrix == 0] = numpy.nan if smoothing_radius_grid_cells is not None: print(( 'Applying Gaussian smoother with e-folding radius of {0:.1f} grid ' 'cells...' 
).format( smoothing_radius_grid_cells )) orig_num_examples_matrix = num_examples_matrix + 0 num_examples_matrix = general_utils.apply_gaussian_filter( input_matrix=num_examples_matrix.astype(float), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) num_examples_matrix = numpy.round(num_examples_matrix).astype(int) num_examples_matrix[orig_num_examples_matrix == 0] = 0 # HACK num_positive_examples_matrix = general_utils.apply_gaussian_filter( input_matrix=num_positive_examples_matrix.astype(float), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) num_positive_examples_matrix = ( numpy.round(num_positive_examples_matrix).astype(int) ) num_positive_examples_matrix[num_examples_matrix == 0] = 0 auc_matrix = general_utils.apply_gaussian_filter( input_matrix=ge_utils.fill_nans(auc_matrix), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) csi_matrix = general_utils.apply_gaussian_filter( input_matrix=ge_utils.fill_nans(csi_matrix), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) pod_matrix = general_utils.apply_gaussian_filter( input_matrix=ge_utils.fill_nans(pod_matrix), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) far_matrix = general_utils.apply_gaussian_filter( input_matrix=ge_utils.fill_nans(far_matrix), e_folding_radius_grid_cells=smoothing_radius_grid_cells ) auc_matrix[num_positive_examples_matrix == 0] = numpy.nan csi_matrix[num_positive_examples_matrix == 0] = numpy.nan pod_matrix[num_positive_examples_matrix == 0] = numpy.nan far_matrix[num_positive_examples_matrix == 0] = numpy.nan panel_file_names = [] file_system_utils.mkdir_recursive_if_necessary( directory_name=output_dir_name) # Plot number of examples. this_data_matrix = numpy.maximum(numpy.log10(num_examples_matrix), 0.) this_data_matrix[this_data_matrix == 0] = numpy.nan max_colour_value = numpy.nanpercentile( this_data_matrix, max_colour_percentile) figure_object, axes_object = _plot_one_value( data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=num_ex_colour_map_object, min_colour_value=0., max_colour_value=max_colour_value, plot_cbar_min_arrow=False, plot_cbar_max_arrow=True, log_scale=True) axes_object.set_title(r'Number of examples') plotting_utils.label_axes(axes_object=axes_object, label_string='(a)') panel_file_names.append('{0:s}/num_examples.jpg'.format(output_dir_name)) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Plot number of positive examples. this_data_matrix = num_positive_examples_matrix.astype(float) this_data_matrix[this_data_matrix == 0] = numpy.nan max_colour_value = numpy.nanpercentile( this_data_matrix, max_colour_percentile) min_colour_value = numpy.nanpercentile( this_data_matrix, 100. 
- max_colour_percentile) figure_object, axes_object = _plot_one_value( data_matrix=this_data_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=num_ex_colour_map_object, min_colour_value=min_colour_value, max_colour_value=max_colour_value, plot_cbar_min_arrow=True, plot_cbar_max_arrow=True) axes_object.set_title('Number of tornadic examples') plotting_utils.label_axes(axes_object=axes_object, label_string='(b)') panel_file_names.append( '{0:s}/num_positive_examples.jpg'.format(output_dir_name) ) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Plot AUC. max_colour_value = numpy.nanpercentile(auc_matrix, max_colour_percentile) min_colour_value = numpy.maximum( numpy.nanpercentile(auc_matrix, 100. - max_colour_percentile), 0.5 ) figure_object, axes_object = _plot_one_value( data_matrix=auc_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=score_colour_map_object, min_colour_value=min_colour_value, max_colour_value=max_colour_value, plot_cbar_min_arrow=True, plot_cbar_max_arrow=max_colour_value < 1.) axes_object.set_title('AUC (area under ROC curve)') plotting_utils.label_axes(axes_object=axes_object, label_string='(c)') panel_file_names.append('{0:s}/auc.jpg'.format(output_dir_name)) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Plot CSI. max_colour_value = numpy.nanpercentile(csi_matrix, max_colour_percentile) min_colour_value = numpy.nanpercentile( csi_matrix, 100. - max_colour_percentile) figure_object, axes_object = _plot_one_value( data_matrix=csi_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=score_colour_map_object, min_colour_value=min_colour_value, max_colour_value=max_colour_value, plot_cbar_min_arrow=min_colour_value > 0., plot_cbar_max_arrow=max_colour_value < 1.) axes_object.set_title('CSI (critical success index)') plotting_utils.label_axes(axes_object=axes_object, label_string='(d)') panel_file_names.append('{0:s}/csi.jpg'.format(output_dir_name)) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Plot POD. max_colour_value = numpy.nanpercentile(pod_matrix, max_colour_percentile) min_colour_value = numpy.nanpercentile( pod_matrix, 100. - max_colour_percentile) figure_object, axes_object = _plot_one_value( data_matrix=pod_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=score_colour_map_object, min_colour_value=min_colour_value, max_colour_value=max_colour_value, plot_cbar_min_arrow=min_colour_value > 0., plot_cbar_max_arrow=max_colour_value < 1.) axes_object.set_title('POD (probability of detection)') plotting_utils.label_axes(axes_object=axes_object, label_string='(e)') panel_file_names.append('{0:s}/pod.jpg'.format(output_dir_name)) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Plot FAR. max_colour_value = numpy.nanpercentile(far_matrix, max_colour_percentile) min_colour_value = numpy.nanpercentile( far_matrix, 100. 
- max_colour_percentile) figure_object, axes_object = _plot_one_value( data_matrix=far_matrix, grid_metadata_dict=grid_metadata_dict, colour_map_object=score_colour_map_object, min_colour_value=min_colour_value, max_colour_value=max_colour_value, plot_cbar_min_arrow=min_colour_value > 0., plot_cbar_max_arrow=max_colour_value < 1.) axes_object.set_title('FAR (false-alarm ratio)') plotting_utils.label_axes(axes_object=axes_object, label_string='(f)') panel_file_names.append('{0:s}/far.jpg'.format(output_dir_name)) print('Saving figure to: "{0:s}"...'.format(panel_file_names[-1])) figure_object.savefig( panel_file_names[-1], dpi=FIGURE_RESOLUTION_DPI, pad_inches=0, bbox_inches='tight') pyplot.close(figure_object) # Concatenate panels. concat_file_name = '{0:s}/spatially_subset_evaluation.jpg'.format( output_dir_name) print('Concatenating panels to: "{0:s}"...'.format(concat_file_name)) imagemagick_utils.concatenate_images( input_file_names=panel_file_names, output_file_name=concat_file_name, num_panel_rows=NUM_PANEL_ROWS, num_panel_columns=NUM_PANEL_COLUMNS) imagemagick_utils.resize_image( input_file_name=concat_file_name, output_file_name=concat_file_name, output_size_pixels=CONCAT_FIGURE_SIZE_PX) if __name__ == '__main__': INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args() _run( evaluation_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME), smoothing_radius_grid_cells=getattr( INPUT_ARG_OBJECT, SMOOTHING_RADIUS_ARG_NAME ), score_colour_map_name=getattr(INPUT_ARG_OBJECT, SCORE_CMAP_ARG_NAME), num_ex_colour_map_name=getattr( INPUT_ARG_OBJECT, NUM_EXAMPLES_CMAP_ARG_NAME ), max_colour_percentile=getattr( INPUT_ARG_OBJECT, MAX_PERCENTILE_ARG_NAME ), output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME) )
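Besides the argparse entry point above, the evaluation plots can also be driven programmatically by calling _run directly from the script's namespace. The sketch below mirrors the parser defaults; the two directory paths are placeholders, not real data locations.

# Hypothetical programmatic invocation of the script above; paths are placeholders.
_run(
    evaluation_dir_name='/path/to/spatial_evaluation_files',
    smoothing_radius_grid_cells=-1.,   # <= 0 disables Gaussian smoothing, as in the CLI default
    score_colour_map_name='plasma',
    num_ex_colour_map_name='viridis',
    max_colour_percentile=99.,
    output_dir_name='/path/to/output_figures'
)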
thunderhoser/GewitterGefahr
gewittergefahr/scripts/plot_spatially_subset_eval.py
Python
mit
23,603
[ "Gaussian" ]
ed41fcdd176b150b301a5379677425ff76b8b4abd8aad63598c8ff994bbe6384
''' This __init__ file is for a PySCeS "eXtension module" (pysx), the following variables can be set for each module ''' pysx_name = 'PySCeS SBW interface' pysx_oscompat = ['nt'] pysx_base_class = 'CONTRIB_sbw' pysx_author = 'Herbert Sauro et al.' pysx_email = '' pysx_affiliation = 'Kegg Graduate Institute' pysx_web = 'http://www.sys-bio.org' pysx_notes = 'This is an interface to the Systems Biology Workbench' from sbw_func import *
asttra/pysces
pysces/contrib/sbw/__init__.py
Python
bsd-3-clause
439
[ "PySCeS" ]
417e867a44881ceb02e14915548535134b5fb11901cfe4cf1f1eac02ce662b30
__doc__="""Optimal photometry, for one faint star and one bright reference star. Optimised for timeseries creation. Data variables are module wide. optext: runs the routine""" from Scientific.Functions.LeastSquares import leastSquaresFit as LS #fitting data (used but not changed in functions) FWHM = 3. inannmult = 2. #normaly ~twice FWHM inannguess = 6. outannmult = 4.5. #should have few hundred sky pixels outannguess = 13. #NB: make sure if using a median-flat, that it is made of more images #than the number of sky pixels skyguess = 10000. skyvar = 200. relx = -20. rely = 35. sigmas = 5 def 2dgauss(height,x,y,width1,width2,posangle,inannulus): """returns a model psf, whose size is no more than inannulus""" return def skewgauss(height,x,width,skew): """returns a skewed gaussian for fitting the sky - see Irwin(1995)""" return def fit_reference(image, sky, skyvar, xguess, yguess): """Fit 2D elliptical gaussian to image, using *guess as initial parameters and ignoring all values sigmas number of skyvar above the sky or less. Returns position x,y, perpendicular widths width1,width2 and position angle""" return def fit_sky(image, x, y, inannulus, outannulus): """Fit skewed Gaussian distribution to the sky values within the annuli centred on x,y in image. Returns sky value and varience.""" return def fit_object(image, x, y, width1, width2, posangle, sky): """Find the flux in the object at x,y using the gaussian shape deffined. Returns flux and error""" return def optext(image, refx, refy): """Use Tim Naylor's formulism for optimal photometry. First, fits sky around bright reference star, then finds the star shape, then fits for the faint star of interest. Uses 2 iterations for the bright star, first to get rough parameters which set the annuli for the sky, then with the imporoved sky value.""" brightsky,brightvar = fit_sky(image, refx, refy, inannguess, outannguess) #fit rough sky with typical values x,y,width1,width2,posangle = fit_reference(image,brightsky,brightskyvar,refx,refy) # get rough fit inannulus = max((width1,width2)) * inannmult outannulus = max((width1,width2)) * outannmult brightsky,brightvar = fit_sky(image, x, y, inannulus, outannulus) # refit sky around reference x,y,width1,width2,posangle = fit_reference(image,brightsky,brightskyvar,x,y) # good fit for reference faintsky,faintvar = fit_sky(image, x+relx, y+rely, inannulus, outannulus) # fit sky for faint object flux, error = fit_object(image, x+relx, y+rely, width1, width2, posangle, faintsky) # fit faint object return flux, error
martindurant/astrobits
optext.py
Python
mit
2,700
[ "Gaussian" ]
fc20d799d79a2f56629f2ab3f337a3d639f126384c12b5598bdb6d46f19449b4
# OpenRTS - Copyright (C) 2006 The OpenRTS Project # # OpenRTS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # OpenRTS is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import sys, os import pygame from pygame.locals import * import gui import gettext from random import * from networkscreen import * import tileset #**************************************************************************** # The MainMenu class shows buttons with choices for what game-mode # which will be used. #**************************************************************************** class MainMenu: def __init__(self, client): self.client = client; self.app = gui.Desktop(); self.app.connect(gui.QUIT, self.app.quit, None); container = gui.Container(align=-1, valign=-1); menu_table = gui.Table(width=200,height=220); network_start_button = gui.Button(_("Start Multiplayer Game")); network_start_button.connect(gui.CLICK, self.network_start, None); menu_table.add(network_start_button, 0, 0); menu_table.add(gui.Widget(width=1, height=5), 0, 1); network_join_button = gui.Button(_("Join Multiplayer Game")); network_join_button.connect(gui.CLICK, self.network_join, None); menu_table.add(network_join_button, 0, 2); menu_table.add(gui.Widget(width=1, height=5), 0, 3); single_button = gui.Button(_("Start Singleplayer Game")); menu_table.add(single_button, 0, 4); menu_table.add(gui.Widget(width=1, height=5), 0, 5); settings_button = gui.Button(_("Settings")); menu_table.add(settings_button, 0, 6); menu_table.add(gui.Widget(width=1, height=5), 0, 7); credits_button = gui.Button(_("Credits")); menu_table.add(credits_button, 0, 8); menu_table.add(gui.Widget(width=1, height=5), 0, 9); quit_button = gui.Button(_("Quit")); quit_button.connect(gui.CLICK, self.client.quit); menu_table.add(quit_button, 0, 10); intro_label = gui.Label(_("Open Source Real-Time Strategy Game")); tip_label = gui.Label(_("Tip of the day:")); container.add(MenuBackground(client=self.client, width = self.client.screen.get_width(), height = self.client.screen.get_height()), 0, 0); container.add(menu_table, self.client.screen.get_width() / 2 - 100, self.client.screen.get_height() / 2 - 100); container.add(intro_label, self.client.screen.get_width() / 2 - 160, self.client.screen.get_height() * 0.315); container.add(tip_label, self.client.screen.get_width() * 0.3, self.client.screen.get_height() * 0.71); container.add(self.get_tip_of_the_day(), self.client.screen.get_width() * 0.3, self.client.screen.get_height() * 0.74); self.app.run(container); #**************************************************************************** # Each tip must not be more than 80 characters (fit on one line). #**************************************************************************** def get_tip_of_the_day(self): tips = []; tips.append(_("To get updates of OpenRTS, visit www.openrts.org.")); tips.append(_("OpenRTS is licensed under the GNU General Public License.")); tips.append(_("The game can be translated to several languages.")); return gui.Label(choice(tips)); #**************************************************************************** # Start a network game. 
#**************************************************************************** def network_start(self, obj): self.app.quit(); ns = NetworkScreen(self.client); ns.start(); #**************************************************************************** # Join a network game. #**************************************************************************** def network_join(self, obj): self.app.quit(); ns = NetworkScreen(self.client); ns.join(); #**************************************************************************** # #**************************************************************************** class ErrorMenu: def __init__(self, client, error_message): self.client = client; self.app = gui.Desktop(); self.app.connect(gui.QUIT, sys.exit, None); menu_table = gui.Table(width=200,height=120); error_label = gui.Label(error_message); menu_table.add(error_label, 0, 0); accept_button = gui.Button(_("OK")); menu_table.add(accept_button, 0, 1); accept_button.connect(gui.CLICK, self.recover, None); self.app.run(menu_table); #**************************************************************************** # Return to main menu. #**************************************************************************** def recover(self, obj): self.app.quit(); MainMenu(self.client); #**************************************************************************** # #**************************************************************************** class MenuBackground(gui.Widget): def __init__(self,**params): gui.Widget.__init__(self,**params) client = params['client']; filename = os.path.join('data', 'graphics', 'menubackground.jpg'); surface = tileset.load(filename); scale = float(client.screen.get_width()) / surface.get_width(); self.surface = pygame.transform.rotozoom(surface, 0, scale); #**************************************************************************** # #**************************************************************************** def paint(self,s): s.blit(self.surface,(0,0));
Donkyhotay/openrts
client/mainmenu.py
Python
gpl-2.0
5,932
[ "VisIt" ]
cc915d0d526a27833adbfcb74966bad9776314b76aaf156dde6d4d154d8eb4a2
# -*- coding: utf-8 -*- ''' Display nice images of the model. ''' __author__= "Luis C. Pérez Tato (LCPT) , Ana Ortega (AO_O) " __copyright__= "Copyright 2016, LCPT, AO_O" __license__= "GPL" __version__= "3.0" __email__= "l.pereztato@ciccp.es, ana.ortega@ciccp.es " import sys import vtk import xc_base import geom from postprocess.xcVtk import screen_annotation as sa from miscUtils import LogMessages as lmsg class RecordDefGrid(object): '''Provide the variables involved in the VTK grid representation :ivar xcSet: set to be represented :ivar entToLabel: entities to be labeled (defaults to "nodes") :ivar cellType: specifies the type of data cells (defaults to "nil"). Data cells are simple topological elements like points, lines, polygons and tetrahedra of which visualization data sets are composed. :ivar uGrid: unstructure grid (defaults to None). An unstructure grid is a concrete implementation of a vtk data set; represents any combination of any cell types. This includes 0D (e.g. points), 1D (e.g., lines, polylines), 2D (e.g., triangles, polygons), and 3D (e.g., hexahedron, tetrahedron, polyhedron, etc.). ''' def __init__(self): self.xcSet= None self.entToLabel= "nodes" self.cellType= "nil" self.uGrid= None def getBND(self): ''' Returns the grid boundary''' retval= geom.BND3d() points= self.uGrid.GetPoints() if(points.GetNumberOfPoints()>0): bounds= points.GetBounds() retval= geom.BND3d(geom.Pos3d(bounds[0],bounds[2],bounds[4]),geom.Pos3d(bounds[1],bounds[3],bounds[5])) else: warnMsg= 'there are no points in the grid: ' warnMsg+= self.uGrid.name warnMsg+= '. Maybe you must call fillDownwards on the set to display.' lmsg.warning('Warning; '+warnMsg) return retval class CameraParameters(object): ''' Provides the parameters to define the camera. :ivar viewName: name of the view that contains the renderer (defaults to "XYZPos") :ivar viewUpVc: vector defined as [x,y,z] to orient the view. 
This vector of the model is placed in vertical orientation in the display :ivar posCVc: vector defined as [x,y,z] that points to the camera position :ivar zoom: (defaults to 1.0) :ivar hCamFct: factor that applies to the height of the camera position in order to change perspective of isometric views (defaults to 1, usual values 0.1 to 10) ''' def __init__(self, viewNm= 'XYZPos', hCamF= 1.0): self.viewName= viewNm self.viewUpVc= [0,1,0] self.posCVc= [0,0,100] self.zoom= 1.0 self.hCamFct= hCamF self.defineViewParametersFromViewName() def defineViewParametersFromViewName(self): '''Sets the values of the view parameters from the following predefined viewNames: "ZPos","ZNeg","YPos","YNeg","XPos","XNeg","XYZPos" Zpos: View from positive Z axis (Z+) Zneg: View from negative Z axis (Z-) Ypos: View from positive Y axis (Y+) Yneg: View from negative Y axis (Y-) Xpos: View from positive X axis (X+) Xneg: View from negative X axis (X-) XYZPos or +X+Y+Z: View from point (1,1,1) +X+Y-Z: View from point (1,1,-1) +X-Y+Z: View from point (1,-1,1) +X-Y-Z: View from point (1,-1,-1) -X+Y+Z: View from point (-1,1,1) -X+Y-Z: View from point (-1,1,-1) -X-Y+Z: View from point (-1,-1,1) XYZNeg or -X-Y-Z: View from point (-1,-1,-1) ''' if(self.viewName=="ZPos"): self.viewUpVc= [0,1,0] self.posCVc= [0,0,100] elif(self.viewName=="ZNeg"): self.viewUpVc= [0,1,0] self.posCVc= [0,0,-100] elif(self.viewName=="YPos"): self.viewUpVc= [0,0,1] self.posCVc= [0,100,0] elif(self.viewName=="YNeg"): self.viewUpVc= [0,0,1] self.posCVc= [0,-100,0] elif(self.viewName=="XPos"): self.viewUpVc= [0,0,1] self.posCVc= [100,0,0] elif(self.viewName=="XNeg"): self.viewUpVc= [0,0,1] self.posCVc= [-100,0,0] elif(self.viewName=="XYZPos" or self.viewName=="+X+Y+Z"): self.viewUpVc= [-1,-1,1] self.posCVc= [100,100,self.hCamFct*100] elif(self.viewName=="+X+Y-Z"): self.viewUpVc= [1,1,1] self.posCVc= [100,100,-1*self.hCamFct*100] elif(self.viewName=="+X-Y+Z"): self.viewUpVc= [-1,1,1] self.posCVc= [100,-100,self.hCamFct*100] elif(self.viewName=="+X-Y-Z"): self.viewUpVc= [1,-1,1] self.posCVc= [100,-100,-1*self.hCamFct*100] elif(self.viewName=="-X+Y+Z"): self.viewUpVc= [1,-1,1] self.posCVc= [-100,100,self.hCamFct*100] elif(self.viewName=="-X+Y-Z"): self.viewUpVc= [-1,+1,1] self.posCVc= [-100,100,-1*self.hCamFct*100] elif(self.viewName=="-X-Y+Z"): self.viewUpVc= [1,1,1] self.posCVc= [-100,-100,self.hCamFct*100] elif(self.viewName=="XYZNeg" or self.viewName=="-X-Y-Z"): self.viewUpVc= [-1,-1,1] self.posCVc= [-100,-100,-1*self.hCamFct*100] elif(self.viewName!="Custom"): sys.stderr.write("View name: '"+self.viewName+"' unknown.") def setView(self, camera): '''Sets the camera parameters.''' if(self.viewName!="Custom"): self.defineViewParametersFromViewName() camera.SetViewUp(self.viewUpVc[0],self.viewUpVc[1],self.viewUpVc[2]) camera.SetPosition(self.posCVc[0],self.posCVc[1],self.posCVc[2]) camera.SetParallelProjection(1) camera.Zoom(self.zoom) class RecordDefDisplay(object): ''' Provides the variables to define the output device. :ivar renderer: specification of renderer. A renderer is an object that controls the rendering process for objects. Rendering is the process of converting geometry, a specification for lights, and a camera view into an image. (defaults to None) :ivar renWin: rendering window (defaults to None). A rendering window is a window in a graphical user interface where renderers draw their images. 
:ivar windowWidth: resolution expresed in pixels in the width direction of the window (defaults to 800) :ivar windowHeight: resolution expresed in pixels in the height direction of the window (defaults to 600) :ivar cameraParameters: parameters that define the camera position, zoom, etc. :ivar bgRComp: red component (defaults to 0.65) :ivar bgGComp: green component (defaults to 0.65) :ivar bgBComp: blue component (defaults to 0.65) ''' def __init__(self): self.renderer= None self.renWin= None self.windowWidth= 800 self.windowHeight= 600 self.annotation= sa.ScreenAnnotation() self.bgRComp= 0.65 self.bgGComp= 0.65 self.bgBComp= 0.65 self.cameraParameters= CameraParameters() def setView(self): '''Sets the view''' self.renderer.ResetCamera() cam= self.renderer.GetActiveCamera() self.cameraParameters.setView(cam) self.renderer.ResetCameraClippingRange() def setupAxes(self): '''Add an vtkAxesActor to the renderer.''' bnd= self.gridRecord.getBND() offsetVector= bnd.diagonal*0.1 offset= offsetVector.getModulo() axesPosition= bnd.pMin-offsetVector transform = vtk.vtkTransform() transform.Translate(axesPosition.x, axesPosition.y, axesPosition.z) axes= vtk.vtkAxesActor() # The axes are positioned with a user transform axes.SetUserTransform(transform) length= offset axes.SetTotalLength(length,length,length) textSize= int(3*offset) axes.GetXAxisCaptionActor2D().GetTextActor().SetTextScaleMode(False) axes.GetXAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(textSize) axes.GetYAxisCaptionActor2D().GetTextActor().SetTextScaleMode(False) axes.GetYAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(textSize) axes.GetZAxisCaptionActor2D().GetTextActor().SetTextScaleMode(False) axes.GetZAxisCaptionActor2D().GetTextActor().GetTextProperty().SetFontSize(textSize) # properties of the axes labels can be set as follows # this sets the x axis label to red # axes.GetXAxisCaptionActor2D().GetCaptionTextProperty().SetColor(1,0,0) # the actual text of the axis label can be changed: # axes.SetXAxisLabelText("test") self.renderer.AddActor(axes) def setupWindow(self,caption= ''): '''sets the rendering window. A rendering window is a window in a graphical user interface where renderers draw their images. ''' self.renWin= vtk.vtkRenderWindow() self.renWin.SetSize(self.windowWidth,self.windowHeight) self.renWin.AddRenderer(self.renderer) #Axes self.setupAxes() #Time stamp and window decorations. if(caption==''): lmsg.warning('setupWindow; window caption empty.') vtkCornerAnno= self.annotation.getVtkCornerAnnotation(caption) self.renderer.AddActor(vtkCornerAnno) return self.renWin def setupWindowInteractor(self): '''sets the window interactor, which provides a platform-independent interaction mechanism for mouse/key/time events. ''' iren= vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(self.renWin) iren.SetSize(self.windowWidth,self.windowHeight) iren.Initialize() return iren def displayScene(self,caption= '', fName= None): ''' Displaying scene :param caption: caption to display with the scene. :param fName: name of the image file, in none -> screen window. ''' self.setView() self.setupWindow(caption) if(fName): self.plot(fName) else: iren= self.setupWindowInteractor() iren.Start() def muestraEscena(self): lmsg.warning('muestraEscena is deprecated. 
Use displayScene') self.displayScene('noCaption', None) def setupGrid(self,xcSet): ''' Parameters: xcSet: set to be represented ''' self.gridRecord= RecordDefGrid() self.gridRecord.xcSet= xcSet return self.gridRecord def displayGrid(self, caption= ''): '''Displays the grid in the output device :param caption: caption to display with the scene. ''' self.defineMeshScene(None) self.displayScene(caption) def plot(self,fName): '''Plots window contents''' w2i = vtk.vtkWindowToImageFilter() writer = vtk.vtkJPEGWriter() w2i.SetInput(self.renWin) w2i.Update() writer.SetInputConnection(w2i.GetOutputPort()) writer.SetFileName(fName) self.renWin.Render() w2i.Update() writer.Write()
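The CameraParameters class above maps a predefined view name such as "XYZPos" onto a view-up vector and camera position, which setView then applies to a VTK camera. A small standalone illustration follows; normally RecordDefDisplay drives this internally, so the bare vtkCamera here is only for demonstration, and the class is assumed to be importable from this module.

import vtk

# Isometric view from (+X, +Y, +Z), with the camera raised by hCamF and a mild zoom.
cam_params = CameraParameters(viewNm='XYZPos', hCamF=2.0)
cam_params.zoom = 1.5

camera = vtk.vtkCamera()
cam_params.setView(camera)        # applies view-up, position, parallel projection and zoom
print(camera.GetPosition())       # (100.0, 100.0, 200.0) for this view name and hCamF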
lcpt/xc
python_modules/postprocess/xcVtk/vtk_graphic_base.py
Python
gpl-3.0
10,495
[ "VTK" ]
49e70010f8a2f45cb57d79a9080c4cddce2305151f53f275eab0758fffb054e8
"""This is the homerwork for CS8803 ASE implemented using Google Cloud Endpoints. Defined here are the ProtoRPC messages needed to define Schemas for methods as well as those methods defined in an API. """ import sys sys.path.insert(0, 'lib') import webapp2 import string from textblob import TextBlob from textblob.sentiments import NaiveBayesAnalyzer from collections import Counter import endpoints import json from protorpc import messages from protorpc import message_types from protorpc import remote import time import urllib2 from google.appengine.api import mail # import pandas as pd import numpy as np import csv # # import os # # # import math # # from sklearn import datasets # # from sklearn import datasets # # from sklearn import metrics # # from sklearn.ensemble import RandomForestRegressor # # from sklearn.cross_validation import cross_val_score # # from sklearn.cross_validation import train_test_split # from sklearn.preprocessing import normalize package = 'Hello' class KNNLearner: def __init__(self, k=3): self.k = k self.Xtrain = [] self.Ytrain = [] def addEvidence(self, Xtrain, Ytrain): self.Xtrain = Xtrain self.Ytrain = Ytrain def query(self, Xtest): train = np.c_[self.Xtrain, self.Ytrain] delta = [] for j in range(train.shape[0]): dist = 0 for i in range(len(Xtest)): dist = dist + (train[j][i] - Xtest[i])**2 dist = np.sqrt(dist) delta.append(dist) delta = np.c_[delta, train] delta = delta[delta[:,0].argsort()] delta = delta[0:self.k] Ytest = delta[:,-1].mean() return Ytest class Input(messages.Message): titleName = messages.StringField(1, required = True) linksNumber = messages.IntegerField(2, required = True) imagesNumber = messages.IntegerField(3, required = True) videosNumber = messages.IntegerField(4, required = True) keywords = messages.IntegerField(5, required = True) articleContent = messages.StringField(6, required = True) day = messages.StringField(7, required = True) category = messages.StringField(8, required = True) class Output(messages.Message): """shares and sentiment.""" shares = messages.IntegerField(1) contentSubjectivity = messages.FloatField(2) contentSentimentPolarity = messages.FloatField(3) contentPositiveRate = messages.FloatField(4) contentNegativeRate = messages.FloatField(5) titleSubjectivity = messages.FloatField(6) titlePolarity = messages.FloatField(7) @endpoints.api(name='prediction', version='v1') class PredictorApi(remote.Service): """Predictor API v1.""" # @endpoints.method(message_types.VoidMessage, Output, # path='prediction', http_method='GET', # name='getting.tuple') # def give_output(self, unused_request): # return Output(shares=5) @endpoints.method(Input, Output, path='prediction', http_method='POST', name='sending.shares') def take_input(self, request): # n = self.compute(request) n_tokens_title, n_tokens_content, n_unique_tokens,n_non_stop_words, n_non_stop_unique_tokens, average_token_length, global_subjectivity, global_sentiment_polarity,global_rate_positive_words, global_rate_negative_words, rate_positive_words, rate_negative_words, title_subjectivity,title_polarity = self.compute(request) testX = [n_tokens_title, n_tokens_content, n_unique_tokens,n_non_stop_words, n_non_stop_unique_tokens] linksNumber = request.linksNumber imagesNumber = request.imagesNumber videosNumber = request.videosNumber keywords = request.keywords day = request.day category = request.category testX = testX + [linksNumber,imagesNumber,videosNumber,average_token_length,keywords] day_list = [] if day=="Monday": day_list = [1,0,0,0,0,0,0,0] elif day=="Tuesday": 
day_list = [0,1,0,0,0,0,0,0] elif day=="Wednesday": day_list = [0,0,1,0,0,0,0,0] elif day=="Thursday": day_list = [0,0,0,1,0,0,0,0] elif day=="Friday": day_list = [0,0,0,0,1,0,0,0] elif day=="Saturday": day_list = [0,0,0,0,0,1,0,1] elif day=="Sunday": day_list = [0,0,0,0,0,0,1,1] cat_list = [] if category=="Lifestyle": cat_list = [1,0,0,0,0,0] elif category=="Entertainment": cat_list = [0,1,0,0,0,0] elif category=="Business": cat_list = [0,0,1,0,0,0] elif category=="Social Media": cat_list = [0,0,0,1,0,0] elif category=="Tech": cat_list = [0,0,0,0,1,0] elif category=="World": cat_list = [0,0,0,0,0,1] testX = testX + cat_list testX = testX + day_list testX = testX + [global_subjectivity, global_sentiment_polarity,global_rate_positive_words, global_rate_negative_words, rate_positive_words, rate_negative_words, title_subjectivity,title_polarity] n = int(self.testLearner(testX)) return Output(shares=n, contentSubjectivity=global_subjectivity,contentSentimentPolarity= global_sentiment_polarity,contentPositiveRate=global_rate_positive_words, contentNegativeRate=global_rate_negative_words, titleSubjectivity=title_subjectivity,titlePolarity=title_polarity) def readwords(self, filename): f = open(filename) words = [ line.rstrip() for line in f.readlines()] return words def testLearner(self, testX): # my_data = np.genfromtxt('data4.csv', delimiter=',') # fileReader = csv.reader(csv_file.split("\n")) # testX = np.array(testX) with open("OnlinePopularityAnalysis.csv", 'r') as f: data = [row for row in csv.reader(f.read().splitlines())] data = np.array(data) data = data[1:,[1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,30,31,32,33,34,35,36,37,43,44,45,46,47,48,55,56,59]] data = data.astype(np.float) X = data[:,0:-1] trainX = data[:-1,0:-1] trainY = data[:-1,-1] learner = KNNLearner(k = 3) # constructor # testX = data[-1,1:-1] learner.addEvidence(trainX, trainY) # training step predY = learner.query(testX) # get the predictions return int(predY) def non_stop_words(self,n,s): stop_words = self.readwords('stopwords.txt') n_stop_words = 0 count = Counter(s.split()) for key, val in count.iteritems(): key = key.rstrip('.,?!\n') # removing possible punctuation signs if key in stop_words: n_stop_words = n_stop_words + 1 n_non_stop_words = n - n_stop_words return (n_non_stop_words) def non_stop_unique_tokens(self,n,s): stop_words = self.readwords('stopwords.txt') n_stop_unique_tokens = 0 for key in s: if key in stop_words: n_stop_unique_tokens = n_stop_unique_tokens + 1 n_non_stop_unique_tokens = n - n_stop_unique_tokens return (n_non_stop_unique_tokens) def sentiment_analysis(self,s,n_tokens_content): positive = self.readwords('positive.txt') negative = self.readwords('negative.txt') count = Counter(s.split()) pos = 0 neg = 0 for key, val in count.iteritems(): key = key.rstrip('.,?!\n') # removing possible punctuation signs if key in positive: pos = pos + 1 if key in negative: neg = neg + 1 non_neutral_tokens = pos+neg global_rate_positive_words = float(pos)/float(n_tokens_content) global_rate_negative_words = float(neg)/float(n_tokens_content) if non_neutral_tokens>0: rate_positive_words = float(pos)/float(non_neutral_tokens) rate_negative_words = float(neg)/float(non_neutral_tokens) else: rate_positive_words = 0 rate_negative_words = 0 return (global_rate_positive_words,global_rate_negative_words,rate_positive_words,rate_negative_words) def compute(self,request): title = request.titleName n_tokens_title = len(title) s=request.articleContent # s = "It was November. 
Although it was not yet late, the sky was dark when I turned into Laundress Passage. Father had finished for the day, switched off the shop lights and closed the shutters; but so I would not come home to darkness he had left on the light over the stairs to the flat. Through the glass in the door it cast a foolscap rectangle of paleness onto the wet pavement, and it was while I was standing in that rectangle, about to turn my key in the door, that I first saw the letter. Another white rectangle, it was on the fifth step from the bottom, where I couldn't miss it." # number of tokens in content tokens = s.split() n_tokens_content = len(tokens) # number of unique tokens unique_tokens = set(tokens) n_unique_tokens = len(unique_tokens) # number of non-stop words n_non_stop_words = self.non_stop_words(n_tokens_content,s) # number of non-stop unique tokens n_non_stop_unique_tokens = self.non_stop_unique_tokens(n_unique_tokens,unique_tokens) #average token length count = lambda l1, l2: len(list(filter(lambda c: c in l2, l1))) s_chars = count(s, string.ascii_letters) average_token_length = s_chars/n_tokens_content # content polarity and subjectivity blob = TextBlob(s) global_sentiment_polarity = blob.sentiment.polarity global_subjectivity = blob.sentiment.subjectivity #rate of positivity and negativity global_rate_positive_words, global_rate_negative_words, rate_positive_words, rate_negative_words = self.sentiment_analysis(s,n_tokens_content) # title polarity and subjectivity blob = TextBlob(title) title_polarity = blob.sentiment.polarity title_subjectivity = blob.sentiment.subjectivity return(n_tokens_title, n_tokens_content, n_unique_tokens,n_non_stop_words, n_non_stop_unique_tokens, average_token_length, global_subjectivity, global_sentiment_polarity,global_rate_positive_words, global_rate_negative_words, rate_positive_words, rate_negative_words, title_subjectivity,title_polarity) # return Input(titleName=request.titleName, imagesNumber=request.imagesNumber, videosNumber=request.videosNumber, # keywords=request.keywords, articleContent=request.articleContent, day=request.day, category=request.category) APPLICATION = endpoints.api_server([PredictorApi])
nelango/ViralityAnalysis
model/predictor.py
Python
mit
10,218
[ "ASE" ]
c2c46e5fd65a20185bba25cf45d423c9d5334a0d5d56a92e89779d70a14b1acf
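The KNNLearner in the predictor above computes the distance to every training row with explicit Python loops. A minimal vectorized sketch of the same k-nearest-neighbour regression query, using only NumPy; the function name and toy arrays are hypothetical, not part of the original module:

import numpy as np

def knn_query(Xtrain, Ytrain, xtest, k=3):
    """Predict the mean target of the k training rows closest to xtest."""
    dists = np.sum((Xtrain - xtest) ** 2, axis=1)  # squared Euclidean distance to each row
    nearest = np.argsort(dists)[:k]                # indices of the k smallest distances
    return Ytrain[nearest].mean()

Xtrain = np.array([[0.0], [1.0], [2.0], [10.0]])   # toy 1-D features
Ytrain = np.array([0.0, 1.0, 2.0, 10.0])           # toy targets
print(knn_query(Xtrain, Ytrain, np.array([1.2]), k=3))  # prints 1.0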
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.

"""
Handle the SCOP DEScription file.

The file format is described in the scop
"release notes.":http://scop.berkeley.edu/release-notes-1.55.html
The latest DES file can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/

"Release 1.55":http://scop.berkeley.edu/parse/des.cla.scop.txt_1.55 (July 2001)
"""

from types import *


class Record:
    """Holds information for one node in the SCOP hierarchy.

    sunid       -- SCOP unique identifiers
    nodetype    -- One of 'cl' (class), 'cf' (fold), 'sf' (superfamily),
                   'fa' (family), 'dm' (protein), 'sp' (species),
                   'px' (domain). Additional node types may be added.
    sccs        -- SCOP concise classification strings. e.g. b.1.2.1
    name        -- The SCOP ID (sid) for domains (e.g. d1anu1),
                   currently empty for other node types
    description -- e.g. "All beta proteins", "Fibronectin type III"
    """
    def __init__(self):
        self.sunid = ''
        self.nodetype = ''
        self.sccs = ''
        self.name = ''
        self.description = ''

    def __str__(self):
        s = []
        s.append(self.sunid)
        s.append(self.nodetype)
        s.append(self.sccs)
        if self.name:
            s.append(self.name)
        else:
            s.append("-")
        s.append(self.description)
        return "\t".join(map(str, s)) + "\n"


class Iterator:
    """Iterates over a DES file."""
    def __init__(self, handle, parser=None):
        """Create an object that iterates over a DES file.

        handle -- file-like object.
        parser -- an optional Parser object to change the results into
                  another form. If set to None, then the raw contents of
                  the file will be returned.
        """
        if type(handle) is not FileType and type(handle) is not InstanceType:
            raise TypeError, "I expected a file handle or file-like object"
        self._handle = handle
        self._parser = parser

    def next(self):
        """Retrieve the next DES record."""
        while 1:
            line = self._handle.readline()
            if not line:
                return None
            if line[0] != '#':
                break  # Not a comment line
        if self._parser is not None:
            return self._parser.parse(line)
        return line

    def __iter__(self):
        return iter(self.next, None)


class Parser:
    """Parses DES records.

    Records consist of 5 tab-delimited fields: sunid, node type, sccs,
    node name, node description.
    """
    # For example ::
    #
    # 21953  px  b.1.2.1  d1dan.1  1dan T:,U:91-106
    # 48724  cl  b        -        All beta proteins
    # 48725  cf  b.1      -        Immunoglobulin-like beta-sandwich
    # 49265  sf  b.1.2    -        Fibronectin type III
    # 49266  fa  b.1.2.1  -        Fibronectin type III

    def parse(self, entry):
        """Returns a DES Record."""
        entry = entry.rstrip()        # no trailing whitespace
        columns = entry.split("\t")   # separate the tab-delimited cols
        if len(columns) != 5:
            raise ValueError, "I don't understand the format of %s" % entry
        rec = Record()
        rec.sunid, rec.nodetype, rec.sccs, rec.name, rec.description = columns
        if rec.name == '-':
            rec.name = ''
        rec.sunid = int(rec.sunid)
        return rec
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/SCOP/Des.py
Python
apache-2.0
3,692
[ "Biopython" ]
2d5cba190d44d6690ac279492525a38d64c78d5aca53d7389b9cf7d345b9e839
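As the Parser docstring above notes, a DES record is five tab-separated fields (sunid, node type, sccs, node name, description). A standalone sketch of that parsing step; the helper name is hypothetical, and the example line is taken from the comments in the source:

def parse_des_line(line):
    """Split one SCOP DES line into its five tab-separated fields."""
    sunid, nodetype, sccs, name, description = line.rstrip().split("\t")
    return {
        "sunid": int(sunid),
        "nodetype": nodetype,
        "sccs": sccs,
        "name": "" if name == "-" else name,  # '-' marks an empty node name
        "description": description,
    }

rec = parse_des_line("49265\tsf\tb.1.2\t-\tFibronectin type III")
print(rec["sunid"], rec["nodetype"], rec["description"])  # 49265 sf Fibronectin type III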
#!/usr/bin/python ##This Python file uses the following encoding: utf-8 ## ## (C) 2015 Muthiah Annamalai, ## Licensed under GPL Version 3 ## ## Module has elements of PARSE-TREE AST and generates ## a Graphviz file for the AST. WIP. ## import codecs import functools import sys import xml from .ezhil_scanner import EzhilToken, Token from .transform import TransformVisitor from .ezhil_program_utils import get_ast PYTHON3 = (sys.version[0] == '3') if PYTHON3: unicode = str try: import graphviz as gv Graph = functools.partial(gv.Graph, format='svg') Digraph = functools.partial(gv.Digraph, format='svg') except ImportError as ie: pass class Tag(object): def __init__(self, fileobj, name, tab=0, attrs={}): object.__init__(self) self.tagname = name self.attrs = attrs self.fileobj = fileobj self.tabstr = " " * tab if len(attrs) > 0: pfx = " " else: pfx = "" serialized_attrs = pfx + u" ".join( ["%s=\"%s\" " % (str(k), str(v)) for k, v in attrs.items()]) self.fileobj.write("%s<%s%s>\n" % (self.tabstr, self.tagname, serialized_attrs)) def disp(self, content): self.fileobj.write("%s\n" % content) def __del__(self): self.fileobj.write("%s</%s>\n" % (self.tabstr, self.tagname)) class Graphing(object): styles = { 'graph': { 'label': 'Ezhil AST', 'fontsize': '16', 'fontcolor': 'white', 'bgcolor': '#333333', 'rankdir': 'BT', }, 'nodes': { 'fontname': 'Helvetica', 'shape': 'hexagon', 'fontcolor': 'white', 'color': 'white', 'style': 'filled', 'fillcolor': '#006699', }, 'edges': { 'style': 'dashed', 'color': 'white', 'arrowhead': 'open', 'fontname': 'Courier', 'fontsize': '12', 'fontcolor': 'white', } } def __init__(self, graph): self.graph = graph def add_nodes(self, nodes): for n in nodes: if isinstance(n, tuple): self.graph.node(n[0], **n[1]) else: self.graph.node(n) return self.graph def add_edges(self, edges): for e in edges: if isinstance(e[0], tuple): self.graph.edge(*e[0], **e[1]) else: self.graph.edge(*e) return self.graph def apply_styles(self, styles): self.graph.graph_attr.update(('graph' in styles and styles['graph']) or {}) self.graph.node_attr.update(('nodes' in styles and styles['nodes']) or {}) self.graph.edge_attr.update(('edges' in styles and styles['edges']) or {}) return self.graph class GraphVisualizer(TransformVisitor): def __init__(self, interpreter, debug=False, filename=None): """ base class to write transform methods """ self.tab = 0 self.filename = filename self.file = sys.stdout if filename: self.file = codecs.open(filename, "w", "UTF-8") self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") tobj = Tag(self.file, "ezhil", self.tab) self.incr() TransformVisitor.__init__(self, interpreter, debug) self.decr() def __del__(self): if self.filename: self.file.close() def disp_basic(self, tagname, contents): self.file.write("%s<%s>%s</%s>\n" % (self.tabstr(), tagname, contents, tagname)) def tabstr(self): return " " * self.tab def incr(self): self.tab += 1 def decr(self): self.tab -= 1 def update_line(self, obj): pass return def visit_identifier(self, IDobj): self.disp_basic("ID", unicode(IDobj.id)) return def visit_string(self, string): self.disp_basic("STR", string) return def visit_number(self, num): self.disp_basic("NUM", num) return def visit_expr_call(self, expr_call): tobj = Tag(self.file, name="EXPRCALL", tab=self.tab) self.incr() tobj_id = Tag(self.file, name="FUNCID", tab=self.tab) self.incr() expr_call.func_id.visit(self) del tobj_id self.decr() self.incr() tobj_args = Tag(self.file, name="FUNCARGS", tab=self.tab) expr_call.arglist.visit(self) del tobj_args self.decr() 
self.decr() return def visit_expr_list(self, expr_list): tobj = Tag(self.file, name="EXPRLIST", tab=self.tab) self.incr() for pos, exp_itr in enumerate(expr_list.exprs): exp_itr.visit(self) self.decr() return def visit_stmt_list(self, stmt_list): tobj = Tag(self.file, name="STMTLIST", tab=self.tab) self.incr() for stmt in stmt_list.List: stmt.visit(self) self.decr() return def visit_stmt(self, stmt): tobj = Tag(self.file, name="STMT", tab=self.tab) ## is this a recipe for getting stuck in a loop? stmt.visit(self) return def visit_expr(self, expr): escaped_tok_kind = xml.sax.saxutils.escape( Token.get_name(expr.binop.kind)) tobj = Tag(self.file, name="EXPR", tab=self.tab, attrs={"binop": escaped_tok_kind}) self.incr() tobj_term = Tag(self.file, name="TERM", tab=self.tab) self.incr() expr.term.visit(self) del tobj_term self.decr() toktype = EzhilToken.token_types[expr.binop.kind] expr.next_expr.visit(self) self.decr() return def visit_return_stmt(self, ret_stmt): tobj = Tag(self.file, name="RETURN", tab=self.tab) keyword = u"பின்கொடு" # return may have optional argument if hasattr(ret_stmt.rvalue, 'visit'): ret_stmt.rvalue.visit(self) return def visit_break_stmt(self, break_stmt): tobj = Tag(self.file, name="BREAK", tab=self.tab) keyword = u"நிறுத்து" #EzhilToken.Keywords["break"] return def visit_continue_stmt(self, cont_stmt): tobj = Tag(self.file, name="CONTINUE", tab=self.tab) keyword = u"தொடர்" #EzhilToken.Keywords["continue"] return def visit_else_stmt(self, else_stmt): tobj = Tag(self.file, name="ELSE", tab=self.tab) self.incr() keyword = u"இல்லை" else_stmt.stmt.visit(self) self.decr() return def visit_if_elseif_stmt(self, if_elseif_stmt): tobj = Tag(self.file, name="IF", tab=self.tab) self.incr() # condition expression if_elseif_stmt.expr.visit(self) # IF kw keyword_if = u"ஆனால்" # True-Body if_elseif_stmt.body.visit(self) # False-Body - optionally present if hasattr(if_elseif_stmt.next_stmt, 'visit'): tobj_else = Tag(self.file, name="ELSE", tab=self.tab) self.incr() if_elseif_stmt.next_stmt.visit(self) del tobj_else self.decr() self.visit_end_kw() self.decr() def visit_end_kw(self): # END kw #tobj = Tag(name="END",tab=self.tab) keyword_end = u"முடி" def visit_while_stmt(self, while_stmt): """ @( itr < L ) வரை சமம்= சமம் + input[itr]*wts[itr] itr = itr + 1 முடி""" tobj_while = Tag(self.file, name="WHILE", tab=self.tab) self.incr() # condition expression tobj_while_cond = Tag(self.file, name="WHILE_COND", tab=self.tab) self.incr() while_stmt.expr.visit(self) self.decr() del tobj_while_cond # While kw keyword_while = u"வரை" # Body while_stmt.body.visit(self) self.visit_end_kw() self.decr() return # foreach is transformed at the AST-level # so its really a MACRO here def visit_for_stmt(self, for_stmt): """ @( x = -1 , x < 0, x = x + 1 ) ஆக பதிப்பி x, "கருவேபில" முடி """ tobj_for = Tag(self.file, name="FOR", tab=self.tab) self.incr() # condition expression tobj_for_init = Tag(self.file, name="FOR_INIT", tab=self.tab) for_stmt.expr_init.visit(self) del tobj_for_init tobj_for_cond = Tag(self.file, name="FOR_COND", tab=self.tab) for_stmt.expr_cond.visit(self) del tobj_for_cond tobj_for_update = Tag(self.file, name="FOR_UPDATE", tab=self.tab) for_stmt.expr_update.visit(self) del tobj_for_update # For kw keyword_for = u"ஆக" # Body for_stmt.body.visit(self) self.visit_end_kw() self.decr() return def visit_assign_stmt(self, assign_stmt): tobj_assign = Tag(self.file, name="ASSIGN", tab=self.tab) self.incr() tobj_assign_lval = Tag(self.file, name="ASSIGN_LVAL", tab=self.tab) self.incr() 
assign_stmt.lvalue.visit(self) self.decr() del tobj_assign_lval tobj_assign_rval = Tag(self.file, name="ASSIGN_RVAL", tab=self.tab) self.incr() assign_stmt.rvalue.visit(self) self.decr() del tobj_assign_rval self.decr() return def visit_print_stmt(self, print_stmt): tobj = Tag(self.file, name="PRINT", tab=self.tab) keyword = u"பதிப்பி" self.incr() print_stmt.exprlst.visit(self) self.decr() return def visit_eval_stmt(self, eval_stmt): tobj = Tag(self.file, name="EVALSTMT", tab=self.tab) self.incr() eval_stmt.expr.visit(self) self.decr() return def visit_arg_list(self, arg_list): tobj = Tag(self.file, name="ARGLIST", tab=self.tab) self.incr() L = len(arg_list.get_list()) for pos, arg in enumerate(arg_list.get_list()): if hasattr(arg, 'visit'): arg.visit(self) self.decr() return def visit_value_list(self, value_list): for value in value_list.args: value.visit(self) return def visit_function(self, fndecl_stmt): """ நிரல்பாகம் fibonacci_தமிழ்( x ) @( x <= 1 ) ஆனால் ஈ = 1 இல்லை ஈ = fibonacci_தமிழ்( x - 1 ) + fibonacci_தமிழ்( x - 2 ) முடி பின்கொடு ஈ முடி """ tobj = Tag(self.file, name="FUNCTION", tab=self.tab, attrs={"name": fndecl_stmt.name}) # Function kw keyword_fn = u"நிரல்பாகம்" # name of function # arglist expression fndecl_stmt.arglist.visit(self) # Body expression fndecl_stmt.body.visit(self) return def visit_binary_expr(self, binexpr): self.visit_expr(binexpr) return #### interface for CLI use #### def visualizeSourceFile(filename): parse_tree, ezhil_interpreter = get_ast(filename) svgfilename = filename.replace(".n", ".svg") GraphVisualizer(interpreter=ezhil_interpreter, filename=svgfilename) if __name__ == "__main__": if len(sys.argv) < 2: print("Usage: python ezhil_vsiualizer.py <srcfile1> <srcfile2> ...") sys.exit(255) for srcfile in sys.argv[1:]: print("processing =>", srcfile) visualizeSourceFile(srcfile)
arcturusannamalai/Ezhil-Lang
ezhil/ezhil_visualizer.py
Python
gpl-3.0
12,640
[ "VisIt" ]
3914da63c70c65394655b6319cde7e63ee9ff10ab3eb6a6029c55fdd9dc4d33f
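The visualizer above emits nested XML by tying each element to a Tag object's lifetime: the opening tag is written in __init__ and the closing tag in __del__. A simplified sketch of that pattern, not the plugin's full class (it drops the attribute handling and writes to stdout):

import sys

class Tag(object):
    """Write an opening tag on construction and the matching closing tag on deletion."""
    def __init__(self, fileobj, name, tab=0):
        self.fileobj = fileobj
        self.name = name
        self.tabstr = " " * tab
        self.fileobj.write("%s<%s>\n" % (self.tabstr, self.name))
    def __del__(self):
        self.fileobj.write("%s</%s>\n" % (self.tabstr, self.name))

outer = Tag(sys.stdout, "EXPR", tab=0)
inner = Tag(sys.stdout, "TERM", tab=1)
del inner   # emits </TERM>
del outer   # emits </EXPR>

Relying on __del__ for output ordering only works because the visitor methods delete (or let go of) the inner Tag before the outer one, which is how the visit_* methods above are written.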
""" ################################################################################ # # SOAPpy - Cayce Ullman (cayce@actzero.com) # Brian Matthews (blm@actzero.com) # Gregory Warnes (Gregory.R.Warnes@Pfizer.com) # Christopher Blunck (blunck@gst.com) # ################################################################################ # Copyright (c) 2003, Pfizer # Copyright (c) 2001, Cayce Ullman. # Copyright (c) 2001, Brian Matthews. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # Neither the name of actzero, inc. nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $' from .version import __version__ #import xml.sax import socket import sys import socketserver from SOAPpy.Types import * import http.server import _thread # SOAPpy-py3 modules from .Parser import parseSOAPRPC from .Config import Config from SOAPpy.Types import faultType, voidType, simplify from .NS import NS from .SOAPBuilder import buildSOAP from .Utilities import debugHeader, debugFooter import collections try: from M2Crypto import SSL except: pass ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $' from .version import __version__ ################################################################################ # Call context dictionary ################################################################################ _contexts = dict() def GetSOAPContext(): global _contexts return _contexts[_thread.get_ident()] ################################################################################ # Server ################################################################################ # Method Signature class for adding extra info to registered funcs, right now # used just to indicate it should be called with keywords, instead of ordered # params. 
class MethodSig: def __init__(self, func, keywords=0, context=0): self.func = func self.keywords = keywords self.context = context self.__name__ = func.__name__ def __call__(self, *args, **kw): return self.func(*args, **kw) class SOAPContext: def __init__(self, header, body, attrs, xmldata, connection, httpheaders, soapaction): self.header = header self.body = body self.attrs = attrs self.xmldata = xmldata self.connection = connection self.httpheaders= httpheaders self.soapaction = soapaction # A class to describe how header messages are handled class HeaderHandler: # Initially fail out if there are any problems. def __init__(self, header, attrs): for i in list(header.__dict__.keys()): if i[0] == "_": continue d = getattr(header, i) try: fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')]) except: fault = 0 if fault: raise faultType("%s:MustUnderstand" % NS.ENV_T, "Required Header Misunderstood", "%s" % i) ################################################################################ # SOAP Server ################################################################################ class SOAPServerBase: def get_request(self): sock, addr = socketserver.TCPServer.get_request(self) if self.ssl_context: sock = SSL.Connection(self.ssl_context, sock) sock._setup_ssl(addr) if sock.accept_ssl() != 1: raise socket.error("Couldn't accept SSL connection") return sock, addr def registerObject(self, object, namespace = '', path = ''): if namespace == '' and path == '': namespace = self.namespace if namespace == '' and path != '': namespace = path.replace("/", ":") if namespace[0] == ":": namespace = namespace[1:] self.objmap[namespace] = object def registerFunction(self, function, namespace = '', funcName = None, path = ''): if not funcName : funcName = function.__name__ if namespace == '' and path == '': namespace = self.namespace if namespace == '' and path != '': namespace = path.replace("/", ":") if namespace[0] == ":": namespace = namespace[1:] if namespace in self.funcmap: self.funcmap[namespace][funcName] = function else: self.funcmap[namespace] = {funcName : function} def registerKWObject(self, object, namespace = '', path = ''): if namespace == '' and path == '': namespace = self.namespace if namespace == '' and path != '': namespace = path.replace("/", ":") if namespace[0] == ":": namespace = namespace[1:] for i in dir(object.__class__): if i[0] != "_" and isinstance(getattr(object, i), collections.Callable): self.registerKWFunction(getattr(object,i), namespace) # convenience - wraps your func for you. 
def registerKWFunction(self, function, namespace = '', funcName = None, path = ''): if namespace == '' and path == '': namespace = self.namespace if namespace == '' and path != '': namespace = path.replace("/", ":") if namespace[0] == ":": namespace = namespace[1:] self.registerFunction(MethodSig(function,keywords=1), namespace, funcName) def unregisterObject(self, object, namespace = '', path = ''): if namespace == '' and path == '': namespace = self.namespace if namespace == '' and path != '': namespace = path.replace("/", ":") if namespace[0] == ":": namespace = namespace[1:] del self.objmap[namespace] class SOAPRequestHandler(http.server.BaseHTTPRequestHandler): ignore_ext = True def version_string(self): return '<a href="http://pywebsvcs.sf.net">' + \ 'SOAPpy-py3 ' + __version__ + '</a> (Python ' + \ sys.version.split()[0] + ')' def date_time_string(self): self.__last_date_time_string = \ http.server.BaseHTTPRequestHandler.\ date_time_string(self) return self.__last_date_time_string def do_POST(self): global _contexts status = 500 try: if self.server.config.dumpHeadersIn: s = 'Incoming HTTP headers' debugHeader(s) print(self.raw_requestline.strip()) print("\n".join([x.strip() for x in self.headers.headers])) debugFooter(s) data = self.rfile.read(int(self.headers["Content-length"])) if self.server.config.dumpSOAPIn: s = 'Incoming SOAP' debugHeader(s) print(data, end=' ') if data[-1] != '\n': print() debugFooter(s) (r, header, body, attrs) = \ parseSOAPRPC(data, header = 1, body = 1, attrs = 1, ignore_ext=self.ignore_ext) method = r._name args = r._aslist() kw = r._asdict() if self.server.config.simplify_objects: args = simplify(args) kw = simplify(kw) # Handle mixed named and unnamed arguments by assuming # that all arguments with names of the form "v[0-9]+" # are unnamed and should be passed in numeric order, # other arguments are named and should be passed using # this name. # This is a non-standard exension to the SOAP protocol, # but is supported by Apache AXIS. # It is enabled by default. To disable, set # Config.specialArgs to False. ordered_args = {} named_args = {} if self.server.config.specialArgs: for (k,v) in list(kw.items()): if k.decode()[0]=="v": try: i = int(k[1:]) ordered_args[i] = v except ValueError: named_args[str(k.decode())] = v else: named_args[str(k.decode())] = v # We have to decide namespace precedence # I'm happy with the following scenario # if r._ns is specified use it, if not check for # a path, if it's specified convert it and use it as the # namespace. If both are specified, use r._ns. ns = r._ns if len(self.path) > 1 and not ns: ns = self.path.replace("/", ":") if ns[0] == ":": ns = ns[1:] # authorization method a = None keylist = list(ordered_args.keys()) keylist.sort() # create list in proper order w/o names tmp = [ordered_args[x] for x in keylist] ordered_args = tmp #print '<-> Argument Matching Yielded:' #print '<-> Ordered Arguments:' + str(ordered_args) #print '<-> Named Arguments :' + str(named_args) resp = "" # For fault messages if ns: nsmethod = "%s:%s" % (ns, method) else: nsmethod = method try: # First look for registered functions if ns in self.server.funcmap and \ method in self.server.funcmap[ns]: f = self.server.funcmap[ns][method] # look for the authorization method if self.server.config.authMethod != None: authmethod = self.server.config.authMethod if ns in self.server.funcmap and \ authmethod in self.server.funcmap[ns]: a = self.server.funcmap[ns][authmethod] else: # Now look at registered objects # Check for nested attributes. 
This works even if # there are none, because the split will return # [method] f = self.server.objmap[ns] # Look for the authorization method if self.server.config.authMethod != None: authmethod = self.server.config.authMethod if hasattr(f, authmethod): a = getattr(f, authmethod) # then continue looking for the method l = method.split(".") for i in l: f = getattr(f, i) except: info = sys.exc_info() try: resp = buildSOAP(faultType("%s:Client" % NS.ENV_T, "Method Not Found", "%s : %s %s %s" % (nsmethod, info[0], info[1], info[2])), encoding = self.server.encoding, config = self.server.config) finally: del info status = 500 else: try: if header: x = HeaderHandler(header, attrs) fr = 1 # call context book keeping # We're stuffing the method into the soapaction if there # isn't one, someday, we'll set that on the client # and it won't be necessary here # for now we're doing both if "SOAPAction".lower() not in list(self.headers.keys()) or \ self.headers["SOAPAction"] == "\"\"": self.headers["SOAPAction"] = method thread_id = _thread.get_ident() _contexts[thread_id] = SOAPContext(header, body, attrs, data, self.connection, self.headers, self.headers["SOAPAction"]) # Do an authorization check if a != None: if not a(*(), **{"_SOAPContext" : _contexts[thread_id] }): raise faultType("%s:Server" % NS.ENV_T, "Authorization failed.", "%s" % nsmethod) # If it's wrapped, some special action may be needed if isinstance(f, MethodSig): c = None if f.context: # retrieve context object c = _contexts[thread_id] if self.server.config.specialArgs: if c: named_args["_SOAPContext"] = c fr = f(*ordered_args, **named_args) elif f.keywords: # This is lame, but have to de-unicode # keywords strkw = {} for (k, v) in list(kw.items()): strkw[str(k)] = v if c: strkw["_SOAPContext"] = c fr = f(*(), **strkw) elif c: fr = f(*args, **{'_SOAPContext':c}) else: fr = f(*args, **{}) else: if self.server.config.specialArgs: fr = f(*ordered_args, **named_args) else: fr = f(*args, **{}) if type(fr) == type(self) and \ isinstance(fr, voidType): resp = buildSOAP(kw = {'%sResponse' % method: fr}, encoding = self.server.encoding, config = self.server.config) else: resp = buildSOAP(kw = {'%sResponse' % method: {'Result': fr}}, encoding = self.server.encoding, config = self.server.config) # Clean up _contexts if thread_id in _contexts: del _contexts[thread_id] except Exception as e: import traceback info = sys.exc_info() try: if self.server.config.dumpFaultInfo: s = 'Method %s exception' % nsmethod debugHeader(s) traceback.print_exception(info[0], info[1], info[2]) debugFooter(s) if isinstance(e, faultType): f = e else: f = faultType("%s:Server" % NS.ENV_T, "Method Failed", "%s" % nsmethod) if self.server.config.returnFaultInfo: f._setDetail("".join(traceback.format_exception( info[0], info[1], info[2]))) elif not hasattr(f, 'detail'): f._setDetail("%s %s" % (info[0], info[1])) finally: del info resp = buildSOAP(f, encoding = self.server.encoding, config = self.server.config) status = 500 else: status = 200 except faultType as e: import traceback info = sys.exc_info() try: if self.server.config.dumpFaultInfo: s = 'Received fault exception' debugHeader(s) traceback.print_exception(info[0], info[1], info[2]) debugFooter(s) if self.server.config.returnFaultInfo: e._setDetail("".join(traceback.format_exception( info[0], info[1], info[2]))) elif not hasattr(e, 'detail'): e._setDetail("%s %s" % (info[0], info[1])) finally: del info resp = buildSOAP(e, encoding = self.server.encoding, config = self.server.config) status = 500 except Exception as 
e: # internal error, report as HTTP server error if self.server.config.dumpFaultInfo: s = 'Internal exception %s' % e import traceback debugHeader(s) info = sys.exc_info() try: traceback.print_exception(info[0], info[1], info[2]) finally: del info debugFooter(s) self.send_response(500) self.end_headers() if self.server.config.dumpHeadersOut and \ self.request_version != 'HTTP/0.9': s = 'Outgoing HTTP headers' debugHeader(s) if status in self.responses: s = ' ' + self.responses[status][0] else: s = '' print("%s %d%s" % (self.protocol_version, 500, s)) print("Server:", self.version_string()) print("Date:", self.__last_date_time_string) debugFooter(s) else: # got a valid SOAP response self.send_response(status) t = 'text/xml'; if self.server.encoding != None: t += '; charset=%s' % self.server.encoding self.send_header("Content-type", t) self.send_header("Content-length", str(len(resp))) self.end_headers() if self.server.config.dumpHeadersOut and \ self.request_version != 'HTTP/0.9': s = 'Outgoing HTTP headers' debugHeader(s) if status in self.responses: s = ' ' + self.responses[status][0] else: s = '' print("%s %d%s" % (self.protocol_version, status, s)) print("Server:", self.version_string()) print("Date:", self.__last_date_time_string) print("Content-type:", t) print("Content-length:", len(resp)) debugFooter(s) if self.server.config.dumpSOAPOut: s = 'Outgoing SOAP' debugHeader(s) print(resp, end=' ') if resp[-1] != '\n': print() debugFooter(s) self.wfile.write(resp) self.wfile.flush() # We should be able to shut down both a regular and an SSL # connection, but under Python 2.1, calling shutdown on an # SSL connections drops the output, so this work-around. # This should be investigated more someday. if self.server.config.SSLserver and \ isinstance(self.connection, SSL.Connection): self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN | SSL.SSL_RECEIVED_SHUTDOWN) else: self.connection.shutdown(1) def do_GET(self): #print 'command ', self.command #print 'path ', self.path #print 'request_version', self.request_version #print 'headers' #print ' type ', self.headers.type #print ' maintype', self.headers.maintype #print ' subtype ', self.headers.subtype #print ' params ', self.headers.plist path = self.path.lower() if path.endswith('wsdl'): method = 'wsdl' function = namespace = None if namespace in self.server.funcmap \ and method in self.server.funcmap[namespace]: function = self.server.funcmap[namespace][method] else: if namespace in list(self.server.objmap.keys()): function = self.server.objmap[namespace] l = method.split(".") for i in l: function = getattr(function, i) if function: self.send_response(200) self.send_header("Content-type", 'text/plain') self.end_headers() response = function(*()) self.wfile.write(str(response)) return # return error self.send_response(200) self.send_header("Content-type", 'text/html') self.end_headers() self.wfile.write('''\ <title> <head>Error!</head> </title> <body> <h1>Oops!</h1> <p> This server supports HTTP GET requests only for the the purpose of obtaining Web Services Description Language (WSDL) for a specific service. Either you requested an URL that does not end in "wsdl" or this server does not implement a wsdl method. 
</p> </body>''') def log_message(self, format, *args): if self.server.log: http.server.BaseHTTPRequestHandler.\ log_message (self, format, *args) class SOAPInsecureRequestHandler(http.server.BaseHTTPRequestHandler): '''Request handler that does load POSTed doctypes''' ignore_ext = False class SOAPServer(SOAPServerBase, socketserver.TCPServer): def __init__(self, addr = ('localhost', 8000), RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8', config = Config, namespace = None, ssl_context = None): # Test the encoding, raising an exception if it's not known if encoding != None: ''.encode(encoding) if ssl_context != None and not config.SSLserver: raise AttributeError("SSL server not supported by this Python installation") self.namespace = namespace self.objmap = {} self.funcmap = {} self.ssl_context = ssl_context self.encoding = encoding self.config = config self.log = log self.allow_reuse_address= 1 socketserver.TCPServer.__init__(self, addr, RequestHandler) class ThreadingSOAPServer(SOAPServerBase, socketserver.ThreadingTCPServer): def __init__(self, addr = ('localhost', 8000), RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8', config = Config, namespace = None, ssl_context = None): # Test the encoding, raising an exception if it's not known if encoding != None: ''.encode(encoding) if ssl_context != None and not config.SSLserver: raise AttributeError("SSL server not supported by this Python installation") self.namespace = namespace self.objmap = {} self.funcmap = {} self.ssl_context = ssl_context self.encoding = encoding self.config = config self.log = log self.allow_reuse_address= 1 socketserver.ThreadingTCPServer.__init__(self, addr, RequestHandler) # only define class if Unix domain sockets are available if hasattr(socket, "AF_UNIX"): class SOAPUnixSocketServer(SOAPServerBase, socketserver.UnixStreamServer): def __init__(self, addr = 8000, RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8', config = Config, namespace = None, ssl_context = None): # Test the encoding, raising an exception if it's not known if encoding != None: ''.encode(encoding) if ssl_context != None and not config.SSLserver: raise AttributeError("SSL server not supported by this Python installation") self.namespace = namespace self.objmap = {} self.funcmap = {} self.ssl_context = ssl_context self.encoding = encoding self.config = config self.log = log self.allow_reuse_address= 1 socketserver.UnixStreamServer.__init__(self, str(addr), RequestHandler)
cmsdaq/hltd
lib/SOAPpy-py3-0.52.24/src/SOAPpy/Server.py
Python
lgpl-3.0
26,981
[ "Brian" ]
5ae144fb83e328c88cb8654ba646880f50f9d3dd8b3d5116eb59ac6d437dd567
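A minimal sketch of standing up the server defined above, assuming the package exposes SOAPServer at the top level as classic SOAPpy does; the port, namespace, and echo function are arbitrary choices for illustration:

from SOAPpy import SOAPServer

def echo(s):
    """Trivial RPC method that returns its argument unchanged."""
    return s

server = SOAPServer(("localhost", 8080))
server.registerFunction(echo)   # exposed under the default namespace
# server.serve_forever()        # blocks, handling POSTed SOAP envelopes via do_POST above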
## # Copyright 2009-2015 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for building and installing WIEN2k, implemented as an easyblock @author: Stijn De Weirdt (Ghent University) @author: Dries Verdegem (Ghent University) @author: Kenneth Hoste (Ghent University) @author: Pieter De Baets (Ghent University) @author: Jens Timmerman (Ghent University) @author: Michael Sluydts (Ghent University) """ import fileinput import os import re import shutil import sys import tempfile from distutils.version import LooseVersion import easybuild.tools.environment as env import easybuild.tools.toolchain as toolchain from easybuild.framework.easyblock import EasyBlock from easybuild.framework.easyconfig import CUSTOM from easybuild.tools.build_log import EasyBuildError from easybuild.tools.filetools import extract_file, mkdir, read_file, rmtree2, write_file from easybuild.tools.modules import get_software_root, get_software_version from easybuild.tools.run import run_cmd, run_cmd_qa class EB_WIEN2k(EasyBlock): """Support for building/installing WIEN2k.""" def __init__(self,*args,**kwargs): """Enable building in install dir.""" super(EB_WIEN2k, self).__init__(*args, **kwargs) self.build_in_installdir = True @staticmethod def extra_options(): testdata_urls = ["http://www.wien2k.at/reg_user/benchmark/test_case.tar.gz", "http://www.wien2k.at/reg_user/benchmark/mpi-benchmark.tar.gz"] extra_vars = { 'runtest': [True, "Run WIEN2k tests", CUSTOM], 'testdata': [testdata_urls, "test data URL for WIEN2k benchmark test", CUSTOM], 'wien_mpirun': [None, "MPI wrapper command to use", CUSTOM], 'remote': [None, "Remote command to use (e.g. 
pbsssh, ...)", CUSTOM], 'use_remote': [True, "Whether to remotely login to initiate the k-point parallellization calls", CUSTOM], 'mpi_remote': [False, "Whether to initiate MPI calls locally or remotely", CUSTOM], 'wien_granularity': [True, "Granularity for parallel execution (see manual)", CUSTOM], 'taskset': [None, "Specifies an optional command for binding a process to a specific core", CUSTOM], } return EasyBlock.extra_options(extra_vars) def extract_step(self): """Unpack WIEN2k sources using gunzip and provided expand_lapw script.""" super(EB_WIEN2k, self).extract_step() cmd = "gunzip *gz" run_cmd(cmd, log_all=True, simple=True) cmd = "./expand_lapw" qanda = {'continue (y/n)': 'y'} no_qa = [ 'tar -xf.*', '.*copied and linked.*', ] run_cmd_qa(cmd, qanda, no_qa=no_qa, log_all=True, simple=True) def configure_step(self): """Configure WIEN2k build by patching siteconfig_lapw script and running it.""" self.cfgscript = "siteconfig_lapw" # patch config file first # toolchain-dependent values comp_answer = None if self.toolchain.comp_family() == toolchain.INTELCOMP: #@UndefinedVariable if LooseVersion(get_software_version("icc")) >= LooseVersion("2011"): comp_answer = 'I' # Linux (Intel ifort 12.0 compiler + mkl ) else: comp_answer = "K1" # Linux (Intel ifort 11.1 compiler + mkl ) elif self.toolchain.comp_family() == toolchain.GCC: #@UndefinedVariable comp_answer = 'V' # Linux (gfortran compiler + gotolib) else: raise EasyBuildError("Failed to determine toolchain-dependent answers.") # libraries rlibs = "%s %s" % (os.getenv('LIBLAPACK_MT'), self.toolchain.get_flag('openmp')) rplibs = [os.getenv('LIBSCALAPACK_MT'), os.getenv('LIBLAPACK_MT')] fftwver = get_software_version('FFTW') if fftwver: suff = '' if LooseVersion(fftwver) >= LooseVersion("3"): suff = '3' rplibs.insert(0, "-lfftw%(suff)s_mpi -lfftw%(suff)s" % {'suff': suff}) else: rplibs.append(os.getenv('LIBFFT')) rplibs = ' '.join(rplibs) d = { 'FC': '%s %s' % (os.getenv('F90'), os.getenv('FFLAGS')), 'MPF': "%s %s" % (os.getenv('MPIF90'), os.getenv('FFLAGS')), 'CC': os.getenv('CC'), 'LDFLAGS': '$(FOPT) %s ' % os.getenv('LDFLAGS'), 'R_LIBS': rlibs, # libraries for 'real' (not 'complex') binary 'RP_LIBS' : rplibs, # libraries for 'real' parallel binary 'MPIRUN': '', } for line in fileinput.input(self.cfgscript, inplace=1, backup='.orig'): # set config parameters for (k,v) in d.items(): regexp = re.compile('^([a-z0-9]+):%s:.*' % k) res = regexp.search(line) if res: # we need to exclude the lines with 'current', otherwise we break the script if not res.group(1) == "current": line = regexp.sub('\\1:%s:%s' % (k, v), line) # avoid exit code > 0 at end of configuration line = re.sub('(\s+)exit 1', '\\1exit 0', line) sys.stdout.write(line) # set correct compilers env.setvar('bin', os.getcwd()) dc = { 'COMPILERC': os.getenv('CC'), 'COMPILER': os.getenv('F90'), 'COMPILERP': os.getenv('MPIF90'), } for (k, v) in dc.items(): write_file(k, v) # configure with patched configure script self.log.debug('%s part I (configure)' % self.cfgscript) cmd = "./%s" % self.cfgscript qanda = { 'Press RETURN to continue': '', 'Your compiler:': '', 'Hit Enter to continue': '', 'Remote shell (default is ssh) =': '', 'and you need to know details about your installed mpi ..) (y/n)': 'y', 'Q to quit Selection:': 'Q', 'A Compile all programs (suggested) Q Quit Selection:': 'Q', ' Please enter the full path of the perl program: ': '', 'continue or stop (c/s)': 'c', '(like taskset -c). 
Enter N / your_specific_command:': 'N', } if LooseVersion(self.version) >= LooseVersion("13"): fftw_root = get_software_root('FFTW') if fftw_root: fftw_maj = get_software_version('FFTW').split('.')[0] fftw_spec = 'FFTW%s' % fftw_maj else: raise EasyBuildError("Required FFTW dependency is missing") qanda.update({ '(not updated) Selection:': comp_answer, 'Shared Memory Architecture? (y/N):': 'N', 'Set MPI_REMOTE to 0 / 1:': '0', 'You need to KNOW details about your installed MPI and FFTW ) (y/n)': 'y', 'Please specify whether you want to use FFTW3 (default) or FFTW2 (FFTW3 / FFTW2):' : fftw_spec, 'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3):' : fftw_root, 'is this correct? enter Y (default) or n:' : 'Y', }) else: qanda.update({ 'compiler) Selection:': comp_answer, 'Shared Memory Architecture? (y/n):': 'n', 'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0', 'Do you have MPI and Scalapack installed and intend to run ' \ 'finegrained parallel? (This is usefull only for BIG cases ' \ '(50 atoms and more / unit cell) and you need to know details ' \ 'about your installed mpi and fftw ) (y/n)': 'y', }) no_qa = [ 'You have the following mkl libraries in %s :' % os.getenv('MKLROOT'), "%s[ \t]*.*" % os.getenv('MPIF90'), "%s[ \t]*.*" % os.getenv('F90'), "%s[ \t]*.*" % os.getenv('CC'), ".*SRC_.*", "Please enter the full path of the perl program:", ] std_qa = { r'S\s+Save and Quit[\s\n]+To change an item select option.[\s\n]+Selection:': 'S', 'Recommended setting for parallel f90 compiler: .* Current selection: Your compiler:': os.getenv('MPIF90'), } run_cmd_qa(cmd, qanda, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True) # post-configure patches parallel_options = {} parallel_options_fp = os.path.join(self.cfg['start_dir'], 'parallel_options') if self.cfg['wien_mpirun']: parallel_options.update({'WIEN_MPIRUN': self.cfg['wien_mpirun']}) if self.cfg['taskset'] is None: self.cfg['taskset'] = 'no' parallel_options.update({'TASKSET': self.cfg['taskset']}) for opt in ['use_remote', 'mpi_remote', 'wien_granularity']: parallel_options.update({opt.upper(): int(self.cfg[opt])}) write_file(parallel_options_fp, '\n'.join(['setenv %s "%s"' % tup for tup in parallel_options.items()])) if self.cfg['remote']: if self.cfg['remote'] == 'pbsssh': extratxt = '\n'.join([ '', "set remote = pbsssh", "setenv PBSSSHENV 'LD_LIBRARY_PATH PATH'", '', ]) write_file(parallel_options_fp, extratxt, append=True) else: raise EasyBuildError("Don't know how to handle remote %s", self.cfg['remote']) self.log.debug("Patched file %s: %s", parallel_options_fp, read_file(parallel_options_fp)) def build_step(self): """Build WIEN2k by running siteconfig_lapw script again.""" self.log.debug('%s part II (build_step)' % self.cfgscript) cmd = "./%s" % self.cfgscript qanda = { 'L Perl path (if not in /usr/bin/perl) Q Quit Selection:': 'R', 'A Compile all programs S Select program Q Quit Selection:': 'A', 'Press RETURN to continue': '\nQ', # also answer on first qanda pattern with 'Q' to quit ' Please enter the full path of the perl program: ':'', } no_qa = [ "%s[ \t]*.*" % os.getenv('MPIF90'), "%s[ \t]*.*" % os.getenv('F90'), "%s[ \t]*.*" % os.getenv('CC'), "mv[ \t]*.*", ".*SRC_.*", ".*: warning .*", ".*Stop.", "Compile time errors (if any) were:", "Please enter the full path of the perl program:", ] self.log.debug("no_qa for %s: %s" % (cmd, no_qa)) run_cmd_qa(cmd, qanda, no_qa=no_qa, log_all=True, simple=True) def test_step(self): """Run WIEN2k test benchmarks. 
""" def run_wien2k_test(cmd_arg): """Run a WPS command, and check for success.""" cmd = "x_lapw lapw1 %s" % cmd_arg (out, _) = run_cmd(cmd, log_all=True, simple=False) re_success = re.compile("LAPW1\s+END") if not re_success.search(out): raise EasyBuildError("Test '%s' in %s failed (pattern '%s' not found)?", cmd, os.getcwd(), re_success.pattern) else: self.log.info("Test '%s' seems to have run successfully: %s" % (cmd, out)) if self.cfg['runtest']: if not self.cfg['testdata']: raise EasyBuildError("List of URLs for testdata not provided.") # prepend $PATH with install directory, define $SCRATCH which is used by the tests env.setvar('PATH', "%s:%s" % (self.installdir, os.environ['PATH'])) try: cwd = os.getcwd() # create temporary directory tmpdir = tempfile.mkdtemp() os.chdir(tmpdir) self.log.info("Running tests in %s" % tmpdir) scratch = os.path.join(tmpdir, 'scratch') mkdir(scratch) env.setvar('SCRATCH', scratch) # download data testdata_paths = {} for testdata in self.cfg['testdata']: td_path = self.obtain_file(testdata) if not td_path: raise EasyBuildError("Downloading file from %s failed?", testdata) testdata_paths.update({os.path.basename(testdata): td_path}) self.log.debug('testdata_paths: %s' % testdata_paths) # unpack serial benchmark serial_test_name = "test_case" extract_file(testdata_paths['%s.tar.gz' % serial_test_name], tmpdir) # run serial benchmark os.chdir(os.path.join(tmpdir, serial_test_name)) run_wien2k_test("-c") # unpack parallel benchmark (in serial benchmark dir) parallel_test_name = "mpi-benchmark" extract_file(testdata_paths['%s.tar.gz' % parallel_test_name], tmpdir) # run parallel benchmark os.chdir(os.path.join(tmpdir, serial_test_name)) run_wien2k_test("-p") os.chdir(cwd) rmtree2(tmpdir) except OSError, err: raise EasyBuildError("Failed to run WIEN2k benchmark tests: %s", err) self.log.debug("Current dir: %s" % os.getcwd()) def test_cases_step(self): """Run test cases, if specified.""" for test in self.cfg['tests']: # check expected format if not len(test) == 4: raise EasyBuildError("WIEN2k test case not specified in expected format: " "(testcase_name, init_lapw_args, run_lapw_args, [scf_regexp_pattern])") test_name = test[0] init_args = test[1] run_args = test[2] scf_regexp_patterns = test[3] try: cwd = os.getcwd() # WIEN2k enforces that working dir has same name as test case tmpdir = os.path.join(tempfile.mkdtemp(), test_name) scratch = os.path.join(tmpdir, 'scratch') mkdir(scratch, parents=True) env.setvar('SCRATCH', scratch) os.chdir(tmpdir) self.log.info("Running test case %s in %s" % (test_name, tmpdir)) except OSError, err: raise EasyBuildError("Failed to create temporary directory for test %s: %s", test_name, err) # try and find struct file for test test_fp = self.obtain_file("%s.struct" % test_name) try: shutil.copy2(test_fp, tmpdir) except OSError, err: raise EasyBuildError("Failed to copy %s: %s", test_fp, err) # run test cmd = "init_lapw %s" % init_args run_cmd(cmd, log_all=True, simple=True) cmd = "run_lapw %s" % run_args run_cmd(cmd, log_all=True, simple=True) # check output scf_fn = "%s.scf" % test_name self.log.debug("Checking output of test %s in %s" % (str(test), scf_fn)) scftxt = read_file(scf_fn) for regexp_pat in scf_regexp_patterns: regexp = re.compile(regexp_pat, re.M) if not regexp.search(scftxt): raise EasyBuildError("Failed to find pattern %s in %s", regexp.pattern, scf_fn) else: self.log.debug("Found pattern %s in %s" % (regexp.pattern, scf_fn)) # cleanup try: os.chdir(cwd) rmtree2(tmpdir) except OSError, err: raise 
EasyBuildError("Failed to clean up temporary test dir: %s", err) def install_step(self): """Fix broken symlinks after build/installation.""" # fix broken symlink os.remove(os.path.join(self.installdir, "SRC_w2web", "htdocs", "usersguide")) os.symlink(os.path.join(self.installdir, "SRC_usersguide_html"), os.path.join(self.installdir, "SRC_w2web","htdocs", "usersguide")) def sanity_check_step(self): """Custom sanity check for WIEN2k.""" lapwfiles = [] for suffix in ['0', '0_mpi', '1', '1_mpi', '1c', '1c_mpi', '2', '2_mpi', '2c' ,'2c_mpi', '3', '3c', '5', '5c', '7', '7c', 'dm', 'dmc', 'so']: p = os.path.join(self.installdir, "lapw%s" % suffix) lapwfiles.append(p) custom_paths = { 'files': lapwfiles, 'dirs': [], } super(EB_WIEN2k, self).sanity_check_step(custom_paths=custom_paths) def make_module_extra(self): """Set WIENROOT environment variable, and correctly prepend PATH.""" txt = super(EB_WIEN2k, self).make_module_extra() txt += self.module_generator.set_environment("WIENROOT", self.installdir) txt += self.module_generator.prepend_paths("PATH", [""]) return txt
ULHPC/modules
easybuild/easybuild-easyblocks/easybuild/easyblocks/w/wien2k.py
Python
mit
18,274
[ "WIEN2k" ]
73e6783c478058a3ab7fe3eeb9e555378fc82f89638e3778994d5463b325d438
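The easyblock above drives WIEN2k's interactive siteconfig_lapw script by mapping expected prompt fragments to canned answers. A stripped-down sketch of that question-and-answer pattern, reusing run_cmd_qa as imported in the easyblock; the prompts here are illustrative only and assume an EasyBuild runtime environment:

from easybuild.tools.run import run_cmd_qa

qanda = {
    'Press RETURN to continue': '',   # just hit enter
    'continue or stop (c/s)': 'c',    # keep going
}
no_qa = ['tar -xf.*']                 # output patterns that need no reply
run_cmd_qa('./siteconfig_lapw', qanda, no_qa=no_qa, log_all=True, simple=True)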
from .posterior import Posterior from ...util.linalg import jitchol, tdot, dtrtrs, dtrtri, pdinv from ...util import diag import numpy as np from . import LatentFunctionInference log_2_pi = np.log(2*np.pi) class PEP(LatentFunctionInference): ''' Sparse Gaussian processes using Power-Expectation Propagation for regression: alpha \approx 0 gives VarDTC and alpha = 1 gives FITC Reference: A Unifying Framework for Sparse Gaussian Process Approximation using Power Expectation Propagation, https://arxiv.org/abs/1605.07066 ''' const_jitter = 1e-6 def __init__(self, alpha): super(PEP, self).__init__() self.alpha = alpha def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None): assert mean_function is None, "inference with a mean function not implemented" num_inducing, _ = Z.shape num_data, output_dim = Y.shape #make sure the noise is not hetero sigma_n = likelihood.gaussian_variance(Y_metadata) if sigma_n.size >1: raise NotImplementedError("no hetero noise with this implementation of PEP") Kmm = kern.K(Z) Knn = kern.Kdiag(X) Knm = kern.K(X, Z) U = Knm #factor Kmm diag.add(Kmm, self.const_jitter) Kmmi, L, Li, _ = pdinv(Kmm) #compute beta_star, the effective noise precision LiUT = np.dot(Li, U.T) sigma_star = sigma_n + self.alpha * (Knn - np.sum(np.square(LiUT),0)) beta_star = 1./sigma_star # Compute and factor A A = tdot(LiUT*np.sqrt(beta_star)) + np.eye(num_inducing) LA = jitchol(A) # back substitute to get b, P, v URiy = np.dot(U.T*beta_star,Y) tmp, _ = dtrtrs(L, URiy, lower=1) b, _ = dtrtrs(LA, tmp, lower=1) tmp, _ = dtrtrs(LA, b, lower=1, trans=1) v, _ = dtrtrs(L, tmp, lower=1, trans=1) tmp, _ = dtrtrs(LA, Li, lower=1, trans=0) P = tdot(tmp.T) alpha_const_term = (1.0-self.alpha) / self.alpha #compute log marginal log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \ -np.sum(np.log(np.diag(LA)))*output_dim + \ 0.5*output_dim*(1+alpha_const_term)*np.sum(np.log(beta_star)) + \ -0.5*np.sum(np.square(Y.T*np.sqrt(beta_star))) + \ 0.5*np.sum(np.square(b)) + 0.5*alpha_const_term*num_data*np.log(sigma_n) #compute dL_dR Uv = np.dot(U, v) dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - (1.0+alpha_const_term)/beta_star + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) \ + np.sum(np.square(Uv), 1))*beta_star**2 # Compute dL_dKmm vvT_P = tdot(v.reshape(-1,1)) + P dL_dK = 0.5*(Kmmi - vvT_P) KiU = np.dot(Kmmi, U.T) dL_dK += self.alpha * np.dot(KiU*dL_dR, KiU.T) # Compute dL_dU vY = np.dot(v.reshape(-1,1),Y.T) dL_dU = vY - np.dot(vvT_P, U.T) dL_dU *= beta_star dL_dU -= self.alpha * 2.*KiU*dL_dR dL_dthetaL = likelihood.exact_inference_gradients(dL_dR) dL_dthetaL += 0.5*alpha_const_term*num_data / sigma_n grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR * self.alpha, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL} #construct a posterior object post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L) return post, log_marginal, grad_dict
befelix/GPy
GPy/inference/latent_function_inference/pep.py
Python
bsd-3-clause
3,521
[ "Gaussian" ]
2b611ccb6e7ff5b0aca0b0e98539815ee1cd0a39310c271ccf071262107a21e4
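In the inference step above, the effective per-point noise is sigma_star = sigma_n + alpha * (Knn - Qnn), where Qnn is the inducing-point approximation of the prior variance. A toy sketch of how the resulting precision beta_star behaves as alpha moves between the VarDTC-like (alpha -> 0) and FITC (alpha = 1) cases; all numbers are made up:

sigma_n = 0.1   # Gaussian noise variance
knn = 1.0       # exact prior variance k(x, x)
qnn = 0.8       # approximate variance from the inducing points

for alpha in (1e-6, 0.5, 1.0):
    sigma_star = sigma_n + alpha * (knn - qnn)
    beta_star = 1.0 / sigma_star
    # alpha -> 0 leaves only the noise term; alpha = 1 adds the full FITC-style
    # diagonal correction (knn - qnn) to the noise.
    print(alpha, beta_star)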
# -*- coding: utf-8 -*- # GromacsWrapper # Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com> # Released under the GNU Public License 3 (or higher, your choice) # See the file COPYING for details. from __future__ import division, absolute_import, print_function import pytest from numpy.testing import assert_equal, assert_almost_equal from gromacs.fileformats import convert @pytest.mark.parametrize( 's,expected', [(100, 100), ("Jabberwock", u"Jabberwock"), (u"Ångström", u"Ångström"), ] ) def test_to_unicode(s, expected): output = convert.to_unicode(s) assert output == expected class TestAutoconverter(object): def _convert(self, s, **kwargs): ac = convert.Autoconverter(**kwargs) assert ac.active is True return ac.convert(s) @pytest.mark.parametrize( "s,expected", [ ('foo bar 22 boing ---', ('foo', 'bar', 22, 'boing', None)), ('1 2 3 4', (1, 2, 3, 4)), ('1 2 3 4', (1, 2, 3, 4)), ('True x X yes Present', (True, True, True, True, True)), ('False no - None none', (False, False, False, False, False)) ], ) @pytest.mark.parametrize('sep', (True, None)) def test_convert_default(self, s, expected, sep): output = self._convert(s, sep=sep) assert_equal(output, expected) @pytest.mark.parametrize( "s,expected", [ ('1,2,3,4', (1, 2, 3, 4)), ('1 2,3,4', ('1 2', 3, 4)), ] ) def test_convert_default_sep(self, s, expected, sep=','): output = self._convert(s, sep=sep) assert_equal(output, expected) @pytest.mark.parametrize( "s,expected", [ ('2.71213 3.14', (2.71213, 3.14)), ('1000 -234 987654', (1000, -234, 987654)), ] ) @pytest.mark.parametrize('sep', (True, None)) def test_convert_numbers(self, s, expected, sep): output = self._convert(s, sep=sep) assert_almost_equal(output, expected)
Becksteinlab/GromacsWrapper
tests/fileformats/test_convert.py
Python
gpl-3.0
2,029
[ "Gromacs" ]
14f477a227e1db3335cb1cb024abe61aa0b0b060c1626dbd452db01dbb10dc32
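The parametrized cases above document what Autoconverter is expected to produce; the same behaviour can be checked interactively. A short sketch, assuming gromacs.fileformats.convert is importable as in the test module:

from gromacs.fileformats import convert

ac = convert.Autoconverter(sep=None)
print(ac.convert('foo bar 22 boing ---'))
# expected, per the test table: ('foo', 'bar', 22, 'boing', None)
print(ac.convert('2.71213 3.14'))
# expected: (2.71213, 3.14)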
# -*- coding: utf-8 -*- # Copyright 2013 splinter authors. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. from urllib.request import urlopen import unittest from splinter import Browser from .fake_webapp import EXAMPLE_APP from .base import WebDriverTests import pytest def selenium_server_is_running(): try: from splinter.driver.webdriver.remote import WebDriver page_contents = urlopen(WebDriver.DEFAULT_URL).read() except IOError: return False return "WebDriver Hub" in page_contents class RemoteBrowserFirefoxTest(WebDriverTests, unittest.TestCase): @pytest.fixture(autouse=True, scope='class') def setup_browser(self, request): request.cls.browser = Browser('remote', browser='firefox') request.addfinalizer(request.cls.browser.quit) def setUp(self): self.browser.visit(EXAMPLE_APP) def test_support_with_statement(self): "Remote should support with statement" with Browser("remote"): pass def test_should_be_able_to_change_user_agent(self): "Remote should not support custom user agent" pass class RemoteBrowserChromeTest(WebDriverTests, unittest.TestCase): @pytest.fixture(autouse=True, scope='class') def setup_browser(self, request): request.cls.browser = Browser('remote', browser='chrome') request.addfinalizer(request.cls.browser.quit) def setUp(self): self.browser.visit(EXAMPLE_APP) def test_support_with_statement(self): "Remote should support with statement" with Browser("remote"): pass def test_should_be_able_to_change_user_agent(self): "Remote should not support custom user agent" pass
cobrateam/splinter
tests/test_webdriver_remote.py
Python
bsd-3-clause
1,813
[ "VisIt" ]
4c8f48e9bd49b497493a03032907d0c67ff937eac6adaf66415b4ffcf8b43c49
import pymol
from pymol import stored
from pymol import cmd, CmdException

cmd=pymol.cmd

input_file='4oe9.pdb'
cmd.load( input_file , '4oe9' )
S-John-S/MAT
sb_script.py
Python
mit
144
[ "PyMOL" ]
ab3fc5e42c6ecb9c868be7edabb3394a640fb12098fc3255179c7bdef82f844e
__author__ = 'noe' import unittest import numpy as np import time from bhmm import hidden from bhmm.output_models.gaussian import GaussianOutputModel print_speedup = False class TestHidden(unittest.TestCase): def setUp(self): self.nexamples = 0 self.A = [] self.pi = [] self.pobs = [] self.T = [] self.N = [] self.logprob = [] self.alpha = [] self.time_alpha = [] self.beta = [] self.time_beta = [] self.gamma = [] self.time_gamma = [] self.c = [] self.time_c = [] self.C = [] self.time_C = [] self.vpath = [] self.time_vpath = [] self.alpha_mem = [] self.beta_mem = [] self.gamma_mem = [] self.C_mem = [] # first toy example A = np.array([[0.9, 0.1], [0.1, 0.9]]) pi = np.array([0.5, 0.5]) pobs = np.array([[0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.1, 0.9], [0.5, 0.5], [0.9, 0.1], [0.9, 0.1], [0.9, 0.1], [0.9, 0.1], [0.9, 0.1]]) self.append_example(A, pi, pobs) # second example A = np.array([[0.97, 0.02, 0.01], [0.1, 0.8, 0.1], [0.01, 0.02, 0.97]]) pi = np.array([0.45, 0.1, 0.45]) T = 10000 means = np.array([-1.0, 0.0, 1.0]) sigmas = np.array([0.5, 0.5, 0.5]) gom = GaussianOutputModel(3, means=means, sigmas=sigmas) obs = np.random.randint(3, size=T) pobs = gom.p_obs(obs) self.append_example(A, pi, pobs) def append_example(self, A, pi, pobs): i = len(self.A) self.A.append(A) self.pi.append(pi) self.pobs.append(pobs) self.N.append(A.shape[0]) self.T.append(pobs.shape[0]) (logprob, alpha, t) = self.run_forward(i, 'python', None) self.logprob.append(logprob) self.alpha.append(alpha) self.time_alpha.append(t) (beta, t) = self.run_backward(i, 'python', None) self.beta.append(beta) self.time_beta.append(t) (gamma, t) = self.run_gamma(i, 'python', None) self.gamma.append(gamma) self.time_gamma.append(t) (c, t) = self.run_state_counts(i, 'python', None) self.c.append(c) self.time_c.append(t) (C, t) = self.run_transition_counts(i, 'python', None) self.C.append(C) self.time_C.append(t) (vpath, t) = self.run_viterbi(i, 'python', None) self.vpath.append(vpath) self.time_vpath.append(t) # self.alpha_mem.append(np.zeros((pobs.shape[0],A.shape[0]))) self.beta_mem.append(np.zeros((pobs.shape[0],A.shape[0]))) self.gamma_mem.append(np.zeros((pobs.shape[0],A.shape[0]))) self.C_mem.append(np.zeros((A.shape[0],A.shape[0]))) self.nexamples += 1 def run_all(self, A, pobs, pi): # forward logprob, alpha = hidden.forward(A, pobs, pi) # backward beta = hidden.backward(A, pobs) # gamma gamma = hidden.state_probabilities(alpha, beta) # state counts T = pobs.shape[0] statecount = hidden.state_counts(gamma, T) # transition counts C = hidden.transition_counts(alpha, beta, A, pobs) # viterbi path vpath = hidden.viterbi(A, pobs, pi) # return return (logprob, alpha, beta, gamma, statecount, C, vpath) def run_all_mem(self, A, pobs, pi): T = pobs.shape[0] N = A.shape[0] alpha = np.zeros( (T,N) ) beta = np.zeros( (T,N) ) gamma = np.zeros( (T,N) ) C = np.zeros( (N,N) ) logprob, alpha = hidden.forward(A, pobs, pi, alpha_out = alpha) # backward hidden.backward(A, pobs, beta_out = beta) # gamma hidden.state_probabilities(alpha, beta, gamma_out = gamma) # state counts statecount = hidden.state_counts(gamma, T) # transition counts hidden.transition_counts(alpha, beta, A, pobs, out=self.C) # viterbi path vpath = hidden.viterbi(A, pobs, pi) # return return (logprob, alpha, beta, gamma, statecount, C, vpath) def tearDown(self): pass def run_forward(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) logprob = 0 alpha = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): logprob, alpha = hidden.forward(self.A[i], 
self.pobs[i], self.pi[i], alpha_out=out) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (logprob, alpha, d) def run_backward(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) beta = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): beta = hidden.backward(self.A[i], self.pobs[i], beta_out=out) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (beta, d) def run_gamma(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) gamma = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): gamma = hidden.state_probabilities(self.alpha[i], self.beta[i], gamma_out=out) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (gamma, d) def run_state_counts(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) c = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): c = hidden.state_counts(self.gamma[i], self.T[i]) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (c, d) def run_transition_counts(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) C = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): C = hidden.transition_counts(self.alpha[i], self.beta[i], self.A[i], self.pobs[i], out=out) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (C, d) def run_viterbi(self, i, kernel, out): nrep = max(1, int(10000/self.T[i])) vpath = None hidden.set_implementation(kernel) time1 = time.time() for k in range(nrep): vpath = hidden.viterbi(self.A[i], self.pobs[i], self.pi[i]) # compare time2 = time.time() d = (time2-time1)/(1.0*nrep) return (vpath, d) def run_abs(self, call, kernel): """ Reference. Just computes the time """ for i in range(self.nexamples): res = call(i, kernel, None) if (print_speedup): print('\t'+str(call.__name__)+'\t Example '+str(i)+'\t Impl = '+str(kernel)+' Time = '+str(res[-1])) def run_comp(self, call, kernel, outs, refs, reftime): """ Reference. 
Just computes the time """ for i in range(self.nexamples): if (outs is None): res = call(i, kernel, None) else: res = call(i, kernel, outs[i]) for j in range(len(res)-1): myres = res[j] refres = refs[j][i] self.assertTrue(np.allclose(myres, refres)) if outs is None: pkernel = kernel else: pkernel = kernel + ' mem' if (print_speedup): print('\t'+str(call.__name__)+'\t Example '+str(i)+'\t Impl = '+pkernel+' Speedup = '+str(reftime[i]/res[-1])) def test_forward_p(self): self.run_abs(self.run_forward, 'python') def test_forward_p_mem(self): self.run_comp(self.run_forward, 'python', self.alpha_mem, [self.logprob, self.alpha], self.time_alpha) def test_forward_c(self): self.run_comp(self.run_forward, 'c', None, [self.logprob, self.alpha], self.time_alpha) def test_forward_c_mem(self): self.run_comp(self.run_forward, 'c', self.alpha_mem, [self.logprob, self.alpha], self.time_alpha) def test_backward_p(self): self.run_abs(self.run_backward, 'python') def test_backward_p_mem(self): self.run_comp(self.run_backward, 'python', self.beta_mem, [self.beta], self.time_beta) def test_backward_c(self): self.run_comp(self.run_backward, 'c', None, [self.beta], self.time_beta) def test_backward_c_mem(self): self.run_comp(self.run_backward, 'c', self.beta_mem, [self.beta], self.time_beta) def test_gamma_p(self): self.run_abs(self.run_gamma, 'python') def test_gamma_p_mem(self): self.run_comp(self.run_gamma, 'python', self.gamma_mem, [self.gamma], self.time_gamma) def test_gamma_c(self): self.run_comp(self.run_gamma, 'c', None, [self.gamma], self.time_gamma) def test_gamma_c_mem(self): self.run_comp(self.run_gamma, 'c', self.gamma_mem, [self.gamma], self.time_gamma) def test_state_counts_p(self): self.run_abs(self.run_state_counts, 'python') def test_state_counts_p_mem(self): self.run_comp(self.run_state_counts, 'python', None, [self.c], self.time_c) def test_state_counts_c(self): self.run_comp(self.run_state_counts, 'c', None, [self.c], self.time_c) def test_state_counts_c_mem(self): self.run_comp(self.run_state_counts, 'c', None, [self.c], self.time_c) def test_transition_counts_p(self): self.run_abs(self.run_transition_counts, 'python') def test_transition_counts_p_mem(self): self.run_comp(self.run_transition_counts, 'python', self.C_mem, [self.C], self.time_C) def test_transition_counts_c(self): self.run_comp(self.run_transition_counts, 'c', None, [self.C], self.time_C) def test_transition_counts_c_mem(self): self.run_comp(self.run_transition_counts, 'c', self.C_mem, [self.C], self.time_C) def test_viterbi_p(self): self.run_abs(self.run_viterbi, 'python') def test_viterbi_p_mem(self): self.run_comp(self.run_viterbi, 'python', None, [self.vpath], self.time_vpath) def test_viterbi_c(self): self.run_comp(self.run_viterbi, 'c', None, [self.vpath], self.time_vpath) def test_viterbi_c_mem(self): self.run_comp(self.run_viterbi, 'c', None, [self.vpath], self.time_vpath) def test_fbtime_p_mem(self): for i in range(self.nexamples): ttot = 0.0 (logprob, alpha, t) = self.run_forward(i, 'python', self.alpha_mem[i]) ttot += t (beta, t) = self.run_backward(i, 'python', self.beta_mem[i]) ttot += t (gamma, t) = self.run_gamma(i, 'python', self.gamma_mem[i]) ttot += t (c, t) = self.run_state_counts(i, 'python', None) ttot += t (C, t) = self.run_transition_counts(i, 'python', self.C_mem[i]) ttot += t tref = self.time_alpha[i] + self.time_beta[i] + self.time_gamma[i] + self.time_c[i] + self.time_C[i] if (print_speedup): print ('TOTAL speedup forward-backward example '+str(i)+'\t impl=python mem: \t'+str(tref/ttot)) def 
test_fbtime_c(self): for i in range(self.nexamples): ttot = 0.0 (logprob, alpha, t) = self.run_forward(i, 'c', None) ttot += t (beta, t) = self.run_backward(i, 'c', None) ttot += t (gamma, t) = self.run_gamma(i, 'c', None) ttot += t (c, t) = self.run_state_counts(i, 'c', None) ttot += t (C, t) = self.run_transition_counts(i, 'c', None) ttot += t tref = self.time_alpha[i] + self.time_beta[i] + self.time_gamma[i] + self.time_c[i] + self.time_C[i] if (print_speedup): print ('TOTAL speedup forward-backward example '+str(i)+'\t impl=c: \t'+str(tref/ttot)) def test_fbtime_c_mem(self): for i in range(self.nexamples): ttot = 0.0 (logprob, alpha, t) = self.run_forward(i, 'c', self.alpha_mem[i]) ttot += t (beta, t) = self.run_backward(i, 'c', self.beta_mem[i]) ttot += t (gamma, t) = self.run_gamma(i, 'c', self.gamma_mem[i]) ttot += t (c, t) = self.run_state_counts(i, 'c', None) ttot += t (C, t) = self.run_transition_counts(i, 'c', self.C_mem[i]) ttot += t tref = self.time_alpha[i] + self.time_beta[i] + self.time_gamma[i] + self.time_c[i] + self.time_C[i] if (print_speedup): print ('TOTAL speedup forward-backward example '+str(i)+'\t impl=c mem: \t'+str(tref/ttot)) if __name__=="__main__": unittest.main()
bhmm/legacy-bhmm-force-spectroscopy-manuscript
bhmm/tests/test_hidden.py
Python
lgpl-3.0
13,255
[ "Gaussian" ]
400f611053b5bb4bf57826dfe74712f875e8fca40d649eed028ac47a8fdc9e88
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk

class vtkXMLStructuredGridReader(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkXMLStructuredGridReader(), 'Reading vtkXMLStructuredGrid.',
            (), ('vtkXMLStructuredGrid',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
nagyistoce/devide
modules/vtk_basic/vtkXMLStructuredGridReader.py
Python
bsd-3-clause
520
[ "VTK" ]
9d6662f240b09233b832cde1c3b728908844667f6d7022e14b2fe13f2a6bf5eb
"""An NNTP client class based on RFC 977: Network News Transfer Protocol. Example: >>> from nntplib import NNTP >>> s = NNTP('news') >>> resp, count, first, last, name = s.group('comp.lang.python') >>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last Group comp.lang.python has 51 articles, range 5770 to 5821 >>> resp, subs = s.xhdr('subject', first + '-' + last) >>> resp = s.quit() >>> Here 'resp' is the server response line. Error responses are turned into exceptions. To post an article from a file: >>> f = open(filename, 'r') # file containing article, including header >>> resp = s.post(f) >>> For descriptions of all methods, read the comments in the code below. Note that all arguments and return values representing article numbers are strings, not numbers, since they are rarely used for calculations. """ # RFC 977 by Brian Kantor and Phil Lapsley. # xover, xgtitle, xpath, date methods by Kevan Heydon # Imports import re import socket import types __all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError", "NNTPPermanentError","NNTPProtocolError","NNTPDataError", "error_reply","error_temp","error_perm","error_proto", "error_data",] # Exceptions raised when an error or invalid response is received class NNTPError(Exception): """Base class for all nntplib exceptions""" def __init__(self, *args): apply(Exception.__init__, (self,)+args) try: self.response = args[0] except IndexError: self.response = 'No response given' class NNTPReplyError(NNTPError): """Unexpected [123]xx reply""" pass class NNTPTemporaryError(NNTPError): """4xx errors""" pass class NNTPPermanentError(NNTPError): """5xx errors""" pass class NNTPProtocolError(NNTPError): """Response does not begin with [1-5]""" pass class NNTPDataError(NNTPError): """Error in response data""" pass # for backwards compatibility error_reply = NNTPReplyError error_temp = NNTPTemporaryError error_perm = NNTPPermanentError error_proto = NNTPProtocolError error_data = NNTPDataError # Standard port used by NNTP servers NNTP_PORT = 119 # Response numbers that are followed by additional text (e.g. article) LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282'] # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) CRLF = '\r\n' # The class itself class NNTP: def __init__(self, host, port=NNTP_PORT, user=None, password=None, readermode=None): """Initialize an instance. Arguments: - host: hostname to connect to - port: port to connect to (default the standard NNTP port) - user: username to authenticate with - password: password to use with username - readermode: if true, send 'mode reader' command after connecting. readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call reader-specific comamnds, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ self.host = host self.port = port self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, self.port)) self.file = self.sock.makefile('rb') self.debugging = 0 self.welcome = self.getresp() # 'mode reader' is sometimes necessary to enable 'reader' mode. # However, the order in which 'mode reader' and 'authinfo' need to # arrive differs between some NNTP servers. Try to send # 'mode reader', and if it fails with an authorization failed # error, try again after sending authinfo. 
readermode_afterauth = 0 if readermode: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass except NNTPTemporaryError, e: if user and e.response[:3] == '480': # Need authorization before 'mode reader' readermode_afterauth = 1 else: raise if user: resp = self.shortcmd('authinfo user '+user) if resp[:3] == '381': if not password: raise NNTPReplyError(resp) else: resp = self.shortcmd( 'authinfo pass '+password) if resp[:3] != '281': raise NNTPPermanentError(resp) if readermode_afterauth: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass # Get the welcome message from the server # (this is read and squirreled away by __init__()). # If the response code is 200, posting is allowed; # if it 201, posting is not allowed def getwelcome(self): """Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.""" if self.debugging: print '*welcome*', `self.welcome` return self.welcome def set_debuglevel(self, level): """Set the debugging level. Argument 'level' means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF""" self.debugging = level debug = set_debuglevel def putline(self, line): """Internal: send one line to the server, appending CRLF.""" line = line + CRLF if self.debugging > 1: print '*put*', `line` self.sock.send(line) def putcmd(self, line): """Internal: send one command to the server (through putline()).""" if self.debugging: print '*cmd*', `line` self.putline(line) def getline(self): """Internal: return one line from the server, stripping CRLF. Raise EOFError if the connection is closed.""" line = self.file.readline() if self.debugging > 1: print '*get*', `line` if not line: raise EOFError if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] return line def getresp(self): """Internal: get a response from the server. Raise various errors if the response indicates an error.""" resp = self.getline() if self.debugging: print '*resp*', `resp` c = resp[:1] if c == '4': raise NNTPTemporaryError(resp) if c == '5': raise NNTPPermanentError(resp) if c not in '123': raise NNTPProtocolError(resp) return resp def getlongresp(self, file=None): """Internal: get a response plus following text from the server. Raise various errors if the response indicates an error.""" openedFile = None try: # If a string was passed then open a file with that name if isinstance(file, types.StringType): openedFile = file = open(file, "w") resp = self.getresp() if resp[:3] not in LONGRESP: raise NNTPReplyError(resp) list = [] while 1: line = self.getline() if line == '.': break if line[:2] == '..': line = line[1:] if file: file.write(line + "\n") else: list.append(line) finally: # If this method created the file, then it must close it if openedFile: openedFile.close() return resp, list def shortcmd(self, line): """Internal: send a command and get the response.""" self.putcmd(line) return self.getresp() def longcmd(self, line, file=None): """Internal: send a command and get the response plus following text.""" self.putcmd(line) return self.getlongresp(file) def newgroups(self, date, time): """Process a NEWGROUPS command. 
Arguments: - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of newsgroup names""" return self.longcmd('NEWGROUPS ' + date + ' ' + time) def newnews(self, group, date, time): """Process a NEWNEWS command. Arguments: - group: group name or '*' - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of article ids""" cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time return self.longcmd(cmd) def list(self): """Process a LIST command. Return: - resp: server response if successful - list: list of (group, last, first, flag) (strings)""" resp, list = self.longcmd('LIST') for i in range(len(list)): # Parse lines into "group last first flag" list[i] = tuple(list[i].split()) return resp, list def group(self, name): """Process a GROUP command. Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name""" resp = self.shortcmd('GROUP ' + name) if resp[:3] != '211': raise NNTPReplyError(resp) words = resp.split() count = first = last = 0 n = len(words) if n > 1: count = words[1] if n > 2: first = words[2] if n > 3: last = words[3] if n > 4: name = words[4].lower() return resp, count, first, last, name def help(self): """Process a HELP command. Returns: - resp: server response if successful - list: list of strings""" return self.longcmd('HELP') def statparse(self, resp): """Internal: parse the response of a STAT, NEXT or LAST command.""" if resp[:2] != '22': raise NNTPReplyError(resp) words = resp.split() nr = 0 id = '' n = len(words) if n > 1: nr = words[1] if n > 2: id = words[2] return resp, nr, id def statcmd(self, line): """Internal: process a STAT, NEXT or LAST command.""" resp = self.shortcmd(line) return self.statparse(resp) def stat(self, id): """Process a STAT command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: the article number - id: the article id""" return self.statcmd('STAT ' + id) def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self.statcmd('NEXT') def last(self): """Process a LAST command. No arguments. Return as for STAT.""" return self.statcmd('LAST') def artcmd(self, line, file=None): """Internal: process a HEAD, BODY or ARTICLE command.""" resp, list = self.longcmd(line, file) resp, nr, id = self.statparse(resp) return resp, nr, id, list def head(self, id): """Process a HEAD command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's header""" return self.artcmd('HEAD ' + id) def body(self, id, file=None): """Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used""" return self.artcmd('BODY ' + id, file) def article(self, id): """Process an ARTICLE command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article""" return self.artcmd('ARTICLE ' + id) def slave(self): """Process a SLAVE command. 
Returns: - resp: server response if successful""" return self.shortcmd('SLAVE') def xhdr(self, hdr, str): """Process an XHDR command (optional server extension). Arguments: - hdr: the header type (e.g. 'subject') - str: an article nr, a message id, or a range nr1-nr2 Returns: - resp: server response if successful - list: list of (nr, value) strings""" pat = re.compile('^([0-9]+) ?(.*)\n?') resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str) for i in range(len(lines)): line = lines[i] m = pat.match(line) if m: lines[i] = m.group(1, 2) return resp, lines def xover(self,start,end): """Process an XOVER command (optional server extension) Arguments: - start: start of range - end: end of range Returns: - resp: server response if successful - list: list of (art-nr, subject, poster, date, id, references, size, lines)""" resp, lines = self.longcmd('XOVER ' + start + '-' + end) xover_lines = [] for line in lines: elem = line.split("\t") try: xover_lines.append((elem[0], elem[1], elem[2], elem[3], elem[4], elem[5].split(), elem[6], elem[7])) except IndexError: raise NNTPDataError(line) return resp,xover_lines def xgtitle(self, group): """Process an XGTITLE command (optional server extension) Arguments: - group: group name wildcard (i.e. news.*) Returns: - resp: server response if successful - list: list of (name,title) strings""" line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$") resp, raw_lines = self.longcmd('XGTITLE ' + group) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def xpath(self,id): """Process an XPATH command (optional server extension) Arguments: - id: Message id of article Returns: resp: server response if successful path: directory path to article""" resp = self.shortcmd("XPATH " + id) if resp[:3] != '223': raise NNTPReplyError(resp) try: [resp_num, path] = resp.split() except ValueError: raise NNTPReplyError(resp) else: return resp, path def date (self): """Process the DATE command. Arguments: None Returns: resp: server response if successful date: Date suitable for newnews/newgroups commands etc. time: Time suitable for newnews/newgroups commands etc.""" resp = self.shortcmd("DATE") if resp[:3] != '111': raise NNTPReplyError(resp) elem = resp.split() if len(elem) != 2: raise NNTPDataError(resp) date = elem[1][2:8] time = elem[1][-6:] if len(date) != 6 or len(time) != 6: raise NNTPDataError(resp) return resp, date, time def post(self, f): """Process a POST command. Arguments: - f: file containing the article Returns: - resp: server response if successful""" resp = self.shortcmd('POST') # Raises error_??? if posting is not allowed if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def ihave(self, id, f): """Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.""" resp = self.shortcmd('IHAVE ' + id) # Raises error_??? if the server already has it if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def quit(self): """Process a QUIT command and close the socket. 
Returns: - resp: server response if successful""" resp = self.shortcmd('QUIT') self.file.close() self.sock.close() del self.file, self.sock return resp def _test(): """Minimal test function.""" s = NNTP('news', readermode='reader') resp, count, first, last, name = s.group('comp.lang.python') print resp print 'Group', name, 'has', count, 'articles, range', first, 'to', last resp, subs = s.xhdr('subject', first + '-' + last) print resp for item in subs: print "%7s %s" % item resp = s.quit() print resp # Run the test when run as a script if __name__ == '__main__': _test()
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-2.2/Lib/nntplib.py
Python
mit
18,773
[ "Brian" ]
2adeba1d1654ddc6be86d9afcc62487ed0d1c1e87542efb6dc570e41709431bd
""" Test class for JobWrapper """ #pylint: disable=protected-access, invalid-name # imports import unittest import importlib import os import shutil from mock import MagicMock, patch from DIRAC import gLogger from DIRAC.DataManagementSystem.Client.test.mock_DM import dm_mock from DIRAC.Resources.Catalog.test.mock_FC import fc_mock from DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper import JobWrapper from DIRAC.WorkloadManagementSystem.JobWrapper.WatchdogLinux import WatchdogLinux getSystemSectionMock = MagicMock() getSystemSectionMock.return_value = 'aValue' class JobWrapperTestCase( unittest.TestCase ): """ Base class for the JobWrapper test cases """ def setUp( self ): gLogger.setLevel( 'DEBUG' ) def tearDown( self ): for f in ['std.out']: try: os.remove(f) except OSError: pass class JobWrapperTestCaseSuccess( JobWrapperTestCase ): def test_InputData( self ): myJW = importlib.import_module( 'DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper' ) myJW.getSystemSection = MagicMock() myJW.ModuleFactory = MagicMock() jw = JobWrapper() jw.jobArgs['InputData'] = '' res = jw.resolveInputData() self.assertFalse( res['OK'] ) jw = JobWrapper() jw.jobArgs['InputData'] = 'pippo' jw.dm = dm_mock jw.fc = fc_mock res = jw.resolveInputData() self.assertTrue( res['OK'] ) jw = JobWrapper() jw.jobArgs['InputData'] = 'pippo' jw.jobArgs['LocalSE'] = 'mySE' jw.jobArgs['InputDataModule'] = 'aa.bb' jw.dm = dm_mock jw.fc = fc_mock res = jw.resolveInputData() self.assertTrue( res['OK'] ) def test__performChecks( self ): wd = WatchdogLinux( pid = os.getpid(), exeThread = MagicMock(), spObject = MagicMock(), jobCPUTime = 1000, memoryLimit = 1024 * 1024, jobArgs = { 'StopSigNumber' : 10 } ) res = wd._performChecks() self.assertTrue( res['OK'] ) @patch( "DIRAC.WorkloadManagementSystem.JobWrapper.JobWrapper.getSystemSection", side_effect = getSystemSectionMock ) @patch( "DIRAC.WorkloadManagementSystem.JobWrapper.Watchdog.getSystemInstance", side_effect = getSystemSectionMock ) def test_execute(self, _patch1, _patch2): jw = JobWrapper() jw.jobArgs = {'Executable':'/bin/ls'} res = jw.execute('') print 'jw.execute() returns',str(res) self.assertTrue( res['OK'] ) shutil.copy('WorkloadManagementSystem/JobWrapper/test/script-OK.sh', 'script-OK.sh') jw = JobWrapper() jw.jobArgs = {'Executable':'script-OK.sh'} res = jw.execute('') self.assertTrue( res['OK'] ) os.remove('script-OK.sh') shutil.copy('WorkloadManagementSystem/JobWrapper/test/script.sh', 'script.sh') jw = JobWrapper() jw.jobArgs = {'Executable':'script.sh', 'Arguments':'111'} res = jw.execute('') self.assertTrue( res['OK'] ) # In this case the application finished with errors, # but the JobWrapper executed successfully os.remove('script.sh') shutil.copy('WorkloadManagementSystem/JobWrapper/test/script-RESC.sh', 'script-RESC.sh') #this will reschedule jw = JobWrapper() jw.jobArgs = {'Executable':'script-RESC.sh'} res = jw.execute('') if res['OK']: # FIXME: This may happen depending on the shell - not the best test admittedly! 
print "We should not be here, unless the 'Execution thread status' is equal to 1" self.assertTrue( res['OK'] ) else: self.assertFalse( res['OK'] ) # In this case the application finished with an error code # that the JobWrapper interpreted as "to reschedule" # so in this case the "execute" is considered an error os.remove('script-RESC.sh') ############################################################################# # Test Suite run ############################################################################# if __name__ == '__main__': suite = unittest.defaultTestLoader.loadTestsFromTestCase( JobWrapperTestCase ) suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobWrapperTestCaseSuccess ) ) testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite ) # EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
andresailer/DIRAC
WorkloadManagementSystem/JobWrapper/test/Test_JobWrapper.py
Python
gpl-3.0
4,402
[ "DIRAC" ]
3c53fe8c691471697a1c1e625aeccfc2fa266754588c0fae51776a34d060f2a8
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # nettest_nodes - [insert a few words of module description on this line] # Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter # # This file is part of MiG. # # MiG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # MiG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # -- END_HEADER --- # """ Created by Jan Wiberg on 2010-03-29. Copyright (c) 2010 __MyCompanyName__. All rights reserved. """ import sys, time, os from core.entities import * from core.configuration import * import core.kernel as kernel import yappi def main(): # yappi.start() if len(sys.argv) < 2: print "Must supply node id [0,1,2] as argument" sys.exit(1) no = sys.argv[1] do_write = len(sys.argv) >= 3 opt = Configuration() node_id = 8000 + int(no) opt.backingstore = '/tmp/node%s' % node_id if node_id > 8000: # anti-idiot opt.initial_connect_list = [('ubuntu1', 8000)] opt.backingstorestate = '../tests/nettest_node%s.bsc' % no opt.serverport = node_id opt.logverbosity = 3 # opt.maxcopies = 0 # opt.mincopies = 0 opt.validate() k = kernel.Kernel(opt) k.fsinit() print "Instance started" # instance started, now start other instances and see what happens wait_time = (2 if opt.mincopies == 0 else 15) print "Sleeping %d seconds to allow others to get off the ground" % wait_time time.sleep(wait_time / 2) print "Attempting actions" try: print "!! Getattr on '/': %s" % k.getattr("/", None) print "!! Readdir on '/': %s" % k.readdir("/", 0, None) f = GRSFile('/hello', os.O_RDONLY) assert f.file is not None and f.file >= 1 print "!! Read: ", f.read(-1, 0) print "!! Success on last test" attrs = f.fgetattr() print "!! Fgetattrs %s" % attrs if not do_write or k.state.get_instancetype() > 1: print "!!!! No writes requested or not master" time.sleep(90) return opt.maxcopies = 1 opt.mincopies = 1 import random filename = '/file%d' % random.randint(1000, 9999) print "!! Writing to %s" % filename w_f = GRSFile(filename, os.O_CREAT|os.O_WRONLY) # print "Errno EROFS %d" % errno.EROFS assert w_f.file is not None and w_f.file >= 1 w_f.write("Some string goes here %s\n" % random.randint(0, 10000000), 0) w_f.flush() w_f.release(w_f.open_args[1]) r_f = GRSFile(filename, os.O_RDONLY) assert r_f.file is not None and r_f.file >= 1 print "!! Read: ", r_f.read(-1, 0) r_f.release(r_f.open_args[1]) k.utime("/hello", (time.time(), time.time()), None) finally: k.fshalt() if __name__ == '__main__': main()
heromod/migrid
mig/grsfs-fuse/fs/nettest_nodes.py
Python
gpl-2.0
3,508
[ "Brian" ]
010e9a2deb930e2488723a479ffcf26a5e6a96446114a8e78781c7f87a6a8602
"""Provide analysis of input files by chromosomal regions. Handle splitting and analysis of files from chromosomal subsets separated by no-read regions. """ import collections import os import toolz as tz from bcbio import utils from bcbio.distributed.split import parallel_split_combine from bcbio.pipeline import datadict as dd def get_max_counts(samples): """Retrieve number of regions that can be processed in parallel from current samples. """ counts = [] for data in (x[0] for x in samples): count = tz.get_in(["config", "algorithm", "callable_count"], data, 1) vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, []) if isinstance(vcs, basestring): vcs = [vcs] if vcs: count *= len(vcs) counts.append(count) return max(counts) # ## BAM preparation def to_safestr(region): if region[0] in ["nochrom", "noanalysis"]: return region[0] else: return "_".join([str(x) for x in region]) # ## Split and delayed BAM combine def _split_by_regions(dirname, out_ext, in_key): """Split a BAM file data analysis into chromosomal regions. """ def _do_work(data): # XXX Need to move retrieval of regions into preparation to avoid # need for files when running in non-shared filesystems regions = _get_parallel_regions(data) def _sort_by_size(region): _, start, end = region return end - start regions.sort(key=_sort_by_size, reverse=True) bam_file = data[in_key] if bam_file is None: return None, [] part_info = [] base_out = os.path.splitext(os.path.basename(bam_file))[0] nowork = [["nochrom"], ["noanalysis", data["config"]["algorithm"]["non_callable_regions"]]] for region in regions + nowork: out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0]) region_outfile = os.path.join(out_dir, "%s-%s%s" % (base_out, to_safestr(region), out_ext)) part_info.append((region, region_outfile)) out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], "%s%s" % (base_out, out_ext)) return out_file, part_info return _do_work def _get_parallel_regions(data): """Retrieve regions to run in parallel, putting longest intervals first. """ callable_regions = tz.get_in(["config", "algorithm", "callable_regions"], data) if not callable_regions: raise ValueError("Did not find any callable regions for sample: %s\n" "Check 'align/%s/*-callableblocks.bed' and 'regions' to examine callable regions" % (dd.get_sample_name(data), dd.get_sample_name(data))) with open(callable_regions) as in_handle: regions = [(xs[0], int(xs[1]), int(xs[2])) for xs in (l.rstrip().split("\t") for l in in_handle) if (len(xs) >= 3 and not xs[0].startswith(("track", "browser",)))] return regions def get_parallel_regions(batch): """CWL target to retrieve a list of callable regions for parallelization. """ samples = [utils.to_single_data(d) for d in batch] regions = _get_parallel_regions(samples[0]) return [{"region": "%s:%s-%s" % (c, s, e)} for c, s, e in regions] def get_parallel_regions_block(batch): """CWL target to retrieve block group of callable regions for parallelization. Uses blocking to handle multicore runs. """ samples = [utils.to_single_data(d) for d in batch] regions = _get_parallel_regions(samples[0]) out = [] # Currently don't have core information here so aim for about 10 items per partition n = 10 for region_block in tz.partition_all(n, regions): out.append({"region_block": ["%s:%s-%s" % (c, s, e) for c, s, e in region_block]}) return out def _add_combine_info(output, combine_map, file_key): """Do not actually combine, but add details for later combining work. 
Each sample will contain information on the out file and additional files to merge, enabling other splits and recombines without losing information. """ files_per_output = collections.defaultdict(list) for part_file, out_file in combine_map.items(): files_per_output[out_file].append(part_file) out_by_file = collections.defaultdict(list) out = [] for data in output: # Do not pass along nochrom, noanalysis regions if data["region"][0] not in ["nochrom", "noanalysis"]: cur_file = data[file_key] # If we didn't process, no need to add combine information if cur_file in combine_map: out_file = combine_map[cur_file] if "combine" not in data: data["combine"] = {} data["combine"][file_key] = {"out": out_file, "extras": files_per_output.get(out_file, [])} out_by_file[out_file].append(data) elif cur_file: out_by_file[cur_file].append(data) else: out.append([data]) for samples in out_by_file.values(): regions = [x["region"] for x in samples] region_bams = [x["work_bam"] for x in samples] assert len(regions) == len(region_bams) if len(set(region_bams)) == 1: region_bams = [region_bams[0]] data = samples[0] data["region_bams"] = region_bams data["region"] = regions out.append([data]) return out def parallel_prep_region(samples, run_parallel): """Perform full pre-variant calling BAM prep work on regions. """ file_key = "work_bam" split_fn = _split_by_regions("bamprep", "-prep.bam", file_key) # identify samples that do not need preparation -- no recalibration or realignment extras = [] torun = [] for data in [x[0] for x in samples]: if data.get("work_bam"): data["align_bam"] = data["work_bam"] if (not dd.get_realign(data) and not dd.get_variantcaller(data)): extras.append([data]) elif not data.get(file_key): extras.append([data]) else: torun.append([data]) return extras + parallel_split_combine(torun, split_fn, run_parallel, "piped_bamprep", _add_combine_info, file_key, ["config"]) def delayed_bamprep_merge(samples, run_parallel): """Perform a delayed merge on regional prepared BAM files. """ if any("combine" in data[0] for data in samples): return run_parallel("delayed_bam_merge", samples) else: return samples # ## Utilities def clean_sample_data(samples): """Clean unnecessary information from sample data, reducing size for message passing. """ out = [] for data in (x[0] for x in samples): if "dirs" in data: data["dirs"] = {"work": data["dirs"]["work"], "galaxy": data["dirs"]["galaxy"], "fastq": data["dirs"].get("fastq")} data["config"] = {"algorithm": data["config"]["algorithm"], "resources": data["config"]["resources"]} for remove_attr in ["config_file", "algorithm"]: data.pop(remove_attr, None) out.append([data]) return out
biocyberman/bcbio-nextgen
bcbio/pipeline/region.py
Python
mit
7,498
[ "Galaxy" ]
6f66013cccbfd84543d54e974909d4586cd909405e3c600ac952cf7d4527bd02
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visitor restricting traversal to only the public tensorflow API."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect


class PublicAPIVisitor(object):
  """Visitor to use with `traverse` to visit exactly the public TF API."""

  def __init__(self, visitor):
    """Constructor.

    `visitor` should be a callable suitable as a visitor for `traverse`.
    It will be called only for members of the public TensorFlow API.

    Args:
      visitor: A visitor to call for the public API.
    """
    self._visitor = visitor

  # Modules/classes we do not want to descend into if we hit them. Usually,
  # sytem modules exposed through platforms for compatibility reasons.
  # Each entry maps a module path to a name to ignore in traversal.
  _do_not_descend_map = {
      '': [
          'core',
          'examples',
          'flags',  # Don't add flags
          'platform',  # TODO(drpng): This can be removed once sealed off.
          'pywrap_tensorflow',  # TODO(drpng): This can be removed once sealed.
          'user_ops',  # TODO(drpng): This can be removed once sealed.
          'python',
          'tools'
      ],

      # Some implementations have this internal module that we shouldn't
      # expose.
      'flags': ['cpp_flags'],

      # Everything below here is legitimate.
      'app': ['flags'],  # It'll stay, but it's not officially part of the API.
      'test': ['mock'],  # Imported for compatibility between py2/3.
  }

  @property
  def do_not_descend_map(self):
    """A map from parents to symbols that should not be descended into.

    This map can be edited, but it should not be edited once traversal has
    begun.

    Returns:
      The map marking symbols to not explore.
    """
    return self._do_not_descend_map

  def _isprivate(self, name):
    """Return whether a name is private."""
    # TODO(wicke): We have to almost certainly add more exceptions than init.
    return name.startswith('_') and name not in ['__init__']

  def _do_not_descend(self, path, name):
    """Safely queries if a specific fully qualified name should be excluded."""
    return (path in self._do_not_descend_map and
            name in self._do_not_descend_map[path])

  def __call__(self, path, parent, children):
    """Visitor interface, see `traverse` for details."""

    # Avoid long waits in cases of pretty unambiguous failure.
    if inspect.ismodule(parent) and len(path.split('.')) > 10:
      raise RuntimeError('Modules nested too deep:\n%s\n\nThis is likely a '
                         'problem with an accidental public import.' % path)

    # Remove things that are not visible.
    for name, child in list(children):
      if self._isprivate(name):
        children.remove((name, child))

    self._visitor(path, parent, children)

    # Remove things that are visible, but which should not be descended into.
    for name, child in list(children):
      if self._do_not_descend(path, name):
        children.remove((name, child))
tntnatbry/tensorflow
tensorflow/tools/common/public_api.py
Python
apache-2.0
3,732
[ "VisIt" ]
d94a342842fa458fe4cedd923cca4e2c55345a526749e0c28b8fe6c0bde47581
import numpy as np from gpaw.utilities.blas import gemm from gpaw.utilities import pack, unpack2 from gpaw.utilities.timing import nulltimer class EmptyWaveFunctions: def __nonzero__(self): return False def set_eigensolver(self, eigensolver): pass def set_orthonormalized(self, flag): pass def estimate_memory(self, mem): mem.set('Unknown WFs', 0) class WaveFunctions(EmptyWaveFunctions): """... setups: List of setup objects. symmetry: Symmetry object. kpt_u: List of **k**-point objects. nbands: int Number of bands. nspins: int Number of spins. dtype: dtype Data type of wave functions (float or complex). bzk_kc: ndarray Scaled **k**-points used for sampling the whole Brillouin zone - values scaled to [-0.5, 0.5). ibzk_kc: ndarray Scaled **k**-points in the irreducible part of the Brillouin zone. weight_k: ndarray Weights of the **k**-points in the irreducible part of the Brillouin zone (summing up to 1). kpt_comm: MPI-communicator for parallelization over **k**-points. """ def __init__(self, gd, nvalence, setups, bd, dtype, world, kd, timer=None): if timer is None: timer = nulltimer self.gd = gd self.nspins = kd.nspins self.nvalence = nvalence self.bd = bd self.nbands = self.bd.nbands #XXX self.mynbands = self.bd.mynbands #XXX self.dtype = dtype self.world = world self.kd = kd self.band_comm = self.bd.comm #XXX self.timer = timer self.rank_a = None # XXX Remember to modify aseinterface when removing the following # attributes from the wfs object self.gamma = kd.gamma self.kpt_comm = kd.comm self.bzk_kc = kd.bzk_kc self.ibzk_kc = kd.ibzk_kc self.ibzk_qc = kd.ibzk_qc self.weight_k = kd.weight_k self.symmetry = kd.symmetry self.nibzkpts = kd.nibzkpts self.kpt_u = kd.create_k_points(self.gd) self.eigensolver = None self.positions_set = False self.set_setups(setups) def set_setups(self, setups): self.setups = setups def set_eigensolver(self, eigensolver): self.eigensolver = eigensolver def __nonzero__(self): return True def calculate_density_contribution(self, nt_sG): """Calculate contribution to pseudo density from wave functions.""" nt_sG.fill(0.0) for kpt in self.kpt_u: self.add_to_density_from_k_point(nt_sG, kpt) self.band_comm.sum(nt_sG) self.kpt_comm.sum(nt_sG) self.timer.start('Symmetrize density') for nt_G in nt_sG: self.symmetry.symmetrize(nt_G, self.gd) self.timer.stop('Symmetrize density') def add_to_density_from_k_point(self, nt_sG, kpt): self.add_to_density_from_k_point_with_occupation(nt_sG, kpt, kpt.f_n) def get_orbital_density_matrix(self, a, kpt, n): """Add the nth band density from kpt to density matrix D_sp""" ni = self.setups[a].ni D_sii = np.zeros((self.nspins, ni, ni)) P_i = kpt.P_ani[a][n] D_sii[kpt.s] += np.outer(P_i.conj(), P_i).real D_sp = [pack(D_ii) for D_ii in D_sii] return D_sp def calculate_atomic_density_matrices_k_point(self, D_sii, kpt, a, f_n): if kpt.rho_MM is not None: P_Mi = kpt.P_aMi[a] #P_Mi = kpt.P_aMi_sparse[a] #ind = get_matrix_index(kpt.P_aMi_index[a]) #D_sii[kpt.s] += np.dot(np.dot(P_Mi.T.conj(), kpt.rho_MM), # P_Mi).real rhoP_Mi = np.zeros_like(P_Mi) D_ii = np.zeros(D_sii[kpt.s].shape, kpt.rho_MM.dtype) #gemm(1.0, P_Mi, kpt.rho_MM[ind.T, ind], 0.0, tmp) gemm(1.0, P_Mi, kpt.rho_MM, 0.0, rhoP_Mi) gemm(1.0, rhoP_Mi, P_Mi.T.conj().copy(), 0.0, D_ii) D_sii[kpt.s] += D_ii.real #D_sii[kpt.s] += dot(dot(P_Mi.T.conj().copy(), # kpt.rho_MM[ind.T, ind]), P_Mi).real else: P_ni = kpt.P_ani[a] D_sii[kpt.s] += np.dot(P_ni.T.conj() * f_n, P_ni).real if hasattr(kpt, 'c_on'): for ne, c_n in zip(kpt.ne_o, kpt.c_on): d_nn = ne * np.outer(c_n.conj(), c_n) D_sii[kpt.s] += 
np.dot(P_ni.T.conj(), np.dot(d_nn, P_ni)).real def calculate_atomic_density_matrices(self, D_asp): """Calculate atomic density matrices from projections.""" f_un = [kpt.f_n for kpt in self.kpt_u] self.calculate_atomic_density_matrices_with_occupation(D_asp, f_un) def calculate_atomic_density_matrices_with_occupation(self, D_asp, f_un): """Calculate atomic density matrices from projections with custom occupation f_un.""" # Varying f_n used in calculation of response part of GLLB-potential for a, D_sp in D_asp.items(): ni = self.setups[a].ni D_sii = np.zeros((self.nspins, ni, ni)) for f_n, kpt in zip(f_un, self.kpt_u): self.calculate_atomic_density_matrices_k_point(D_sii, kpt, a, f_n) D_sp[:] = [pack(D_ii) for D_ii in D_sii] self.band_comm.sum(D_sp) self.kpt_comm.sum(D_sp) self.symmetrize_atomic_density_matrices(D_asp) def symmetrize_atomic_density_matrices(self, D_asp): if len(self.symmetry.op_scc) > 1: all_D_asp = [] for a, setup in enumerate(self.setups): D_sp = D_asp.get(a) if D_sp is None: ni = setup.ni D_sp = np.empty((self.nspins, ni * (ni + 1) // 2)) self.gd.comm.broadcast(D_sp, self.rank_a[a]) all_D_asp.append(D_sp) for s in range(self.nspins): D_aii = [unpack2(D_sp[s]) for D_sp in all_D_asp] for a, D_sp in D_asp.items(): setup = self.setups[a] D_sp[s] = pack(setup.symmetrize(a, D_aii, self.symmetry.a_sa)) def set_positions(self, spos_ac): self.positions_set = False rank_a = self.gd.get_ranks_from_positions(spos_ac) """ # If both old and new atomic ranks are present, start a blank dict if # it previously didn't exist but it will needed for the new atoms. if (self.rank_a is not None and rank_a is not None and self.kpt_u[0].P_ani is None and (rank_a == self.gd.comm.rank).any()): for kpt in self.kpt_u: kpt.P_ani = {} """ if self.rank_a is not None and self.kpt_u[0].P_ani is not None: self.timer.start('Redistribute') requests = [] mynks = len(self.kpt_u) flags = (self.rank_a != rank_a) my_incoming_atom_indices = np.argwhere(np.bitwise_and(flags, \ rank_a == self.gd.comm.rank)).ravel() my_outgoing_atom_indices = np.argwhere(np.bitwise_and(flags, \ self.rank_a == self.gd.comm.rank)).ravel() for a in my_incoming_atom_indices: # Get matrix from old domain: ni = self.setups[a].ni P_uni = np.empty((mynks, self.mynbands, ni), self.dtype) requests.append(self.gd.comm.receive(P_uni, self.rank_a[a], tag=a, block=False)) for myu, kpt in enumerate(self.kpt_u): assert a not in kpt.P_ani kpt.P_ani[a] = P_uni[myu] for a in my_outgoing_atom_indices: # Send matrix to new domain: P_uni = np.array([kpt.P_ani.pop(a) for kpt in self.kpt_u]) requests.append(self.gd.comm.send(P_uni, rank_a[a], tag=a, block=False)) self.gd.comm.waitall(requests) self.timer.stop('Redistribute') self.rank_a = rank_a if self.symmetry is not None: self.symmetry.check(spos_ac) def allocate_arrays_for_projections(self, my_atom_indices): if not self.positions_set and self.kpt_u[0].P_ani is not None: # Projections have been read from file - don't delete them! pass else: for kpt in self.kpt_u: kpt.P_ani = {} for a in my_atom_indices: ni = self.setups[a].ni for kpt in self.kpt_u: kpt.P_ani[a] = np.empty((self.mynbands, ni), self.dtype) def collect_eigenvalues(self, k, s): return self.collect_array('eps_n', k, s) def collect_occupations(self, k, s): return self.collect_array('f_n', k, s) def collect_array(self, name, k, s, subset=None): """Helper method for collect_eigenvalues and collect_occupations. 
For the parallel case find the rank in kpt_comm that contains the (k,s) pair, for this rank, collect on the corresponding domain a full array on the domain master and send this to the global master.""" kpt_u = self.kpt_u kpt_rank, u = self.kd.get_rank_and_index(s, k) if self.kpt_comm.rank == kpt_rank: a_nx = getattr(kpt_u[u], name) if subset is not None: a_nx = a_nx[subset] # Domain master send this to the global master if self.gd.comm.rank == 0: if self.band_comm.size == 1: if kpt_rank == 0: return a_nx else: self.kpt_comm.ssend(a_nx, 0, 1301) else: b_nx = self.bd.collect(a_nx) if self.band_comm.rank == 0: if kpt_rank == 0: return b_nx else: self.kpt_comm.ssend(b_nx, 0, 1301) elif self.world.rank == 0 and kpt_rank != 0: # Find shape and dtype: a_nx = getattr(kpt_u[0], name) shape = (self.nbands,) + a_nx.shape[1:] dtype = a_nx.dtype b_nx = np.zeros(shape, dtype=dtype) self.kpt_comm.receive(b_nx, kpt_rank, 1301) return b_nx def collect_auxiliary(self, value, k, s, shape=1, dtype=float): """Helper method for collecting band-independent scalars/arrays. For the parallel case find the rank in kpt_comm that contains the (k,s) pair, for this rank, collect on the corresponding domain a full array on the domain master and send this to the global master.""" kpt_u = self.kpt_u kpt_rank, u = self.kd.get_rank_and_index(s, k) if self.kpt_comm.rank == kpt_rank: if isinstance(value, str): a_o = getattr(kpt_u[u], value) else: a_o = value[u] # assumed list # Make sure data is a mutable object a_o = np.asarray(a_o) if a_o.dtype is not dtype: a_o = a_o.astype(dtype) # Domain master send this to the global master if self.gd.comm.rank == 0: if kpt_rank == 0: return a_o else: self.kpt_comm.send(a_o, 0, 1302) elif self.world.rank == 0 and kpt_rank != 0: b_o = np.zeros(shape, dtype=dtype) self.kpt_comm.receive(b_o, kpt_rank, 1302) return b_o def collect_projections(self, k, s): """Helper method for collecting projector overlaps across domains. For the parallel case find the rank in kpt_comm that contains the (k,s) pair, for this rank, send to the global master.""" kpt_rank, u = self.kd.get_rank_and_index(s, k) natoms = len(self.rank_a) # it's a hack... nproj = sum([setup.ni for setup in self.setups]) if self.world.rank == 0: if kpt_rank == 0: P_ani = self.kpt_u[u].P_ani mynu = len(self.kpt_u) all_P_ni = np.empty((self.nbands, nproj), self.dtype) for band_rank in range(self.band_comm.size): nslice = self.bd.get_slice(band_rank) i = 0 for a in range(natoms): ni = self.setups[a].ni if kpt_rank == 0 and band_rank == 0 and a in P_ani: P_ni = P_ani[a] else: P_ni = np.empty((self.mynbands, ni), self.dtype) world_rank = (self.rank_a[a] + kpt_rank * self.gd.comm.size * self.band_comm.size + band_rank * self.gd.comm.size) self.world.receive(P_ni, world_rank, 1303 + a) all_P_ni[nslice, i:i + ni] = P_ni i += ni assert i == nproj return all_P_ni elif self.kpt_comm.rank == kpt_rank: # plain else works too... P_ani = self.kpt_u[u].P_ani for a in range(natoms): if a in P_ani: self.world.ssend(P_ani[a], 0, 1303 + a) def get_wave_function_array(self, n, k, s): """Return pseudo-wave-function array. 
For the parallel case find the rank in kpt_comm that contains the (k,s) pair, for this rank, collect on the corresponding domain a full array on the domain master and send this to the global master.""" kpt_rank, u = self.kd.get_rank_and_index(s, k) band_rank, myn = self.bd.who_has(n) size = self.world.size rank = self.world.rank if self.kpt_comm.rank == kpt_rank: psit1_G = self._get_wave_function_array(u, myn) if size == 1: return psit1_G if self.band_comm.rank == band_rank: psit_G = self.gd.collect(psit1_G) if kpt_rank == 0 and band_rank == 0: if rank == 0: return psit_G # Domain master send this to the global master if self.gd.comm.rank == 0: self.world.send(psit_G, 0, 1398) if rank == 0: # allocate full wavefunction and receive psit_G = self.gd.empty(dtype=self.dtype, global_array=True) world_rank = (kpt_rank * self.gd.comm.size * self.band_comm.size + band_rank * self.gd.comm.size) self.world.receive(psit_G, world_rank, 1398) return psit_G def _get_wave_function_array(self, u, n): raise NotImplementedError
qsnake/gpaw
gpaw/wavefunctions/base.py
Python
gpl-3.0
15,178
[ "GPAW" ]
5744ee2da0a391883b4cb8ced5ea4fac563223764552c9dbb885e7f99912c66a
import caching.base as caching import jingo import jinja2 from tower import ugettext_lazy as _ import amo from addons.models import Addon from api.views import addon_filter from bandwagon.models import Collection, MonthlyPick as MP from versions.compare import version_int # The global registry for promo modules. Managed through PromoModuleMeta. registry = {} class PromoModuleMeta(type): """Adds new PromoModules to the module registry.""" def __new__(mcs, name, bases, dict_): cls = type.__new__(mcs, name, bases, dict_) if 'abstract' not in dict_: registry[cls.slug] = cls return cls class PromoModule(object): """ Base class for promo modules in the discovery pane. Subclasses should assign a slug and define render(). The slug is only used internally, so it doesn't have to really be a slug. """ __metaclass__ = PromoModuleMeta abstract = True slug = None def __init__(self, request, platform, version): self.request = request self.platform = platform self.version = version self.compat_mode = 'strict' if version_int(self.version) >= version_int('10.0'): self.compat_mode = 'ignore' def render(self): raise NotImplementedError class TemplatePromo(PromoModule): abstract = True template = None def context(self, **kwargs): return {} def render(self, **kw): c = dict(self.context(**kw)) c.update(kw) r = jingo.render_to_string(self.request, self.template, c) return jinja2.Markup(r) class MonthlyPick(TemplatePromo): slug = 'Monthly Pick' template = 'discovery/modules/monthly.html' def context(self, **kwargs): try: pick = MP.objects.filter(locale=self.request.LANG)[0] except IndexError: try: pick = MP.objects.filter(locale='')[0] except IndexError: pick = None return {'pick': pick, 'module_context': 'discovery'} class GoMobile(TemplatePromo): slug = 'Go Mobile' template = 'discovery/modules/go-mobile.html' class CollectionPromo(PromoModule): abstract = True template = 'discovery/modules/collection.html' title = None subtitle = None cls = 'promo' limit = 3 linkify_title = False def __init__(self, *args, **kw): super(CollectionPromo, self).__init__(*args, **kw) self.collection = None if hasattr(self, 'pk'): try: self.collection = Collection.objects.get(pk=self.pk) except Collection.DoesNotExist: pass elif (hasattr(self, 'collection_author') and hasattr(self, 'collection_slug')): try: self.collection = Collection.objects.get( author__username=self.collection_author, slug=self.collection_slug) except Collection.DoesNotExist: pass def get_descriptions(self): return {} def get_addons(self): addons = self.collection.addons.filter(status=amo.STATUS_PUBLIC) kw = dict(addon_type='ALL', limit=self.limit, app=self.request.APP, platform=self.platform, version=self.version, compat_mode=self.compat_mode) def f(): return addon_filter(addons, **kw) return caching.cached_with(addons, f, repr(kw)) def render(self, module_context='discovery'): if module_context == 'home': self.platform = 'ALL' self.version = None c = dict(promo=self, module_context=module_context, descriptions=self.get_descriptions()) if self.collection: c.update(addons=self.get_addons()) return jinja2.Markup( jingo.render_to_string(self.request, self.template, c)) class ShoppingCollection(CollectionPromo): slug = 'Shopping Collection' collection_author, collection_slug = 'mozilla', 'onlineshopping' cls = 'promo promo-purple' title = _(u'Shopping Made Easy') subtitle = _(u'Save on your favorite items ' u'from the comfort of your browser.') class WebdevCollection(CollectionPromo): slug = 'Webdev Collection' pk = 10 cls = 'webdev' title = _(u'Build the perfect 
website') class TesterCollection(CollectionPromo): slug = 'Firefox Tester Tools' pk = 82266 cls = 'tester' title = _(u'Help test Firefox with these tools') class StarterPack(CollectionPromo): slug = 'Starter Pack' pk = 153649 id = 'starter' cls = 'promo' title = _(u'First time with Add-ons?') subtitle = _(u'Not to worry, here are three to get started.') def get_descriptions(self): return { 2257: _(u'Translate content on the web from and into over 40 ' 'languages.'), 1833: _(u"Easily connect to your social networks, and share or " "comment on the page you're visiting."), 11377: _(u'A quick view to compare prices when you shop online ' 'or search for flights.') } class Fx4Collection(CollectionPromo): slug = 'Fx4 Collection' pk = 153651 id = 'fx4-collection' cls = 'promo' title = _(u'Firefox 4 Collection') subtitle = _(u'Here are some great add-ons for Firefox 4.') linkify_title = True class StPatricksPersonas(CollectionPromo): slug = 'St. Pat Themes' pk = 666627 id = 'st-patricks' cls = 'promo' title = _(u'St. Patrick&rsquo;s Day Themes') subtitle = _(u'Decorate your browser to celebrate ' 'St. Patrick&rsquo;s Day.') class FxSummerCollection(CollectionPromo): slug = 'Fx Summer Collection' pk = 2128026 id = 'fx4-collection' cls = 'promo' title = _(u'Firefox Summer Collection') subtitle = _(u'Here are some great add-ons for Firefox.') class ThunderbirdCollection(CollectionPromo): slug = 'Thunderbird Collection' pk = 2128303 id = 'tb-collection' cls = 'promo' title = _(u'Thunderbird Collection') subtitle = _(u'Here are some great add-ons for Thunderbird.') class TravelCollection(CollectionPromo): slug = 'Travelers Pack' pk = 4 id = 'travel' cls = 'promo' title = _(u'Sit Back and Relax') subtitle = _(u'Add-ons that help you on your travels!') def get_descriptions(self): return { 5791: _(u"Displays a country flag depicting the location of the " "current website's server and more."), 1117: _(u'FoxClocks let you keep an eye on the time around the ' 'world.'), 11377: _(u'Automatically get the lowest price when you shop ' 'online or search for flights.') } class SchoolCollection(CollectionPromo): slug = 'School' pk = 2133887 id = 'school' cls = 'promo' title = _(u'A+ add-ons for School') subtitle = _(u'Add-ons for teachers, parents, and students heading back ' 'to school.') def get_descriptions(self): return { 3456: _(u'Would you like to know which websites you can trust?'), 2410: _(u'Xmarks is the #1 bookmarking add-on.'), 2444: _(u'Web page and text translator, dictionary, and more!') } # The add-ons that go with the promo modal. 
Not an actual PromoModule class PromoVideoCollection(): items = (349111, 349155, 349157, 52659, 5579, 252539, 11377, 2257) def get_items(self): items = Addon.objects.in_bulk(self.items) return [items[i] for i in self.items if i in items] class NewYearCollection(CollectionPromo): slug = 'New Year' collection_author, collection_slug = 'mozilla', 'newyear_2012' id = 'new-year' title = _(u'Add-ons to help you on your way in 2012') class ValentinesDay(CollectionPromo): slug = 'Valentines Day' collection_author, collection_slug = 'mozilla', 'bemine' id = 'valentines' title = _(u'Love is in the Air') subtitle = _(u'Add some romance to your Firefox.') class MobileThemes(CollectionPromo): slug = 'Mobile Themes' cls = 'promo promo-grey' collection_author, collection_slug = 'mozilla', 'mobilethemes' title = _(u'Put a Theme on It!') subtitle = _(u'Visit addons.mozilla.org from Firefox for Android and ' u'dress up your mobile browser to match your style, mood, ' u'or the season.') class Fitness(CollectionPromo): slug = 'Fitness' cls = 'promo promo-yellow' collection_author, collection_slug = 'mozilla', 'fitness' title = _(u'Get up and move!') subtitle = _(u'Install these fitness add-ons to keep you active and ' u'healthy.') class UpAndComing(CollectionPromo): slug = 'Up & Coming' cls = 'promo promo-blue' collection_author, collection_slug = 'mozilla', 'up_coming' title = _(u'New &amp; Now') subtitle = _(u'Get the latest, must-have add-ons of the moment.') class Olympics(TemplatePromo): slug = 'Olympics' template = 'discovery/modules/olympics.html' class ContestWinners(TemplatePromo): slug = 'Contest Winners' template = 'discovery/modules/contest-winners.html' def render(self, module_context='discovery'): # Hide on discovery pane. if module_context == 'home': return super(ContestWinners, self).render() class Holiday(TemplatePromo): slug = 'Holiday' template = 'discovery/modules/holiday.html' def render(self, module_context='discovery'): # Hide on discovery pane. if module_context == 'home': return super(Holiday, self).render() class Privacy(CollectionPromo): slug = 'Privacy Collection' cls = 'promo promo-purple' collection_author, collection_slug = 'mozilla', 'privacy' title = _(u'Worry-free browsing') subtitle = _(u'Protect your privacy online with the add-ons in this ' u'collection.') class Featured(CollectionPromo): slug = 'Featured Add-ons Collection' cls = 'promo promo-yellow' collection_author, collection_slug = 'mozilla', 'featured-add-ons' title = _(u'Featured Add-ons') subtitle = _(u'Great add-ons for work, fun, privacy, productivity&hellip; ' u'just about anything!') class AustralisContestResults(CollectionPromo): slug = 'Australis Contest Results' cls = 'promo promo-blue australis-contest' collection_author, collection_slug = 'mozilla', 'australis' title = _('Add-ons for Australis Contest Winners') template = 'discovery/modules/australis-contest-results.html' # Want to feature more than one add-on? Use FeaturedCollection class FeaturedAddon(TemplatePromo): slug = 'Featured Add-on' template = 'discovery/modules/featured.html' title = _('What are your interests?') # First add-on out is from bug 1100454 pk = 547630 def context(self, **kwargs): try: addon = Addon.objects.get(pk=self.pk) except IndexError: addon = None return {'addon': addon, 'module_context': 'discovery', 'title': self.title}
magopian/olympia
apps/discovery/modules.py
Python
bsd-3-clause
11,312
[ "VisIt" ]
a0b1c6e429f280d0c02d05b61dea5e4bb9c1c48baceaec4bf86858c66f394430
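Editor's note: the record above builds its promo-module registry with a metaclass so that every concrete subclass registers itself under its slug. Below is a minimal, self-contained sketch of that registry pattern; the Python 3 metaclass syntax and the trimmed class bodies are assumptions for illustration, not the actual addons-server code.

registry = {}

class PromoModuleMeta(type):
    """Register every concrete subclass under its slug."""
    def __new__(mcs, name, bases, dict_):
        cls = type.__new__(mcs, name, bases, dict_)
        if 'abstract' not in dict_:
            registry[cls.slug] = cls
        return cls

class PromoModule(metaclass=PromoModuleMeta):
    abstract = True
    slug = None

    def render(self):
        raise NotImplementedError

class MonthlyPick(PromoModule):
    slug = 'Monthly Pick'

    def render(self):
        return '<div>monthly pick</div>'

# Look up and render a module by slug, as the discovery-pane view would:
print(sorted(registry))                      # ['Monthly Pick']
print(registry['Monthly Pick']().render())   # '<div>monthly pick</div>'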
# Copyright (c) 2013-2015 Siphon Contributors. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ ================ NCSS and CartoPy ================ Use Siphon to query the NetCDF Subset Service (NCSS) and plot on a map. This example uses Siphon's NCSS class to provide temperature data for contouring a basic map using CartoPy. """ from datetime import datetime import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt from netCDF4 import num2date import numpy as np from siphon.catalog import TDSCatalog ########################################### # First we construct a `TDSCatalog` instance pointing to our dataset of interest, in # this case TDS' "Best" virtual dataset for the GFS global 0.25 degree collection of # GRIB files. This will give us a good resolution for our map. This catalog contains a # single dataset. best_gfs = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/' 'Global_0p25deg/catalog.xml?dataset=grib/NCEP/GFS/Global_0p25deg/Best') print(list(best_gfs.datasets)) ########################################### # We pull out this dataset and get the NCSS access point best_ds = best_gfs.datasets[0] ncss = best_ds.subset() ########################################### # We can then use the `ncss` object to create a new query object, which # facilitates asking for data from the server. query = ncss.query() ########################################### # We construct a query asking for data corresponding to a latitude and longitude box where 43 # lat is the northern extent, 35 lat is the southern extent, -111 long is the western extent # and -100 is the eastern extent. We request the data for the current time. # # We also ask for NetCDF version 4 data, for the variable 'temperature_surface'. This request # will return all surface temperatures for points in our bounding box for a single time, # nearest to that requested. Note the string representation of the query is a properly encoded # query string. query.lonlat_box(north=43, south=35, east=-100, west=-111).time(datetime.utcnow()) query.accept('netcdf4') query.variables('Temperature_surface') ########################################### # We now request data from the server using this query. The `NCSS` class handles parsing # this NetCDF data (using the `netCDF4` module). If we print out the variable names, we see # our requested variable, as well as the coordinate variables (needed to properly reference # the data). data = ncss.get_data(query) print(list(data.variables)) ########################################### # We'll pull out the useful variables for temperature, latitude, and longitude, and time # (which is the time, in hours since the forecast run). temp_var = data.variables['Temperature_surface'] # Time variables can be renamed in GRIB collections. Best to just pull it out of the # coordinates attribute on temperature time_name = temp_var.coordinates.split()[1] time_var = data.variables[time_name] lat_var = data.variables['lat'] lon_var = data.variables['lon'] ########################################### # Now we make our data suitable for plotting. 
# Get the actual data values and remove any size 1 dimensions temp_vals = temp_var[:].squeeze() lat_vals = lat_var[:].squeeze() lon_vals = lon_var[:].squeeze() # Convert the number of hours since the reference time to an actual date time_val = num2date(time_var[:].squeeze(), time_var.units, only_use_cftime_datetimes=False) # Convert temps to Fahrenheit from Kelvin temp_vals = temp_vals * 1.8 - 459.67 # Combine 1D latitude and longitudes into a 2D grid of locations lon_2d, lat_2d = np.meshgrid(lon_vals, lat_vals) ########################################### # Now we can plot these up using matplotlib and cartopy. # Create a new figure fig = plt.figure(figsize=(15, 12)) # Add the map and set the extent ax = plt.axes(projection=ccrs.PlateCarree()) ax.set_extent([-100., -111., 35, 43]) # Add state boundaries to plot ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=2) # Contour temperature at each lat/long cf = ax.contourf(lon_2d, lat_2d, temp_vals, 200, transform=ccrs.PlateCarree(), zorder=0, cmap='coolwarm') # Plot a colorbar to show temperature and reduce the size of it plt.colorbar(cf, ax=ax, fraction=0.032) # Make a title with the time value ax.set_title(f'Temperature forecast (\u00b0F) for {time_val:%d %B %Y %H:%MZ}', fontsize=20) # Plot markers for each lat/long to show grid points for 0.25 deg GFS ax.plot(lon_2d.flatten(), lat_2d.flatten(), marker='o', color='black', markersize=2, alpha=0.3, transform=ccrs.Geodetic(), zorder=2, linestyle='none')
Unidata/siphon
examples/ncss/NCSS_Cartopy_Example.py
Python
bsd-3-clause
4,735
[ "NetCDF" ]
6fae6a0a92a598c014be10475ceb2738722488320d64fae9f992b9d19feb2615
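Editor's note: the NCSS example above converts the returned surface temperatures from Kelvin to Fahrenheit and combines 1-D coordinates into a 2-D grid before plotting. The following is a small offline check of those two array steps, using made-up values rather than real NCSS output, so it runs without a THREDDS server.

import numpy as np

temp_k = np.array([273.15, 300.0])       # hypothetical surface temperatures in K
temp_f = temp_k * 1.8 - 459.67           # same conversion as in the example
print(temp_f)                            # [32.   80.33]

lon = np.array([-111.0, -110.75, -110.5])
lat = np.array([35.0, 35.25])
lon_2d, lat_2d = np.meshgrid(lon, lat)   # 2-D grids, shape (len(lat), len(lon))
print(lon_2d.shape, lat_2d.shape)        # (2, 3) (2, 3)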
## Propulsion.py ## Problems and solutions in Propulsion ###################################################################################### ## Copyleft 2015, Ernest Yeung <ernestyalumni@gmail.com> ## 20151112 ## This program, along with all its code, is free software; you can redistribute ## it and/or modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## Governing the ethics of using this program, I default to the Caltech Honor Code: ## ``No member of the Caltech community shall take unfair advantage of ## any other member of the Caltech community.'' ## ## Donate, and support my other scientific and engineering endeavors at ## ernestyalumni.tilt.com ###################################################################################### import decimal from decimal import Decimal import sympy from sympy import * from sympy.abc import a,A, t, u, psi, rho, theta from sympy import Rational as Rat from sympy.utilities.lambdify import lambdify, implemented_function import Physique from Physique import FCconv, KCconv, FundConst, conv, plnfacts, T_C, T_K, T_F t_p = Symbol('t_p', real=True) # burn rate g_0 = Symbol('g_0', positive=True) # standard gravity F_thrust = Function('F_thrust')(t) I_t = integrate(F_thrust,(t,0,t_p)) # total impulse m = Function('m')(t) # mass of propellant flowing out W_p = Symbol('W_p',positive=True) # weight of propellant I_sp = I_t/W_p I_sp.subs( W_p, (g_0*integrate(m.diff(t),(t,0,t_p))) ) # specific impulse M0 = Symbol('M0',positive=True) m_p = Symbol('m_p',positive=True) M = Function('M')(t) # mass of rocket+propellant system massflow = Symbol('massflow',real=True) u_e = Symbol('u_e',real=True) # effective exhaust velocity, $c$ for Bibliarz and Sutton u=Function('u')(t) M_constantflow = M0 - t*m_p/t_p # assume constant mass flow I_t.subs(F_thrust, massflow*u_e).doit() I_sp.subs(F_thrust,massflow*u_e).subs(W_p, g_0*massflow*t_p).doit() # u_e/g_0 # cf. 4.1 Gravity-Free, Drag-Free Space Flight # Biblarz, Sutton, Rocket Propulsion Elements (2001) gravityfreedragfreespaceflight = Eq( u.diff(t), massflow*u_e/M ) gravityfreedragfreespaceflight.subs(M,M_constantflow) Deltau_g0D0 = integrate( gravityfreedragfreespaceflight.subs(M,M_constantflow).rhs , (t,0,t_p)).simplify() # \Delta u for g_0 = 0, D = 0(gravity-free, drag-free) # cf. 
4.2 Forces acting on a Vehicle in the Atmosphere # Biblarz, Sutton, Rocket Propulsion Elements (2001) C_L = Symbol('C_L', positive=True) C_D = Symbol('C_D', positive=True) Lift = C_L*(Rat(1)/Rat(2))*rho*A*u**2 Drag = C_D*(Rat(1)/Rat(2))*rho*A*u**2 theta = Function('theta')(t) flightpathdirection = Eq( u.diff(t), F_thrust/M*cos(psi-theta) - Drag/M - g_0*sin(theta) ) tangentialflightdirection = Eq( u*theta.diff(t) , F_thrust/M*sin(psi-theta)+ Lift/M - g_0*cos(theta) ) # Example 4-1 Biblarz, Sutton, Rocket Propulsion Elements (2001) # assume constant thrust F_thrust0 = Symbol('F_thrust0',real=True) I_sp = Symbol('I_sp',positive=True) I_spEq = Eq(I_sp, I_t/W_p) # specific impulse equation # Given # Launch weight 4.0 lbf # Useful propellant mass 0.4 lbm # Effective specific impulse 120 sec # Launch angle (relative to horizontal 80 degrees # Burn time (with constant thrust) 1.0 sec theta_0 = Symbol("theta_0",real=True) I_spEq.subs(I_t,F_thrust0*t_p) # I_sp == F_thrust0*t_p/W_p solve( I_spEq.subs(I_t,F_thrust0*t_p).subs(I_sp,120.).subs(t_p,1.0).subs(W_p, 0.4), F_thrust0) # [48.0000000000000] lbf # "The direction of thrust and the flight path are the same udot = Matrix([ [flightpathdirection.rhs],[tangentialflightdirection.rhs]]) Rot = Matrix([[ cos(theta), -sin(theta)],[sin(theta),cos(theta)]]) # assume negligible Drag (low velocity), no lift (wingless) udot.subs(Lift,0).subs(Drag,0).subs(psi,theta) ( Rot * udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand() # This reproduces the acceleration in x and y components of the powered flight stage ( Rot * udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand().subs(F_thrust, 48.0).subs(M,4.0/32.2).subs(g_0,32.0).subs( theta, 80./180.*N(pi) ) # 67.1 ft/sec^2 in x direction, 348.5 ft/sec^2 in y direction uxuydot = (Rot*udot.subs(Lift,0).subs(Drag,0).subs(psi,theta)).expand().subs(F_thrust,48.0).subs(g_0,32.0).subs(theta,80./180.*N(pi)).subs(M,M_constantflow).subs(M0,4.0/32.2).subs(m_p,0.4/32.2).subs(t_p,1.0) u_p = integrate(uxuydot,(t,0,1.0) ) # Matrix([ # [70.6944361984026], 70.7 ft/sec # [368.928070760125]]) 375 ft/sec # EY : 20151113 Launch weight is in 4.0 lbf, useful propellant mass was in 0.4 lbm, and yet Biblarz and Sutton divides by 32.2 ft/sec^2 for both, and lbf and lbm are along the same footing as the units for initial weight and final weight on pp. 114; is this wrong? Answer is no. lbm is lbf, but in different contexts, see this clear explanation: https://youtu.be/4ePaKh9QyC8 atan( u_p[1]/u_p[0])*180/N(pi) # 79.1524086456152 # Problems Ch. 4 Flight Performance pp. 154 # 3. Problem0403 = flightpathdirection.subs(Drag,0).subs(psi,theta).subs(theta,pi/2).subs(F_thrust, m_p/t_p*u_e).subs(M,M_constantflow).subs(m_p,M0*0.57).subs(t_p,5.).subs(u_e,2209.).subs(g_0,9.8).factor(M0).rhs # Chapter 4 Flight Performance, Problem 3 integrate( Problem0403, (t,0,5.0) ) # 1815.32988528061 integrate( integrate(Problem0403,(t,0,t) ),(t,0,5.0) ) # 3890.37850288891 # Problem 6, Ch. 4 Flight Performance pp. 
155 M_earth = plnfacts.loc[plnfacts['Planet']=="EARTH","Mass (1024kg)"].values[0]*10**(24) # in kg R_earth = plnfacts.loc[plnfacts['Planet']=="EARTH","Diameter (km)"].values[0]/Decimal(2) Gconst = FundConst[ FundConst["Quantity"].str.contains("gravitation") ].loc[243,"Value"] v0406 = sqrt( Gconst*M_earth/((R_earth + Decimal(500))*10**3) ) # velocity of satellite v of Chapter 4, Problem 6 of Biblarz and Sutton T0406 = (2.*N(pi)*float((R_earth + Decimal(500))*10**3 )**(3./2))/float(sqrt( Gconst*M_earth)) Eperm0406 = Gconst*M_earth*(-1/(2*((R_earth+Decimal(500))*10**3)) + 1/(R_earth*10**3)) # Energy per mass Eperm0406.quantize(Decimal('100000.')) # cf. https://gist.github.com/jackiekazil/6201722 # cf. http://stackoverflow.com/questions/6913532/display-a-decimal-in-scientific-notation '%.6E' % Eperm0406 ############################## ##### AE 121 ############################## ######################### #### PS 2 ######################### #################### ### Problem 1 #################### gstd = FundConst[ FundConst["Quantity"].str.contains("gravity") ].loc[303,:].Value M_0 = Symbol('M_0',positive=True) Deltau = -I_sp*g_0*ln( (M_0 -m_p)/M_0) # part (a) Deltau.subs(I_sp,268.8).subs(g_0,gstd).subs(M_0,805309.).subs(m_p, (1-0.1396)*586344) # 2595.74521034101 m/s # part (b) Deltau.subs(I_sp,452.1).subs(g_0,gstd).subs(M_0,183952+35013.).subs(m_p, (1-0.1110)*183952) # 6090.68716730318 m/s # part (c) 1.5*805309./268.8 # 4493.911830357143 #################### ### Problem 3 #################### import scipy from scipy import exp, array from scipy.integrate import ode import matplotlib.pyplot as plt M_cannonball = (7.8*(10**2)**3/(10**3))*4./3.*N(pi)*(15./2./100.)**3 (1.225)*(0.1)/(2.*M_cannonball)*(N(pi)*(15./2./100.)**2) # 7.85256410256411e-5 def deriv(t,u): # return derivatives of the array u """ cf. http://bulldog2.redlands.edu/facultyfolder/deweerd/tutorials/Tutorial-ODEs.pdf """ uxdot = (7.853*10**(-5))*exp( -u[3]/(10000.))*(u[0]**2 + u[1]**2)**(0.5)*(-u[0]) uydot = -9.8 + (7.853*10**(-5))*exp(-u[3]/(10000.))*(u[0]**2 + u[1]**2)**(0.5)*(-u[1]) return array([ uxdot,uydot,u[0],u[1] ]) u0 = [300.*cos(50./180.*N(pi)), 300.*sin(50./180.*N(pi)),0,0] Prob0203 = ode(deriv).set_integrator('dopri5') # Problem 3 from Problem Set 2 for AE121 Fall 2015 # cf. http://stackoverflow.com/questions/26738676/does-scipy-integrate-ode-set-solout-work Prob0203.set_initial_value(u0) t1 = 41.575 dt = 0.005 while Prob0203.successful() and Prob0203.t < t1: Prob0203.integrate(Prob0203.t+dt) print(" %g " % Prob0203.t ) print Prob0203.y Prob0203.set_initial_value(u0) Prob0203_solution = [] while Prob0203.successful() and Prob0203.t < t1: Prob0203_solution.append( [Prob0203.t+dt,] + list( Prob0203.integrate(Prob0203.t+dt) ) ) # take the transpose of a list of lists Prob0203_solution = map(list, zip(*Prob0203_solution)) plt.figure(1) plt.plot( Prob0203_solution[3],Prob0203_solution[4]) plt.xlabel('x (m)') plt.ylabel('y (m)') plt.title('Cannonball trajectory with Drag: Variable density') # part (b) def deriv_b(t,u): # return derivatives of the array u """ cf. 
http://bulldog2.redlands.edu/facultyfolder/deweerd/tutorials/Tutorial-ODEs.pdf """ uxdot = (7.853*10**(-5)) *(u[0]**2 + u[1]**2)**(0.5)*(-u[0]) uydot = -9.8 + (7.853*10**(-5)) *(u[0]**2 + u[1]**2)**(0.5)*(-u[1]) return array([ uxdot,uydot,u[0],u[1] ]) Prob0203b = ode(deriv_b).set_integrator('dopri5') Prob0203b.set_initial_value(u0) Prob0203b.integrate(41.23) t1b = 41.225 Prob0203b.set_initial_value(u0) Prob0203b_solution = [] while Prob0203b.successful() and Prob0203b.t < t1b: Prob0203b_solution.append( [Prob0203b.t+dt,] + list( Prob0203b.integrate(Prob0203b.t+dt) ) ) Prob0203b_solution = map(list, zip(*Prob0203b_solution)) plt.figure(2) plt.plot( Prob0203b_solution[3],Prob0203b_solution[4]) plt.xlabel('x (m)') plt.ylabel('y (m)') plt.title('Cannonball trajectory with Drag: Constant density') # part (c) 300.**2/9.8*sin(2.*50./180.*N(pi) ) # 9044.15283378558 #parabola trajectory data Prob0203c_x = [i*10 for i in range(905)] Prob0203c_y = [ tan(50./180.*N(pi))*x - (9.8/2.)*x**2/(300.*cos(50./180.*N(pi)))**2 for x in Prob0203c_x] plt.figure(3) plt.plot( Prob0203_solution[3],Prob0203_solution[4], label="Drag: Variable density") plt.plot( Prob0203b_solution[3],Prob0203b_solution[4], label="Drag: Constant density") plt.plot( Prob0203c_x,Prob0203c_y, label="No Drag") plt.xlabel('x (m)') plt.ylabel('y (m)') plt.title('Trajectories of cannonball with Drag of variable density, Drag of constant density, and no drag') plt.legend() ######################### #### PS 4 ######################### #################### ### Problem 1 #################### # (b) k_Boltz = FundConst[ FundConst["Quantity"].str.contains("Boltzmann") ].loc[49,:] k_Boltz.Value k_Boltz.Unit N_Avog = FundConst[FundConst["Quantity"].str.contains("Avogadro") ] c_V = float( Decimal(1.5)*(N_Avog.Value)*(k_Boltz.Value))/M_0 c_P = float( Decimal(2.5)*(N_Avog.Value)*(k_Boltz.Value))/M_0 c_V.subs(M_0, 39.948/1000.) # 312.198102337360 c_V.subs(M_0, 131.293/1000.) # 94.9912774647001 c_P.subs(M_0, 39.948/1000.) # 520.330170562267 c_P.subs(M_0, 131.293/1000.) # 158.318795774500 tau = Symbol("tau",real=True) tau_0 = Symbol("tau_0",real=True) GAMMA = Symbol("GAMMA",positive=True) MachNo = Symbol("MachNo",positive=True) TratiovsMachNo = Eq( tau_0/tau, Rat(1) + (GAMMA - Rat(1))/Rat(2)*MachNo )
ernestyalumni/Propulsion
Propulsion.py
Python
gpl-2.0
11,699
[ "Avogadro" ]
e32ae895665f769bac6e98c68c78ad6d779e9ffb7715e7e9d275605e640a35ed
#!/usr/bin/env python # ====================================================================== # Globally useful modules, imported here and then accessible by all # functions in this file: import numpy,atpy,sys,getopt,string,subprocess,pyfits import localgroup # It would be nice to read in tables from FITS, but I cannot write them # out in FITS format yet :-/ Stick with ascii now: # tabletype = 'fits' tabletype = 'ascii' rad2deg = 180.0/numpy.pi # ====================================================================== def LGHPinfer(argv): """ NAME LGHPinfer.py PURPOSE Read in catalogs of paired halos in known format, and join to make a list of local group "triplets". Compute some new quantities including the importance of each sample given various observational constraints. Output a plain text catalog for use by CornerPlotter. COMMENTS If no input files are provided, 3 default files will be read in, assumed to contain triplet halo samples from the prior. These files are called LG_halos_M[W,31,33].fits - although they may not be in FITS format... USAGE LGHPinfer.py [flags] [options] pairs.dat FLAGS -h Print this message [0] -v Be verbose INPUTS MW_halos_01.txt Busha format plain text catalog M31_halos_01.txt Busha format plain text catalog M33_halos_01.txt Busha format plain text catalog OPTIONAL INPUTS OUTPUTS stdout Useful information *.cpt Catalogs for plotters to read EXAMPLES LGHPinfer.py -c constraints.txt *halos*.txt BUGS HISTORY 2011-10-21 started Marshall & Busha (Oxford) 2012-03-15 added kinematic constraints Marshall (Oxford) 2013-10-01 githubbed Marshall & Busha (KIPAC) """ # -------------------------------------------------------------------- try: opts, args = getopt.getopt(argv[1:], "hvr:", ["help","verbose","vt=","vr=","M1="]) except getopt.GetoptError, err: # print help information and exit: print str(err) # will print something like "option -a not recognized" print LGHPinfer.__doc__ sys.exit(2) vb = False headstart = False constrainMMW = False confile = None for o,a in opts: if o == "-v": vb = True elif o in ("-h", "--help"): print LGHPinfer.__doc__ return elif o in ("-MW"): bits = a.split(',') if len(bits) != 2: print "ERROR: supply MW constraint as 'M1bar,M1err'" return MMWbar = bits[0] MMWerr = bits[1] constrainMMW = True elif o in ("-c", "--constraints"): confile = a else: assert False, "unhandled option" if (len(args) > 0) and (len(args) % 3 == 0): inputs = args elif (len(args) == 0): headstart = True else: print LGHPinfer.__doc__ return # If FITS tables of halo triplets exist, read them in (fast): if headstart: if vb: print "Working from existing FITS tables of halos..." # -------------------------------------------------------------------- # PART 1: reading in raw ascii data, and selecting triplets. Output # of this part is 3 FITS files, one for each galaxy, with all relevant # quantities. else: # Parse list of input files, and read in data as appended tables: started = {'MW':0,'M31':0,'M33':0} for input in inputs: pieces = string.split(string.split(input,'.')[0],'_') object = pieces[0] box = pieces[2] epoch = pieces[3] # Read in table and add a column of strings for the consuelo run no: if vb: print "Reading data from "+input t = atpy.Table(input,type='ascii') consuelo_box = numpy.zeros(len(t),dtype='|S4') consuelo_box.fill(box) t.add_column('run',consuelo_box) consuelo_epoch = numpy.zeros(len(t),dtype='|S6') consuelo_epoch.fill(epoch) t.add_column('epoch',consuelo_epoch) if not started[object]: # Rename table to new one! 
if (object == 'MW'): MW = t elif (object == 'M31'): M31 = t elif (object == 'M33'): M33 = t else: print "Error: Unrecognised object: "+object return started[object] = 1 else: # Append table to one that has begun: if (object == 'MW'): MW.append(t) elif (object == 'M31'): M31.append(t) elif (object == 'M33'): M33.append(t) else: print "Error: Unrecognised object: "+object return # Now have all tables read in and combined into 3, one of r each object. if vb: print "Read in",len(MW),"MW halos, ",len(M31),"M31 halos, and",len(M33),"M33 halos" if (len(MW) != len(M31)): print "Error: mismatched dataset sizes: ",len(MW),"MW halos cf",len(M31),"M31 halos" return # Tables are matched via IDs, so sort by M33 ID, run, and MW_ID and reverse # to get high idM33 values at the top. Reversing idM33 vals # needs to be done first! sort_to_align(MW) sort_to_align(M31) # Check that arrays line up: fail = numpy.sum(MW.idMW - M31.idMW) if fail: print "Error: IDs in data files not consistent" print " MW.idMW = ",MW.idMW print " M31.idMW = ",M31.idMW return # Now sort M33 to match: sort_to_align(M33) # Can now make index array for M33 triplets with numpy.where, and use it # on MW and M31 tables: this is basically the first N samples in all 3 # tables. If M33 is not being used, can use all M31 and MW halos anyway. index = numpy.where(MW.idM33 >= -2) N_pairs = len(index[0]) indexM33 = numpy.where(MW.idM33 >= 0) N_triplets = len(indexM33[0]) if vb: print "Found",N_pairs,"MW-M31 pairs, and",N_triplets,"LG triplets" # Check that arrays line up: if N_triplets != len(M33): print "Warning: numbers of M33s in datafiles are inconsistent:" print " len(MW.idM33[indexM33]), len(M33.idM33) = ",len(MW.idM33[indexM33]), len(M3.idM33) print " MW.idM33[indexM33] = ",MW.idM33[indexM33] print " M33.idM33 = ",M33.idM33 return fail = numpy.sum(MW.idM33[indexM33] - M33.idM33) if fail: print "Warning: IDs in data files are inconsistent:" print " MW.idM33[indexM33] = ",MW.idM33[indexM33] print " M33.idM33 = ",M33.idM33 return # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Shift all halos to galactocentric coordinates, subtracting off MW # position and velocity - and rescale to include Hubble flow. Note # that distances in catalog are in Mpc!: H0 = 70.0 phase_space_shift_and_flow(MW,M31,M33,indexM33,H0) # Add columns with galactocentric distances D and galactocentric # radial velocity v (possibly slow): distances_and_velocities(M31,M33) # Add columns with M200 and c200 to complement Mvir and cvir massconvert(MW) massconvert(M31) massconvert(M33) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Make a unit importance array, and write out cpt files with just the # prior samples in them. Logs of mass are taken just before writing. w = numpy.ones(len(MW)) w_M33 = numpy.ones(len(M33)) # BUG: Some of Michael's halos are hundreds of Mpc away - some sort of # wraparound error. Re-impose the isolation selection via the weights: wrapped = numpy.where(M31.D > 1.5) w[wrapped] = 0.0 wrapped = numpy.where(M33.D > 1.5) w_M33[wrapped] = 0.0 # BUG: Occasionally a triplet has M31 = M33. Remove these! 
identical = numpy.where(M31.idM31 == M31.idM33) w[identical] = 0.0 w_M33[identical] = 0.0 # At this point, write out plain text catalogs for visualising prior - # random n samples: # n = 2000 # # filename = 'fig1_pairs_prior.cpt' # write_cpt_file(index,MW,M31,M33,w,n,filename) # if vb: print "Written",n,"lines to",filename # # filename = 'fig1_triplets_prior.cpt' # write_cpt_file(indexM33,MW,M31,M33,w_M33,n,filename) # if vb: print "Written",n,"lines to",filename # Write out pairs and triplets for future use (save having to read # in an enormous file # again). Weights are added at this point: write_fits_files('pairs',index,MW,M31,M33,w,vb) write_fits_files('triplets',indexM33,MW,M31,M33,w_M33,vb) # OK, now have 3+2 tables, each with a weight column, # identical for now. # # What's the probability that M_MW is > M_M31? # P_pairs = probgt(index,MW,M31,w) # P_triplets = probgt(indexM33,MW,M31,w) # print "Probability that M_MW > M_M31 is",P_pairs*100.0,"% for pairs," # print " and",P_triplets*100.0,"% for triplets" # End of Part 1. # -------------------------------------------------------------------- # Part 2: compute likelihood of observational data, and apply to each # triplet as an importance for drawing inferences. First need some # additional quantities, beyond D and vr. # Read in data tables afresh, from pre-prepared files. input = 'LG_triplets_MW.fits' MWt = atpy.Table(input,type=tabletype,name='MW') input = 'LG_triplets_M31.fits' M31t = atpy.Table(input,type=tabletype,name='M31') input = 'LG_triplets_M33.fits' M33t = atpy.Table(input,type=tabletype,name='M33') if vb: print "Read in",len(MWt),"MW halos,",len(M31t),"M31 halos, and",len(M33t),"M33 halos in triplets" # input = 'LG_pairs_MW.fits' # MWp = atpy.Table(input,type=tabletype,name='MW') # input = 'LG_pairs_M31.fits' # M31p = atpy.Table(input,type=tabletype,name='M31') # if vb: print "Read in",len(MWp),"MW halos and",len(M31p),"M31 halos in pairs" # Just make sure they all agree: fail = ((2*len(MWt)-len(M31t)-len(M33t)) != 0) if fail: print "Warning: triplet FITS tables have inconsistent lengths:" print " len(MWt) = ",len(MWt) print " len(M31t) = ",len(M31t) print " len(M33t) = ",len(M33t) return # fail = ((len(MWp)-len(M31p)) != 0) # if fail: # print "Warning: triplet FITS tables have inconsistent lengths:" # print " len(MWp) = ",len(MWp) # print " len(M31p) = ",len(M31p) # return # # pindex = numpy.where(MWp.idM31 >= 0) # np = len(pindex[0]) tindex = numpy.where(MWt.idM33 >= 0) nt = len(tindex[0]) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Add column with angsep to M33: angular_separation_on_sky(M31t,M33t) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Add column with timing argument mass, to M31 tables: # timing_argument(M31p) timing_argument(M31t) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Start out with prior weight (all ones except a few zeros): # wp0 = MWp.w * M31p.w wt0 = MWt.w * M31t.w * M33t.w # Read in constraints as an instance of the constraints class, which # contains the relevant observational data: obs = localgroup.bundle() # Calculate importances based on input MW and M31 constraints. 
# Triplets: # # Compute MW mass constraint, and save the posterior: # wt1 = wt0*obs.likelihood(MWt,'Mvir') # filename = 'LG_triplets_MW-Mvir.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt1,nt,filename,vb) # Compute M31 distance constraint, and save the posterior: wt2 = wt0*obs.likelihood(M31t,'D') # filename = 'LG_triplets_M31_D.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt2,nt,filename,vb) # Compute M31 radial velocity constraint, and save the posterior: wt3 = wt0*obs.likelihood(M31t,'vr') # filename = 'LG_triplets_M31_vr.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt3,nt,filename,vb) # Compute M33 distance constraint, and save the posterior: wt4 = wt0*obs.likelihood(M33t,'D') # filename = 'LG_triplets_M33_D.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt4,nt,filename,vb) # Compute M33 radial velocity constraint, and save the posterior: wt5 = wt0*obs.likelihood(M33t,'vr') # filename = 'LG_triplets_M33_vr.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt5,nt,filename,vb) # Combine all kinematic constraints, and save the posterior: wt6 = wt2*wt3*wt4*wt5 filename = 'LG_triplets_M33+M31_D+vr.cpt' write_cpt_file(tindex,MWt,M31t,M33t,wt6,nt,filename,vb) # # Combine just M31 kinematic constraints, and save the posterior: # wt7 = wt2*wt3 # filename = 'LG_triplets_M31_D+vr.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt7,nt,filename,vb) # # # Combine all constraints, and save the posterior: # wt8 = wt1*wt2*wt3*wt4*wt5 # filename = 'LG_triplets_M33+M31_D+vr_MW-Mvir.cpt' # write_cpt_file(tindex,MWt,M31t,M33t,wt8,nt,filename,vb) # Write out prior for completeness: filename = 'LG_triplets_prior.cpt' write_cpt_file(tindex,MWt,M31t,M33t,wt0,nt,filename,vb) # # Pairs: # # # Compute MW mass constraint, and save the posterior: # wp1 = wp0*obs.likelihood(MWp,'Mvir') # filename = 'LG_pairs_MW_Mvir.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp1,np,filename,vb) # # # Compute M31 distance constraint, and save the posterior: # wp2 = wp0*obs.likelihood(M31p,'D') # filename = 'LG_pairs_M31_D.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp2,np,filename,vb) # # # Compute M31 radial velocity constraint, and save the posterior: # wp3 = wp0*obs.likelihood(M31p,'vr') # filename = 'LG_pairs_M31_vr.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp3,np,filename,vb) # # # Combine both kinematic constraints, and save the posterior: # wp6 = wp2*wp3 # filename = 'LG_pairs_M31_D+vr.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp6,np,filename,vb) # # # Combine all constraints, and save the posterior: # wp8 = wp1*wp2*wp3 # filename = 'LG_pairs_M31_D+vr_MW-Mvir.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp8,np,filename,vb) # # # Write out prior for completeness: # filename = 'LG_pairs_prior.cpt' # write_cpt_file(pindex,MWp,M31p,M31p,wp0,np,filename,vb) # # # # Write out FITS files for Risa - all constraints except MW mass. 
# # write_fits_files('pairs_constrained-by-M31-D+vr',pindex,MWp,M31p,M31p,wp6,vb) # write_fits_files('triplets_constrained-by-M33+M31_D+vr',tindex,MWt,M31t,M33t,wt6,vb) # # -------------------------------------------------------------------- return # ====================================================================== def sort_to_align(t): t.sort(['idM33','run','idMW']) # print "Top 5 IDs: ",t.idM33[0:5] reverse = numpy.arange(len(t.idM33),0,-1) t.add_column('reverse',reverse) t.sort(['reverse']) t.remove_columns(['reverse']) # print "After reverse sorting, Top 5 IDs: ",t.idM33[0:5] return # ====================================================================== def phase_space_shift_and_flow(MW,M31,M33,i,H0): M31.x -= MW.x M31.y -= MW.y M31.z -= MW.z M33.x -= MW.x[i] M33.y -= MW.y[i] M33.z -= MW.z[i] MW.x -= MW.x MW.y -= MW.y MW.z -= MW.z M31.vx -= MW.vx M31.vy -= MW.vy M31.vz -= MW.vz M33.vx -= MW.vx[i] M33.vy -= MW.vy[i] M33.vz -= MW.vz[i] MW.vx -= MW.vx MW.vy -= MW.vy MW.vz -= MW.vz M31.vx += H0*M31.x M31.vy += H0*M31.y M31.vz += H0*M31.z M33.vx += H0*M33.x M33.vy += H0*M33.y M33.vz += H0*M33.z return # ====================================================================== def distances_and_velocities(M31,M33): D = numpy.sqrt(M31.x*M31.x + M31.y*M31.y + M31.z*M31.z) ii = numpy.where(D <= 0.0) if len(ii[0]) > 0: print "Error: M31 distance is zero or negative:" for i in ii[0]: print M31[i] # D[i] = 0.75 sys.exit() M31.add_column('D',D) D = numpy.sqrt(M33.x*M33.x + M33.y*M33.y + M33.z*M33.z) ii = numpy.where(D <= 0.0) if len(ii[0]) > 0: print "Error: M33 distance is zero or negative:" for i in ii[0]: print M33[i] # D[i] = 0.75 sys.exit() M33.add_column('D',D) # Project v onto radial vector to get vr, and onto the sphere to # get vt: v = numpy.sqrt(M31.vx*M31.vx + M31.vy*M31.vy + M31.vz*M31.vz) vr = (M31.vx*M31.x + M31.vy*M31.y + M31.vz*M31.z)/M31.D costheta = vr/v sintheta = numpy.sqrt(1.0-costheta*costheta) vt = v*sintheta M31.add_column('vr',vr) M31.add_column('vt',vt) v = numpy.sqrt(M33.vx*M33.vx + M33.vy*M33.vy + M33.vz*M33.vz) vr = (M33.vx*M33.x + M33.vy*M33.y + M33.vz*M33.z)/M33.D costheta = vr/v sintheta = numpy.sqrt(1.0-costheta*costheta) vt = v*sintheta M33.add_column('vr',vr) M33.add_column('vt',vt) return # ====================================================================== # Compute angular separation on sky - ignore Earth's position # relative to MW centre for now... This will need including though! # Just dot the position vectors together for now. def angular_separation_on_sky(M31,M33): cos_angsep = (M31.x*M33.x + M31.y*M33.y + M31.z*M33.z)/(M31.D*M33.D) angsep = numpy.arccos(cos_angsep)*rad2deg # Note - pay attention to weights to avoid identical twins: index = numpy.where(M31.w == 0.0) angsep[index] = 0.0 M33.add_column('angsep',angsep) return # ====================================================================== # Convert virial masses to M200c, to better compare with Li & White. 
def massconvert(table): NFW.vb = True # If we had z and c for each halo: # h = NFW.halo(table.Mvir,table.z,c=table.cvir,kind='virial') # Placeholder: z = 0.0 h = NFW.halo(table.Mvir,z,kind='virial') h.massconvert('virial') table.add_column('M200',h.M200) table.add_column('c200',h.c200) return # ====================================================================== # Compute timing argument LG mass, M_TA def timing_argument(M31): TimingArgument.vb = False M,a,chi,e = TimingArgument.mass(M31.D,M31.vr,approach='radial',t0scatter=True) M31.add_column('M_TA',M) return # ====================================================================== # Return the probability that A is greater than B: def probgt(index,A,B,w): # Check that weights sum to 1.0: ww = w[:] norm = numpy.sum(w[index]) ww[index] = w[index] / norm # Now sum probabilities: return numpy.sum(ww[numpy.where(A[index] > B[index])]) # ====================================================================== def write_cpt_file(index,MW,M31,M33,w,n,filename,vb): n_given = len(index[0]) n_required = n if n_required > n_given: n_required = n_given # First define labels and ranges: logM_MW_label = '$\log_{10} M_{\\rm MW} / M_{\odot}$, ' logM_MW_range = '10.0,14.0, ' logM_M31_label = '$\log_{10} M_{\\rm M31} / M_{\odot}$, ' logM_M31_range = '10.0,14.0, ' logM_M33_label = '$\log_{10} M_{\\rm M33} / M_{\odot}$, ' logM_M33_range = '10.0,14.0, ' logM_LG_pair_label = '$\log_{10} M\prime_{\\rm LG} / M_{\odot}$, ' logM_LG_pair_range = '10.0,14.0, ' logM_LG_triplet_label = '$\log_{10} M_{\\rm LG} / M_{\odot}$, ' logM_LG_triplet_range = '10.0,14.0, ' logMratio_label = '$\log_{10} M_{\\rm M31} / M_{\\rm MW}$, ' logMratio_range = '-3.0,3.0, ' D_M31_label = '$D_{\\rm M31} / {\\rm kpc}$, ' D_M31_range = '550,950, ' D_M33_label = '$D_{\\rm M33} / {\\rm kpc}$, ' D_M33_range = '550,950, ' vr_M31_label = '$v^{\\rm rad}_{\\rm M31} / {\\rm km s}^{-1}$, ' vr_M31_range = '-400,400, ' vt_M31_label = '$v^{\\rm tan}_{\\rm M31} / {\\rm km s}^{-1}$, ' vt_M31_range = '0,400, ' vr_M33_label = '$v^{\\rm rad}_{\\rm M33} / {\\rm km s}^{-1}$, ' vr_M33_range = '-400,400, ' vt_M33_label = '$v^{\\rm tan}_{\\rm M33} / {\\rm km s}^{-1}$, ' vt_M33_range = '0,400, ' angsep_label = '$\\Delta\\theta / {\\rm deg}$, ' angsep_range = '0,60, ' logM_TA_label = '$\log_{10} M_{\\rm TA} / M_{\odot}$, ' logM_TA_range = '10.0,14.0, ' logA200_label = '$\log_{10} A_{200}$, ' logA200_range = '-2.0,2.0, ' # Now parse target filename and select parameters to plot: stem = string.split(filename,'_')[1] if stem == 'pairs': Npars = 8 hline1 = '# importance, ' + logM_MW_label + logM_M31_label \ + D_M31_label + vr_M31_label + logMratio_label \ + logM_LG_pair_label \ + logM_TA_label + logA200_label # + vt_M31_label hline2 = '# 0,1, ' + logM_MW_range + logM_M31_range \ + D_M31_range + vr_M31_range + logMratio_range \ + logM_LG_pair_range \ + logM_TA_range + logA200_range # + vt_M31_range outbundle = numpy.zeros([n_given,Npars+1]) outbundle[:,0] = w[index] outbundle[:,1] = numpy.log10(MW.M200[index]) outbundle[:,2] = numpy.log10(M31.M200[index]) outbundle[:,3] = M31.D[index]*1000.0 outbundle[:,4] = M31.vr[index] outbundle[:,5] = numpy.log10(M31.M200[index]/MW.M200[index]) outbundle[:,6] = numpy.log10(MW.M200[index]+M31.M200[index]) outbundle[:,7] = numpy.log10(M31.M_TA[index]) outbundle[:,8] = numpy.log10(M31.M_TA[index]/(MW.M200[index]+M31.M200[index])) # outbundle[:,9] = M31.vt[index] elif stem == 'triplets': Npars = 13 hline1 = '# importance, ' \ + logM_MW_label + logM_M31_label + logM_M33_label \ + 
D_M31_label + vr_M31_label \ + D_M33_label + vr_M33_label \ + logMratio_label \ + logM_LG_pair_label + logM_LG_triplet_label \ + angsep_label \ + logM_TA_label + logA200_label # + vt_M31_label hline2 = '# 0,1, ' \ + logM_MW_range + logM_M31_range + logM_M33_range \ + D_M31_range + vr_M31_range \ + D_M33_range + vr_M33_range \ + logMratio_range \ + logM_LG_pair_range + logM_LG_triplet_range \ + angsep_range \ + logM_TA_range + logA200_range # + vt_M31_range outbundle = numpy.zeros([n_given,Npars+1]) outbundle[:,0] = w[index] outbundle[:,1] = numpy.log10(MW.M200[index]) outbundle[:,2] = numpy.log10(M31.M200[index]) outbundle[:,3] = numpy.log10(M33.M200[index]) outbundle[:,4] = M31.D[index]*1000.0 outbundle[:,5] = M31.vr[index] outbundle[:,6] = M33.D[index]*1000.0 outbundle[:,7] = M33.vr[index] outbundle[:,8] = numpy.log10(M31.M200[index]/MW.M200[index]) outbundle[:,9] = numpy.log10(MW.M200[index]+M31.M200[index]) outbundle[:,10] = numpy.log10(MW.M200[index]+M31.M200[index]+M33.M200[index]) outbundle[:,11] = M33.angsep[index] outbundle[:,12] = numpy.log10(M31.M_TA[index]) outbundle[:,13] = numpy.log10(M31.M_TA[index]/(MW.M200[index]+M31.M200[index])) # outbundle[:,14] = M31.vt[index] else: print "Error: unrecognised target filename: "+filename # # Now trim outbundle to random selection of n lines: # numpy.random.shuffle(outbundle) # outbundle = outbundle[0:n_required,:] # # Now trim outbundle to (last) n halos with highest weight: # outbundle.sort(0) # outbundle = outbundle[n_given-n_required:n_given,:] # The actual writing: # This is what I want to do - but header is not implemented in my # numpy :-( # hdr = hline1+'\n'+hline2 # numpy.savetxt(filename, outbundle, header=hdr) output = open(filename,'w') output.write("%s\n" % hline1) output.write("%s\n" % hline2) output.close() numpy.savetxt('junk', outbundle) cat = subprocess.call("cat junk >> " + filename, shell=True) rm = subprocess.call("rm junk", shell=True) if cat != 0 or rm != 0: print "Error: write subprocesses failed in some way :-/" sys.exit() if vb: print "Written",n,"lines to",filename return # ====================================================================== # NB. Files are named ".fits" but are actually of type tabletype, which # may well not be FITS... def write_fits_files(kind,index,MW,M31,M33,w,vb): filename = "LG_"+kind+"_MW.fits" t = MW.rows(index[0]) t.add_column('w',w) t.write(filename,type=tabletype,overwrite=True) if vb: print "Written",len(t),"rows to",filename filename = "LG_"+kind+"_M31.fits" t = M31.rows(index[0]) t.add_column('w',w) t.write(filename,type=tabletype,overwrite=True) if vb: print "Written",len(t),"rows to",filename if kind == 'triplets': filename = "LG_"+kind+"_M33.fits" t = M33.rows(index[0]) t.add_column('w',w) t.write(filename,type=tabletype,overwrite=True) if vb: print "Written",len(t),"rows to",filename return # ====================================================================== # If called as a script, the python variable __name__ will be set to # "__main__" - so test for this, and execute the main program if so. # Writing it like this allows the function plot_oguri_lens to be called # from the python command line as well as from the unix prompt. if __name__ == '__main__': LGHPinfer(sys.argv) # ======================================================================
drphilmarshall/LocalGroupHaloProps
attic/LGweights.py
Python
gpl-2.0
25,885
[ "Galaxy" ]
304ac96825621850b986463480dface463444aa9014fd6830b8012c0583005c6
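Editor's note: LGHPinfer above draws Local Group halo triplets from a simulation prior and re-weights each sample by the likelihood of the observed kinematics (importance sampling). The sketch below shows that re-weighting step in isolation for a single Gaussian distance constraint; the sample range, constraint values, and helper code are illustrative stand-ins, not the localgroup.bundle API used in the record.

import numpy as np

rng = np.random.default_rng(0)
D_samples = rng.uniform(0.3, 1.5, size=100000)   # prior samples of M31 distance (Mpc)

D_obs, D_err = 0.77, 0.04                        # assumed observational constraint
w = np.exp(-0.5 * ((D_samples - D_obs) / D_err) ** 2)   # unnormalised importance weights
w /= w.sum()

# Posterior mean under the constraint, and the effective number of samples
# carrying the weight:
print((w * D_samples).sum())
print(1.0 / (w ** 2).sum())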
# # Copyright (C) 2013-2018 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # Tests particle property setters/getters from __future__ import print_function import unittest as ut import espressomd import numpy as np from espressomd.interactions import FeneBond class ParticleProperties(ut.TestCase): # Particle id to work on pid = 17 # Error tolerance when comparing arrays/tuples... tol = 1E-9 # Handle for espresso system system = espressomd.System(box_l=[100.0, 100.0, 100.0]) f1 = FeneBond(k=1, d_r_max=5) system.bonded_inter.add(f1) f2 = FeneBond(k=1, d_r_max=5) system.bonded_inter.add(f2) def setUp(self): if not self.system.part.exists(self.pid): self.system.part.add(id=self.pid, pos=(0, 0, 0)) def tearDown(self): self.system.part.clear() def generateTestForVectorProperty(_propName, _value): """Generates test cases for vectorial particle properties such as position, velocity... 1st arg: name of the property (e.g., "pos"), 2nd array: value to be used for testing. Has to be numpy.array of floats """ # This is executed, when generateTestForVectorProperty() is called propName = _propName value = _value def func(self): # This code is run at the execution of the generated function. # It will use the state of the variables in the outer function, # which was there, when the outer function was called setattr(self.system.part[self.pid], propName, value) np.testing.assert_allclose(np.array(getattr(self.system.part[ self.pid], propName)), value, err_msg=propName + ": value set and value gotten back differ.", atol=self.tol) return func def generateTestForScalarProperty(_propName, _value): """Generates test cases for scalar particle properties such as type, mass, charge... 1st arg: name of the property (e.g., "type"), 2nd array: value to be used for testing. int or float """ # This is executed, when generateTestForVectorProperty() is called propName = _propName value = _value def func(self): # This code is run at the execution of the generated function. 
# It will use the state of the variables in the outer function, # which was there, when the outer function was called setattr(self.system.part[self.pid], propName, value) self.assertEqual(getattr(self.system.part[self.pid], propName), value, propName + ": value set and value gotten back differ.") return func test_pos = generateTestForVectorProperty("pos", np.array([0.1, 0.2, 0.3])) test_v = generateTestForVectorProperty("v", np.array([0.2, 0.3, 0.4])) test_f = generateTestForVectorProperty("f", np.array([0.2, 0.3, 0.7])) test_type = generateTestForScalarProperty("type", int(3)) test_mol_id = generateTestForScalarProperty("mol_id", int(3)) test_bonds_property = generateTestForScalarProperty( "bonds", ((f1, 1), (f2, 2))) if espressomd.has_features(["MASS"]): test_mass = generateTestForScalarProperty("mass", 1.3) if espressomd.has_features(["ROTATION"]): for x in 0, 1: for y in 0, 1: for z in 0, 1: test_rotation = generateTestForVectorProperty( "rotation", np.array([x, y, z], dtype=int)) test_omega_lab = generateTestForVectorProperty( "omega_lab", np.array([4., 2., 1.])) test_omega_body = generateTestForVectorProperty( "omega_body", np.array([4., 72., 1.])) test_torque_lab = generateTestForVectorProperty( "torque_lab", np.array([4., 72., 3.7])) # The tested value has to be normalized! test_quat = generateTestForVectorProperty( "quat", np.array([0.5, 0.5, 0.5, 0.5])) if espressomd.has_features(["LANGEVIN_PER_PARTICLE"]): if espressomd.has_features(["PARTICLE_ANISOTROPY"]): test_gamma = generateTestForVectorProperty( "gamma", np.array([2., 9., 0.23])) def test_gamma_single(self): self.system.part[self.pid].gamma = 17.4 np.testing.assert_array_equal( np.copy(self.system.part[self.pid].gamma), np.array([17.4, 17.4, 17.4]), "gamma: value set and value gotten back differ.") else: test_gamma = generateTestForScalarProperty("gamma", 17.3) if espressomd.has_features(["PARTICLE_ANISOTROPY"]): test_gamma_rot = generateTestForVectorProperty( "gamma_rot", np.array([5., 10., 0.33])) def test_gamma_rot_single(self): self.system.part[self.pid].gamma_rot = 15.4 np.testing.assert_array_equal( np.copy(self.system.part[self.pid].gamma_rot), np.array([15.4, 15.4, 15.4]), "gamma_rot: value set and value gotten back differ.") else: test_gamma_rot = generateTestForScalarProperty( "gamma_rot", 14.23) # test_director=generateTestForVectorProperty("director",np.array([0.5,0.4,0.3])) if espressomd.has_features(["ELECTROSTATICS"]): test_charge = generateTestForScalarProperty("q", -19.7) if espressomd.has_features(["DIPOLES"]): test_dip = generateTestForVectorProperty( "dip", np.array([0.5, -0.5, 3])) test_dipm = generateTestForScalarProperty("dipm", -9.7) if espressomd.has_features(["VIRTUAL_SITES"]): test_virtual = generateTestForScalarProperty("virtual", 1) if espressomd.has_features(["VIRTUAL_SITES_RELATIVE"]): def test_yy_vs_relative(self): self.system.part.add(id=0, pos=(0, 0, 0)) self.system.part.add(id=1, pos=(0, 0, 0)) self.system.part[1].vs_relative = (0, 5.0, (0.5, -0.5, -0.5, -0.5)) self.system.part[1].vs_quat = [1, 2, 3, 4] np.testing.assert_array_equal( self.system.part[1].vs_quat, [1, 2, 3, 4]) res = self.system.part[1].vs_relative self.assertEqual(res[0], 0, "vs_relative: " + res.__str__()) self.assertEqual(res[1], 5.0, "vs_relative: " + res.__str__()) np.testing.assert_allclose( res[2], np.array( (0.5, -0.5, -0.5, -0.5)), err_msg="vs_relative: " + res.__str__(), atol=self.tol) @ut.skipIf(not espressomd.has_features("DIPOLES"), "Features not available, skipping test!") def 
test_contradicting_properties_dip_dipm(self): with self.assertRaises(ValueError): self.system.part.add(pos=[0, 0, 0], dip=[1, 1, 1], dipm=1.0) @ut.skipIf(not espressomd.has_features("DIPOLES", "ROTATION"), "Features not available, skipping test!") def test_contradicting_properties_dip_quat(self): with self.assertRaises(ValueError): self.system.part.add(pos=[0, 0, 0], dip=[ 1, 1, 1], quat=[1.0, 1.0, 1.0, 1.0]) @ut.skipIf(not espressomd.has_features("ELECTROSTATICS"), "Test needs ELECTROSTATICS") def test_particle_selection(self): s = self.system s.part.clear() positions = ((0.2, 0.3, 0.4), (0.4, 0.2, 0.3), (0.7, 0.7, 0.7)) charges = 0, 1E-6, -1, 1 # Place particles i = 0 for pos in positions: for q in charges: s.part.add(pos=pos, q=q, id=i) i += 1 # Scalar property res = s.part.select(q=0) self.assertEqual(len(res.id), len(positions)) for p in res: self.assertAlmostEqual(p.q, 0, places=13) # Vectorial property res = s.part.select(pos=(0.2, 0.3, 0.4)) self.assertEqual(len(res.id), len(charges)) for p in res: np.testing.assert_allclose( (0.2, 0.3, 0.4), np.copy(p.pos), atol=1E-12) # Two criteria res = s.part.select(pos=(0.2, 0.3, 0.4), q=0) self.assertEqual(tuple(res.id), (0,)) # Emtpy result res = s.part.select(q=17) self.assertEqual(tuple(res.id), ()) # User-specified criterion res = s.part.select(lambda p: p.pos[0] < 0.5) self.assertEqual(tuple(sorted(res.id)), (0, 1, 2, 3, 4, 5, 6, 7)) def test_image_box(self): s = self.system s.part.clear() pos = 1.5 * s.box_l s.part.add(pos=pos) np.testing.assert_equal(np.copy(s.part[0].image_box), [1, 1, 1]) def test_accessing_invalid_id_raises(self): self.system.part.clear() handle_to_non_existing_particle = self.system.part[42] with self.assertRaises(RuntimeError): handle_to_non_existing_particle.id def test_parallel_property_setters(self): s = self.system s.part.clear() s.part.add(pos=s.box_l * np.random.random((100, 3))) # Copy individual properties of particle 0 print( "If this test hangs, there is an mpi deadlock in a particle property setter.") for p in espressomd.particle_data.particle_attributes: # Uncomment to identify guilty property #print( p) if not hasattr(s.part[0], p): raise Exception( "Inconsistency between ParticleHandle and particle_data.particle_attributes") try: setattr(s.part[:], p, getattr(s.part[0], p)) except AttributeError: print("Skipping read-only", p) # Cause a differtn mpi callback to uncover deadlock immediately x = getattr(s.part[:], p) if __name__ == "__main__": #print("Features: ", espressomd.features()) ut.main()
hmenke/espresso
testsuite/python/particle.py
Python
gpl-3.0
10,814
[ "ESPResSo" ]
bdf3c9f6e0ab3934bcff453ad54027e836d0dd1a0b2d76a37160adaf7cfcdf86
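Editor's note: the ESPResSo test above generates its test methods from factory functions that close over a property name and a value, then binds the results as class attributes. A stripped-down version of that pattern, with a plain object standing in for the espressomd particle handle, so it runs without ESPResSo installed:

import unittest
import numpy as np

class Dummy(object):
    """Stand-in for a particle handle; attributes are set freely."""
    pass

def generate_vector_test(prop_name, value):
    # Returns a closure that sets the property and checks the round trip.
    def func(self):
        setattr(self.obj, prop_name, value)
        np.testing.assert_allclose(getattr(self.obj, prop_name), value, atol=1e-9)
    return func

class DummyProperties(unittest.TestCase):
    def setUp(self):
        self.obj = Dummy()

    # Bound at class-definition time, exactly as in the record above:
    test_pos = generate_vector_test("pos", np.array([0.1, 0.2, 0.3]))
    test_v = generate_vector_test("v", np.array([0.2, 0.3, 0.4]))

if __name__ == "__main__":
    unittest.main()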
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function import tensorflow as tf from niftynet.layer.base_layer import Layer from niftynet.layer.fully_connected import FullyConnectedLayer from niftynet.layer.convolution import ConvolutionalLayer from niftynet.utilities.util_common import look_up_operations SUPPORTED_OP = set(['AVG', 'MAX']) class ChannelSELayer(Layer): """ Re-implementation of Squeeze-and-Excitation (SE) block described in:: Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507 """ def __init__(self, func='AVG', reduction_ratio=16, name='channel_squeeze_excitation'): self.func = func.upper() self.reduction_ratio = reduction_ratio super(ChannelSELayer, self).__init__(name=name) look_up_operations(self.func, SUPPORTED_OP) def layer_op(self, input_tensor): # spatial squeeze input_rank = len(input_tensor.shape) reduce_indices = list(range(input_rank))[1:-1] if self.func == 'AVG': squeeze_tensor = tf.reduce_mean(input_tensor, axis=reduce_indices) elif self.func == 'MAX': squeeze_tensor = tf.reduce_max(input_tensor, axis=reduce_indices) else: raise NotImplementedError("pooling function not supported") # channel excitation num_channels = int(squeeze_tensor.shape[-1]) reduction_ratio = self.reduction_ratio if num_channels % reduction_ratio != 0: raise ValueError( "reduction ratio incompatible with " "number of input tensor channels") num_channels_reduced = num_channels / reduction_ratio fc1 = FullyConnectedLayer(num_channels_reduced, with_bias=False, feature_normalization=None, acti_func='relu', name='se_fc_1') fc2 = FullyConnectedLayer(num_channels, with_bias=False, feature_normalization=None, acti_func='sigmoid', name='se_fc_2') fc_out_1 = fc1(squeeze_tensor) fc_out_2 = fc2(fc_out_1) while len(fc_out_2.shape) < input_rank: fc_out_2 = tf.expand_dims(fc_out_2, axis=1) output_tensor = tf.multiply(input_tensor, fc_out_2) return output_tensor class SpatialSELayer(Layer): """ Re-implementation of SE block -- squeezing spatially and exciting channel-wise described in:: Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, arXiv:1803.02579 """ def __init__(self, name='spatial_squeeze_excitation'): super(SpatialSELayer, self).__init__(name=name) def layer_op(self, input_tensor): # channel squeeze conv = ConvolutionalLayer(n_output_chns=1, kernel_size=1, feature_normalization=None, acti_func='sigmoid', name="se_conv") squeeze_tensor = conv(input_tensor) # spatial excitation output_tensor = tf.multiply(input_tensor, squeeze_tensor) return output_tensor class ChannelSpatialSELayer(Layer): """ Re-implementation of concurrent spatial and channel squeeze & excitation:: Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, arXiv:1803.02579 """ def __init__(self, func='AVG', reduction_ratio=16, name='channel_spatial_squeeze_excitation'): self.func = func.upper() self.reduction_ratio = reduction_ratio super(ChannelSpatialSELayer, self).__init__(name=name) look_up_operations(self.func, SUPPORTED_OP) def layer_op(self, input_tensor): cSE = ChannelSELayer(func=self.func, reduction_ratio=self.reduction_ratio, name='cSE') sSE = SpatialSELayer(name='sSE') output_tensor = tf.add(cSE(input_tensor), sSE(input_tensor)) return output_tensor
NifTK/NiftyNet
niftynet/layer/squeeze_excitation.py
Python
apache-2.0
4,408
[ "exciting" ]
bd56d0e85544f706d58cc5a29f6073e3b6efbd3bacf472c8db20d21a592862ee
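Editor's note: the NiftyNet layer above implements channel squeeze-and-excitation with TensorFlow fully connected layers. For reference, a NumPy-only sketch of the same computation (global average pool, bottleneck projection, sigmoid gate, channel-wise rescale); the random weights are placeholders for what a trained block would learn.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 8, 8, 16))          # NHWC feature map with 16 channels
r = 4                                       # reduction ratio

w1 = rng.normal(size=(16, 16 // r))         # squeeze -> bottleneck
w2 = rng.normal(size=(16 // r, 16))         # bottleneck -> per-channel gate

squeeze = x.mean(axis=(1, 2))               # (2, 16): spatial average pool
hidden = np.maximum(squeeze @ w1, 0.0)      # ReLU
gate = 1.0 / (1.0 + np.exp(-(hidden @ w2))) # sigmoid, one gate per channel
out = x * gate[:, None, None, :]            # channel excitation
print(out.shape)                            # (2, 8, 8, 16)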
# This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. """visitor doing some postprocessing on the astng tree. Try to resolve definitions (namespace) dictionnary, relationship... This module has been imported from pyreverse :version: $Revision: 1.6 $ :author: Sylvain Thenault :copyright: 2003-2005 LOGILAB S.A. (Paris, FRANCE) :contact: http://www.logilab.fr/ -- mailto:python-projects@logilab.org :copyright: 2003-2005 Sylvain Thenault :contact: mailto:thenault@gmail.com """ from __future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import * from builtins import object __docformat__ = "restructuredtext en" from os.path import dirname from clonedigger.logilab.common.modutils import get_module_part, is_relative, \ is_standard_module from clonedigger.logilab import astng from clonedigger.logilab.astng.utils import LocalsVisitor class IdGeneratorMixIn(object): """ Mixin adding the ability to generate integer uid """ def __init__(self, start_value=0): self.id_count = start_value def init_counter(self, start_value=0): """init the id counter """ self.id_count = start_value def generate_id(self): """generate a new identifer """ self.id_count += 1 return self.id_count class Linker(IdGeneratorMixIn, LocalsVisitor): """ walk on the project tree and resolve relationships. According to options the following attributes may be added to visited nodes: * uid, a unique identifier for the node (on astng.Project, astng.Module, astng.Class and astng.locals_type). Only if the linker has been instantiad with tag=True parameter (False by default). * Function a mapping from locals'names to their bounded value, which may be a constant like a string or an integer, or an astng node (on astng.Module, astng.Class and astng.Function). 
* instance_attrs_type as locals_type but for klass member attributes (only on astng.Class) * implements, list of implemented interfaces _objects_ (only on astng.Class nodes) """ def __init__(self, project, inherited_interfaces=0, tag=False): IdGeneratorMixIn.__init__(self) LocalsVisitor.__init__(self) # take inherited interface in consideration or not self.inherited_interfaces = inherited_interfaces # tag nodes or not self.tag = tag # visited project self.project = project def visit_project(self, node): """visit an astng.Project node * optionaly tag the node wth a unique id """ if self.tag: node.uid = self.generate_id() for module in node.modules: self.visit(module) def visit_package(self, node): """visit an astng.Package node * optionaly tag the node wth a unique id """ if self.tag: node.uid = self.generate_id() for subelmt in list(node.values()): self.visit(subelmt) def visit_module(self, node): """visit an astng.Module node * set the locals_type mapping * set the depends mapping * optionaly tag the node wth a unique id """ if hasattr(node, 'locals_type'): return node.locals_type = {} node.depends = [] if self.tag: node.uid = self.generate_id() def visit_class(self, node): """visit an astng.Class node * set the locals_type and instance_attrs_type mappings * set the implements list and build it * optionaly tag the node wth a unique id """ if hasattr(node, 'locals_type'): return node.locals_type = {} if self.tag: node.uid = self.generate_id() # resolve ancestors for baseobj in node.ancestors(recurs=False): specializations = getattr(baseobj, 'specializations', []) specializations.append(node) baseobj.specializations = specializations # resolve instance attributes node.instance_attrs_type = {} for assattrs in list(node.instance_attrs.values()): for assattr in assattrs: self.visit_assattr(assattr, node) # resolve implemented interface try: node.implements = list(node.interfaces(self.inherited_interfaces)) except TypeError: node.implements = () def visit_function(self, node): """visit an astng.Function node * set the locals_type mapping * optionaly tag the node wth a unique id """ if hasattr(node, 'locals_type'): return node.locals_type = {} if self.tag: node.uid = self.generate_id() link_project = visit_project link_module = visit_module link_class = visit_class link_function = visit_function def visit_assname(self, node): """visit an astng.AssName node handle locals_type """ frame = node.frame() try: values = list(node.infer()) try: already_infered = frame.locals_type[node.name] for valnode in values: if not valnode in already_infered: already_infered.append(valnode) except KeyError: frame.locals_type[node.name] = values except astng.InferenceError: pass def visit_assattr(self, node, parent): """visit an astng.AssAttr node handle instance_attrs_type """ try: values = list(node.infer()) try: already_infered = parent.instance_attrs_type[node.attrname] for valnode in values: if not valnode in already_infered: already_infered.append(valnode) except KeyError: parent.instance_attrs_type[node.attrname] = values except astng.InferenceError: pass def visit_import(self, node): """visit an astng.Import node resolve module dependencies """ context_file = node.root().file for name in node.names: relative = is_relative(name[0], context_file) self._imported_module(node, name[0], relative) def visit_from(self, node): """visit an astng.From node resolve module dependencies """ basename = node.modname context_file = node.root().file if context_file is not None: relative = is_relative(basename, context_file) 
else: relative = False for name in node.names: if name[0] == '*': continue # analyze dependencies fullname = '%s.%s' % (basename, name[0]) if fullname.find('.') > -1: try: # XXX: don't use get_module_part, missing package precedence fullname = get_module_part(fullname) except ImportError: continue if fullname != basename: self._imported_module(node, fullname, relative) def compute_module(self, context_name, mod_path): """return true if the module should be added to dependencies""" package_dir = dirname(self.project.path) if context_name == mod_path: return 0 elif is_standard_module(mod_path, (package_dir,)): return 1 return 0 # protected methods ######################################################## def _imported_module(self, node, mod_path, relative): """notify an imported module, used to analyze dependencies """ module = node.root() context_name = module.name if relative: mod_path = '%s.%s' % ('.'.join(context_name.split('.')[:-1]), mod_path) if self.compute_module(context_name, mod_path): # handle dependencies if not hasattr(module, 'depends'): module.depends = [] mod_paths = module.depends if not mod_path in mod_paths: mod_paths.append(mod_path)
shaleh/clonedigger
clonedigger/logilab/astng/inspector.py
Python
gpl-3.0
9,335
[ "VisIt" ]
f1775240b8ae862785e53eb623f378ff0bfbd87ec37428b8fc815e9eab107053
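The Linker in the record above walks an astng project tree, optionally tagging each visited node with a generated integer uid and recording which modules each module depends on. The same visit-and-record pattern can be tried out with nothing but the standard-library ast module; the toy visitor below is only an illustration of that pattern, not clonedigger's or astng's API.

import ast

class DependencyVisitor(ast.NodeVisitor):
    """Toy linker: assigns uids to definitions and records imported modules."""

    def __init__(self):
        self.id_count = 0
        self.uids = {}      # definition name -> uid
        self.depends = []   # imported module names, in order of appearance

    def generate_id(self):
        self.id_count += 1
        return self.id_count

    def visit_Import(self, node):
        for alias in node.names:
            if alias.name not in self.depends:
                self.depends.append(alias.name)

    def visit_ImportFrom(self, node):
        if node.module and node.module not in self.depends:
            self.depends.append(node.module)

    def visit_FunctionDef(self, node):
        self.uids[node.name] = self.generate_id()
        self.generic_visit(node)

    def visit_ClassDef(self, node):
        self.uids[node.name] = self.generate_id()
        self.generic_visit(node)

source = """
import os
from collections import OrderedDict

class Thing:
    def method(self):
        return os.getcwd()
"""

visitor = DependencyVisitor()
visitor.visit(ast.parse(source))
print(visitor.depends)  # ['os', 'collections']
print(visitor.uids)     # {'Thing': 1, 'method': 2}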
#!/usr/bin/env python #-*- coding:utf-8 -*- # test_attributes.py """ Test the validity of graph, node, and edge attributes as well as the distribution generators. """ import os import unittest import numpy as np import pytest import nngt from base_test import TestBasis, XmlHandler, network_dir from tools_testing import foreach_graph # ---------- # # Test tools # # ---------- # def _results_theo(instruct): di_param = instruct["weights"] if (di_param["distribution"] == "uniform" or "corr" in di_param["distribution"]): return di_param["lower"], di_param["upper"] elif di_param["distribution"] == "gaussian": return di_param["avg"], di_param["std"] elif di_param["distribution"] == "lognormal": return di_param["position"], di_param["scale"] else: raise NotImplementedError("This distribution is not supported yet.") def _results_exp(attrib, instruct): di_param = instruct["weights"] if (di_param["distribution"] == "uniform" or "corr" in di_param["distribution"]): return attrib.min(), attrib.max() elif di_param["distribution"] == "gaussian": return np.average(attrib), np.std(attrib) elif di_param["distribution"] == "lognormal": m = np.average(attrib) v = np.var(attrib) return np.log(m/np.sqrt(1+v/m**2)), np.sqrt(np.log(1+v/m**2)) else: raise NotImplementedError("This distribution is not supported yet.") # ---------- # # Test class # # ---------- # class TestAttributes(TestBasis): ''' Class testing the main methods of the :mod:`~nngt.generation` module. ''' tolerance = 0.02 @property def test_name(self): return "test_attributes" @unittest.skipIf(nngt.get_config('mpi'), 'Not checking for MPI') def gen_graph(self, graph_name): di_instructions = self.parser.get_graph_options(graph_name) graph = nngt.generate(di_instructions) graph.set_name(graph_name) return graph, di_instructions def test_node_attr(self): ''' When generating graphs with weights, check that the expected properties are indeed obtained. ''' g = nngt.Graph(100) ref_result = np.random.uniform(-1, 4, g.node_nb()) g.set_node_attribute("nud", values=ref_result, value_type="double") computed_result = g.get_node_attributes(name="nud") self.assertTrue(np.allclose(ref_result, computed_result), '''Error on graph {}: unequal 'nud' attribute for tolerance {}. 
'''.format(g.name, self.tolerance)) def test_nattr_default_values(self): g2 = nngt.Graph() # add a new node with attributes attributes = { 'size': 2., 'color': 'blue', 'a': 5, 'blob': [] } attribute_types = { 'size': 'double', 'color': 'string', 'a': 'int', 'blob': 'object' } g2.new_node(attributes=attributes, value_types=attribute_types) g2.new_node(2) g2.new_node(3, attributes={'size': [4., 5., 1.], 'color': ['r', 'g', 'b']}, value_types={'size': 'double', 'color': 'string'}) # check all values # for the doubles: # NaN == NaN is false, so we need to check separately equality between # non-NaN entries and position of NaN entries double_res = np.array([2., np.NaN, np.NaN, 4., 5., 1.]) isnan1 = np.isnan(g2.node_attributes['size']) isnan2 = np.isnan(double_res) self.assertTrue(np.all(isnan1 == isnan2)) self.assertTrue( np.all(np.isclose( g2.node_attributes['size'][~isnan1], double_res[~isnan2])) ) # for the others, just compare the lists self.assertEqual( g2.node_attributes['color'].tolist(), ['blue', '', '', 'r', 'g', 'b']) self.assertEqual( g2.node_attributes['a'].tolist(), [5, 0, 0, 0, 0, 0]) self.assertEqual( g2.node_attributes['blob'].tolist(), [[], None, None, None, None, None]) def test_user_defined(self): ''' When generating graphs with weights, check that the expected properties are indeed obtained. ''' avg = 50 std = 6 g = nngt.generation.gaussian_degree(avg, std, nodes=200) ref_result = np.random.uniform(0, 5, g.edge_nb()) g.set_edge_attribute("ud", values=ref_result, value_type="double") computed_result = g.get_edge_attributes(name="ud") self.assertTrue(np.allclose(ref_result, computed_result), '''Error on graph {}: unequal 'ud' attribute for tolerance {}. '''.format(g.name, self.tolerance)) def test_user_defined2(self): ''' When generating graphs with weights, check that the expected properties are indeed obtained. ''' avg = 50 std = 6 g = nngt.generation.gaussian_degree(avg, std, nodes=200) ref_result = np.full(g.edge_nb(), 4.) g.set_edge_attribute("ud2", val=4., value_type="double") computed_result = g.get_edge_attributes(name="ud2") self.assertTrue(np.allclose(ref_result, computed_result), '''Error on graph {}: unequal 'ud2' attribute for tolerance {}. '''.format(g.name, self.tolerance)) @unittest.skipIf(nngt.get_config('mpi'), 'Not checking for MPI') def test_list_attributes(self): ''' For list attributes, test that they are preserved as lists, and that some nodes or edges do not own references to the same list. 
''' avg = 25 std = 3 graph = nngt.generation.gaussian_degree(avg, std, nodes=1000) # --------------- # # node attributes # # --------------- # graph.new_node_attribute("nlist", value_type="object", val=[]) nodes = [i for i in range(8, 49)] graph.set_node_attribute("nlist", val=[1], nodes=nodes) # update a fraction of the previously updated nodes nodes = [i for i in range(0, 41)] # to update the values, we need to get them to update the lists nlists = graph.get_node_attributes(name="nlist", nodes=nodes) for l in nlists: l.append(2) graph.set_node_attribute("nlist", values=nlists, nodes=nodes) # check that all lists are present nlists = graph.get_node_attributes(name="nlist") self.assertTrue( np.all(np.unique(nlists) == np.unique([[], [1], [2], [1, 2]]))) # check that all nodes from 0 to 48 were updated self.assertTrue([] not in nlists[:49].tolist()) # --------------- # # edge attributes # # --------------- # graph.new_edge_attribute("elist", value_type="object", val=[]) nodes = [i for i in range(8, 49)] edges = graph.get_edges(source_node=nodes, target_node=nodes) graph.set_edge_attribute("elist", val=[1], edges=edges) # update a fraction of the previously updated nodes nodes = [i for i in range(0, 41)] edges2 = graph.get_edges(source_node=nodes, target_node=nodes) # to update the values, we need to get them to update the lists elists = graph.get_edge_attributes(name="elist", edges=edges2) for l in elists: l.append(2) graph.set_edge_attribute("elist", values=elists, edges=edges2) # check that all lists are present elists = graph.get_edge_attributes(name="elist") self.assertTrue( np.all(np.unique(elists) == np.unique([[], [1], [2], [1, 2]]))) # check that all edges where updated eattr1 = graph.get_edge_attributes(name="elist", edges=edges).tolist() eattr2 = graph.get_edge_attributes(name="elist", edges=edges2).tolist() self.assertTrue([] not in eattr1 and [] not in eattr2) @foreach_graph def test_weights(self, graph, instructions, **kwargs): ''' When generating graphs with weights, check that the expected properties are indeed obtained. ''' ref_result = _results_theo(instructions) weights = graph.get_weights() computed_result = _results_exp(weights, instructions) self.assertTrue(np.allclose( ref_result, computed_result, self.tolerance), '''Error on graph {}: unequal weights for tolerance {}. '''.format(graph.name, self.tolerance)) @foreach_graph def test_delays(self, graph, instructions, **kwargs): ''' Test entirely run only if NEST is present on the computer. Check that delay distribution generated in NNGT, then in NEST, is conform to what was instructed. ''' # get the informations from the weights di_distrib = instructions["weights"] distrib = di_distrib["distribution"] delays = graph.set_delays(distribution=distrib, parameters=di_distrib) ref_result = _results_theo(instructions) computed_result = _results_exp(delays, instructions) self.assertTrue(np.allclose( ref_result, computed_result, self.tolerance), '''Error on graph {}: unequal delays for tolerance {}. 
'''.format(graph.name, self.tolerance)) # @todo #~ if nngt._config['with_nest']: #~ from nngt.simulation import make_nest_network #~ gids = make_nest_network(graph) # ---------------------- # # Pytest formatted tests # # ---------------------- # @pytest.mark.mpi_skip def test_str_attr(): ''' Check string attributes ''' g = nngt.Graph(5) # set node attribute node_names = ["aa", "b", "c", "dddd", "eee"] g.new_node_attribute("name", "string", values=node_names) # set edges edges = [(0, 1), (1, 3), (1, 4), (2, 0), (3, 2), (4, 1)] g.new_edges(edges) # set edge attribute eattr = ["a"*i for i in range(len(edges))] g.new_edge_attribute("edata", "string", values=eattr) # check attributes assert list(g.node_attributes["name"]) == node_names assert list(g.edge_attributes["edata"]) == eattr # save and load string attributes current_dir = os.path.dirname(os.path.abspath(__file__)) + '/' filename = current_dir + "g.el" g.to_file(filename) h = nngt.load_from_file(filename) assert list(h.node_attributes["name"]) == node_names assert list(h.edge_attributes["edata"]) == eattr os.remove(filename) # change an attribute node_names[2] = "cc" h.set_node_attribute("name", values=node_names) assert not np.array_equal(h.node_attributes["name"], g.node_attributes["name"]) assert list(h.node_attributes["name"]) == node_names eattr[0] = "l" h.set_edge_attribute("edata", values=eattr) assert not np.array_equal(h.edge_attributes["edata"], g.edge_attributes["edata"]) assert list(h.edge_attributes["edata"]) == eattr @pytest.mark.mpi_skip def test_delays(): dmin = 1. dmax = 8. d = { "distribution": "lin_corr", "correl_attribute": "distance", "lower": dmin, "upper": dmax } g = nngt.generation.distance_rule(200., nodes=100, avg_deg=10, delays=d) delays = g.get_delays() assert np.isclose(delays.min(), dmin) assert np.isclose(delays.max(), dmax) distances = g.edge_attributes["distance"] slope = 0.003 g.set_delays(dmin + slope*distances) delays = g.get_delays() assert np.all(np.isclose(delays, dmin + slope*distances)) @pytest.mark.mpi_skip def test_attributes_are_copied(): ''' Check that the attributes returned are a copy ''' rng = np.random.default_rng() nnodes = 100 nedges = 1000 wghts = rng.uniform(0, 5, nedges) g = nngt.generation.erdos_renyi(nodes=nnodes, edges=nedges, weights=wghts) # check weights ww = g.get_weights() assert np.all(np.isclose(wghts, ww)) rng.shuffle(ww) assert np.all(np.isclose(wghts, g.get_weights())) assert not np.all(np.isclose(ww, g.get_weights())) # check edge attribute g.new_edge_attribute("etest", "double", values=2*ww) etest = g.edge_attributes["etest"] assert np.all(np.isclose(etest, 2*ww)) rng.shuffle(etest) assert np.all(np.isclose(2*ww, g.edge_attributes["etest"])) assert not np.all(np.isclose(2*ww, etest)) # check node attribute vv = rng.uniform(2, 3, nnodes) g.new_node_attribute("ntest", "double", values=vv) ntest = g.node_attributes["ntest"] assert np.all(np.isclose(ntest, vv)) rng.shuffle(ntest) assert np.all(np.isclose(vv, g.node_attributes["ntest"])) assert not np.all(np.isclose(vv, ntest)) # ---------- # # Test suite # # ---------- # if not nngt.get_config('mpi'): suite = unittest.TestLoader().loadTestsFromTestCase(TestAttributes) if __name__ == "__main__": unittest.main() test_str_attr() test_delays() test_attributes_are_copied()
Silmathoron/NNGT
testing/test_attributes.py
Python
gpl-3.0
13,217
[ "Gaussian" ]
a89a8c3bc01eca7c44a31a75ae6485ecd79450c8ffc4aca27f55df52cdff7cb7
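The tests in the record above exercise NNGT's attribute API: an attribute is declared with a name and a value type, set per node or per edge, and read back as an array. A condensed sketch of that round trip is below; it reuses only calls that appear verbatim in the test file, so the signatures are taken on trust from the tests rather than from NNGT's documentation.

import numpy as np
import nngt

# Node attributes: create a graph, attach a per-node double attribute, read it back.
g = nngt.Graph(100)
values = np.random.uniform(-1, 4, g.node_nb())
g.set_node_attribute("nud", values=values, value_type="double")
assert np.allclose(values, g.get_node_attributes(name="nud"))

# Edge attributes: generate a Gaussian-degree graph and attach a per-edge attribute.
h = nngt.generation.gaussian_degree(50, 6, nodes=200)
weights = np.random.uniform(0, 5, h.edge_nb())
h.set_edge_attribute("ud", values=weights, value_type="double")
assert np.allclose(weights, h.get_edge_attributes(name="ud"))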
#!/usr/bin/env python3 import sys import multiprocessing from pathlib import Path import jinja2 import pypandoc import pytoml as toml from common import validate_toml, get_tt_version if (sys.version_info.major, sys.version_info.minor) < (3, 6): raise Exception("need Python 3.6 or later") THIS_FILE = Path(__file__).resolve() ROOT_DIR = THIS_FILE.parent.parent TEMPLATE_DIR = ROOT_DIR / "utils" / "templates" DOCS_DIR = ROOT_DIR / "docs" OP_DOCS_DIR = DOCS_DIR / "ops" FONTS_DIR = ROOT_DIR / "utils" / "fonts" TT_VERSION = get_tt_version() VERSION_STR = " ".join(["Teletype", TT_VERSION["tag"], "Documentation"]) # We want to run inject_latex in parallel as it's quite slow, and we must run # it once for each op. # But it must be defined before the multiprocessing.Pool is defined (because... # python.) def inject_latex(value): latex = pypandoc.convert_text(value["short"], format="markdown", to="tex") value["short_latex"] = latex return value # create a multiprocessing pool pool = multiprocessing.Pool() # our jinja2 environment env = jinja2.Environment( autoescape=False, loader=jinja2.FileSystemLoader(str(TEMPLATE_DIR)), trim_blocks=True, lstrip_blocks=True ) # determines the order in which sections are displayed, # final column indicates that a new page is inserted _after_ that section OPS_SECTIONS = [ ("variables", "Variables", False), ("hardware", "Hardware", False), ("patterns", "Patterns", False), ("controlflow", "Control flow", False), ("maths", "Maths", False), ("metronome", "Metronome", False), ("delay", "Delay", False), ("stack", "Stack", False), ("queue", "Queue", False), ("seed", "Seed", False), ("turtle", "Turtle", True), ("grid", "Grid", True), ("midi_in", "MIDI In", True), ("i2c", "Generic I2C", True), ("ansible", "Ansible", False), ("whitewhale", "Whitewhale", False), ("meadowphysics", "Meadowphysics", False), ("earthsea", "Earthsea", False), ("orca", "Orca", True), ("justfriends", "Just Friends", False), ("wslash", "W/", False), ("er301", "ER-301", False), ("fader", "Fader", False), ("matrixarchate", "Matrixarchate", True), ("telex_i", "TELEXi", False), ("telex_o", "TELEXo", False), ("disting", "Disting EX", True), ("wslashdelay", "W/2.0 delay", False), ("wslashsynth", "W/2.0 synth", False), ("wslashtape", "W/2.0 tape", False), ("crow", "Crow", False) ] def latex_safe(s): # backslash must be first, otherwise it will duplicate itself unsafe = ["\\", "&", "%", "$", "#", "_", "{", "}", "^"] for u in unsafe: s = s.replace(u, "\\" + u) # ~ is special s = s.replace("~", "\\~{}") return s def cheatsheet_tex(): print(f"Using docs directory: {DOCS_DIR}") print(f"Using ops docs directory: {OP_DOCS_DIR}") print() output = VERSION_STR + "\n\n" for (section, title, new_page) in OPS_SECTIONS: toml_file = Path(OP_DOCS_DIR, section + ".toml") if toml_file.exists() and toml_file.is_file(): output += f"\\group{{{ title }}}\n\n" print(f"Reading {toml_file}") # n.b. 
Python 3.6 dicts maintain insertion order ops = toml.loads(toml_file.read_text()) validate_toml(ops) ops_array = pool.map(inject_latex, ops.values()) for op in ops_array: prototype = latex_safe(op["prototype"]) if "prototype_set" in op: prototype += " / " + latex_safe(op["prototype_set"]) output += "\\begin{op}" if "aliases" in op and len(op["aliases"]) > 0: output += "[" + latex_safe(" ".join(op["aliases"])) + "]" output += "{" + prototype + "}" output += "\n" output += op["short_latex"] output += "\\end{op}" output += "\n\n" if new_page: output += "\\pagebreak\n\n" return output def main(): if len(sys.argv) != 2: sys.exit("Please supply a filename") p = Path(sys.argv[1]).resolve() p.write_text(cheatsheet_tex()) if __name__ == "__main__": main()
monome/teletype
utils/cheatsheet.py
Python
gpl-2.0
4,577
[ "ORCA" ]
ada6ccd75645439447316f3d519abbaf351321ad3ddd873a82bfbae87b43cb04
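latex_safe in the record above escapes the characters LaTeX treats specially before op prototypes and aliases are written into the cheatsheet. A small self-contained check of that escaping behaviour follows; the function body is copied from the script, while the sample strings are made up.

def latex_safe(s):
    # Backslash must be escaped first, otherwise it would duplicate itself.
    unsafe = ["\\", "&", "%", "$", "#", "_", "{", "}", "^"]
    for u in unsafe:
        s = s.replace(u, "\\" + u)
    # ~ has no simple backslash escape in LaTeX, so \~{} is used instead.
    s = s.replace("~", "\\~{}")
    return s

print(latex_safe("A & B"))        # A \& B
print(latex_safe("CV 1"))         # CV 1  (nothing to escape)
print(latex_safe("50% of $x_1"))  # 50\% of \$x\_1
print(latex_safe("~"))            # \~{}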
""" A module with Gaussian functions for creating Gaussian distributions in 1D, 2D and 3D. There are also functions with the same name and _FIT which can be used to fit an intensity distribution in a ndarray with the corresponding Gaussian distribution using scipy's curve_fit. A 1D Lorentz and a 1D gaussLorentz function is also included for fitting zero loss peaks in EELS spectra. These functions were written assuming you use np.meshgrid() with indexing = 'xy'. If you use 'ij' indexing then be careful about how you pass in the x and y coordinates. """ import numpy as np def gauss1D(x, x0, sigma): """ Returns the value of a gaussian at a 2D set of points for the given standard deviation with maximum normalized to 1. Parameters ---------- x: ndarray A vector of size (N,) of points for the x values x0: float The center of the Gaussian. sigma: float Standard deviation of the Gaussian. Returns ------- g: ndarray A vector of size (N,) of the Gaussian distribution evaluated at the input x values. Note ---- Calculate the half width at half maximum (HWHM) as >> HWHM = sqrt(2*log(2))*stDev ~ 1.18*stDev or if x goes from -1 to 1 >> 0.5*size(x)*stDev """ return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) def lorentz1D(x, x0, w): """ Returns the probability density function of the Lorentzian (aka Cauchy) function with maximum at 1. Parameters ---------- x: ndarray or list A 1D vector of points for the dependent variable x0: float The center of the peak maximum w: float The parameter that modifies the width of the distribution. Returns ------- l: ndarray A vector of size (N,) of the Lorentzian distribution evaluated at the input x values. """ return w ** 2 / ((x - x0) ** 2 + w ** 2) def gaussLorentz1D(x, x0, w): """ A Gaussian-Lorentzian function in one dimension. Parameters ---------- x: ndarray or list A 1D vector of points for the dependent variable x0: float The center of the peak maximum w: float The parameter that modifies the width of the distribution. Returns ------- lg: ndarray A vector of size (N,) of the Lorentzian Gaussian distribution evaluated at the input x values. """ gSigma = w / (2 * np.sqrt(2 * np.log(2))) # change the width to sigma return gauss1D(x, x0, gSigma) + lorentz1D(x, x0, w) def gauss2D(x, y, x0, y0, sigma_x, sigma_y): """ Calculates the value of a Gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Gaussian axes are assumed to be 90 degrees from each other. Parameters ---------- x, y : ndarray A 2D array of size (M,N) of points for the x values. Use x, y = np.meshgrid(range(M),range(N),indexing='xy'). x0 : float The center of the Gaussian along the x direction. y0 : float The center of the Gaussian along the y direction. sigma_x : float Standard deviation of the Gaussian along x. sigma_y : float Standard deviation of the Gaussian along y. Returns ------- g : ndarray A ndarray of size (N, M) of the Gaussian distribution evaluated at the input x values. Note ---- The Gaussian is normalized such that the peak == 1. To normalize the integral divide by 2 * np.pi * sigma_x * sigma_y """ x0 = float(x0) y0 = float(y0) g2 = np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2))) g2_norm = g2 / np.max(g2.flatten()) return g2_norm def gauss2D_FIT(xy, x0, y0, sigma_x, sigma_y): """ Version of gauss2D used for fitting (1 x_data input (xy) and flattened output). Returns the value of a gaussian at a 2D set of points for the given standard deviation with maximum normalized to 1. 
The Gaussian axes are assumed to be 90 degrees from each other. Parameters ---------- xy: tuple A (N,M) shaped tuple containing the vectors of points for the evaluation points. x0: float The x center of the Gaussian. y0: float The y center of the Gaussian. sigma_x: float The standard deviation of the Gaussian along x. sigma_y: float The standard deviation of the Gaussian along y. Returns ------- g2_norm: ndarray The Gaussian distribution with maximum value normalized to 1. The 2D ndarray is reshaped into a (N*M,) array for use in fitting functions in numpy and scipy. """ x0 = float(x0) y0 = float(y0) x = xy[0] y = xy[1] g2 = np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2))) g2_norm = g2 / np.max(g2.flatten()) return g2_norm.reshape(-1) def gauss2D_theta(x, y, x0, y0, sigma_x, sigma_y, theta): """ Returns the value of a generalized Gaussian at a 2D set of points for the given standard deviation with maximum normalized to 1. The Gaussian axes (assumed to be 90 degrees from each other) can be oriented at different angles to the output array axes using theta. Parameters ---------- x: ndarray or list Evaluation points along x of size (N,) y: ndarray Evaluation points along y of size (M,) x0: float Center of the maximum along x. y0: float Center of the maximum along y. sigma_x: float Standard deviation along x. sigma_y: float Standard deviation along y. theta: float The rotation of the Gaussian principle axes from the array horizontal and vertical. In radians. Returns ------- g2_norm: ndarray A (N, M) sized 2D ndarray with a Gaussian distribution rotated. """ x0 = float(x0) y0 = float(y0) a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2) b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / (4 * sigma_y ** 2) c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2) g2 = np.exp(- (a * ((x - x0) ** 2) + 2 * b * (x - x0) * (y - y0) + c * ((y - y0) ** 2))) g2_norm = g2 / np.max(g2.flatten()) return g2_norm # return a 2D array def gauss2D_theta_FIT(xy, x0, y0, sigma_x, sigma_y, theta): """ Version of gauss2D_theta used for fitting (1 x_data input and flattened output). Returns the value of a gaussian at a 2D set of points for the given standard deviation with maximum normalized to 1. The Gaussian axes can be oriented at different angles (theta) in radians. Parameters ---------- xy: tuple Evaluation points along x and y of shape (N,M) x0: float Center of the maximum along x. y0: float Center of the maximum along y. sigma_x: float Standard deviation along x. sigma_y: float Standard deviation along y. theta: float The rotation of the Gaussian principle axes from the array horizontal and vertical. In radians. Returns ------- g2_norm: ndarray A (N, M) sized 2D ndarray with a Gaussian distribution rotated. """ x0 = float(x0) y0 = float(y0) x = xy[0] y = xy[1] a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2) b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / (4 * sigma_y ** 2) c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2) g2 = np.exp(- (a * ((x - x0) ** 2) + 2 * b * (x - x0) * (y - y0) + c * ((y - y0) ** 2))) g2_norm = g2 / np.max(g2.flatten()) return g2_norm.ravel() # return a 1D vector def gauss2D_poly_FIT(xy, x0, y0, A, B, C): """ Returns the flattened values of an elliptical gaussian at a 2D set of points for the given polynomial pre-factors with maximum normalized to 1. 
The Gaussian axes are assumed to be 90 degrees from each other. The matrix looks like [[A,B],[B,C]]. See https://en.wikipedia.org/wiki/Gaussian_function Parameters ---------- xy: tuple Evaluation points along x and y of shape (N,M) x0: float Center of the maximum along x. y0: float Center of the maximum along y. A: float The A pre-factor for the polynomial expansion in the exponent. B: float The B pre-factor for the polynomial expansion in the exponent. C: float The C pre-factor for the polynomial expansion in the exponent. Returns ------- g2_norm: ndarray A (N, M) sized 2D ndarray with a Gaussaian distribution rotated. """ x0 = float(x0) y0 = float(y0) x = xy[0] # retrieve the array from the tuple y = xy[1] g2 = np.exp(-(A * (x - x0) ** 2 + 2 * B * (x - x0) * (y - y0) + C * (y - y0) ** 2)) g2_norm = g2 / np.max(g2.flatten()) return g2_norm.ravel() def gauss3D(x, y, z, x0, y0, z0, sigma_x, sigma_y, sigma_z): """ gauss3D(x,y,z,x0,y0,z0,sigma_x,sigma_y,sigma_z) Returns the value of a gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Gaussian axes are assumed to be 90 degrees from each other. Note ----- Be careful about the indexing used in meshgrid and the order in which you pass the x, y, z variables in. Parameters ---------- x, y, z: ndarray, from numpy.meshgrid 2D arrays of points (from meshgrid) x0, y0, z0: float The x, y, z centers of the Gaussian sigma_x, sigma_y, sigma_z: float The standard deviations of the Gaussian. Returns ------- g3_norm: ndarray A 3D ndarray """ x0 = float(x0) y0 = float(y0) z0 = float(z0) g3 = np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2) + (z - z0) ** 2 / ( 2 * sigma_z ** 2))) g3_norm = g3 / np.max(g3.flatten()) return g3_norm def gauss3D_FIT(xyz, x0, y0, z0, sigma_x, sigma_y, sigma_z): """ gauss3D_FIT((x,y,z),x0,y0,z0,sigma_x,sigma_y,sigma_z) Returns the value of a gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Gaussian axes are assumed to be 90 degrees from each other. xyz - x0, y0, z0 = the x, y, z centers of the Gaussian sigma_x, sigma_y, sigma_z = The std. deviations of the Gaussian. Note ----- Be careful about the indexing used in meshgrid and the order in which you pass the x, y, z variables in. Parameters ---------- xyz: tuple of ndarrays A tuple containing the 3D arrays of points (from meshgrid) x0, y0, z0: float The x, y, z centers of the Gaussian sigma_x, sigma_y, sigma_z: float The standard deviations of the Gaussian. Returns ------- g3_norm: ndarray A flattened array for fitting. """ x0 = float(x0) y0 = float(y0) z0 = float(z0) x = xyz[0] y = xyz[1] z = xyz[2] g3 = np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2) + (z - z0) ** 2 / ( 2 * sigma_z ** 2))) g3_norm = g3 / np.max(g3.flatten()) return g3_norm.ravel() def gauss3D_poly(x, y, z, x0, y0, z0, A, B, C, D, E, F): """ gauss3Dpoly_FIT((x,y,z),x0,y0,z0,A,B,C,D,E,F) Returns the value of a gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Gaussian axes are not locked to be 90 degrees. 
Parameters ---------- x, y, z : ndarray, 3D 3D arrays of points (from meshgrid) x0, y0, z0 : float The x, y, z centers of the Gaussian A, B, C, D, E, F : float The polynomial values for the fit Returns ------- : ndarray, 3D The 3D Gaussian """ x0 = float(x0) y0 = float(y0) z0 = float(z0) g3 = np.exp(-(A * (x - x0) ** 2 + B * (y - y0) ** 2 + C * (z - z0) ** 2 + 2 * D * (x - x0) * (y - y0) + 2 * E * ( x - x0) * (z - z0) + 2 * F * (y - y0) * (z - z0))) g3_norm = g3 / np.max(g3.flatten()) return g3_norm def gauss3D_poly_FIT(xyz, x0, y0, z0, A, B, C, D, E, F): """ gauss3Dpoly_FIT((x,y,z),x0,y0,z0,A,B,C,D,E,F) Returns the value of a gaussian at a 2D set of points for the given standard deviations with maximum normalized to 1. The Guassian axes are not locked to be 90 degrees. Parameters ---------- xyz : tuple of ndarrays 3D arrays of points (from meshgrid) combined in a tuple x0, y0, z0 : float The x, y, z centers of the Gaussian A, B, C, D, E, F : float The polynomial values for the fit Returns ------- : ndarray, 3D The 3D Gaussian """ x0 = float(x0) y0 = float(y0) z0 = float(z0) x = xyz[0] y = xyz[1] z = xyz[2] g3 = np.exp(-(A * (x - x0) ** 2 + B * (y - y0) ** 2 + C * (z - z0) ** 2 + 2 * D * (x - x0) * (y - y0) + 2 * E * ( x - x0) * (z - z0) + 2 * F * (y - y0) * (z - z0))) g3_norm = g3 / np.max(g3.flatten()) return g3_norm.ravel() def gauss3DGEN_FIT(xyz, x0, y0, z0, sigma_x, sigma_y, sigma_z, Angle1, Angle2, Angle3, BG, Height): """ gauss3DGEN_FIT((x,y,z),x0,y0,z0,sigma_x,sigma_y,sigma_z,Angle1,Angle2,Angle3,BG,Height) Returns the value of a gaussian at a 3D set of points for the given sub-pixel positions with standard deviations, 3D Eular rotation angles, constant Background value, and Gaussian peak height. adapted from code by Yongsoo Yang, yongsoo.ysyang@gmail.com Note ---- This is a work in progress. Needs more testing. Parameters ---------- xyz : tuple of 3 np.ndarray 3D arrays of points (from meshgrid) combined in a tuple x0, y0, z0 : float The x, y, z centers of the Gaussian sigma_x,sigma_y,sigma_z: float standard deviations along x,y,z direction before 3D angular rotation Angle1, Angle2, Angle3 : float, degrees Tait-Bryan angles in ZYX convention for 3D rotation in degrees BG : float Background Height: float The peak height of the Gaussian function Returns ------- : ndarray, 3D The 3D Gaussian """ # 3D vectors for each sampled positions print('Warning: this function is using Fortran ordered array for use in tomviz. Need to test this') # Todo: Remove Fortran ordering v = np.array([xyz[0].reshape(-1, order='F') - x0, xyz[1].reshape(-1, order='F') - y0, xyz[2].reshape(-1, order='F') - z0]) # rotation axes for Tait-Bryan angles vector1 = np.array([0, 0, 1]) rotmat1 = MatrixQuaternionRot(vector1, Angle1) vector2 = np.array([0, 1, 0]) rotmat2 = MatrixQuaternionRot(vector2, Angle2) vector3 = np.array([1, 0, 0]) rotmat3 = MatrixQuaternionRot(vector3, Angle3) # full rotation matrix # Todo : Remove dependency on np.matrix() rotMAT = np.matrix(rotmat3) * np.matrix(rotmat2) * np.matrix(rotmat1) # 3x3 matrix for applying sigmas D = np.matrix(np.array([[1. / (2 * sigma_x ** 2), 0, 0, ], [0, 1. / (2 * sigma_y ** 2), 0], [0, 0, 1. / (2 * sigma_z ** 2)]])) # apply 3D rotation to the sigma matrix WidthMat = np.transpose(rotMAT) * D * rotMAT # calculate 3D Gaussian RHS_calc = WidthMat * np.matrix(v) Result = Height * np.exp(-1 * np.sum(v * RHS_calc.A, axis=0)) + BG return Result def MatrixQuaternionRot(vector, theta): """ Quaternion tp rotate a given theta angle around the given vector. 
adapted from code by Yongsoo Yang, yongsoo.ysyang@gmail.com Parameters ---------- vector : ndarray, 3-element A non-zero 3-element numpy array representing rotation axis theta : float, degrees Rotation angle in degrees Returns ------- RotM : ndarray A (3, 3) rotation matrix for rotating by `theta` degrees about `vector`. Author: Yongsoo Yang, Dept. of Physics and Astronomy, UCLA yongsoo.ysyang@gmail.com """ theta = theta * np.pi / 180 vector = vector / float(np.sqrt(np.dot(vector, vector))) w = np.cos(theta / 2) x = -np.sin(theta / 2) * vector[0] y = -np.sin(theta / 2) * vector[1] z = -np.sin(theta / 2) * vector[2] RotM = np.array([[1. - 2 * y ** 2. - 2 * z ** 2, 2. * x * y + 2 * w * z, 2. * x * z - 2. * w * y], [2. * x * y - 2. * w * z, 1. - 2. * x ** 2 - 2. * z ** 2, 2. * y * z + 2. * w * x], [2 * x * z + 2 * w * y, 2 * y * z - 2. * w * x, 1 - 2. * x ** 2 - 2. * y ** 2]]) return RotM
ercius/openNCEM
ncempy/algo/gaussND.py
Python
gpl-3.0
17,632
[ "Gaussian" ]
e4a53a6282b22007e18da741d19ad0aac853a7df8bfb006e6ffd617e3217e007
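The _FIT variants in the record above exist so the distributions can be handed straight to scipy.optimize.curve_fit, which wants the coordinates bundled into a single argument and the model output flattened to 1-D. A minimal worked fit of a noisy 2D Gaussian is sketched below; the model function is restated inline in the same form as the module's gauss2D_FIT rather than imported, and the data are synthetic.

import numpy as np
from scipy.optimize import curve_fit

def gauss2D_FIT(xy, x0, y0, sigma_x, sigma_y):
    # Same form as the module's fitting function: peak normalized to 1, flattened output.
    x, y = xy
    g2 = np.exp(-((x - x0) ** 2 / (2 * sigma_x ** 2) + (y - y0) ** 2 / (2 * sigma_y ** 2)))
    return (g2 / g2.max()).reshape(-1)

# Synthetic data: a Gaussian centered at (12, 20) plus a little noise.
x, y = np.meshgrid(np.arange(64), np.arange(64), indexing='xy')
truth = (12.0, 20.0, 3.0, 5.0)
data = gauss2D_FIT((x, y), *truth) + np.random.normal(0, 0.01, x.size)

# curve_fit needs 1-D ydata, which is why the model returns a raveled array.
popt, pcov = curve_fit(gauss2D_FIT, (x, y), data, p0=(10, 25, 2, 2))
print(np.round(popt, 2))  # approximately [12. 20. 3. 5.]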
#!/usr/bin/env python ##This Python file uses the following encoding: utf-8 ## ## (C) 2017 Muthiah Annamalai, ## XML from __future__ import print_function from xml.dom.minidom import parse import sys import os import tamil import codecs import re import pprint from xml.dom.minidom import parse as xml_parse from xml.dom.minidom import parseString as xml_parse_string PYTHON3 = (sys.version[0] == '3') if PYTHON3: unicode = str import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk, GObject, GLib, Pango from syntaxhighlighing import EzhilSyntaxHighlightingEditor from ezhilpopuptools import PopupForTextView from resources import getResourceFile # represents DTD of our XML # Rules: # 1) root <chapter> has 'title' attr # 2) <section> placeholder in <chapter>; all useful tags are within <section> # 3) atomic tags <list>, <code>, <b>, <i> and <u> class XMLtoDocVisitor(object): def __init__(self): object.__init__(self) self.dom = None pass def visit(self,dom_in): for child in dom_in.childNodes: #print(dir(child)) name = child.nodeName if name == "chapter": self.visit_chapter(child) self.visit(child) elif name == "section": self.visit_section(child) self.visit(child) elif name == "list": #terminal node self.visit_list(child) elif name == "code": #terminal node self.visit_code(child) elif name in ["b","i","u"]: self.visit_fmt(child,name) else: #child.name == "text": #terminal node self.visit_text(child) def visit_fmt(self,*args): raise NotImplementedError() def visit_chapter(self,*args): raise NotImplementedError() def visit_section(self,*args): raise NotImplementedError() def visit_code(self,*args): raise NotImplementedError() def visit_text(self,text): raise NotImplementedError() def visit_list(self,*args): raise NotImplementedError() # class worries about the layouts and Gtk ops class DocLayoutWidgetActions(XMLtoDocVisitor): def __init__(self): XMLtoDocVisitor.__init__(self) self.highlighter = EzhilSyntaxHighlightingEditor() self.highlighter.append_mode = True self.chapters = {} self.tag = {} self.pageno = 0 self.default_font = "Sans 18" self.default_font_chapter = "Serif 16" self.default_font_title = "Sans 24" self.textbuffer = None self.layoutpos = {"title":u"","section":0} def build_tags(self,textbuffer): self.tag["comment"] = textbuffer.create_tag("comment", weight=Pango.Weight.SEMIBOLD,foreground="red",font=self.default_font) self.tag["bold"] = textbuffer.create_tag("bold", weight=Pango.Weight.BOLD,font=self.default_font,foreground="black") self.tag["italic"] = textbuffer.create_tag("italic", style=Pango.Style.ITALIC,font=self.default_font,foreground="black") self.tag["underline"] = textbuffer.create_tag("underline", underline=Pango.Underline.SINGLE,font=self.default_font,foreground="black") self.tag["code"] = textbuffer.create_tag("code", style=Pango.Style.ITALIC,font=self.default_font,foreground="green") # use for chapter title self.tag["title"] = textbuffer.create_tag("title", weight=Pango.Weight.BOLD,foreground="blue",font=self.default_font_title) self.tag["keyword"] = textbuffer.create_tag("keyword", weight=Pango.Weight.BOLD,foreground="blue",font=self.default_font) # use for text/section tags self.tag["chapter"] = textbuffer.create_tag("chapter", weight=Pango.Weight.SEMIBOLD,foreground="black",font=self.default_font_chapter) self.tag["text"] = textbuffer.create_tag("text",font=self.default_font,foreground="black") self.tag["literal"] = textbuffer.create_tag("literal", style=Pango.Style.ITALIC,font=self.default_font,foreground="green") self.tag["operator"] = 
textbuffer.create_tag("operator", weight=Pango.Weight.SEMIBOLD,font=self.default_font,foreground="olive") self.tag["found"] = textbuffer.create_tag("found",font=self.default_font, background="yellow") self.tag["list"] = textbuffer.create_tag("list", weight=Pango.Weight.SEMIBOLD,font=self.default_font,foreground="purple") self.tag["pass"] = textbuffer.create_tag("pass", weight=Pango.Weight.SEMIBOLD,font=self.default_font,foreground="green") self.highlighter.tag_comment = self.tag["comment"] self.highlighter.tag_keyword = self.tag["keyword"] self.highlighter.tag_literal = self.tag["literal"] self.highlighter.tag_operator = self.tag["operator"] self.highlighter.tag_found = self.tag["found"] self.highlighter.tag_text = self.tag["text"] self.highlighter.tag_fail = self.tag["list"] self.highlighter.tag_pass = self.tag["pass"] def visit_fmt(self,*args): #pprint.pprint(args) child = args[0] fmt = args[1] if fmt.startswith("i"): tag = self.tag["italic"] elif fmt.startswith("b"): tag = self.tag["italic"] elif fmt.startswith("u"): tag = self.tag["underline"] else: raise Exception("Tag %s not implemented"%fmt) self.append_text_with_tag(child.childNodes[0].data,tag) def visit_chapter(self,*args): child = args[0] # skip chapter 0 - ithu managatti mathiri irukku title = ((self.pageno > 0) and u"%d) "%self.pageno or u" ") + child.getAttribute("title")+u"\n" self.layoutpos["title"]=title self.append_text_with_tag(title,self.tag["title"]) def visit_section(self,*args): child = args[0] self.layoutpos["section"] += 1 self.append_text_with_tag(u"_"*100+u"\n",self.tag["text"]) self.append_text_with_tag(u"பிரிவு %d\n"%self.layoutpos["section"], self.tag["found"]) def visit_code(self,*args): child = args[0] #print("Code => %s"%str(child)) ref_text = None for node in child.childNodes: if node.nodeType == node.TEXT_NODE: ref_text = node break if ref_text: self.highlighter.run_syntax_highlighting(ref_text.data) pass def visit_text(self,text): child = text #print("Text => %s"%str(child)) self.append_text_with_tag(child.data,self.pageno == 0 and self.tag["chapter"] or self.tag["text"]) def visit_list(self,*args): child = args[0] #print("List => %s"%str(child)) ref_text = None for node in child.childNodes: if node.nodeType == node.TEXT_NODE: ref_text = node break self.append_text_with_tag(u"\n",self.tag["list"]) if ref_text: idx = 0 for _,line in enumerate(re.split("\n+",ref_text.data.strip())): line = line.strip() line = re.sub("^\*"," ",line) if len(line) < 1: continue idx = idx + 1 self.append_text_with_tag(u" %d)"%idx+line+u"\n",self.tag["list"]) pass def append_text_with_tag(self,text,tag): textbuffer = self.textbuffer self.highlighter.textbuffer = self.textbuffer textbuffer.insert_at_cursor( text ) n_end = textbuffer.get_end_iter() n_start = textbuffer.get_iter_at_offset(textbuffer.get_char_count()-len(text)) textbuffer.apply_tag(tag,n_start,n_end) return def render_page(self,pageno,textbuffer): if len(self.tag.keys()) == 0: self.build_tags(textbuffer) self.pageno = pageno self.textbuffer = textbuffer # reset self.layoutpos = {"title":u"","section":0} self.textbuffer.set_text(u"") dom = self.chapters[pageno]['dom'] self.visit(dom) #print("==========END VISITOR=======") #with codecs.open(self.chapters[pageno]['file'],'r','utf-8') as fp: # data = fp.read() # str_val = dom.getElementsByTagName("chapter")[0].getAttribute("title") # print("Title => %s"%str_val) # textbuffer.set_text( str_val ) # n_end = textbuffer.get_end_iter() # n_start = textbuffer.get_iter_at_offset(textbuffer.get_char_count()-len(str_val)) # 
textbuffer.apply_tag(self.tag["title"],n_start,n_end) return True def update_toc(self,box,parent): toc_list = [u"<chapter title=\"தமிழில் நிரல் எழுது - புத்தக உள்ளீடு\">",] for pos,chapter in self.chapters.items(): btn = Gtk.Button(u"%d. <b>%s</b>"%(pos,chapter['title'])) btn.get_children()[0].set_use_markup(True) btn.set_alignment(0.0,0.0) btn.connect('clicked',parent.on_navigate_to,chapter['title'],pos) box.pack_start(btn,True,True,0) toc_list.append(u"<section>%s</section>"%chapter['title']) toc_list.append(u"</chapter>") toc_str = u"\n".join(toc_list) toc_dom = xml_parse_string(PYTHON3 and toc_str or u'{0}'.format(toc_str).encode('utf-8')) self.chapters[0] = {'dom':toc_dom,'title':u'தமிழில் நிரல் எழுது - புத்தக உள்ளீடு','file':u':auto:'} return True # class contains the books class XMLtoDoc(DocLayoutWidgetActions): def __init__(self,chapters_in_order): DocLayoutWidgetActions.__init__(self) self.chapters_in_order = chapters_in_order for idx,chapter in enumerate(self.chapters_in_order): dom = xml_parse(chapter) title = dom.getElementsByTagName("chapter")[0].getAttribute("title") self.chapters[idx+1] = {'dom':dom,'title':title,'file':chapter} self.build_index() self.build_toc() def pages(self): return len(self.chapters) def build_index(self): pass def build_toc(self): pass class DocBrowserWindow(object): def __init__(self,ref_editor=None,default_font=None): object.__init__(self) self.builder = Gtk.Builder() title="Ezhil Help Browser" self.default_font = default_font book_chapters = ['ch1.xml', 'ch2.xml', 'ch3.xml', 'ch4.xml', 'ch5.xml', 'ch6.xml', 'ch7.xml', 'ch8.xml','appendix.xml'] self.book = XMLtoDoc( map(lambda x: getResourceFile('xmlbook',x),book_chapters) ) self.page = 0 #TOC/HOME self.builder.add_from_file(getResourceFile("helper.glade")) self.window = self.builder.get_object("appEzhilHelpBook") self.window.set_position(Gtk.WindowPosition.CENTER_ALWAYS) try: self.window.set_icon_from_file(getResourceFile("img","ezhil_square_2015_128px.png")) except Exception as ie: pass self.window.set_title(u"தமிழில் நிரல் எழுது - எழில் கணினி மொழி") self.tocbox = self.builder.get_object("boxToc") self.book.update_toc(self.tocbox,self) self.textview = self.builder.get_object("textview1") self.textbuffer = self.textview.get_buffer() self.textview.set_editable(False) self.textview.set_cursor_visible(True) self.textview_popup = PopupForTextView(self.textview,'EXECUTE_SELECTION') self.btn_next = self.builder.get_object("btnNext") self.btn_next.connect("clicked",lambda arg: self.on_navigate(arg,'->')) self.btn_prev = self.builder.get_object("btnPrev") self.btn_prev.connect("clicked",lambda arg: self.on_navigate(arg,'<-')) self.btn_home = self.builder.get_object("btnHome") self.btn_home.connect("clicked",lambda arg: self.on_navigate(arg,'x')) if not ref_editor: self.window.connect("delete-event", Gtk.main_quit) self.render_page() self.window.show_all() def on_navigate(self,widget,direction): error = False errormsg = u"" if direction == '->': #print(u"forward ") if (self.page+1) < self.book.pages(): self.page += 1 else: error = True errormsg = u"இதுவே கடைசி பக்கம்" elif direction == '<-': #print(u"backward ") if self.page >= 1: self.page -= 1 else: error = True errormsg = u"இதுவே முதல் பக்கம்" elif direction == 'x': #print(u"home") self.page = 0 #print("current page -> %d"%self.page) if error: dialog = Gtk.MessageDialog(self.window, 0, Gtk.MessageType.INFO,Gtk.ButtonsType.OK, errormsg) dialog.format_secondary_text(u"உதவி பக்கத்திற்கு செல்ல முடியாது.") response = dialog.run() dialog.destroy() #OK or 
Cancel don't matter return True self.render_page() return True def on_navigate_to(self,widget,chapter_name,pos): #print(u'Navigating to -> %s @ pos = %d'%(chapter_name,pos)) self.page = pos self.render_page() return True def render_page(self): self.book.default_font = self.default_font self.book.render_page(self.page,self.textbuffer) return True def on_selection_button_clicked(self, widget): return True if __name__ == u"__main__": win = DocBrowserWindow() Gtk.main()
arcturusannamalai/Ezhil-Lang
editor/DocView.py
Python
gpl-3.0
13,991
[ "VisIt" ]
aba87f23b3e80a3886e6ffd91bd06fe46dace4b568c1a6084fe311b4feb0265e
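DocView's XMLtoDocVisitor above dispatches on the node names of the chapter XML (chapter and section containers, the terminal list and code tags, and the b/i/u formatting tags) and leaves the actual rendering to a subclass. The dispatch itself can be tried without GTK by parsing a tiny document with xml.dom.minidom and printing what each node contains; the chapter markup below is a made-up stand-in for the real ch*.xml files.

from xml.dom.minidom import parseString

SAMPLE = """<chapter title="Hello">
  <section>Intro text
    <b>bold words</b>
    <code>printf("vanakkam")</code>
    <list>* one
* two</list>
  </section>
</chapter>"""

def visit(node, depth=0):
    pad = "  " * depth
    for child in node.childNodes:
        name = child.nodeName
        if name == "chapter":
            print(pad + "CHAPTER:", child.getAttribute("title"))
            visit(child, depth + 1)
        elif name == "section":
            print(pad + "SECTION")
            visit(child, depth + 1)
        elif name in ("list", "code", "b", "i", "u"):
            # Terminal tags: read their first text child, as DocView does.
            print(pad + name.upper() + ":", child.childNodes[0].data.strip())
        elif name == "#text" and child.data.strip():
            print(pad + "TEXT:", child.data.strip())

visit(parseString(SAMPLE))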
# coding=utf-8 # Copyright 2022 The Uncertainty Baselines Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Rank-1 BNN ResNet-50 on on Kaggle's Diabetic Retinopathy Detection. A Rank-1 Bayesian neural net (Rank-1 BNN) [1] is an efficient and scalable approach to variational BNNs that posits prior distributions on rank-1 factors of the weights and optimizes global mixture variational posterior distributions. References: [1]: Michael W. Dusenberry*, Ghassen Jerfel*, Yeming Wen, Yian Ma, Jasper Snoek, Katherine Heller, Balaji Lakshminarayanan, Dustin Tran. Efficient and Scalable Bayesian Neural Nets with Rank-1 Factors. In Proc. of International Conference on Machine Learning (ICML) 2020. https://arxiv.org/abs/2005.07186 TODO(nband): update uncertainty estimation code for Rank 1 Mixture of Gaussian ensembling, in addition to Deep Ensemble--style ensembling. """ # pylint: disable=g-bare-generic # pylint: disable=g-doc-args # pylint: disable=g-doc-return-or-yield # pylint: disable=g-importing-member # pylint: disable=g-no-space-after-docstring-summary # pylint: disable=g-short-docstring-punctuation # pylint: disable=logging-format-interpolation # pylint: disable=logging-fstring-interpolation # pylint: disable=missing-function-docstring import datetime import os import pathlib import pprint import time from absl import app from absl import flags from absl import logging import tensorflow as tf import uncertainty_baselines as ub import utils # local file import from baselines.diabetic_retinopathy_detection import wandb from tensorboard.plugins.hparams import api as hp DEFAULT_NUM_EPOCHS = 90 # Data load / output flags. flags.DEFINE_string( 'output_dir', '/tmp/diabetic_retinopathy_detection/rank1_bnn', 'The directory where the model weights and ' 'training/evaluation summaries are stored.') flags.DEFINE_string('data_dir', None, 'Path to training and testing data.') flags.DEFINE_bool('use_validation', True, 'Whether to use a validation split.') flags.DEFINE_bool('use_test', False, 'Whether to use a test split.') flags.DEFINE_string('preproc_builder_config', 'btgraham-300', ( 'Determines the preprocessing procedure for the images. Supported options: ' '{btgraham-300, blur-3-btgraham-300, blur-5-btgraham-300, ' 'blur-10-btgraham-300, blur-20-btgraham-300}.')) flags.DEFINE_string( 'dr_decision_threshold', 'moderate', ('specifies where to binarize the labels {0, 1, 2, 3, 4} to create the ' 'binary classification task. Only affects the APTOS dataset partitioning. ' "'mild': classify {0} vs {1, 2, 3, 4}, i.e., mild DR or worse?" "'moderate': classify {0, 1} vs {2, 3, 4}, i.e., moderate DR or worse?")) flags.DEFINE_bool('load_from_checkpoint', False, 'Attempt to load from checkpoint') flags.DEFINE_string('checkpoint_dir', None, 'Path to load Keras checkpoints.') flags.DEFINE_bool('cache_eval_datasets', False, 'Caches eval datasets.') # Logging and hyperparameter tuning. 
flags.DEFINE_bool('use_wandb', False, 'Use wandb for logging.') flags.DEFINE_string('wandb_dir', 'wandb', 'Directory where wandb logs go.') flags.DEFINE_string('project', 'ub-debug', 'Wandb project name.') flags.DEFINE_string('exp_name', None, 'Give experiment a name.') flags.DEFINE_string('exp_group', None, 'Give experiment a group name.') # OOD flags. flags.DEFINE_string( 'distribution_shift', None, ('Specifies distribution shift to use, if any.' 'aptos: loads APTOS (India) OOD validation and test datasets. ' ' Kaggle/EyePACS in-domain datasets are unchanged.' 'severity: uses DiabeticRetinopathySeverityShift dataset, a subdivision ' ' of the Kaggle/EyePACS dataset to hold out clinical severity labels ' ' as OOD.')) flags.DEFINE_bool( 'load_train_split', True, 'Should always be enabled - required to load train split of the dataset.') # Learning rate / SGD flags. flags.DEFINE_float('base_learning_rate', 0.032299, 'Base learning rate.') flags.DEFINE_float('one_minus_momentum', 0.066501, 'Optimizer momentum.') flags.DEFINE_integer( 'lr_warmup_epochs', 1, 'Number of epochs for a linear warmup to the initial ' 'learning rate. Use 0 to do no warmup.') flags.DEFINE_float('lr_decay_ratio', 0.2, 'Amount to decay learning rate.') flags.DEFINE_list('lr_decay_epochs', ['30', '60'], 'Epochs to decay learning rate by.') flags.DEFINE_float('fast_weight_lr_multiplier', 1.2390, 'fast weights lr multiplier.') # Rank-1 BNN flags. flags.DEFINE_integer('num_mc_samples_train', 1, 'Number of MC samples used during training.') flags.DEFINE_integer('num_mc_samples_eval', 5, 'Number of MC samples to use for prediction.') flags.DEFINE_integer('kl_annealing_epochs', 200, 'Number of epochs over which to anneal the KL term to 1.') flags.DEFINE_string('alpha_initializer', 'trainable_normal', 'Initializer name for the alpha parameters.') flags.DEFINE_string('gamma_initializer', 'trainable_normal', 'Initializer name for the gamma parameters.') flags.DEFINE_string('alpha_regularizer', 'normal_kl_divergence', 'Regularizer name for the alpha parameters.') flags.DEFINE_string('gamma_regularizer', 'normal_kl_divergence', 'Regularizer name for the gamma parameters.') flags.DEFINE_boolean('use_additive_perturbation', False, 'Use additive perturbations instead of multiplicative.') flags.DEFINE_float( 'dropout_rate', 1e-3, 'Dropout rate. Only used if alpha/gamma initializers are, ' 'e.g., trainable normal with a fixed stddev.') flags.DEFINE_float( 'prior_stddev', 0.05, 'Prior stddev. Sort of like a prior on dropout rate, where ' 'it encourages defaulting/shrinking to this value.') flags.DEFINE_float('random_sign_init', 0.75, 'Use random sign init for fast weights.') flags.DEFINE_bool('use_ensemble_bn', False, 'Whether to use ensemble bn.') # General model flags. flags.DEFINE_integer('seed', 42, 'Random seed.') flags.DEFINE_string( 'class_reweight_mode', None, 'Dataset is imbalanced (19.6%, 18.8%, 19.2% positive examples in train, val,' 'test respectively). `None` (default) will not perform any loss reweighting. ' '`constant` will use the train proportions to reweight the binary cross ' 'entropy loss. 
`minibatch` will use the proportions of each minibatch to ' 'reweight the loss.') flags.DEFINE_float('l2', 0.000022417, 'L2 coefficient.') flags.DEFINE_integer('ensemble_size', 1, 'Size of ensemble.') flags.DEFINE_integer('per_core_batch_size', 16, 'Batch size per TPU core/GPU.') flags.DEFINE_integer('train_epochs', DEFAULT_NUM_EPOCHS, 'Number of training epochs.') flags.DEFINE_integer( 'checkpoint_interval', 25, 'Number of epochs between saving checkpoints. Use -1 to ' 'never save checkpoints.') # Metric flags. flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.') # Accelerator flags. flags.DEFINE_bool('force_use_cpu', False, 'If True, force usage of CPU') flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.') flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.') flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores or number of GPUs.') flags.DEFINE_string( 'tpu', None, 'Name of the TPU. Only used if force_use_cpu and use_gpu are both False.' 'Specify `read-from-file` to retrieve the name from tpu_name.txt.') FLAGS = flags.FLAGS def main(argv): del argv # unused arg tf.random.set_seed(FLAGS.seed) # Wandb Setup if FLAGS.use_wandb: pathlib.Path(FLAGS.wandb_dir).mkdir(parents=True, exist_ok=True) wandb_args = dict( project=FLAGS.project, entity='uncertainty-baselines', dir=FLAGS.wandb_dir, reinit=True, name=FLAGS.exp_name, group=FLAGS.exp_group) wandb_run = wandb.init(**wandb_args) wandb.config.update(FLAGS, allow_val_change=True) output_dir = str( os.path.join(FLAGS.output_dir, datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))) else: wandb_run = None output_dir = FLAGS.output_dir tf.io.gfile.makedirs(output_dir) logging.info('Saving checkpoints at %s', output_dir) # Log Run Hypers hypers_dict = { 'per_core_batch_size': FLAGS.per_core_batch_size, 'base_learning_rate': FLAGS.base_learning_rate, 'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier, 'one_minus_momentum': FLAGS.one_minus_momentum, 'l2': FLAGS.l2 } logging.info('Hypers:') logging.info(pprint.pformat(hypers_dict)) # Initialize distribution strategy on flag-specified accelerator strategy = utils.init_distribution_strategy(FLAGS.force_use_cpu, FLAGS.use_gpu, FLAGS.tpu) use_tpu = not (FLAGS.force_use_cpu or FLAGS.use_gpu) per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size batch_size = per_core_batch_size * FLAGS.num_cores # Reweighting loss for class imbalance class_reweight_mode = FLAGS.class_reweight_mode if class_reweight_mode == 'constant': class_weights = utils.get_diabetic_retinopathy_class_balance_weights() else: class_weights = None # Load in datasets. 
datasets, steps = utils.load_dataset( train_batch_size=batch_size, eval_batch_size=batch_size, flags=FLAGS, strategy=strategy) available_splits = list(datasets.keys()) test_splits = [split for split in available_splits if 'test' in split] eval_splits = [ split for split in available_splits if 'validation' in split or 'test' in split ] # Iterate eval datasets eval_datasets = {split: iter(datasets[split]) for split in eval_splits} dataset_train = datasets['train'] train_steps_per_epoch = steps['train'] train_dataset_size = train_steps_per_epoch * batch_size if FLAGS.use_bfloat16: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16') summary_writer = tf.summary.create_file_writer( os.path.join(output_dir, 'summaries')) with strategy.scope(): logging.info('Building Keras ResNet-50 Rank-1 BNN model.') model = None if FLAGS.load_from_checkpoint: initial_epoch, model = utils.load_keras_checkpoints( FLAGS.checkpoint_dir, load_ensemble=False, return_epoch=True) else: initial_epoch = 0 model = ub.models.resnet50_rank1( input_shape=utils.load_input_shape(dataset_train), num_classes=1, # binary classification task alpha_initializer=FLAGS.alpha_initializer, gamma_initializer=FLAGS.gamma_initializer, alpha_regularizer=FLAGS.alpha_regularizer, gamma_regularizer=FLAGS.gamma_regularizer, use_additive_perturbation=FLAGS.use_additive_perturbation, ensemble_size=FLAGS.ensemble_size, random_sign_init=FLAGS.random_sign_init, dropout_rate=FLAGS.dropout_rate, prior_stddev=FLAGS.prior_stddev, use_tpu=use_tpu, use_ensemble_bn=FLAGS.use_ensemble_bn) utils.log_model_init_info(model=model) # Scale learning rate and decay epochs by vanilla settings. base_lr = FLAGS.base_learning_rate lr_decay_epochs = [ (int(start_epoch_str) * FLAGS.train_epochs) // DEFAULT_NUM_EPOCHS for start_epoch_str in FLAGS.lr_decay_epochs ] learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule( steps_per_epoch=train_steps_per_epoch, base_learning_rate=base_lr, decay_ratio=FLAGS.lr_decay_ratio, decay_epochs=lr_decay_epochs, warmup_epochs=FLAGS.lr_warmup_epochs) optimizer = tf.keras.optimizers.SGD( learning_rate=learning_rate, momentum=1.0 - FLAGS.one_minus_momentum, nesterov=True) metrics = utils.get_diabetic_retinopathy_base_metrics( use_tpu=use_tpu, num_bins=FLAGS.num_bins, use_validation=FLAGS.use_validation, available_splits=available_splits) # Rank-1 specific metrics metrics.update({ 'train/kl': tf.keras.metrics.Mean(), 'train/kl_scale': tf.keras.metrics.Mean() }) # TODO(nband): debug or remove # checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) # latest_checkpoint = tf.train.latest_checkpoint(output_dir) # if latest_checkpoint: # # checkpoint.restore must be within a strategy.scope() so that optimizer # # slot variables are mirrored. # checkpoint.restore(latest_checkpoint) # logging.info('Loaded checkpoint %s', latest_checkpoint) # initial_epoch = optimizer.iterations.numpy() // steps_per_epoch # Define metrics outside the accelerator scope for CPU eval. # This will cause an error on TPU. 
if not use_tpu: metrics.update( utils.get_diabetic_retinopathy_cpu_metrics( available_splits=available_splits, use_validation=FLAGS.use_validation)) for test_split in test_splits: metrics.update({f'{test_split}/ms_per_example': tf.keras.metrics.Mean()}) # Initialize loss function based on class reweighting setting loss_fn = utils.get_diabetic_retinopathy_loss_fn( class_reweight_mode=class_reweight_mode, class_weights=class_weights) # * Prepare for Evaluation * # Get the wrapper function which will produce uncertainty estimates for # our choice of method and Y/N ensembling. uncertainty_estimator_fn = utils.get_uncertainty_estimator( 'rank1', use_ensemble=False, use_tf=True) # Wrap our estimator to predict probabilities (apply sigmoid on logits) eval_estimator = utils.wrap_retinopathy_estimator( model, use_mixed_precision=FLAGS.use_bfloat16, numpy_outputs=False) estimator_args = {'num_samples': FLAGS.num_mc_samples_eval} def compute_l2_loss(model): filtered_variables = [] for var in model.trainable_variables: # Apply l2 on the BN parameters and bias terms. This # excludes only fast weight approximate posterior/prior parameters, # but pay caution to their naming scheme. if ('kernel' in var.name or 'batch_norm' in var.name or 'bias' in var.name): filtered_variables.append(tf.reshape(var, (-1,))) l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss( tf.concat(filtered_variables, axis=0)) return l2_loss @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images = inputs['features'] labels = inputs['labels'] if FLAGS.ensemble_size > 1: images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1]) labels = tf.tile(labels, [FLAGS.ensemble_size]) # For minibatch class reweighting, initialize per-batch loss function if class_reweight_mode == 'minibatch': batch_loss_fn = utils.get_minibatch_reweighted_loss_fn(labels=labels) else: batch_loss_fn = loss_fn with tf.GradientTape() as tape: if FLAGS.num_mc_samples_train > 1: # Pythonic Implem logits_list = [] for _ in range(FLAGS.num_mc_samples_train): logits = model(images, training=True) logits = tf.squeeze(logits, axis=-1) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) logits_list.append(logits) # Logits dimension is (num_samples, batch_size). logits_list = tf.stack(logits_list, axis=0) probs_list = tf.nn.sigmoid(logits_list) probs = tf.reduce_mean(probs_list, axis=0) negative_log_likelihood = tf.reduce_mean( batch_loss_fn( y_true=tf.expand_dims(labels, axis=-1), y_pred=probs, from_logits=False)) else: # Single train step logits = model(images, training=True) if FLAGS.use_bfloat16: logits = tf.cast(logits, tf.float32) negative_log_likelihood = tf.reduce_mean( batch_loss_fn( y_true=tf.expand_dims(labels, axis=-1), y_pred=logits, from_logits=True)) probs = tf.squeeze(tf.nn.sigmoid(logits)) l2_loss = compute_l2_loss(model) kl = sum(model.losses) / train_dataset_size kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype) kl_scale /= train_steps_per_epoch * FLAGS.kl_annealing_epochs kl_scale = tf.minimum(1., kl_scale) kl_loss = kl_scale * kl # Scale the loss given the TPUStrategy will reduce sum all gradients. loss = negative_log_likelihood + l2_loss + kl_loss scaled_loss = loss / strategy.num_replicas_in_sync # elbo = -(negative_log_likelihood + l2_loss + kl) grads = tape.gradient(scaled_loss, model.trainable_variables) # Separate learning rate implementation. 
if FLAGS.fast_weight_lr_multiplier != 1.0: grads_and_vars = [] for grad, var in zip(grads, model.trainable_variables): # Apply different learning rate on the fast weights. This excludes BN # and slow weights, but pay caution to the naming scheme. if ('batch_norm' not in var.name and 'kernel' not in var.name): grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier, var)) else: grads_and_vars.append((grad, var)) optimizer.apply_gradients(grads_and_vars) else: optimizer.apply_gradients(zip(grads, model.trainable_variables)) metrics['train/loss'].update_state(loss) metrics['train/negative_log_likelihood'].update_state( negative_log_likelihood) metrics['train/kl'].update_state(kl) metrics['train/kl_scale'].update_state(kl_scale) metrics['train/accuracy'].update_state(labels, probs) metrics['train/auprc'].update_state(labels, probs) metrics['train/auroc'].update_state(labels, probs) if not use_tpu: metrics['train/ece'].add_batch(probs, label=labels) for _ in tf.range(tf.cast(train_steps_per_epoch, tf.int32)): strategy.run(step_fn, args=(next(iterator),)) start_time = time.time() train_iterator = iter(dataset_train) for epoch in range(initial_epoch, FLAGS.train_epochs): logging.info('Starting to run epoch: %s', epoch) train_step(train_iterator) current_step = (epoch + 1) * train_steps_per_epoch max_steps = train_steps_per_epoch * FLAGS.train_epochs time_elapsed = time.time() - start_time steps_per_sec = float(current_step) / time_elapsed eta_seconds = (max_steps - current_step) / steps_per_sec message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. ' 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format( current_step / max_steps, epoch + 1, FLAGS.train_epochs, steps_per_sec, eta_seconds / 60, time_elapsed / 60)) logging.info(message) # Run evaluation on all evaluation datasets, and compute metrics per_pred_results, total_results = utils.evaluate_model_and_compute_metrics( strategy, eval_datasets, steps, metrics, eval_estimator, uncertainty_estimator_fn, batch_size, available_splits, estimator_args=estimator_args, call_dataset_iter=False, is_deterministic=False, num_bins=FLAGS.num_bins, use_tpu=use_tpu, return_per_pred_results=True) # Optionally log to wandb if FLAGS.use_wandb: wandb.log(total_results, step=epoch) with summary_writer.as_default(): for name, result in total_results.items(): if result is not None: tf.summary.scalar(name, result, step=epoch + 1) for metric in metrics.values(): metric.reset_states() if (FLAGS.checkpoint_interval > 0 and (epoch + 1) % FLAGS.checkpoint_interval == 0): # checkpoint_name = checkpoint.save(os.path.join( # output_dir, 'checkpoint')) # logging.info('Saved checkpoint to %s', checkpoint_name) # TODO(nband): debug checkpointing # Also save Keras model, due to checkpoint.save issue. 
keras_model_name = os.path.join(output_dir, f'keras_model_{epoch + 1}') model.save(keras_model_name) logging.info('Saved keras model to %s', keras_model_name) # Save per-prediction metrics utils.save_per_prediction_results( output_dir, epoch + 1, per_pred_results, verbose=False) # final_checkpoint_name = checkpoint.save( # os.path.join(output_dir, 'checkpoint')) # logging.info('Saved last checkpoint to %s', final_checkpoint_name) keras_model_name = os.path.join(output_dir, f'keras_model_{FLAGS.train_epochs}') model.save(keras_model_name) logging.info('Saved keras model to %s', keras_model_name) # Save per-prediction metrics utils.save_per_prediction_results( output_dir, FLAGS.train_epochs, per_pred_results, verbose=False) with summary_writer.as_default(): hp.hparams({ 'base_learning_rate': FLAGS.base_learning_rate, 'one_minus_momentum': FLAGS.one_minus_momentum, 'l2': FLAGS.l2, 'fast_weight_lr_multiplier': FLAGS.fast_weight_lr_multiplier, 'num_mc_samples_eval': FLAGS.num_mc_samples_eval, }) if wandb_run is not None: wandb_run.finish() if __name__ == '__main__': app.run(main)
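A minimal, standalone sketch of the KL-annealing schedule used in train_step above (plain Python; the names mirror the flags, but this is an illustration rather than code from the repository):

def kl_annealing_scale(step, steps_per_epoch, kl_annealing_epochs):
    """Linearly ramp the KL weight from ~0 to 1, then hold it at 1."""
    return min(1.0, (step + 1) / (steps_per_epoch * kl_annealing_epochs))

# With 100 steps per epoch and the default of 200 annealing epochs, the KL term
# reaches full weight only at step 20,000.
assert kl_annealing_scale(0, 100, 200) == 1 / 20000
assert kl_annealing_scale(19999, 100, 200) == 1.0
assert kl_annealing_scale(50000, 100, 200) == 1.0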
google/uncertainty-baselines
baselines/diabetic_retinopathy_detection/rank1_bnn.py
Python
apache-2.0
22076
[ "Gaussian" ]
a6f21e4b5ddb2bcf1e277684311ce0e66313de2119e055ebed8e7f9231ed32af
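The class_reweight_mode flag above describes a `minibatch` mode that reweights the binary cross-entropy loss using each minibatch's own class proportions. A hypothetical NumPy-only sketch of that idea (this helper is not from the repository; the actual training code calls utils.get_minibatch_reweighted_loss_fn):

import numpy as np

def minibatch_reweighted_bce(labels, probs, eps=1e-7):
    """Binary cross-entropy where each class is weighted by the inverse of its
    frequency in the current minibatch, so rare positives are up-weighted."""
    labels = np.asarray(labels, dtype=float)
    probs = np.clip(np.asarray(probs, dtype=float), eps, 1.0 - eps)
    pos_frac = labels.mean()
    neg_frac = 1.0 - pos_frac
    # Weight each example inversely to its class frequency in this minibatch.
    weights = np.where(labels == 1.0, 0.5 / max(pos_frac, eps), 0.5 / max(neg_frac, eps))
    bce = -(labels * np.log(probs) + (1.0 - labels) * np.log(1.0 - probs))
    return float(np.mean(weights * bce))

# Example: a batch with 20% positives up-weights positive examples by 2.5x
# (0.5 / 0.2) and down-weights negatives by about 0.63x (0.5 / 0.8).
print(minibatch_reweighted_bce([1, 0, 0, 0, 0], [0.9, 0.1, 0.2, 0.1, 0.3]))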
import glob import pandas as pd import numpy as np pd.set_option('display.max_columns', 50) # print all rows import os os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files") normalB = glob.glob("binary_position_RRBS_normal_B_cell*") mcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27mcell*") pcell = glob.glob("binary_position_RRBS_NormalBCD19pCD27pcell*") cd19cell = glob.glob("binary_position_RRBS_NormalBCD19pcell*") print(len(normalB)) print(len(mcell)) print(len(pcell)) print(len(cd19cell)) totalfiles = normalB + mcell + pcell + cd19cell print(len(totalfiles)) df_list = [] for file in totalfiles: df = pd.read_csv(file) df = df.drop("Unnamed: 0", axis=1) df["chromosome"] = df["position"].map(lambda x: str(x)[:5]) df = df[df["chromosome"] == "chr4_"] df = df.drop("chromosome", axis=1) df_list.append(df) print(len(df_list)) total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object) total_matrix = total_matrix.drop("index", axis=1) len(total_matrix.columns) total_matrix.columns = ["RRBS_normal_B_cell_A1_24_TAAGGCGA.ACAACC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.ACCGCG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.ACGTGG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.AGGATG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.ATAGCG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.ATCGAC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.CAAGAG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.CATGAC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.CGGTAG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.CTATTG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.CTCAGC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.GACACG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.GCTGCC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.GGCATC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.GTGAGG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.GTTGAG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.TAGCGG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.TATCTC", "RRBS_normal_B_cell_A1_24_TAAGGCGA.TCTCTG", "RRBS_normal_B_cell_A1_24_TAAGGCGA.TGACAG", "RRBS_normal_B_cell_B1_24_CGTACTAG.ACAACC", "RRBS_normal_B_cell_B1_24_CGTACTAG.ACCGCG", "RRBS_normal_B_cell_B1_24_CGTACTAG.ACTCAC", "RRBS_normal_B_cell_B1_24_CGTACTAG.ATAGCG", "RRBS_normal_B_cell_B1_24_CGTACTAG.CAAGAG", "RRBS_normal_B_cell_B1_24_CGTACTAG.CATGAC", "RRBS_normal_B_cell_B1_24_CGTACTAG.CCTTCG", "RRBS_normal_B_cell_B1_24_CGTACTAG.CGGTAG", "RRBS_normal_B_cell_B1_24_CGTACTAG.CTATTG", "RRBS_normal_B_cell_B1_24_CGTACTAG.CTCAGC", "RRBS_normal_B_cell_B1_24_CGTACTAG.GACACG", "RRBS_normal_B_cell_B1_24_CGTACTAG.GCATTC", "RRBS_normal_B_cell_B1_24_CGTACTAG.GGCATC", "RRBS_normal_B_cell_B1_24_CGTACTAG.GTGAGG", "RRBS_normal_B_cell_B1_24_CGTACTAG.GTTGAG", "RRBS_normal_B_cell_B1_24_CGTACTAG.TAGCGG", "RRBS_normal_B_cell_B1_24_CGTACTAG.TATCTC", "RRBS_normal_B_cell_B1_24_CGTACTAG.TCTCTG", "RRBS_normal_B_cell_B1_24_CGTACTAG.TGACAG", "RRBS_normal_B_cell_B1_24_CGTACTAG.TGCTGC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACAACC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACCGCG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACGTGG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ACTCAC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.AGGATG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ATAGCG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.ATCGAC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.CAAGAG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.CATGAC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.CGGTAG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.CTATTG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.GACACG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.GCATTC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.GCTGCC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.GGCATC", "RRBS_normal_B_cell_C1_24_AGGCAGAA.GTGAGG", 
"RRBS_normal_B_cell_C1_24_AGGCAGAA.GTTGAG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.TAGCGG", "RRBS_normal_B_cell_C1_24_AGGCAGAA.TATCTC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACAACC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACCGCG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACGTGG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.ACTCAC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.AGGATG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.ATCGAC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CAAGAG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CATGAC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CCTTCG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CGGTAG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CTATTG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.CTCAGC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.GACACG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.GCATTC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.GCTGCC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.GGCATC", "RRBS_normal_B_cell_D1_24_TCCTGAGC.GTTGAG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.TAGCGG", "RRBS_normal_B_cell_D1_24_TCCTGAGC.TATCTC", "RRBS_normal_B_cell_G1_22_GGACTCCT.ACAACC", "RRBS_normal_B_cell_G1_22_GGACTCCT.ACCGCG", "RRBS_normal_B_cell_G1_22_GGACTCCT.ACGTGG", "RRBS_normal_B_cell_G1_22_GGACTCCT.ACTCAC", "RRBS_normal_B_cell_G1_22_GGACTCCT.AGGATG", "RRBS_normal_B_cell_G1_22_GGACTCCT.ATAGCG", "RRBS_normal_B_cell_G1_22_GGACTCCT.ATCGAC", "RRBS_normal_B_cell_G1_22_GGACTCCT.CAAGAG", "RRBS_normal_B_cell_G1_22_GGACTCCT.CATGAC", "RRBS_normal_B_cell_G1_22_GGACTCCT.CGGTAG", "RRBS_normal_B_cell_G1_22_GGACTCCT.CTATTG", "RRBS_normal_B_cell_G1_22_GGACTCCT.CTCAGC", "RRBS_normal_B_cell_G1_22_GGACTCCT.GACACG", "RRBS_normal_B_cell_G1_22_GGACTCCT.GCATTC", "RRBS_normal_B_cell_G1_22_GGACTCCT.GCTGCC", "RRBS_normal_B_cell_G1_22_GGACTCCT.GGCATC", "RRBS_normal_B_cell_G1_22_GGACTCCT.GTGAGG", "RRBS_normal_B_cell_G1_22_GGACTCCT.TAGCGG", "RRBS_normal_B_cell_G1_22_GGACTCCT.TATCTC", "RRBS_normal_B_cell_H1_22_TAGGCATG.ACCGCG", "RRBS_normal_B_cell_H1_22_TAGGCATG.ACGTGG", "RRBS_normal_B_cell_H1_22_TAGGCATG.ACTCAC", "RRBS_normal_B_cell_H1_22_TAGGCATG.AGGATG", "RRBS_normal_B_cell_H1_22_TAGGCATG.ATCGAC", "RRBS_normal_B_cell_H1_22_TAGGCATG.CAAGAG", "RRBS_normal_B_cell_H1_22_TAGGCATG.CATGAC", "RRBS_normal_B_cell_H1_22_TAGGCATG.CCTTCG", "RRBS_normal_B_cell_H1_22_TAGGCATG.CTATTG", "RRBS_normal_B_cell_H1_22_TAGGCATG.CTCAGC", "RRBS_normal_B_cell_H1_22_TAGGCATG.GCATTC", "RRBS_normal_B_cell_H1_22_TAGGCATG.GCTGCC", "RRBS_normal_B_cell_H1_22_TAGGCATG.GGCATC", "RRBS_normal_B_cell_H1_22_TAGGCATG.GTGAGG", "RRBS_normal_B_cell_H1_22_TAGGCATG.GTTGAG", "RRBS_normal_B_cell_H1_22_TAGGCATG.TCTCTG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACCGCG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACGTGG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ACTCAC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATAGCG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.ATCGAC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CAAGAG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CATGAC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CCTTCG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTATTG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.CTCAGC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GACACG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCATTC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GCTGCC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GGCATC", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTGAGG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.GTTGAG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TAGCGG", "RRBS_NormalBCD19pCD27mcell1_22_CGAGGCTG.TATCTC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACAACC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACCGCG", 
"RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACGTGG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ACTCAC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.AGGATG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATAGCG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.ATCGAC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CAAGAG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CATGAC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CCTTCG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CGGTAG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTATTG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.CTCAGC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GACACG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GCATTC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTGAGG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.GTTGAG", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TATCTC", "RRBS_NormalBCD19pCD27mcell23_44_GTAGAGGA.TCTCTG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACAACC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACGTGG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ACTCAC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.AGGATG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATAGCG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.ATCGAC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CAAGAG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CATGAC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CCTTCG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CGGTAG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTATTG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.CTCAGC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GACACG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.GTGAGG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TAGCGG", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TATCTC", "RRBS_NormalBCD19pCD27mcell45_66_TAAGGCGA.TCTCTG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACAACC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACCGCG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACGTGG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ACTCAC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.AGGATG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATAGCG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.ATCGAC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CAAGAG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CATGAC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CCTTCG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CGGTAG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTATTG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.CTCAGC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GACACG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GCATTC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GGCATC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTGAGG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.GTTGAG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TAGCGG", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TATCTC", "RRBS_NormalBCD19pCD27mcell67_88_CGTACTAG.TCTCTG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACAACC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACCGCG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ACTCAC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.AGGATG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATAGCG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.ATCGAC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CAAGAG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CATGAC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CCTTCG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CGGTAG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTATTG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.CTCAGC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCATTC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GCTGCC", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GGCATC", 
"RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTGAGG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.GTTGAG", "RRBS_NormalBCD19pCD27pcell1_22_TAGGCATG.TAGCGG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACAACC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACCGCG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACGTGG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ACTCAC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.AGGATG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATAGCG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.ATCGAC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CAAGAG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CATGAC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CCTTCG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CGGTAG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTATTG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.CTCAGC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GACACG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCATTC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GCTGCC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GGCATC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTGAGG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.GTTGAG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TAGCGG", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TATCTC", "RRBS_NormalBCD19pCD27pcell23_44_CTCTCTAC.TCTCTG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACCGCG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ACTCAC", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.ATAGCG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CAAGAG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CCTTCG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.CTATTG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GACACG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.GTGAGG", "RRBS_NormalBCD19pCD27pcell45_66_CAGAGAGG.TAGCGG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACAACC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACCGCG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACGTGG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ACTCAC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.AGGATG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATAGCG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.ATCGAC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CATGAC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CCTTCG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CGGTAG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTATTG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.CTCAGC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GACACG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCATTC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GCTGCC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GGCATC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTGAGG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.GTTGAG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TAGCGG", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TATCTC", "RRBS_NormalBCD19pCD27pcell67_88_GCTACGCT.TCTCTG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACAACC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACCGCG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACGTGG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ACTCAC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.AGGATG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATAGCG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.ATCGAC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CAAGAG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CATGAC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CCTTCG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CGGTAG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTATTG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.CTCAGC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GACACG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCATTC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GCTGCC", 
"RRBS_NormalBCD19pcell1_22_TAAGGCGA.GGCATC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.GTTGAG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TAGCGG", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TATCTC", "RRBS_NormalBCD19pcell1_22_TAAGGCGA.TCTCTG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACAACC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACCGCG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACGTGG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ACTCAC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.AGGATG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ATAGCG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.ATCGAC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.CATGAC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.CCTTCG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.CGGTAG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.CTATTG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.CTCAGC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.GACACG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.GCATTC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.GCTGCC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.GGCATC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.GTGAGG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.TAGCGG", "RRBS_NormalBCD19pcell23_44_CGTACTAG.TATCTC", "RRBS_NormalBCD19pcell23_44_CGTACTAG.TCTCTG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACAACC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACCGCG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACGTGG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ACTCAC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.AGGATG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATAGCG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.ATCGAC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CAAGAG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CATGAC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CCTTCG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CGGTAG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTATTG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.CTCAGC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GACACG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCATTC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GCTGCC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GGCATC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTGAGG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.GTTGAG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TAGCGG", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TATCTC", "RRBS_NormalBCD19pcell45_66_AGGCAGAA.TCTCTG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACAACC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACCGCG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACGTGG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ACTCAC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.AGGATG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATAGCG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.ATCGAC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CAAGAG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CATGAC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CCTTCG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CGGTAG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTATTG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.CTCAGC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCATTC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GCTGCC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GGCATC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTGAGG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.GTTGAG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TAGCGG", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TATCTC", "RRBS_NormalBCD19pcell67_88_TCCTGAGC.TCTCTG"] print(total_matrix.shape) total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?")) total_matrix = total_matrix.astype(str).apply(''.join) tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' ')) tott.to_csv("normal_chrom4.phy", header=None, index=None) print(tott.shape)
evanbiederstedt/RRBSfun
trees/chrom_scripts/normal_chr04.py
Python
mit
25843
[ "MCell" ]
d23fe98df6ad27851e55e2688b39b5a3c8463b90cf561b19617eee9cb69b9286
""" Some utilities for FTS3... """ import json import datetime import random import threading from DIRAC.DataManagementSystem.Client.DataManager import DataManager from DIRAC.FrameworkSystem.Client.Logger import gLogger from DIRAC.Core.Utilities.Decorators import deprecated from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus def _checkSourceReplicas(ftsFiles): """ Check the active replicas :params ftsFiles: list of FT3Files :returns: Successful/Failed {lfn : { SE1 : PFN1, SE2 : PFN2 } , ... } """ lfns = list(set([f.lfn for f in ftsFiles])) res = DataManager().getActiveReplicas(lfns) return res @deprecated("Not in use in the code, selectUniqueRandomSource prefered") def selectUniqueSourceforTransfers(multipleSourceTransfers): """ When we have several possible source for a given SE, choose one. In this particular case, we always choose the one that has the biggest amount of replicas, :param multipleSourceTransfers : { sourceSE : [FTSFiles] } :return { source SE : [ FTSFiles] } where each LFN appears only once """ # the more an SE has files, the more likely it is that it is a big good old T1 site. # So we start packing with these SEs orderedSources = sorted(multipleSourceTransfers, key=lambda srcSE: len(multipleSourceTransfers[srcSE]), reverse=True) transfersBySource = {} usedLFNs = set() for sourceSE in orderedSources: transferList = [] for ftsFile in multipleSourceTransfers[sourceSE]: if ftsFile.lfn not in usedLFNs: transferList.append(ftsFile) usedLFNs.add(ftsFile.lfn) if transferList: transfersBySource[sourceSE] = transferList return S_OK(transfersBySource) @deprecated("Not in use in the code, selectUniqueRandomSource prefered") def generatePossibleTransfersBySources(ftsFiles, allowedSources=None): """ For a list of FTS3files object, group the transfer possible sources CAUTION ! a given LFN can be in multiple source You still have to choose your source ! :param allowedSources : list of allowed sources :param ftsFiles : list of FTS3File object :return S_OK({ sourceSE: [ FTS3Files] }) """ _log = gLogger.getSubLogger("generatePossibleTransfersBySources", True) # destGroup will contain for each target SE a dict { possible source : transfer metadata } groupBySource = {} # For all files, check which possible sources they have res = _checkSourceReplicas(ftsFiles) if not res['OK']: return res filteredReplicas = res['Value'] for ftsFile in ftsFiles: if ftsFile.lfn in filteredReplicas['Failed']: _log.error("Failed to get active replicas", "%s,%s" % (ftsFile.lfn, filteredReplicas['Failed'][ftsFile.lfn])) continue replicaDict = filteredReplicas['Successful'][ftsFile.lfn] for se in replicaDict: # if we are imposed a source, respect it if allowedSources and se not in allowedSources: continue groupBySource.setdefault(se, []).append(ftsFile) return S_OK(groupBySource) def selectUniqueRandomSource(ftsFiles, allowedSources=None): """ For a list of FTS3files object, select a random source, and group the files by source. 
:param allowedSources : list of allowed sources :param ftsFiles : list of FTS3File object :return: S_OK({ sourceSE: [ FTS3Files] }) """ _log = gLogger.getSubLogger("selectUniqueRandomSource") # destGroup will contain for each target SE a dict { source : [list of FTS3Files] } groupBySource = {} # For all files, check which possible sources they have res = _checkSourceReplicas(ftsFiles) if not res['OK']: return res filteredReplicas = res['Value'] for ftsFile in ftsFiles: if ftsFile.lfn in filteredReplicas['Failed']: _log.error("Failed to get active replicas", "%s,%s" % (ftsFile.lfn, filteredReplicas['Failed'][ftsFile.lfn])) continue replicaDict = filteredReplicas['Successful'][ftsFile.lfn] # pick a random source randSource = random.choice(list(replicaDict)) # one has to convert to list groupBySource.setdefault(randSource, []).append(ftsFile) return S_OK(groupBySource) def groupFilesByTarget(ftsFiles): """ For a list of FTS3files object, group the Files by target :param ftsFiles : list of FTS3File object :return: {targetSE : [ ftsFiles] } } """ # destGroup will contain for each target SE a dict { possible source : transfer metadata } destGroup = {} for ftsFile in ftsFiles: destGroup.setdefault(ftsFile.targetSE, []).append(ftsFile) return S_OK(destGroup) class FTS3Serializable(object): """ This is the base class for all the FTS3 objects that needs to be serialized, so FTS3Operation, FTS3File and FTS3Job The inheriting classes just have to define a class attribute called _attrToSerialize, which is a list of strings, which correspond to the name of the attribute they want to serialize """ _datetimeFormat = '%Y-%m-%d %H:%M:%S' # MUST BE OVERWRITTEN IN THE CHILD CLASS _attrToSerialize = [] def toJSON(self, forPrint=False): """ Returns the JSON formated string :param forPrint: if set to True, we don't include the 'magic' arguments used for rebuilding the object """ jsonStr = json.dumps(self, cls=FTS3JSONEncoder, forPrint=forPrint) return jsonStr def __str__(self): import pprint js = json.loads(self.toJSON(forPrint=True)) return pprint.pformat(js) def _getJSONData(self, forPrint=False): """ Returns the data that have to be serialized by JSON :param forPrint: if set to True, we don't include the 'magic' arguments used for rebuilding the object :return dictionary to be transformed into json """ jsonData = {} datetimeAttributes = [] for attrName in self._attrToSerialize: # IDs might not be set since it is managed by SQLAlchemy if not hasattr(self, attrName): continue value = getattr(self, attrName) if isinstance(value, datetime.datetime): # We convert date time to a string jsonData[attrName] = value.strftime(self._datetimeFormat) datetimeAttributes.append(attrName) else: jsonData[attrName] = value if not forPrint: jsonData['__type__'] = self.__class__.__name__ jsonData['__module__'] = self.__module__ jsonData['__datetime__'] = datetimeAttributes return jsonData class FTS3JSONEncoder(json.JSONEncoder): """ This class is an encoder for the FTS3 objects """ def __init__(self, *args, **kwargs): if 'forPrint' in kwargs: self._forPrint = kwargs.pop('forPrint') else: self._forPrint = False super(FTS3JSONEncoder, self).__init__(*args, **kwargs) def default(self, obj): # pylint: disable=method-hidden if hasattr(obj, '_getJSONData'): return obj._getJSONData(forPrint=self._forPrint) else: return json.JSONEncoder.default(self, obj) class FTS3JSONDecoder(json.JSONDecoder): """ This class is an decoder for the FTS3 objects """ def __init__(self, *args, **kargs): json.JSONDecoder.__init__(self, 
object_hook=self.dict_to_object, *args, **kargs) def dict_to_object(self, dataDict): """ Convert the dictionary into an object """ import importlib # If it is not an FTS3 object, just return the structure as is if not ('__type__' in dataDict and '__module__' in dataDict): return dataDict # Get the class and module className = dataDict.pop('__type__') modName = dataDict.pop('__module__') datetimeAttributes = dataDict.pop('__datetime__', []) datetimeSet = set(datetimeAttributes) try: # Load the module mod = importlib.import_module(modName) # import the class cl = getattr(mod, className) # Instantiate the object obj = cl() # Set each attribute for attrName, attrValue in dataDict.iteritems(): # If the value is None, do not set it # This is needed to play along well with SQLalchemy if attrValue is None: continue if attrName in datetimeSet: attrValue = datetime.datetime.strptime(attrValue, FTS3Serializable._datetimeFormat) setattr(obj, attrName, attrValue) return obj except Exception as e: gLogger.error('exception in FTS3JSONDecoder %s for type %s' % (e, className)) dataDict['__type__'] = className dataDict['__module__'] = modName dataDict['__datetime__'] = datetimeAttributes return dataDict threadLocal = threading.local() class FTS3ServerPolicy(object): """ This class manages the policy for choosing a server """ def __init__(self, serverDict, serverPolicy="Random"): """ Call the init of the parent, and initialize the list of FTS3 servers """ self.log = gLogger.getSubLogger("FTS3ServerPolicy") self._serverDict = serverDict self._serverList = serverDict.keys() self._maxAttempts = len(self._serverList) self._nextServerID = 0 self._resourceStatus = ResourceStatus() methName = "_%sServerPolicy" % serverPolicy.lower() if not hasattr(self, methName): self.log.error('Unknown server policy %s. 
Using Random instead' % serverPolicy) methName = "_randomServerPolicy" self._policyMethod = getattr(self, methName) def _failoverServerPolicy(self, _attempt): """ Returns always the server at a given position (normally the first one) :param attempt: position of the server in the list """ if _attempt >= len(self._serverList): raise Exception( "FTS3ServerPolicy.__failoverServerPolicy: attempt to reach non existing server index") return self._serverList[_attempt] def _sequenceServerPolicy(self, _attempt): """ Every time the this policy is called, return the next server on the list """ fts3server = self._serverList[self._nextServerID] self._nextServerID = (self._nextServerID + 1) % len(self._serverList) return fts3server def _randomServerPolicy(self, _attempt): """ return a server from shuffledServerList """ if getattr(threadLocal, 'shuffledServerList', None) is None: threadLocal.shuffledServerList = self._serverList[:] random.shuffle(threadLocal.shuffledServerList) fts3Server = threadLocal.shuffledServerList[_attempt] if _attempt == self._maxAttempts - 1: random.shuffle(threadLocal.shuffledServerList) return fts3Server def _getFTSServerStatus(self, ftsServer): """ Fetch the status of the FTS server from RSS """ res = self._resourceStatus.getElementStatus(ftsServer, 'FTS') if not res['OK']: return res result = res['Value'] if ftsServer not in result: return S_ERROR("No FTS Server %s known to RSS" % ftsServer) if result[ftsServer]['all'] == 'Active': return S_OK(True) return S_OK(False) def chooseFTS3Server(self): """ Choose the appropriate FTS3 server depending on the policy """ fts3Server = None attempt = 0 while not fts3Server and attempt < self._maxAttempts: fts3Server = self._policyMethod(attempt) res = self._getFTSServerStatus(fts3Server) if not res['OK']: self.log.warn("Error getting the RSS status for %s: %s" % (fts3Server, res)) fts3Server = None attempt += 1 continue ftsServerStatus = res['Value'] if not ftsServerStatus: self.log.warn('FTS server %s is not in good shape. Choose another one' % fts3Server) fts3Server = None attempt += 1 if fts3Server: return S_OK(self._serverDict[fts3Server]) return S_ERROR("Could not find an FTS3 server (max attempt reached)")
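The FTS3JSONEncoder and FTS3JSONDecoder above round-trip objects through JSON by embedding '__type__', '__module__' and '__datetime__' markers. A simplified, self-contained sketch of the same pattern (the Demo class and helper functions are illustrative, not DIRAC code):

import datetime
import importlib
import json

DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'

class Demo(object):
    _attrToSerialize = ['name', 'submitTime']

    def __init__(self, name=None, submitTime=None):
        self.name = name
        self.submitTime = submitTime

def encode(obj):
    data, datetimeAttrs = {}, []
    for attrName in obj._attrToSerialize:
        value = getattr(obj, attrName)
        if isinstance(value, datetime.datetime):
            value = value.strftime(DATETIME_FORMAT)
            datetimeAttrs.append(attrName)
        data[attrName] = value
    # The markers let the decoder locate the class and restore datetime fields.
    data.update({'__type__': type(obj).__name__,
                 '__module__': type(obj).__module__,
                 '__datetime__': datetimeAttrs})
    return json.dumps(data)

def decode(jsonStr):
    data = json.loads(jsonStr)
    cls = getattr(importlib.import_module(data.pop('__module__')), data.pop('__type__'))
    datetimeAttrs = set(data.pop('__datetime__'))
    obj = cls()
    for attrName, value in data.items():
        if attrName in datetimeAttrs:
            value = datetime.datetime.strptime(value, DATETIME_FORMAT)
        setattr(obj, attrName, value)
    return obj

restored = decode(encode(Demo('job1', datetime.datetime(2020, 1, 1, 12, 0))))
print(restored.name, restored.submitTime)  # job1 2020-01-01 12:00:00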
arrabito/DIRAC
DataManagementSystem/private/FTS3Utilities.py
Python
gpl-3.0
12089
[ "DIRAC" ]
845dc95f6090d2e217194447f8b7c297cbdcc7954870fff5160da9a9c9500732
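chooseFTS3Server above tries up to len(serverList) candidates proposed by the configured policy and skips servers whose RSS status is not Active. A dependency-free sketch of that retry loop (the server URLs and the is_active stand-in are invented; the real check queries ResourceStatus):

import random

def choose_server(servers, is_active, policy='random'):
    """Return the first candidate the policy proposes that passes the status check."""
    candidates = list(servers)
    if policy == 'random':
        random.shuffle(candidates)
    # A 'sequence' or 'failover' policy would simply keep the configured order.
    for server in candidates:
        if is_active(server):
            return server
    return None  # corresponds to the "max attempt reached" S_ERROR above

print(choose_server(['https://fts3-a.example.org:8446', 'https://fts3-b.example.org:8446'],
                    is_active=lambda url: 'fts3-b' in url))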
# This script is executed in the main console namespace so # that all the variables defined here become console variables. import ddapp import os import sys import PythonQt import json from PythonQt import QtCore, QtGui from time import time import imp import ddapp.applogic as app from ddapp import drcargs from ddapp import vtkAll as vtk from ddapp import matlab from ddapp import jointcontrol from ddapp import callbacks from ddapp import camerabookmarks from ddapp import cameracontrol from ddapp import bihandeddemo from ddapp import debrisdemo from ddapp import doordemo from ddapp import drilldemo from ddapp import tabledemo from ddapp import mappingdemo from ddapp import valvedemo from ddapp import drivingplanner from ddapp import egressplanner from ddapp import polarisplatformplanner from ddapp import surprisetask from ddapp import continuouswalkingdemo from ddapp import sitstandplanner from ddapp import walkingtestdemo from ddapp import terraintask from ddapp import ik from ddapp import ikplanner from ddapp import objectmodel as om from ddapp import spreadsheet from ddapp import transformUtils from ddapp import tdx from ddapp import skybox from ddapp import perception from ddapp import segmentation from ddapp import cameraview from ddapp import colorize from ddapp import drakevisualizer from ddapp.fieldcontainer import FieldContainer from ddapp import robotstate from ddapp import roboturdf from ddapp import robotsystem from ddapp import affordancepanel from ddapp import filterUtils from ddapp import footstepsdriver from ddapp import footstepsdriverpanel from ddapp import framevisualization from ddapp import lcmloggerwidget from ddapp import lcmgl from ddapp import atlasdriver from ddapp import atlasdriverpanel from ddapp import multisensepanel from ddapp import navigationpanel from ddapp import mappingpanel from ddapp import handcontrolpanel from ddapp import sensordatarequestpanel from ddapp import tasklaunchpanel from ddapp import pfgrasp from ddapp import pfgrasppanel from ddapp.jointpropagator import JointPropagator from ddapp import coursemodel from ddapp import copmonitor from ddapp import robotplanlistener from ddapp import handdriver from ddapp import planplayback from ddapp import playbackpanel from ddapp import screengrabberpanel from ddapp import splinewidget from ddapp import teleoppanel from ddapp import vtkNumpy as vnp from ddapp import viewbehaviors from ddapp import visualization as vis from ddapp import actionhandlers from ddapp.timercallback import TimerCallback from ddapp.pointpicker import PointPicker, ImagePointPicker from ddapp import segmentationpanel from ddapp import lcmUtils from ddapp.utime import getUtime from ddapp.shallowCopy import shallowCopy from ddapp import segmentationroutines from ddapp import trackers from ddapp import gamepad from ddapp import blackoutmonitor from ddapp.tasks import robottasks as rt from ddapp.tasks import taskmanagerwidget from ddapp.tasks.descriptions import loadTaskDescriptions import drc as lcmdrc from collections import OrderedDict import functools import math import numpy as np from ddapp.debugVis import DebugData from ddapp import ioUtils as io drcargs.requireStrict() drcargs.args() app.startup(globals()) om.init(app.getMainWindow().objectTree(), app.getMainWindow().propertiesPanel()) actionhandlers.init() quit = app.quit exit = quit view = app.getDRCView() camera = view.camera() tree = app.getMainWindow().objectTree() orbit = cameracontrol.OrbitController(view) showPolyData = segmentation.showPolyData updatePolyData = 
segmentation.updatePolyData ############################################################################### robotSystem = robotsystem.create(view) globals().update(dict(robotSystem)) useIk = True useAtlasConvexHull = False useRobotState = True usePerception = True useGrid = True useSpreadsheet = True useFootsteps = True useHands = True usePlanning = True useAtlasDriver = True useLCMGL = True useLightColorScheme = True useLoggingWidget = True useDrakeVisualizer = True useNavigationPanel = True useFootContactVis = True useFallDetectorVis = True useImageWidget = False useCameraFrustumVisualizer = True useControllerRate = True useForceDisplay = False useSkybox = False useDataFiles = True usePFGrasp = False useGamepad = True useBlackoutText = True useRandomWalk = True useCOPMonitor = True useCourseModel = False poseCollection = PythonQt.dd.ddSignalMap() costCollection = PythonQt.dd.ddSignalMap() if useSpreadsheet: spreadsheet.init(poseCollection, costCollection) if useIk: def onIkStartup(ikServer, startSuccess): if startSuccess: app.getMainWindow().statusBar().showMessage('Planning server started.', 2000) else: app.showErrorMessage('Error detected while starting the matlab planning server. ' 'Please check the output console for more information.', title='Error starting matlab') ikServer.outputConsole = app.getOutputConsole() ikServer.infoFunc = app.displaySnoptInfo ikServer.connectStartupCompleted(onIkStartup) startIkServer() if useAtlasDriver: atlasdriver.systemStatus.outputConsole = app.getOutputConsole() atlasdriverpanel.init(atlasDriver) if usePerception: segmentationpanel.init() cameraview.init() colorize.init() cameraview.cameraView.rayCallback = segmentation.extractPointsAlongClickRay multisensepanel.init(perception.multisenseDriver, neckDriver) sensordatarequestpanel.init() # for kintinuous, use 'CAMERA_FUSED', 'CAMERA_TSDF' disparityPointCloud = segmentation.DisparityPointCloudItem('stereo point cloud', 'CAMERA', 'CAMERA_LEFT', cameraview.imageManager) disparityPointCloud.addToView(view) om.addToObjectModel(disparityPointCloud, parentObj=om.findObjectByName('sensors')) def createPointerTracker(): return trackers.PointerTracker(robotStateModel, disparityPointCloud) if useGrid: grid = vis.showGrid(view, color=[0,0,0], alpha=0.1) grid.setProperty('Surface Mode', 'Surface with edges') app.setBackgroundColor([0.3, 0.3, 0.35], [0.95,0.95,1]) viewOptions = vis.ViewOptionsItem(view) om.addToObjectModel(viewOptions, parentObj=om.findObjectByName('sensors')) class ViewBackgroundLightHandler(object): def __init__(self, viewOptions, grid): self.viewOptions = viewOptions self.action = app.getToolsMenuActions()['ActionToggleBackgroundLight'] self.action.connect('triggered()', self.toggle) self.properties = { viewOptions : {'Gradient background':True, 'Background color':[0.0, 0.0, 0.0], 'Background color 2':[0.3, 0.3, 0.3]}, grid : {'Surface Mode':'Wireframe', 'Alpha':0.05, 'Color':[1.0, 1.0, 1.0], 'Color By':0} } self.cachedProperties = {} self.storeProperties() def storeProperties(self): def grab(obj, props): for key in props.keys(): self.cachedProperties.setdefault(obj, dict())[key] = obj.getProperty(key) for obj, props in self.properties.iteritems(): grab(obj, props) def applyProperties(self, properties): def send(obj, props): for key, value in props.iteritems(): obj.setProperty(key, value) for obj, props in properties.iteritems(): send(obj, props) def toggle(self): if self.action.checked: self.storeProperties() self.applyProperties(self.properties) else: 
self.applyProperties(self.cachedProperties) viewBackgroundLightHandler = ViewBackgroundLightHandler(viewOptions, grid) if not useLightColorScheme: viewBackgroundLightHandler.action.trigger() if useHands: handcontrolpanel.init(lHandDriver, rHandDriver, robotStateModel, robotStateJointController, view) if useFootsteps: footstepsPanel = footstepsdriverpanel.init(footstepsDriver, robotStateModel, robotStateJointController, irisDriver) if useLCMGL: lcmglManager = lcmgl.init(view) app.MenuActionToggleHelper('Tools', 'Renderer - LCM GL', lcmglManager.isEnabled, lcmglManager.setEnabled) if useDrakeVisualizer: drakeVisualizer = drakevisualizer.DrakeVisualizer(view) app.MenuActionToggleHelper('Tools', 'Renderer - Drake', drakeVisualizer.isEnabled, drakeVisualizer.setEnabled) if usePlanning: def showPose(pose): playbackRobotModel.setProperty('Visible', True) playbackJointController.setPose('show_pose', pose) def playPlan(plan): playPlans([plan]) def playPlans(plans): planPlayback.stopAnimation() playbackRobotModel.setProperty('Visible', True) planPlayback.playPlans(plans, playbackJointController) def playManipPlan(): playPlan(manipPlanner.lastManipPlan) def playWalkingPlan(): playPlan(footstepsDriver.lastWalkingPlan) def plotManipPlan(): planPlayback.plotPlan(manipPlanner.lastManipPlan) def planStand(): ikPlanner.computeStandPlan(robotStateJointController.q) def planNominal(): ikPlanner.computeNominalPlan(robotStateJointController.q) def fitDrillMultisense(): pd = om.findObjectByName('Multisense').model.revPolyData om.removeFromObjectModel(om.findObjectByName('debug')) segmentation.findAndFitDrillBarrel(pd) def refitBlocks(autoApprove=True): polyData = om.findObjectByName('Multisense').model.revPolyData segmentation.updateBlockAffordances(polyData) if autoApprove: approveRefit() def approveRefit(): for obj in om.getObjects(): if isinstance(obj, segmentation.BlockAffordanceItem): if 'refit' in obj.getProperty('Name'): originalObj = om.findObjectByName(obj.getProperty('Name').replace(' refit', '')) if originalObj: originalObj.params = obj.params originalObj.polyData.DeepCopy(obj.polyData) originalObj.actor.GetUserTransform().SetMatrix(obj.actor.GetUserTransform().GetMatrix()) originalObj.actor.GetUserTransform().Modified() obj.setProperty('Visible', False) def sendDataRequest(requestType, repeatTime=0.0): msg = lcmdrc.data_request_t() msg.type = requestType msg.period = int(repeatTime*10) # period is specified in tenths of a second msgList = lcmdrc.data_request_list_t() msgList.utime = getUtime() msgList.requests = [msg] msgList.num_requests = len(msgList.requests) lcmUtils.publish('DATA_REQUEST', msgList) def sendSceneHeightRequest(repeatTime=0.0): sendDataRequest(lcmdrc.data_request_t.HEIGHT_MAP_SCENE, repeatTime) def sendWorkspaceDepthRequest(repeatTime=0.0): sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_WORKSPACE_C, repeatTime) def sendSceneDepthRequest(repeatTime=0.0): sendDataRequest(lcmdrc.data_request_t.DEPTH_MAP_SCENE, repeatTime) def sendFusedDepthRequest(repeatTime=0.0): sendDataRequest(lcmdrc.data_request_t.FUSED_DEPTH, repeatTime) def sendFusedHeightRequest(repeatTime=0.0): sendDataRequest(lcmdrc.data_request_t.FUSED_HEIGHT, repeatTime) teleopJointPropagator = JointPropagator(robotStateModel, teleopRobotModel, roboturdf.getRobotiqJoints() + ['neck_ay']) playbackJointPropagator = JointPropagator(robotStateModel, playbackRobotModel, roboturdf.getRobotiqJoints()) def doPropagation(model=None): if teleopRobotModel.getProperty('Visible'): teleopJointPropagator.doPropagation() if 
playbackRobotModel.getProperty('Visible'): playbackJointPropagator.doPropagation() robotStateModel.connectModelChanged(doPropagation) #app.addToolbarMacro('scene height', sendSceneHeightRequest) #app.addToolbarMacro('scene depth', sendSceneDepthRequest) #app.addToolbarMacro('stereo height', sendFusedHeightRequest) #app.addToolbarMacro('stereo depth', sendFusedDepthRequest) jointLimitChecker = teleoppanel.JointLimitChecker(robotStateModel, robotStateJointController) jointLimitChecker.setupMenuAction() jointLimitChecker.start() spindleSpinChecker = multisensepanel.SpindleSpinChecker(spindleMonitor) spindleSpinChecker.setupMenuAction() postureShortcuts = teleoppanel.PosturePlanShortcuts(robotStateJointController, ikPlanner) def drillTrackerOn(): om.findObjectByName('Multisense').model.showRevolutionCallback = fitDrillMultisense def drillTrackerOff(): om.findObjectByName('Multisense').model.showRevolutionCallback = None def fitPosts(): segmentation.fitVerticalPosts(segmentation.getCurrentRevolutionData()) affordancePanel.onGetRaycastTerrain() ikPlanner.addPostureGoalListener(robotStateJointController) if 'fixedBaseArm' in drcargs.getDirectorConfig()['userConfig']: ikPlanner.fixedBaseArm = True playbackPanel = playbackpanel.init(planPlayback, playbackRobotModel, playbackJointController, robotStateModel, robotStateJointController, manipPlanner) footstepsDriver.walkingPlanCallback = playbackPanel.setPlan manipPlanner.connectPlanReceived(playbackPanel.setPlan) teleopPanel = teleoppanel.init(robotStateModel, robotStateJointController, teleopRobotModel, teleopJointController, ikPlanner, manipPlanner, affordanceManager, playbackPanel.setPlan, playbackPanel.hidePlan) if useGamepad: gamePad = gamepad.Gamepad(teleopPanel, teleopJointController, ikPlanner, view) if useBlackoutText: blackoutMonitor = blackoutmonitor.BlackoutMonitor(robotStateJointController, view, cameraview, mapServerSource) debrisDemo = debrisdemo.DebrisPlannerDemo(robotStateModel, robotStateJointController, playbackRobotModel, ikPlanner, manipPlanner, atlasdriver.driver, lHandDriver, perception.multisenseDriver, refitBlocks) tableDemo = tabledemo.TableDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans, teleopPanel) tableTaskPanel = tabledemo.TableTaskPanel(tableDemo) drillDemo = drilldemo.DrillPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, teleopPanel.showPose, cameraview, segmentationpanel) drillTaskPanel = drilldemo.DrillTaskPanel(drillDemo) valveDemo = valvedemo.ValvePlannerDemo(robotStateModel, footstepsDriver, footstepsPanel, manipPlanner, ikPlanner, lHandDriver, rHandDriver, robotStateJointController) valveTaskPanel = valvedemo.ValveTaskPanel(valveDemo) drivingPlannerPanel = drivingplanner.DrivingPlannerPanel(robotSystem) walkingDemo = walkingtestdemo.walkingTestDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, robotStateJointController, playPlans, showPose) bihandedDemo = bihandeddemo.BihandedPlannerDemo(robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, 
perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose, cameraview, segmentationpanel) mappingDemo = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans) doorDemo = doordemo.DoorDemo(robotStateModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose) doorTaskPanel = doordemo.DoorTaskPanel(doorDemo) terrainTaskPanel = terraintask.TerrainTaskPanel(robotSystem) terrainTask = terrainTaskPanel.terrainTask surpriseTaskPanel = surprisetask.SurpriseTaskPanel(robotSystem) surpriseTask = surpriseTaskPanel.planner egressPanel = egressplanner.EgressPanel(robotSystem) egressPlanner = egressPanel.egressPlanner taskPanels = OrderedDict() taskPanels['Driving'] = drivingPlannerPanel.widget taskPanels['Egress'] = egressPanel.widget taskPanels['Door'] = doorTaskPanel.widget taskPanels['Valve'] = valveTaskPanel.widget taskPanels['Drill'] = drillTaskPanel.widget taskPanels['Surprise'] = surpriseTaskPanel.widget taskPanels['Terrain'] = terrainTaskPanel.widget taskPanels['Table'] = tableTaskPanel.widget tasklaunchpanel.init(taskPanels) splinewidget.init(view, handFactory, robotStateModel) rt.robotSystem = robotSystem taskManagerPanel = taskmanagerwidget.init() for taskDescription in loadTaskDescriptions(): taskManagerPanel.taskQueueWidget.loadTaskDescription(taskDescription[0], taskDescription[1]) taskManagerPanel.taskQueueWidget.setCurrentQueue('Task library') for obj in om.getObjects(): obj.setProperty('Deletable', False) if useCOPMonitor: copMonitor = copmonitor.COPMonitor(robotSystem, view); if useNavigationPanel: navigationPanel = navigationpanel.init(robotStateJointController, footstepsDriver) picker = PointPicker(view, callback=navigationPanel.pointPickerStoredFootsteps, numberOfPoints=2) #picker.start() continuouswalkingDemo = continuouswalkingdemo.ContinousWalkingDemo(robotStateModel, footstepsPanel, robotStateJointController, ikPlanner, teleopJointController, navigationPanel, cameraview) if useLoggingWidget: w = lcmloggerwidget.LCMLoggerWidget(statusBar=app.getMainWindow().statusBar()) app.getMainWindow().statusBar().addPermanentWidget(w.button) useMappingPanel = True if useMappingPanel: mappingPanel = mappingpanel.init(robotStateJointController, footstepsDriver) if useControllerRate: class ControllerRateLabel(object): ''' Displays a controller frequency in the status bar ''' def __init__(self, atlasDriver, statusBar): self.atlasDriver = atlasDriver self.label = QtGui.QLabel('') statusBar.addPermanentWidget(self.label) self.timer = TimerCallback(targetFps=1) self.timer.callback = self.showRate self.timer.start() def showRate(self): rate = self.atlasDriver.getControllerRate() rate = 'unknown' if rate is None else '%d hz' % rate self.label.text = 'Controller rate: %s' % rate controllerRateLabel = ControllerRateLabel(atlasDriver, app.getMainWindow().statusBar()) if useForceDisplay: class LCMForceDisplay(object): ''' Displays foot force sensor signals in a status bar widget or label widget ''' def onAtlasState(self,msg): self.l_foot_force_z = msg.force_torque.l_foot_force_z self.r_foot_force_z = msg.force_torque.r_foot_force_z def __init__(self, channel, statusBar=None): self.sub = lcmUtils.addSubscriber(channel, lcmdrc.atlas_state_t, self.onAtlasState) self.label = 
QtGui.QLabel('') statusBar.addPermanentWidget(self.label) self.timer = TimerCallback(targetFps=10) self.timer.callback = self.showRate self.timer.start() self.l_foot_force_z = 0 self.r_foot_force_z = 0 def __del__(self): lcmUtils.removeSubscriber(self.sub) def showRate(self): global leftInContact, rightInContact self.label.text = '%.2f | %.2f' % (self.l_foot_force_z,self.r_foot_force_z) rateComputer = LCMForceDisplay('ATLAS_STATE', app.getMainWindow().statusBar()) if useSkybox: skyboxDataDir = os.path.expanduser('~/Downloads/skybox') imageMap = skybox.getSkyboxImages(skyboxDataDir) skyboxObjs = skybox.createSkybox(imageMap, view) skybox.connectSkyboxCamera(view) #skybox.createTextureGround(os.path.join(skyboxDataDir, 'Dirt_seamless.jpg'), view) #view.camera().SetViewAngle(60) class RobotLinkHighligher(object): def __init__(self, robotModel): self.robotModel = robotModel self.previousColors = {} def highlightLink(self, linkName, color): currentColor = self.robotModel.model.getLinkColor(linkName) if not currentColor.isValid(): return if linkName not in self.previousColors: self.previousColors[linkName] = currentColor alpha = self.robotModel.getProperty('Alpha') newColor = QtGui.QColor(color[0]*255, color[1]*255, color[2]*255, alpha*255) self.robotModel.model.setLinkColor(linkName, newColor) def dehighlightLink(self, linkName): color = self.previousColors.pop(linkName, None) if color is None: return color.setAlpha(self.robotModel.getProperty('Alpha')*255) self.robotModel.model.setLinkColor(linkName, color) robotHighlighter = RobotLinkHighligher(robotStateModel) if useFootContactVis: def onFootContact(msg): leftInContact = msg.left_contact > 0.0 rightInContact = msg.right_contact > 0.0 for linkName, inContact in [['l_foot', msg.left_contact > 0.0], ['r_foot', msg.right_contact > 0.0]]: if inContact: robotHighlighter.highlightLink(linkName, [0, 0, 1]) else: robotHighlighter.dehighlightLink(linkName) #robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['leftFootLink'], contactColor if leftInContact else noContactColor) #robotStateModel.model.setLinkColor(drcargs.getDirectorConfig()['rightFootLink'], contactColor if rightInContact else noContactColor) footContactSub = lcmUtils.addSubscriber('FOOT_CONTACT_ESTIMATE', lcmdrc.foot_contact_estimate_t, onFootContact) footContactSub.setSpeedLimit(60) if useFallDetectorVis: def onPlanStatus(msg): links = ['pelvis', 'utorso'] if msg.plan_type == lcmdrc.plan_status_t.RECOVERING: for link in links: robotHighlighter.highlightLink(link, [1,0.4,0.0]) elif msg.plan_type == lcmdrc.plan_status_t.BRACING: for link in links: robotHighlighter.highlightLink(link, [1, 0, 0]) else: for link in links: robotHighlighter.dehighlightLink(link) fallDetectorSub = lcmUtils.addSubscriber("PLAN_EXECUTION_STATUS", lcmdrc.plan_status_t, onPlanStatus) fallDetectorSub.setSpeedLimit(10) if useDataFiles: for filename in drcargs.args().data_files: polyData = io.readPolyData(filename) if polyData: vis.showPolyData(polyData, os.path.basename(filename)) if useImageWidget: imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'CAMERA_LEFT', view) #imageWidget = cameraview.ImageWidget(cameraview.imageManager, 'KINECT_RGB', view) if useCameraFrustumVisualizer: cameraFrustumVisualizer = cameraview.CameraFrustumVisualizer(robotStateModel, cameraview.imageManager, 'CAMERA_LEFT') class ImageOverlayManager(object): def __init__(self): self.viewName = 'CAMERA_LEFT' #self.viewName = 'KINECT_RGB' self.size = 400 self.position = [0, 0] self.usePicker = False self.imageView = None 
self.imagePicker = None self._prevParent = None def show(self): if self.imageView: return imageView = cameraview.views[self.viewName] self.imageView = imageView self._prevParent = imageView.view.parent() imageView.view.hide() imageView.view.setParent(view) imageView.view.resize(self.size, self.size) imageView.view.move(*self.position) imageView.view.show() if self.usePicker: self.imagePicker = ImagePointPicker(imageView) self.imagePicker.start() def hide(self): if self.imageView: self.imageView.view.hide() self.imageView.view.setParent(self._prevParent) self.imageView.view.show() self.imageView = None if self.imagePicker: self.imagePicker.stop() class ToggleImageViewHandler(object): def __init__(self, manager): self.action = app.getToolsMenuActions()['ActionToggleImageView'] self.action.connect('triggered()', self.toggle) self.manager = manager def toggle(self): if self.action.checked: self.manager.show() else: self.manager.hide() imageOverlayManager = ImageOverlayManager() imageViewHandler = ToggleImageViewHandler(imageOverlayManager) showImageOverlay = imageOverlayManager.show hideImageOverlay = imageOverlayManager.hide screengrabberpanel.init(view) framevisualization.init(view) affordancePanel = affordancepanel.init(view, affordanceManager, ikServer, robotStateJointController, raycastDriver) camerabookmarks.init(view) def getLinkFrame(linkName, model=None): model = model or robotStateModel return model.getLinkFrame(linkName) def showLinkFrame(linkName, model=None): frame = getLinkFrame(linkName, model) if not frame: raise Exception('Link not found: ' + linkName) return vis.updateFrame(frame, linkName, parent='link frames') def sendEstRobotState(pose=None): if pose is None: pose = robotStateJointController.q msg = robotstate.drakePoseToRobotState(pose) lcmUtils.publish('EST_ROBOT_STATE', msg) def enableArmEncoders(): msg = lcmdrc.utime_t() msg.utime = 1 lcmUtils.publish('ENABLE_ENCODERS', msg) def disableArmEncoders(): msg = lcmdrc.utime_t() msg.utime = -1 lcmUtils.publish('ENABLE_ENCODERS', msg) def sendDesiredPumpPsi(desiredPsi): atlasDriver.sendDesiredPumpPsi(desiredPsi) app.setCameraTerrainModeEnabled(view, True) app.resetCamera(viewDirection=[-1,0,0], view=view) viewBehaviors = viewbehaviors.ViewBehaviors(view) # Drill Demo Functions for in-image rendering: useDrillDemo = False if useDrillDemo: def spawnHandAtCurrentLocation(side='left'): if (side is 'left'): tf = transformUtils.copyFrame( getLinkFrame( 'l_hand_face') ) handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'left') else: tf = transformUtils.copyFrame( getLinkFrame( 'right_pointer_tip') ) handFactory.placeHandModelWithTransform( tf , app.getCurrentView(), 'right') def drawFrameInCamera(t, frameName='new frame',visible=True): v = imageView.view q = cameraview.imageManager.queue localToCameraT = vtk.vtkTransform() q.getTransform('local', 'CAMERA_LEFT', localToCameraT) res = vis.showFrame( vtk.vtkTransform() , 'temp',view=v, visible=True, scale = 0.2) om.removeFromObjectModel(res) pd = res.polyData pd = filterUtils.transformPolyData(pd, t) pd = filterUtils.transformPolyData(pd, localToCameraT) q.projectPoints('CAMERA_LEFT', pd ) vis.showPolyData(pd, ('overlay ' + frameName), view=v, colorByName='Axes',parent='camera overlay',visible=visible) def drawObjectInCamera(objectName,visible=True): v = imageView.view q = cameraview.imageManager.queue localToCameraT = vtk.vtkTransform() q.getTransform('local', 'CAMERA_LEFT', localToCameraT) obj = om.findObjectByName(objectName) if obj is None: return objToLocalT = 
transformUtils.copyFrame(obj.actor.GetUserTransform() or vtk.vtkTransform()) objPolyDataOriginal = obj.polyData pd = objPolyDataOriginal pd = filterUtils.transformPolyData(pd, objToLocalT) pd = filterUtils.transformPolyData(pd, localToCameraT) q.projectPoints('CAMERA_LEFT', pd) vis.showPolyData(pd, ('overlay ' + objectName), view=v, color=[0,1,0],parent='camera overlay',visible=visible) def projectDrillDemoInCamera(): q = om.findObjectByName('camera overlay') om.removeFromObjectModel(q) imageView = cameraview.views['CAMERA_LEFT'] imageView.imageActor.SetOpacity(.2) drawFrameInCamera(drillDemo.drill.frame.transform, 'drill frame',visible=False) tf = transformUtils.copyFrame( drillDemo.drill.frame.transform ) tf.PreMultiply() tf.Concatenate( drillDemo.drill.drillToButtonTransform ) drawFrameInCamera(tf, 'drill button') tf2 = transformUtils.copyFrame( tf ) tf2.PreMultiply() tf2.Concatenate( transformUtils.frameFromPositionAndRPY( [0,0,0] , [180,0,0] ) ) drawFrameInCamera(tf2, 'drill button flip') drawObjectInCamera('drill',visible=False) drawObjectInCamera('sensed pointer tip') obj = om.findObjectByName('sensed pointer tip frame') if (obj is not None): drawFrameInCamera(obj.transform, 'sensed pointer tip frame',visible=False) #drawObjectInCamera('left robotiq',visible=False) #drawObjectInCamera('right pointer',visible=False) v = imageView.view v.render() showImageOverlay() drillDemo.pointerTracker = createPointerTracker() drillDemo.projectCallback = projectDrillDemoInCamera drillYawPreTransform = vtk.vtkTransform() drillYawPreTransform.PostMultiply() def onDrillYawSliderChanged(value): yawOffset = value - 180.0 drillDemo.drillYawSliderValue = yawOffset drillDemo.updateDrillToHand() app.getMainWindow().macrosToolBar().addWidget(QtGui.QLabel('drill yaw:')) slider = QtGui.QSlider(QtCore.Qt.Horizontal) slider.setMaximum(360) slider.setValue(180) slider.setMaximumWidth(200) slider.connect('valueChanged(int)', onDrillYawSliderChanged) app.getMainWindow().macrosToolBar().addWidget(slider) def sendPointerPrep(): drillDemo.planPointerPressGaze(-0.05) def sendPointerPress(): drillDemo.planPointerPressGaze(0.01) def sendPointerPressDeep(): drillDemo.planPointerPressGaze(0.015) app.addToolbarMacro('drill posture', drillDemo.planBothRaisePowerOn) app.addToolbarMacro('pointer prep', sendPointerPrep) app.addToolbarMacro('pointer press', sendPointerPress) app.addToolbarMacro('pointer press deep', sendPointerPressDeep) if usePFGrasp: pfgrasper = pfgrasp.PFGrasp(drillDemo, robotStateModel, playbackRobotModel, teleopRobotModel, footstepsDriver, manipPlanner, ikPlanner, lHandDriver, rHandDriver, atlasdriver.driver, perception.multisenseDriver, fitDrillMultisense, robotStateJointController, playPlans, showPose, cameraview, segmentationpanel) showImageOverlay() hideImageOverlay() pfgrasppanel.init(pfgrasper, _prevParent, imageView, imagePicker, cameraview) import signal def sendMatlabSigint(): ikServer.comm.client.proc.send_signal(signal.SIGINT) #app.addToolbarMacro('Ctrl+C MATLAB', sendMatlabSigint) class AffordanceTextureUpdater(object): def __init__(self, affordanceManager): self.affordanceManager = affordanceManager self.timer = TimerCallback(targetFps=10) self.timer.callback = self.updateTextures self.timer.start() def updateTexture(self, obj): if obj.getProperty('Camera Texture Enabled'): cameraview.applyCameraTexture(obj, cameraview.imageManager) else: cameraview.disableCameraTexture(obj) obj._renderAllViews() def updateTextures(self): for aff in affordanceManager.getAffordances(): self.updateTexture(aff) 
affordanceTextureUpdater = AffordanceTextureUpdater(affordanceManager) def drawCenterOfMass(model): stanceFrame = footstepsDriver.getFeetMidPoint(model) com = list(model.model.getCenterOfMass()) com[2] = stanceFrame.GetPosition()[2] d = DebugData() d.addSphere(com, radius=0.015) obj = vis.updatePolyData(d.getPolyData(), 'COM %s' % model.getProperty('Name'), color=[1,0,0], visible=False, parent=model) def initCenterOfMassVisulization(): for model in [robotStateModel, teleopRobotModel, playbackRobotModel]: model.connectModelChanged(drawCenterOfMass) drawCenterOfMass(model) initCenterOfMassVisulization() class RobotMoverWidget(object): def __init__(self, jointController): self.jointController = jointController pos, rpy = jointController.q[:3], jointController.q[3:6] t = transformUtils.frameFromPositionAndRPY(pos, np.degrees(rpy)) self.frame = vis.showFrame(t, 'mover widget', scale=0.3) self.frame.setProperty('Edit', True) self.frame.connectFrameModified(self.onFrameModified) def onFrameModified(self, frame): pos, rpy = self.frame.transform.GetPosition(), transformUtils.rollPitchYawFromTransform(self.frame.transform) q = self.jointController.q.copy() q[:3] = pos q[3:6] = rpy self.jointController.setPose('moved_pose', q) class RobotGridUpdater(object): def __init__(self, gridFrame, robotModel, jointController): self.gridFrame = gridFrame self.robotModel = robotModel self.jointController = jointController self.robotModel.connectModelChanged(self.updateGrid) def updateGrid(self, model): pos = self.jointController.q[:3] x = int(np.round(pos[0])) / 10 y = int(np.round(pos[1])) / 10 z = int(np.round(pos[2] - 0.85)) / 1 t = vtk.vtkTransform() t.Translate((x*10,y*10,z)) self.gridFrame.copyFrame(t) gridUpdater = RobotGridUpdater(grid.getChildFrame(), robotStateModel, robotStateJointController) class IgnoreOldStateMessagesSelector(object): def __init__(self, jointController): self.jointController = jointController self.action = app.addMenuAction('Tools', 'Ignore Old State Messages') self.action.setCheckable(True) self.action.setChecked(self.jointController.ignoreOldStateMessages) self.action.connect('triggered()', self.toggle) def toggle(self): self.jointController.ignoreOldStateMessages = bool(self.action.checked) IgnoreOldStateMessagesSelector(robotStateJointController) class RandomWalk(object): def __init__(self, max_distance_per_plan=2): self.subs = [] self.max_distance_per_plan=max_distance_per_plan def handleStatus(self, msg): if msg.plan_type == msg.STANDING: goal = transformUtils.frameFromPositionAndRPY( np.array([robotStateJointController.q[0] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5), robotStateJointController.q[1] + 2 * self.max_distance_per_plan * (np.random.random() - 0.5), robotStateJointController.q[2] - 0.84]), [0, 0, robotStateJointController.q[5] + 2 * np.degrees(np.pi) * (np.random.random() - 0.5)]) request = footstepsDriver.constructFootstepPlanRequest(robotStateJointController.q, goal) request.params.max_num_steps = 18 footstepsDriver.sendFootstepPlanRequest(request) def handleFootstepPlan(self, msg): footstepsDriver.commitFootstepPlan(msg) def start(self): sub = lcmUtils.addSubscriber('PLAN_EXECUTION_STATUS', lcmdrc.plan_status_t, self.handleStatus) sub.setSpeedLimit(0.2) self.subs.append(sub) self.subs.append(lcmUtils.addSubscriber('FOOTSTEP_PLAN_RESPONSE', lcmdrc.footstep_plan_t, self.handleFootstepPlan)) def stop(self): for sub in self.subs: lcmUtils.removeSubscriber(sub) if useRandomWalk: randomWalk = RandomWalk() if useCourseModel: courseModel = 
coursemodel.CourseModel() if 'useKuka' in drcargs.getDirectorConfig()['userConfig']: import kinectlcm #kinectlcm.init() imageOverlayManager.viewName = "KINECT_RGB" #ikPlanner.fixedBaseArm = True #showImageOverlay() if 'exo' in drcargs.args(): if (drcargs.args().exo): ikPlanner.pushToMatlab = False def roomMap(): mappingPanel.onStartMappingButton() t = mappingdemo.MappingDemo(robotStateModel, playbackRobotModel, ikPlanner, manipPlanner, footstepsDriver, atlasdriver.driver, lHandDriver, rHandDriver, perception.multisenseDriver, view, robotStateJointController, playPlans) t.visOnly = False t.optionalUserPromptEnabled = False q = t.autonomousExecuteRoomMap() q.connectTaskEnded(mappingSweepEnded) q.start() def mappingSweepEnded(taskQ, task): if task.func_name == 'doneIndicator': import time as qq mappingPanel.onStopMappingButton() qq.sleep(3) mappingPanel.onShowMapButton() print "DONE WITH MAPPING ROOM"
edowson/director
src/python/ddapp/startup.py
Python
bsd-3-clause
37,735
[ "VTK" ]
2d44a3a7a64e7ce63b7b34c098246c4bf640e877bd93bad73dfce5337b77ddd6
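The startup script in the row above defines small console helpers such as getLinkFrame and showLinkFrame but never demonstrates them; a minimal sketch of calling them interactively, assuming the app console is live (link names are taken from the same script, the model argument is optional):

# visualize a link frame under the 'link frames' folder of the object model
showLinkFrame('l_foot')

# query a frame from a different loaded model without visualizing it
utorsoFrame = getLinkFrame('utorso', model=teleopRobotModel)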
import os import sys import argparse import roblib __author__ = 'Rob Edwards' def read_blast_file(filename, query=True, evalue=10, bitscore=0): """ Read the blast output file and return a dict of hits that has contig, start, stop. # crAssphage_C NODE_1_length_14386_cov_54.5706_ID_1703 94.64 1157 62 0 82 1238 4975 3819 0.0 1794 1238 14386 Using -outfmt '6 std qlen slen' the columns are: 0: query 1: database 2: percent id 3: alignment length 4: gaps 5: mismatches 6: query start 7: query end 8: database start 9; database end 10: e value 11: bit score 12: query len 13: subject len :param query: Retrieve hits from the query sequence (if false, we'll get them from the database sequence) :type query: bool :param bitscore: minimum bitscore to be included as a hit :type bitscore: int :param evalue: maximum E value to be included as a hit :type evalue: float :param filename: blast output filename (in tab separated text format) :type filename: str :return: dictionary of contigs, starts and stops for all hits :rtype: dict """ if not os.path.exists(filename): sys.exit("{} not found\n".format(filename)) hits = {} with open(filename, 'r') as fin: for l in fin: p = l.strip().split("\t") for i in range(3, len(p)): if i == 2 or i == 10 or i == 11: p[i] = float(p[i]) else: p[i] = int(p[i]) if p[11] < bitscore: continue if p[10] > evalue: continue if query: hitname = p[1] contig, start, end = p[0], p[6], p[7] else: hitname = p[0] contig, start, end = p[1], p[8], p[9] if contig not in hits: hits[contig] = [] rc = False if start > end: start, end, rc = end, start, True start -= 1 else: start -= 1 hits[contig].append((start, end, rc, hitname)) return hits def extract_sequences(fastafile, hits, addhitname=False): """ Extract the sequences from a fasta file :param fastafile: The fasta file to get the sequences from :type fastafile: str :param hits: The dict of hits using contig, start, end :type hits: dict :return: A dict of the sequences with contig_start_end as ID and sequence as value :rtype: dict """ sequences = {} if not os.path.exists(fastafile): sys.exit("{} not found\n".format(fastafile)) fa = roblib.read_fasta(fastafile) for contig in hits: if contig not in fa: sys.stderr.write("WARNING: {} was not found in {}\n".format(contig, fastafile)) for tple in hits[contig]: seq = fa[contig][tple[0]:tple[1]] if tple[2]: seq = roblib.rc(seq) loc = "_".join(map(str, [contig, tple[0]+1, tple[1]])) if addhitname: loc += " [hit={}]".format(tple[3]) sequences[loc] = seq return sequences if __name__ == "__main__": parser = argparse.ArgumentParser(description='extract sequences based on blast hits') parser.add_argument('-b', help='blast output file', required=True) parser.add_argument('-f', help='fasta file', required=True) parser.add_argument('-d', help='use database sequences (default: query sequences', action="store_true") parser.add_argument('-e', help='Maximum evalue (default = 10)', type=float) parser.add_argument('-s', help='Minimum bit score (default = 0)', type=float) parser.add_argument('-i', help='Add database (query) ID to output', action='store_true', default=False) args = parser.parse_args() usequery = not args.d useeval = 10 usebits = 0 if args.e: useeval = args.e if args.s: usebits = args.s blasthits = read_blast_file(args.b, query=usequery, evalue=useeval, bitscore=usebits) blastseqs = extract_sequences(args.f, blasthits, args.i) for i in blastseqs: print(">{}\n{}".format(i, blastseqs[i]))
linsalrob/EdwardsLab
blast/blast_to_sequences.py
Python
mit
4,322
[ "BLAST" ]
c379d34121aa7607c7cf5be12ebff6a0cdb34bc48175f5663eff74037869bed5
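The two helpers in the row above can also be used without the argparse entry point; a minimal sketch with hypothetical file names, keeping only hits with E <= 1e-10 and bit score >= 50:

# coordinates come from the query side (query=True), so pass the query FASTA
hits = read_blast_file('hits.blastout', query=True, evalue=1e-10, bitscore=50)
seqs = extract_sequences('queries.fasta', hits, addhitname=True)
for seq_id, seq in seqs.items():
    print(">{}\n{}".format(seq_id, seq))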
#!/usr/bin/env python ######################################################################## # File : dirac-proxy-init.py # Author : Adrian Casajus ######################################################################## import os import sys import glob import time import datetime import DIRAC from DIRAC import gLogger, S_OK, S_ERROR from DIRAC.Core.Base import Script from DIRAC.FrameworkSystem.Client import ProxyGeneration, ProxyUpload from DIRAC.Core.Security import X509Chain, ProxyInfo, Properties, VOMS from DIRAC.ConfigurationSystem.Client.Helpers import Registry from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient __RCSID__ = "$Id$" class Params( ProxyGeneration.CLIParams ): uploadProxy = False uploadPilot = False addVOMSExt = False def setUploadProxy( self, _arg ): self.uploadProxy = True return S_OK() def setUploadPilotProxy( self, _arg ): self.uploadPilot = True return S_OK() def setVOMSExt( self, _arg ): self.addVOMSExt = True return S_OK() def registerCLISwitches( self ): ProxyGeneration.CLIParams.registerCLISwitches( self ) Script.registerSwitch( "U", "upload", "Upload a long lived proxy to the ProxyManager", self.setUploadProxy ) Script.registerSwitch( "P", "uploadPilot", "Upload a long lived pilot proxy to the ProxyManager", self.setUploadPilotProxy ) Script.registerSwitch( "M", "VOMS", "Add voms extension", self.setVOMSExt ) class ProxyInit( object ): def __init__( self, piParams ): self.__piParams = piParams self.__issuerCert = False self.__proxyGenerated = False self.__uploadedInfo = {} def getIssuerCert( self ): if self.__issuerCert: return self.__issuerCert proxyChain = X509Chain.X509Chain() resultProxyChainFromFile = proxyChain.loadChainFromFile( self.__piParams.certLoc ) if not resultProxyChainFromFile[ 'OK' ]: gLogger.error( "Could not load the proxy: %s" % resultProxyChainFromFile[ 'Message' ] ) sys.exit( 1 ) resultIssuerCert = proxyChain.getIssuerCert() if not resultIssuerCert[ 'OK' ]: gLogger.error( "Could not load the proxy: %s" % resultIssuerCert[ 'Message' ] ) sys.exit( 1 ) self.__issuerCert = resultIssuerCert[ 'Value' ] return self.__issuerCert def certLifeTimeCheck( self ): minLife = Registry.getGroupOption( self.__piParams.diracGroup, "SafeCertificateLifeTime", 2592000 ) resultIssuerCert = self.getIssuerCert() resultRemainingSecs = resultIssuerCert.getRemainingSecs() #pylint: disable=no-member if not resultRemainingSecs[ 'OK' ]: gLogger.error( "Could not retrieve certificate expiration time", resultRemainingSecs[ 'Message' ] ) return lifeLeft = resultRemainingSecs[ 'Value' ] if minLife > lifeLeft: daysLeft = int( lifeLeft / 86400 ) msg = "Your certificate will expire in less than %d days. Please renew it!" 
% daysLeft sep = "=" * ( len( msg ) + 4 ) msg = "%s\n %s \n%s" % ( sep, msg, sep ) gLogger.notice( msg ) def getGroupsToUpload( self ): uploadGroups = [] if self.__piParams.uploadProxy or Registry.getGroupOption( self.__piParams.diracGroup, "AutoUploadProxy", False ): uploadGroups.append( self.__piParams.diracGroup ) if not self.__piParams.uploadPilot: if not Registry.getGroupOption( self.__piParams.diracGroup, "AutoUploadPilotProxy", False ): return uploadGroups issuerCert = self.getIssuerCert() resultUserDN = issuerCert.getSubjectDN() #pylint: disable=no-member if not resultUserDN['OK']: return resultUserDN userDN = resultUserDN[ 'Value' ] resultGroups = Registry.getGroupsForDN( userDN ) if not resultGroups[ 'OK' ]: gLogger.error( "No groups defined for DN %s" % userDN ) return [] availableGroups = resultGroups[ 'Value' ] for group in availableGroups: groupProps = Registry.getPropertiesForGroup( group ) if Properties.PILOT in groupProps or Properties.GENERIC_PILOT in groupProps: uploadGroups.append( group ) return uploadGroups def addVOMSExtIfNeeded( self ): addVOMS = self.__piParams.addVOMSExt or Registry.getGroupOption( self.__piParams.diracGroup, "AutoAddVOMS", False ) if not addVOMS: return S_OK() vomsAttr = Registry.getVOMSAttributeForGroup( self.__piParams.diracGroup ) if not vomsAttr: return S_ERROR( "Requested adding a VOMS extension but no VOMS attribute defined for group %s" % self.__piParams.diracGroup ) resultVomsAttributes = VOMS.VOMS().setVOMSAttributes( self.__proxyGenerated, attribute = vomsAttr, vo = Registry.getVOMSVOForGroup( self.__piParams.diracGroup ) ) if not resultVomsAttributes[ 'OK' ]: return S_ERROR( "Could not add VOMS extensions to the proxy\nFailed adding VOMS attribute: %s" % resultVomsAttributes[ 'Message' ] ) gLogger.notice( "Added VOMS attribute %s" % vomsAttr ) chain = resultVomsAttributes['Value'] chain.dumpAllToFile( self.__proxyGenerated ) return S_OK() def createProxy( self ): """ Creates the proxy on disk """ gLogger.notice( "Generating proxy..." ) resultProxyGenerated = ProxyGeneration.generateProxy( piParams ) if not resultProxyGenerated[ 'OK' ]: gLogger.error( resultProxyGenerated[ 'Message' ] ) sys.exit( 1 ) self.__proxyGenerated = resultProxyGenerated[ 'Value' ] return resultProxyGenerated def uploadProxy( self, userGroup = False ): """ Upload the proxy to the proxyManager service """ issuerCert = self.getIssuerCert() resultUserDN = issuerCert.getSubjectDN() #pylint: disable=no-member if not resultUserDN['OK']: return resultUserDN userDN = resultUserDN['Value'] if not userGroup: userGroup = self.__piParams.diracGroup gLogger.notice( "Uploading proxy for %s..." % userGroup ) if userDN in self.__uploadedInfo: expiry = self.__uploadedInfo[ userDN ].get( userGroup ) if expiry: if issuerCert.getNotAfterDate()[ 'Value' ] - datetime.timedelta( minutes = 10 ) < expiry: #pylint: disable=no-member gLogger.info( "SKipping upload for group %s. Already uploaded" % userGroup ) return S_OK() gLogger.info( "Uploading %s proxy to ProxyManager..." 
% self.__piParams.diracGroup ) upParams = ProxyUpload.CLIParams() upParams.onTheFly = True upParams.proxyLifeTime = issuerCert.getRemainingSecs()[ 'Value' ] - 300 #pylint: disable=no-member upParams.rfcIfPossible = self.__piParams.rfc upParams.diracGroup = userGroup for k in ( 'certLoc', 'keyLoc', 'userPasswd' ): setattr( upParams, k , getattr( self.__piParams, k ) ) resultProxyUpload = ProxyUpload.uploadProxy( upParams ) if not resultProxyUpload[ 'OK' ]: gLogger.error( resultProxyUpload[ 'Message' ] ) sys.exit( 1 ) self.__uploadedInfo = resultProxyUpload[ 'Value' ] gLogger.info( "Proxy uploaded" ) return S_OK() def printInfo( self ): """ Printing utilities """ resultProxyInfoAsAString = ProxyInfo.getProxyInfoAsString( self.__proxyGenerated ) if not resultProxyInfoAsAString['OK']: gLogger.error( 'Failed to get the new proxy info: %s' % resultProxyInfoAsAString['Message'] ) else: gLogger.notice( "Proxy generated:" ) gLogger.notice( resultProxyInfoAsAString[ 'Value' ] ) if self.__uploadedInfo: gLogger.notice( "\nProxies uploaded:" ) maxDNLen = 0 maxGroupLen = 0 for userDN in self.__uploadedInfo: maxDNLen = max( maxDNLen, len( userDN ) ) for group in self.__uploadedInfo[ userDN ]: maxGroupLen = max( maxGroupLen, len( group ) ) gLogger.notice( " %s | %s | Until (GMT)" % ( "DN".ljust( maxDNLen ), "Group".ljust( maxGroupLen ) ) ) for userDN in self.__uploadedInfo: for group in self.__uploadedInfo[ userDN ]: gLogger.notice( " %s | %s | %s" % ( userDN.ljust( maxDNLen ), group.ljust( maxGroupLen ), self.__uploadedInfo[ userDN ][ group ].strftime( "%Y/%m/%d %H:%M" ) ) ) def checkCAs( self ): if not "X509_CERT_DIR" in os.environ: gLogger.warn( "X509_CERT_DIR is unset. Abort check of CAs" ) return caDir = os.environ[ "X509_CERT_DIR" ] # In globus standards .r0 files are CRLs. They have the same names of the CAs but diffent file extension searchExp = os.path.join( caDir, "*.r0" ) crlList = glob.glob( searchExp ) if not crlList: gLogger.warn( "No CRL files found for %s. Abort check of CAs" % searchExp ) return newestFPath = max( crlList, key=os.path.getmtime ) newestFTime = os.path.getmtime( newestFPath ) if newestFTime > ( time.time() - ( 2 * 24 * 3600 ) ): # At least one of the files has been updated in the last 2 days return S_OK() if not os.access(caDir, os.W_OK): gLogger.error("Your CRLs appear to be outdated, but you have no access to update them.") # Try to continue anyway... return S_OK() # Update the CAs & CRLs gLogger.notice( "Your CRLs appear to be outdated; attempting to update them..." ) bdc = BundleDeliveryClient() res = bdc.syncCAs() if not res[ 'OK' ]: gLogger.error( "Failed to update CAs", res[ 'Message' ] ) res = bdc.syncCRLs() if not res[ 'OK' ]: gLogger.error( "Failed to update CRLs", res[ 'Message' ] ) # Continue even if the update failed... 
return S_OK() def doTheMagic( self ): proxy = self.createProxy() if not proxy[ 'OK' ]: return proxy self.checkCAs() pI.certLifeTimeCheck() resultProxyWithVOMS = pI.addVOMSExtIfNeeded() if not resultProxyWithVOMS[ 'OK' ]: if "returning a valid AC for the user" in resultProxyWithVOMS['Message']: gLogger.error( resultProxyWithVOMS[ 'Message' ] ) gLogger.error("\n Are you sure you are properly registered in the VO?") elif "Missing voms-proxy" in resultProxyWithVOMS['Message']: gLogger.notice( "Failed to add VOMS extension: no standard grid interface available" ) else: gLogger.error( resultProxyWithVOMS['Message'] ) if self.__piParams.strict: return resultProxyWithVOMS for pilotGroup in pI.getGroupsToUpload(): resultProxyUpload = pI.uploadProxy( userGroup = pilotGroup ) if not resultProxyUpload[ 'OK' ]: if self.__piParams.strict: return resultProxyUpload return S_OK() if __name__ == "__main__": piParams = Params() piParams.registerCLISwitches() Script.disableCS() Script.parseCommandLine( ignoreErrors = True ) DIRAC.gConfig.setOptionValue( "/DIRAC/Security/UseServerCertificate", "False" ) pI = ProxyInit( piParams ) resultDoTheMagic = pI.doTheMagic() if not resultDoTheMagic[ 'OK' ]: gLogger.fatal( resultDoTheMagic[ 'Message' ] ) sys.exit( 1 ) pI.printInfo() sys.exit( 0 )
andresailer/DIRAC
FrameworkSystem/scripts/dirac-proxy-init.py
Python
gpl-3.0
11,024
[ "DIRAC" ]
b9a82f417ad5cbe8c62c51a5ed8384763fd7b9360dd0c457772b6239d4eff790
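The proxy script in the row above follows DIRAC's S_OK/S_ERROR result convention for every call; a minimal sketch of that pattern, reusing only calls that appear in the script (the chain_path argument and the helper name are hypothetical):

def load_issuer_dn(chain_path):
    # return the subject-DN result dict of the proxy's issuer certificate, or an S_ERROR dict
    chain = X509Chain.X509Chain()
    res = chain.loadChainFromFile(chain_path)
    if not res['OK']:
        return S_ERROR("Could not load the proxy: %s" % res['Message'])
    issuer = chain.getIssuerCert()
    if not issuer['OK']:
        return S_ERROR(issuer['Message'])
    return issuer['Value'].getSubjectDN()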
# detection anomalies from SDSS using global PCA method

from ex import *
from feature import GetFeatures
import sdss_info as sinfo
from detector import *
import report

def usage():
    print('''
get the anomaly scores using global pca method

python --feature --scorer [--nproc={number of parallel processes}]
''')
    sys.exit(1)

custom_flags={
    1: 'star',
    2: 'galaxy',
    3: 'quasar'
}

if __name__ == '__main__':
    InitLog()

    opts=getopt(sys.argv[1:], ['nproc=', 'feature=', 'scorer='])
    nproc=int(opts.get('--nproc', 1))
    feature_names=opts.get('--feature', 'Spectrum')
    scorer=opts.get('--scorer', 'pca:accum_err:0.98').lower()

    output_dir='./detection_point/'
    MakeDir(output_dir)
    tag="[{0}][{1}]".format(feature_names, scorer)
    log.info('Run name: {0}'.format(tag))

    scorer, method, param = scorer.split(':')[:3]

    # get the feature
    feature, info = GetFeatures(feature_names, nproc = nproc)

    # scoring
    if scorer == 'pca':
        E = float(param)
        scores = PCAAnomalyScore(feature, feature, E, method)
    elif scorer == 'knn':
        K = int(param)
        scores = KNNAnomalyScore(feature, feature, K, method, nproc)
    elif scorer == 'mmf':
        rk = int(param)
        scores = MMFAnomalyScore(feature, feature, rk, method)[0]
    else:
        raise ValueError('unknown scorer')

    info['scores'] = scores
    # add the run id
    info['run_id'] = sinfo.GetDetectionRunID(feature_names, scorer+'_'+method, custom_flags[info['spec_cln'][0]])

    output_file = "{0}/score_{1}.pkl".format(output_dir, tag)
    SavePickle(output_file, info)

    # write the report
    html_an, html_all = report.GenReportIndividual(
        info['specObjID'], info['scores'], info['rdz'])
    SaveText('{0}/report_point_{1}_abnormal.html'.format(output_dir, tag), html_an)
    SaveText('{0}/report_point_{1}_all.html'.format(output_dir, tag), html_all)
excelly/xpy-ml
sdss/detection/detection_point.py
Python
apache-2.0
1,920
[ "Galaxy" ]
a6de0e1687a3a314a9ba541ab0f011055fe03bc08191aa1924425974e896f118
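The --scorer option of the script above packs the scorer name, the score-combination method and a numeric parameter into one colon-separated string (the default is pca:accum_err:0.98); a minimal sketch of how such a value is interpreted, using a hypothetical knn setting:

scorer_opt = 'knn:mean:5'                        # hypothetical value, same format as the default
scorer, method, param = scorer_opt.lower().split(':')[:3]
if scorer == 'pca':
    energy = float(param)                        # fraction of PCA energy to keep
elif scorer == 'knn':
    k = int(param)                               # number of nearest neighbours
elif scorer == 'mmf':
    rank = int(param)                            # factorization rank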
#!/usr/bin/python """This script run the orf prediction """ try: import sys, re, csv, traceback from os import path, _exit, rename import logging.handlers from optparse import OptionParser, OptionGroup from libs.python_modules.utils.sysutil import pathDelim from libs.python_modules.utils.metapathways_utils import fprintf, printf, eprintf, exit_process from libs.python_modules.utils.sysutil import getstatusoutput from libs.python_modules.utils.pathwaytoolsutils import * from libs.python_modules.utils.errorcodes import error_message, get_error_list, insert_error except: print """ Could not load some user defined module functions""" print """ Make sure your typed 'source MetaPathwaysrc'""" print """ """ print traceback.print_exc(10) sys.exit(3) PATHDELIM=pathDelim() def fprintf(file, fmt, *args): file.write(fmt % args) def printf(fmt, *args): sys.stdout.write(fmt % args) def files_exist( files , errorlogger = None): status = True for file in files: if not path.exists(file): if errorlogger: errorlogger.write( 'ERROR\tCould not find ptools input file : ' + file ) status = False return not status usage = sys.argv[0] + """ -i input -o output [algorithm dependent options]""" parser = None def createParser(): global parser epilog = """This script is used for running a homology search algorithm such as BLAST or LAST on a set of query amino acid sequences against a target of reference protein sequences. Currently it supports the BLASTP and LAST algorithm. Any other homology search algorithm can be added by first adding the new algorithm name in upper caseusing in to the choices parameter in the algorithm option of this script. The results are put in a tabular form in the folder blast_results, with individual files for each of the databases. The files are named as "<samplename>.<dbname>.<algorithm>out" In the case of large number of amino acid sequences, this step of the computation can be also done using multiple grids (to use batch processing system) """ epilog = re.sub(r'\s+',' ', epilog) parser = OptionParser(usage=usage, epilog=epilog) # Input options parser.add_option('--algorithm', dest='algorithm', default='BLAST', choices = ['BLAST', 'LAST'], help='the homology search algorithm') blast_group = OptionGroup(parser, 'BLAST parameters') blast_group.add_option('--blast_query', dest='blast_query', default=None, help='Query amino acid sequences for BLASTP') blast_group.add_option('--blast_db', dest='blast_db', default=None, help='Target reference database sequenes for BLASTP') blast_group.add_option('--blast_out', dest='blast_out', default=None, help='BLAST output file') blast_group.add_option('--blast_outfmt', dest='blast_outfmt', default='6', help='BLASTP output format [default 6, tabular]') blast_group.add_option('--blast_evalue', dest='blast_evalue', default=None, help='The e-value cutoff for the BLASTP') blast_group.add_option('--num_threads', dest='num_threads', default='1', type='str', help='Number of BLAST threads') blast_group.add_option('--blast_max_target_seqs', dest='blast_max_target_seqs', default=None, help='Maximum number of target hits per query') blast_group.add_option('--blast_executable', dest='blast_executable', default=None, help='The BLASTP executable') blast_group.add_option('--num_hits', dest='num_hits', default='10', type='str', help='The BLASTP executable') parser.add_option_group(blast_group) last_group = OptionGroup(parser, 'LAST parameters') last_group.add_option('--last_query', dest='last_query', default=None, help='Query amino acid sequences for LAST') 
last_group.add_option('--last_db', dest='last_db', default=None, help='Target reference database sequenes for LAST') last_group.add_option('--last_f', dest='last_f', default='0', help='LAST output format [default 0, tabular]') last_group.add_option('--last_o', dest='last_o', default=None, help='LAST output file') last_group.add_option('--last_executable', dest='last_executable', default=None, help='The LAST executable') parser.add_option_group(last_group) def main(argv, errorlogger = None, runcommand = None, runstatslogger = None): global parser options, args = parser.parse_args(argv) if options.algorithm == 'BLAST': (code, message) = _execute_BLAST(options, logger = errorlogger) elif options.algorithm == 'LAST': (code, message) = _execute_LAST(options, logger = errorlogger) else: eprintf("ERROR\tUnrecognized algorithm name for FUNC_SEARCH\n") if errorlogger: errorlogger.printf("ERROR\tUnrecognized algorithm name for FUNC_SEARCH\n") #exit_process("ERROR\tUnrecognized algorithm name for FUNC_SEARCH\n") return -1 if code != 0: a= '\nERROR\tCannot successfully execute the %s for FUNC_SEARCH\n' %(options.algorithm) b ='ERROR\t%s\n' % (message) c = "INFO\tDatabase you are searching against may not be formatted correctly (if it was formatted for an earlier version) \n" d = "INFO\tTry removing the files for that database in \'formatted\' subfolder for MetaPathways to trigger reformatting \n" if options.algorithm == 'BLAST': e = "INFO\tYou can remove as \'rm %s.*\','\n" %( options.blast_db) if options.algorithm == 'LAST': e = "INFO\tYou can remove as \'rm %s.*\','\n" %( options.last_db) (code, message) = _execute_LAST(options, logger = errorlogger) f = "INFO\tIf removing the files did not work then format it manually (see manual)" outputStr = a + b + c + d + e + f eprintf(outputStr + "\n") if errorlogger: errorlogger.printf(outputStr +"\n") return code return 0 def _execute_LAST(options, logger = None): args= [ ] if options.last_executable : args.append( options.last_executable ) if options.last_f: args += [ "-f", options.last_f ] if options.last_o: args += [ "-o", options.last_o + ".tmp"] if options.num_threads: args += [ "-P", options.num_threads] args += [ " -K", options.num_hits] if options.last_db: args += [ options.last_db ] if options.last_query: args += [ options.last_query ] result =None print ' '.join(args) try: result = getstatusoutput(' '.join(args) ) rename(options.last_o + ".tmp", options.last_o) except: message = "Could not run LASTAL correctly" if result and len(result) > 1: message = result[1] if logger: logger.printf("ERROR\t%s\n", message) return (1, message) return (result[0], result[1]) def _execute_BLAST(options, logger = None): args= [ ] if options.blast_executable : args.append( options.blast_executable ) if options.blast_max_target_seqs: args +=["-max_target_seqs", options.blast_max_target_seqs] if options.num_threads: args += [ "-num_threads", options.num_threads ] if options.blast_outfmt: args += [ "-outfmt", options.blast_outfmt ] if options.blast_db: args += [ "-db", options.blast_db ] if options.blast_query: args += [ "-query", options.blast_query ] if options.blast_evalue: args += [ "-evalue", options.blast_evalue ] if options.blast_out: args += [ "-out", options.blast_out + ".tmp" ] try: result = getstatusoutput(' '.join(args) ) rename(options.blast_out + ".tmp", options.blast_out) except: return (1, "Cannot execute BLAST successfully") return (result[0], result[1]) def MetaPathways_func_search(argv, extra_command = None, errorlogger = None, runstatslogger =None): 
if errorlogger != None: errorlogger.write("#STEP\tFUNC_SEARCH\n") createParser() try: code = main(argv, errorlogger = errorlogger, runcommand= extra_command, runstatslogger = runstatslogger) except: insert_error(4) return (0,'') return (0,'') if __name__ == '__main__': createParser() main(sys.argv[1:])
wholebiome/MetaPathways_Python_Koonkie.3.0
libs/python_scripts/MetaPathways_func_search.py
Python
mit
8,808
[ "BLAST" ]
6f03825d20515ad6a9b86a009566ea32ca590b98fce3a12324f27896d8278098
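The script in the row above is a thin dispatcher around BLASTP/LASTAL; a minimal sketch of driving it programmatically with the options it registers (all file paths and the executable name are hypothetical):

createParser()
argv = ['--algorithm', 'BLAST',
        '--blast_executable', 'blastp',                    # hypothetical command/path
        '--blast_query', 'sample.qced.faa',
        '--blast_db', 'formatted/refseq',
        '--blast_out', 'blast_results/sample.refseq.BLASTout',
        '--blast_outfmt', '6',
        '--blast_evalue', '1e-5',
        '--num_threads', '4']
exit_code = main(argv)                                     # 0 on success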
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Create the RenderWindow, Renderer and both Actors # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # create a soccer ball # points = vtk.vtkPoints() # first point repeated because polygons were 1-offset points.InsertNextPoint(0.348012,0,0.93749) points.InsertNextPoint(0.348012,0,0.93749) points.InsertNextPoint(0.107542,0.330979,0.93749) points.InsertNextPoint(-0.281548,0.204556,0.93749) points.InsertNextPoint(-0.281548,-0.204556,0.93749) points.InsertNextPoint(0.107542,-0.330979,0.93749) points.InsertNextPoint(0.694318,0,0.719669) points.InsertNextPoint(0.799191,-0.327801,0.502204) points.InsertNextPoint(0.965027,-0.20654,0.154057) points.InsertNextPoint(0.965027,0.20654,0.154057) points.InsertNextPoint(0.799191,0.327801,0.502204) points.InsertNextPoint(0.214556,0.660335,0.719669) points.InsertNextPoint(0.558721,0.65878,0.502204) points.InsertNextPoint(0.494641,0.853971,0.154057) points.InsertNextPoint(0.101778,0.981619,0.154057) points.InsertNextPoint(-0.0647933,0.861372,0.502204) points.InsertNextPoint(-0.561715,0.40811,0.719669) points.InsertNextPoint(-0.453883,0.734949,0.502204) points.InsertNextPoint(-0.659322,0.734323,0.154057) points.InsertNextPoint(-0.902124,0.400134,0.154057) points.InsertNextPoint(-0.839236,0.204556,0.502204) points.InsertNextPoint(-0.561715,-0.40811,0.719669) points.InsertNextPoint(-0.839236,-0.204556,0.502204) points.InsertNextPoint(-0.902124,-0.400134,0.154057) points.InsertNextPoint(-0.659322,-0.734323,0.154057) points.InsertNextPoint(-0.453883,-0.734949,0.502204) points.InsertNextPoint(0.214556,-0.660335,0.719669) points.InsertNextPoint(-0.0647933,-0.861372,0.502204) points.InsertNextPoint(0.101778,-0.981619,0.154057) points.InsertNextPoint(0.494641,-0.853971,0.154057) points.InsertNextPoint(0.558721,-0.65878,0.502204) points.InsertNextPoint(0.902124,0.400134,-0.154057) points.InsertNextPoint(0.839236,0.204556,-0.502204) points.InsertNextPoint(0.561715,0.40811,-0.719669) points.InsertNextPoint(0.453883,0.734949,-0.502204) points.InsertNextPoint(0.659322,0.734323,-0.154057) points.InsertNextPoint(-0.101778,0.981619,-0.154057) points.InsertNextPoint(0.0647933,0.861372,-0.502204) points.InsertNextPoint(-0.214556,0.660335,-0.719669) points.InsertNextPoint(-0.558721,0.65878,-0.502204) points.InsertNextPoint(-0.494641,0.853971,-0.154057) points.InsertNextPoint(-0.965027,0.20654,-0.154057) points.InsertNextPoint(-0.799191,0.327801,-0.502204) points.InsertNextPoint(-0.694318,0,-0.719669) points.InsertNextPoint(-0.799191,-0.327801,-0.502204) points.InsertNextPoint(-0.965027,-0.20654,-0.154057) points.InsertNextPoint(-0.494641,-0.853971,-0.154057) points.InsertNextPoint(-0.558721,-0.65878,-0.502204) points.InsertNextPoint(-0.214556,-0.660335,-0.719669) points.InsertNextPoint(0.0647933,-0.861372,-0.502204) points.InsertNextPoint(-0.101778,-0.981619,-0.154057) points.InsertNextPoint(0.659322,-0.734323,-0.154057) points.InsertNextPoint(0.453883,-0.734949,-0.502204) points.InsertNextPoint(0.561715,-0.40811,-0.719669) points.InsertNextPoint(0.839236,-0.204556,-0.502204) points.InsertNextPoint(0.902124,-0.400134,-0.154057) points.InsertNextPoint(0.281548,-0.204556,-0.93749) points.InsertNextPoint(-0.107542,-0.330979,-0.93749) points.InsertNextPoint(-0.348012,0,-0.93749) points.InsertNextPoint(-0.107542,0.330979,-0.93749) 
points.InsertNextPoint(0.281548,0.204556,-0.93749) faces = vtk.vtkCellArray() faces.InsertNextCell(5) faces.InsertCellPoint(5) faces.InsertCellPoint(4) faces.InsertCellPoint(3) faces.InsertCellPoint(2) faces.InsertCellPoint(1) faces.InsertNextCell(5) faces.InsertCellPoint(10) faces.InsertCellPoint(9) faces.InsertCellPoint(8) faces.InsertCellPoint(7) faces.InsertCellPoint(6) faces.InsertNextCell(5) faces.InsertCellPoint(15) faces.InsertCellPoint(14) faces.InsertCellPoint(13) faces.InsertCellPoint(12) faces.InsertCellPoint(11) faces.InsertNextCell(5) faces.InsertCellPoint(20) faces.InsertCellPoint(19) faces.InsertCellPoint(18) faces.InsertCellPoint(17) faces.InsertCellPoint(16) faces.InsertNextCell(5) faces.InsertCellPoint(25) faces.InsertCellPoint(24) faces.InsertCellPoint(23) faces.InsertCellPoint(22) faces.InsertCellPoint(21) faces.InsertNextCell(5) faces.InsertCellPoint(30) faces.InsertCellPoint(29) faces.InsertCellPoint(28) faces.InsertCellPoint(27) faces.InsertCellPoint(26) faces.InsertNextCell(5) faces.InsertCellPoint(35) faces.InsertCellPoint(34) faces.InsertCellPoint(33) faces.InsertCellPoint(32) faces.InsertCellPoint(31) faces.InsertNextCell(5) faces.InsertCellPoint(40) faces.InsertCellPoint(39) faces.InsertCellPoint(38) faces.InsertCellPoint(37) faces.InsertCellPoint(36) faces.InsertNextCell(5) faces.InsertCellPoint(45) faces.InsertCellPoint(44) faces.InsertCellPoint(43) faces.InsertCellPoint(42) faces.InsertCellPoint(41) faces.InsertNextCell(5) faces.InsertCellPoint(50) faces.InsertCellPoint(49) faces.InsertCellPoint(48) faces.InsertCellPoint(47) faces.InsertCellPoint(46) faces.InsertNextCell(5) faces.InsertCellPoint(55) faces.InsertCellPoint(54) faces.InsertCellPoint(53) faces.InsertCellPoint(52) faces.InsertCellPoint(51) faces.InsertNextCell(5) faces.InsertCellPoint(60) faces.InsertCellPoint(59) faces.InsertCellPoint(58) faces.InsertCellPoint(57) faces.InsertCellPoint(56) faces.InsertNextCell(6) faces.InsertCellPoint(2) faces.InsertCellPoint(11) faces.InsertCellPoint(12) faces.InsertCellPoint(10) faces.InsertCellPoint(6) faces.InsertCellPoint(1) faces.InsertNextCell(6) faces.InsertCellPoint(3) faces.InsertCellPoint(16) faces.InsertCellPoint(17) faces.InsertCellPoint(15) faces.InsertCellPoint(11) faces.InsertCellPoint(2) faces.InsertNextCell(6) faces.InsertCellPoint(4) faces.InsertCellPoint(21) faces.InsertCellPoint(22) faces.InsertCellPoint(20) faces.InsertCellPoint(16) faces.InsertCellPoint(3) faces.InsertNextCell(6) faces.InsertCellPoint(5) faces.InsertCellPoint(26) faces.InsertCellPoint(27) faces.InsertCellPoint(25) faces.InsertCellPoint(21) faces.InsertCellPoint(4) faces.InsertNextCell(6) faces.InsertCellPoint(1) faces.InsertCellPoint(6) faces.InsertCellPoint(7) faces.InsertCellPoint(30) faces.InsertCellPoint(26) faces.InsertCellPoint(5) faces.InsertNextCell(6) faces.InsertCellPoint(12) faces.InsertCellPoint(13) faces.InsertCellPoint(35) faces.InsertCellPoint(31) faces.InsertCellPoint(9) faces.InsertCellPoint(10) faces.InsertNextCell(6) faces.InsertCellPoint(17) faces.InsertCellPoint(18) faces.InsertCellPoint(40) faces.InsertCellPoint(36) faces.InsertCellPoint(14) faces.InsertCellPoint(15) faces.InsertNextCell(6) faces.InsertCellPoint(22) faces.InsertCellPoint(23) faces.InsertCellPoint(45) faces.InsertCellPoint(41) faces.InsertCellPoint(19) faces.InsertCellPoint(20) faces.InsertNextCell(6) faces.InsertCellPoint(27) faces.InsertCellPoint(28) faces.InsertCellPoint(50) faces.InsertCellPoint(46) faces.InsertCellPoint(24) faces.InsertCellPoint(25) faces.InsertNextCell(6) 
faces.InsertCellPoint(7) faces.InsertCellPoint(8) faces.InsertCellPoint(55) faces.InsertCellPoint(51) faces.InsertCellPoint(29) faces.InsertCellPoint(30) faces.InsertNextCell(6) faces.InsertCellPoint(9) faces.InsertCellPoint(31) faces.InsertCellPoint(32) faces.InsertCellPoint(54) faces.InsertCellPoint(55) faces.InsertCellPoint(8) faces.InsertNextCell(6) faces.InsertCellPoint(14) faces.InsertCellPoint(36) faces.InsertCellPoint(37) faces.InsertCellPoint(34) faces.InsertCellPoint(35) faces.InsertCellPoint(13) faces.InsertNextCell(6) faces.InsertCellPoint(19) faces.InsertCellPoint(41) faces.InsertCellPoint(42) faces.InsertCellPoint(39) faces.InsertCellPoint(40) faces.InsertCellPoint(18) faces.InsertNextCell(6) faces.InsertCellPoint(24) faces.InsertCellPoint(46) faces.InsertCellPoint(47) faces.InsertCellPoint(44) faces.InsertCellPoint(45) faces.InsertCellPoint(23) faces.InsertNextCell(6) faces.InsertCellPoint(29) faces.InsertCellPoint(51) faces.InsertCellPoint(52) faces.InsertCellPoint(49) faces.InsertCellPoint(50) faces.InsertCellPoint(28) faces.InsertNextCell(6) faces.InsertCellPoint(32) faces.InsertCellPoint(33) faces.InsertCellPoint(60) faces.InsertCellPoint(56) faces.InsertCellPoint(53) faces.InsertCellPoint(54) faces.InsertNextCell(6) faces.InsertCellPoint(37) faces.InsertCellPoint(38) faces.InsertCellPoint(59) faces.InsertCellPoint(60) faces.InsertCellPoint(33) faces.InsertCellPoint(34) faces.InsertNextCell(6) faces.InsertCellPoint(42) faces.InsertCellPoint(43) faces.InsertCellPoint(58) faces.InsertCellPoint(59) faces.InsertCellPoint(38) faces.InsertCellPoint(39) faces.InsertNextCell(6) faces.InsertCellPoint(47) faces.InsertCellPoint(48) faces.InsertCellPoint(57) faces.InsertCellPoint(58) faces.InsertCellPoint(43) faces.InsertCellPoint(44) faces.InsertNextCell(6) faces.InsertCellPoint(52) faces.InsertCellPoint(53) faces.InsertCellPoint(56) faces.InsertCellPoint(57) faces.InsertCellPoint(48) faces.InsertCellPoint(49) faceColors = vtk.vtkFloatArray() faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(1) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) faceColors.InsertNextValue(2) vertexColors = vtk.vtkFloatArray() vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) 
vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) vertexColors.InsertNextValue(2) model = vtk.vtkPolyData() model.SetPolys(faces) model.SetPoints(points) model.GetCellData().SetScalars(faceColors) model.GetPointData().SetScalars(vertexColors) ballTC = vtk.vtkTextureMapToSphere() ballTC.SetInputData(model) lut = vtk.vtkLookupTable() lut.SetNumberOfColors(3) lut.Build() lut.SetTableValue(0,0,0,0,0) lut.SetTableValue(1,1,.3,.3,1) lut.SetTableValue(2,.8,.8,.9,1) mapper = vtk.vtkDataSetMapper() mapper.SetInputConnection(ballTC.GetOutputPort()) mapper.SetScalarModeToUseCellData() mapper.SetLookupTable(lut) mapper.SetScalarRange(0,2) earth = vtk.vtkPNMReader() earth.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/earth.ppm") texture = vtk.vtkTexture() texture.SetInputConnection(earth.GetOutputPort()) soccerBall = vtk.vtkActor() soccerBall.SetMapper(mapper) soccerBall.SetTexture(texture) # Add the actors to the renderer, set the background and size # ren1.AddActor(soccerBall) ren1.SetBackground(0.1,0.2,0.4) renWin.SetSize(300,300) ren1.GetActiveCamera().SetPosition(4.19682,4.65178,6.23545) ren1.GetActiveCamera().SetFocalPoint(0,0,0) ren1.GetActiveCamera().SetViewAngle(21.4286) ren1.GetActiveCamera().SetViewUp(0.451577,-0.833646,0.317981) # render the image # cam1 = ren1.GetActiveCamera() cam1.Zoom(1.4) ren1.ResetCameraClippingRange() iren.Initialize() # prevent the tk window from showing up then start the event loop # --- end of script --
timkrentz/SunTracker
IMU/VTK-6.2.0/Filters/Texture/Testing/Python/socbal.py
Python
mit
13,848
[ "VTK" ]
f9ff96e343b6888b2b3a49e408d93ee025188f8723f24304d0524d8ae2778cac
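In the test above, the face scalar array marks the pentagonal patches with 1 and the hexagonal patches with 2, and every vertex gets scalar 2; an equivalent loop-based fill, shown only as a compact sketch of the same data rather than a change to the test, could look like this:

faceColors = vtk.vtkFloatArray()
for value in [1] * 12 + [2] * 20:                # one scalar per face: 12 pentagons, then 20 hexagons
    faceColors.InsertNextValue(value)

vertexColors = vtk.vtkFloatArray()
for _ in range(points.GetNumberOfPoints()):      # one scalar per inserted point
    vertexColors.InsertNextValue(2)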
import itertools import collections import numpy as np #======================================================================================================================================================================= # GROMACS Specific Definitions #======================================================================================================================================================================= # For adding a new term simply add it to the dictionary centers and key indexs to the dictionaries below together with the name between []. centers = { 'moleculetype': [], 'atoms': [0,2,5], ('bonds', 1) : [0,1], ('bonds', 6) : [0,1], ('bonds', 2) : [0,1], ('position_restraints',1):[0], ('angles', 1) : [0,1,2], ('angles', 2) : [0,1,2], ('angles', 10): [0,1,2], ('constraints', 1): [0,1], ('dihedrals', 9): [0,1,2,3], ('dihedrals', 11): [0,1,2,3], ('dihedrals', 1): [0,1,2,3], ('dihedrals', 3): [0,1,2,3], ('dihedrals', 2):[0,1,2,3], ('pairs',1):[0,1], 'exclusions': [0, 1], ('virtual_sitesn',2): [":"], ('virtual_sites3',4):[0,1,2,3], ('virtual_sites3',1):[0,1,2,3]} settings ={ 'moleculetype':[0,1], 'atoms':[1,3,4,6,7], ('bonds', 1) :[2,3,4], ('bonds', 2) : [2,3,4], ('bonds', 6) : [2,3,4], ('position_restraints',1):[1,2,3,4], ('angles',1):[3,4,5], ('angles',2):[3,4,5], ('angles',10):[3, 4, 5], ('constraints', 1):[2,3], ('dihedrals', 1):[4,5,6,7], ('dihedrals',2):[4,5,6], ('dihedrals',11):[4,5,6,7,8,9,10], ('dihedrals', 3):[4,5,6,7,8,9,10], ('dihedrals',9):[4,5,6,7], ('pairs', 1):[2], 'exclusions':[], ('virtual_sitesn',2):[], ('virtual_sites3',4):[4,5,6,7], ('virtual_sites3',1):[4,5,6]} function ={ 'bonds':2, 'angles':3, 'constraints':2, 'dihedrals':4, 'pairs':2, 'virtual_sitesn':1, 'virtual_sites3':4, 'position_restraints':1} block_bonds={'PS' :{ 'PEO': '1 8000', 'P3HT': '1 8000', 'PP' : '1 8000'}, 'PEO':{ 'PS':'1 8000', 'P3HT':'1 8000', 'PP' :'1 8000'}, 'P3HT':{'PEO': '1 8000', 'P3HT': '1 8000', 'PS' : '1 8000'}} term_names=['moleculetype','atoms', 'bonds','position_restraints' ,'angles', 'dihedrals', 'constraints', 'exclusions', 'virtual_sitesn','virtual_sites3'] # We could store the different format as a subdictionary and select based on the relevant function number in # the itp file. This would require modifcation of the write_itp function. 
# Use cases are : - dihedrals, - exclusions, virtual-sides and format_outfile={ ('bonds',1): '{:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('angles',1): '{:<5d} {:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('bonds',2): '{:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('bonds',6): '{:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('position_restraints',1): '{:<5d} {:<2s} {:<8s} {:<8s} {:<8s}{}', ('angles',2): '{:<5d} {:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('angles',10): '{:<5d} {:<5d} {:<5d} {:<2s} {:<8s} {:<8s}{}', ('dihedrals',1): '{:<5d} {:<5d} {:<5d} {:<5d} {:<2s} {:<10s} {:<10s} {:<10s}{}', ('dihedrals',2):'{:<5d} {:<5d} {:<5d} {:<5d} {:<2s} {:<10s} {:<10s} {}', ('dihedrals',11):'{:<5d} {:<5d} {:<5d} {:<5d} {:<2s} {:<10s} {:<10s} {:<10s} {:<10s} {:<10s} {:<10s} {}', ('dihedrals',9): '{:<5d} {:<5d} {:<5d} {:<5d} {:<2s} {:<10s} {:<10s} {:<10s}{}', 'atoms': '{:<5d} {:<5s} {:<5d} {:<5s} {:<3s} {:<1d} {:<8s} {:<5s} {}', ('constraints',1): '{:<5d} {:<5d} {:<2s}{:<8s}{}','[': '{:<1s}{:<10s}{:<1s}{}', 'moleculetype':'{:<5s} {:<1s}{}', 'exclusions': '{:<5d} {:<5d} {}', ('virtual_sitesn',2):'{:<5d} {:<1s} {:<5d} {:<5d} {:<5d} {}', ('virtual_sites3',1):'{:<5d} {:<5d} {:<5d} {:<5d} {:<1s} {:<10s} {:<10s}{}', ('virtual_sites3',4):'{:<5d} {:<5d} {:<5d} {:<5d} {:<1s} {:<10s} {:<10s} {:<10s}{}', ('pairs',1):'{:<5d} {:<5d} {:<2s} {}'} #======================================================================================================================================================================= # Summary of Functions #======================================================================================================================================================================= def line_up(new_centers): return([sorted(new_centers)[x][1] for x in np.arange(0,len(new_centers))]) def move(center, count, n_atoms, offset): return(int(center) + n_atoms * count + offset) def term_topology(key, term): # This takes care of define statements #print(term[0][0].split()) if "#" in term[0][0].split(): return (term, "define") if all([key != item for item in ['atoms', 'moleculetype', 'exclusions', 'virtual_sitesn']]): return(centers[(key, int(term[function[key]]))], settings[(key, int(term[function[key]]))]) elif key == 'virtual_sitesn': #print(key) fidx = int(function[key]) idxs = np.arange(0,len(term),1) cent_idxs = idxs[idxs != fidx] #print(fidx) #print(cent_idxs) return (cent_idxs, [fidx]) else: return(centers[key], settings[key]) def repeat_term(term, key, n_trans, n_atoms, offset, max_atom,res_offset): count = 0 new_terms = [] center_indices, setting_indices = term_topology(key, term) #print(max_atom) #print(n_atoms) if setting_indices == "define": #print(center_indices) return [[-1, center_indices]] while count < n_trans: try: new_term = [] [ new_term.append([x ,term[x]]) for x in setting_indices] [ new_term.append([x, move(term[x], count, n_atoms, offset)]) for x in center_indices ] #print(new_term) #exit() new_term = line_up(new_term) #print(new_term) except IndexError: print("\n+++++++++++++++++++ Fatal Error +++++++++++++++++++++++") print("Check your itp file!") print("Too many or few parameters on the following line:") print(term, '\n') exit() if key in '[ atoms ]': if new_term[center_indices[1]] > max_atom: #print(new_term) print("\n++++++++++++++++ Fatal Error ++++++++++++++++++++++++") print("The largest charge group index exceeds the number") print("of atoms in the repeat unit. You cannot have more") print("charge groups than atoms. 
Check your input!\n") exit() if all([ int(new_term[x]) <= max_atom for x in center_indices]): new_terms.append(new_term) # print(new_term) count = count + 1 # correction for couning the resids and the charge number if key == 'atoms': #print("term", " n_aotms*I "," offset "," i ") for i, term in enumerate(new_terms): #print(term[2],n_atoms*i ,res_offset, i) term[2] = term[2] - n_atoms*i - offset + i + res_offset #term[5] = term[5] - n_atoms*i - offset + i + 1 return(new_terms) # We need a special sorting algorithm to not sort around #defs def check_interval(ndx,ifdef,endif): for idx, jdx in zip(ifdef,endif): if all([ndx >= idx , ndx <= jdx]): return True return False def sort_section(section): sorted_section=[] ifdef=[] endif=[] if len(section) != 0: for i, term in enumerate(section): #print(term) try: #print(term[1][0]) if term[1][0] in ["#ifdef","#ifndef"] : ifdef.append(i) # if term[1][0] == "ifndef": # print("go here") elif "#endif" == term[1][0]: endif.append(i) except TypeError: continue #print(ifdef,endif) if len(ifdef) == 0 and len(section) != 0: #print("go") #print(sorted(section)) return sorted(section) else: #print("go here") temp_sorted=[] for idx, term in enumerate(section): if not check_interval(idx, ifdef, endif): temp_sorted.append(term) sorted_section += sorted(temp_sorted) temp_sorted=[] for idx,jdx in zip(ifdef,endif): #print(idx,jdx) #print(section[idx]) temp_sorted.append(section[idx]) #print("ifdef",section[idx+1:jdx]) temp_sorted += sorted(section[idx+1:jdx]) temp_sorted.append(section[jdx]) sorted_section += temp_sorted #print(sorted) return sorted_section def repeat_section(section, key, n_trans, n_atoms, offset, max_atoms, res_offset): new_section = [] for term in section: new_terms = repeat_term(term, key, n_trans, n_atoms, offset, max_atoms,res_offset) [new_section.append(new_term) for new_term in new_terms] #print(new_section) new_section=sort_section(new_section) #print(new_section) return(new_section) def read_itp(name): itp = collections.OrderedDict({'moleculetype':[], 'atoms':[], 'bonds':[], 'angles':[], 'dihedrals':[], 'constraints':[],'position_restraints':[] ,'pairs':[],'virtual_sites3':[] ,'virtual_sitesn':[], 'exclusions':[]}) with open(name) as f: lines = f.readlines() for line in lines: #print(line) words = line.split() if len(words) != 0: if not words[0] in ';, \n, \r\n': #if not any([ word in ';, \n, \r\n' for word in words]): try: if any([ word in '[ [ ]' for word in words]): key = words[1] #print(key) elif key != 'exclusions': add = itp[key] + [line.replace('\n', '').split()] itp.update({key:add}) elif key == 'exclusions': sline = line.replace('\n', '').split() #print(line) for atom in sline[1:]: #print(sline[0]) new_line = sline[0] + " " + atom #print(new_line) add = itp[key] + [line.replace('\n', '').split()] itp.update({key:add}) except (UnboundLocalError): print("+++++++++++++ Error when reading the itp file ++++++++++++++++") print("Check your format.") print("There was something wrong with the section header!\n") print("Note that there has to be a space between the [ and the section name.") exit() except (KeyError): print("+++++++++++++ Error when reading the itp file ++++++++++++++++") print("Check your format.") print("Your section type is currently not implemented.") exit() out_itp = collections.OrderedDict({}) [ out_itp.update(collections.OrderedDict({key: value})) for key, value in itp.items() if len(value) != 0 ] #print(out_itp['bonds']) return(out_itp) def write_itp(text, outname): out_file = open(outname, 'w') #print(text['bonds']) 
for key in ['moleculetype', 'atoms']: if key in text: out_file.write('{:<1s}{:^18s}{:>1s}{}'.format('[',key,']','\n')) for line in text[key]: line.append('\n') out_file.write(str(format_outfile[key]).format(*line)) for key in ['bonds', 'angles', 'dihedrals', 'constraints','pairs','virtual_sites3','position_restraints']: if key in text: out_file.write('{:<1s}{:^22s}{:>1s}{}'.format('[',key,']','\n')) for line in text[key]: #print(line) if line[0] == -1: #print(key) out_file.write(" ".join(line[1])+" \n") else: line.append('\n') out_file.write(str(format_outfile[(key, int(line[function[key]]))]).format(*line)) for key in ['exclusions', 'virtual_sitesn']: if key in text: out_file.write('{:<1s}{:^18s}{:>1s}{}'.format('[', key ,']','\n')) for line in text[key]: if line[0] == -1: out_file.write(" ".join(line[1])+" \n") else: line.append('\n') line = [ str(e) for e in line ] out_file.write(" ".join(line)) # The sole purpose of this function is to convert # the centers to ints. So this can for sure be # handled smater in some way. def add_links(itp, linkfile): linkers = read_itp(linkfile) for key, section in linkers.items(): for term in section: new_term = [] center_indices, setting_indices = term_topology(key, term) [ new_term.append([x ,term[x]]) for x in setting_indices] [ new_term.append([x, int(term[x])]) for x in center_indices ] new_term = line_up(new_term) new_section = itp[key] new_section.append(new_term) itp.update({key: new_section}) return(itp) # We use the offset to automatically manipulate # the terms of the end-group. Something similar # could probably be done for the linker, so # that one only has a single function. def terminate(itp, end_group_file, offset): group = read_itp(end_group_file) if len(itp['atoms']) != 0: last_res = itp['atoms'][-1][2] for key, section in group.items(): for term in section: #print(term) #print(offset) new_term = [] center_indices, setting_indices = term_topology(key, term) [ new_term.append([x ,term[x]]) for x in setting_indices] if offset != 0: offset_new = -len(center_indices) + offset #print(len(center_indices)) if key == 'atoms': offset_new = offset_new + 2 else: offset_new = offset [ new_term.append([x, move(term[x], 0, 0, offset_new)]) for x in center_indices ] new_term = line_up(new_term) if key == 'atoms' and offset != 0: new_term[2] = last_res +1 new_section = itp[key] new_section.append(new_term) itp.update({key: new_section}) return(itp) def itp_tool(itpfiles, linkfile, n_mon, outname, name, term_info): block_count = 0 new_itp =collections.OrderedDict({'moleculetype':[], 'atoms':[], 'bonds':[], 'angles':[], 'dihedrals':[], 'constraints':[],'position_restraints':[] ,'pairs':[] ,'virtual_sites3':[],'virtual_sitesn':[], 'exclusions':[]} ) offset = 0 n_atoms=0 mon_itp = read_itp(itpfiles[0]) nexcl = mon_itp["moleculetype"][0][1] new_itp.update({'moleculetype':[[name, nexcl]]}) max_atoms = [] for name, n_trans in zip(itpfiles, n_mon): mon_itp = read_itp(name) n_atoms = len(mon_itp["atoms"]) try: max_atoms.append(n_atoms * n_trans + max_atoms[-1]) except IndexError: max_atoms.append(n_atoms * n_trans) #print(n_atoms) if term_info != None: print("WARNING: The use of end-groups is to be deprecated.", '\n', "Instead feed the end-group as monomer and", '\n', "add corresponding link file.") new_itp = terminate(new_itp, term_info[0], 0) # if len(term_info) == 2: # atoms_last = len(read_itp(term_info[1])['atoms']) # else: # atoms_last = 0 offset = len(new_itp['atoms']) # max_atoms = [ n + len(new_itp['atoms']) + atoms_last for n in max_atoms ] max_atoms 
= [ n + len(new_itp['atoms']) for n in max_atoms ] try: res_offset = new_itp["atoms"][-1][2] except IndexError: res_offset = 0 count=0 for name, n_trans in zip(itpfiles, n_mon): mon_itp = read_itp(name) n_atoms = len(mon_itp["atoms"]) for key, section in mon_itp.items(): if key != 'moleculetype': #print(max_atoms) add = new_itp[key] + repeat_section(section, key, n_trans, n_atoms, offset, max_atoms[count],res_offset) new_itp.update(collections.OrderedDict({key: add})) #print(offset) res_offset = new_itp["atoms"][-1][2] offset += n_atoms * n_trans count = count + 1 out_itp = collections.OrderedDict({}) if linkfile != None: new_itp = add_links(new_itp, linkfile) if term_info != None and len(term_info) == 2: new_itp = terminate(new_itp, term_info[1], offset+1) [ out_itp.update({key: value}) for key, value in new_itp.items() if len(value) != 0 ] write_itp(out_itp, outname) return(None)
fgrunewald/Martini_PolyPly
polyply/itp_tool/itp_I.py
Python
gpl-3.0
18,100
[ "Gromacs" ]
35cb5b178337794fd8bc69f21ac092a27e38617c5195f371e5ec2cdda6f998d9
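A minimal, self-contained sketch of the index arithmetic behind repeat_term/move in the itp_tool record above; the five-atom monomer and the bond indices are hypothetical, not taken from any real topology.

# Hypothetical illustration of how `move` shifts bonded-atom indices for each
# repeat of a monomer (n_atoms atoms per monomer, plus a global offset).
def move(center, count, n_atoms, offset):
    return int(center) + n_atoms * count + offset

n_atoms, offset = 5, 0                      # assumed 5-atom monomer, no end-group offset
bond = (1, 2)                               # a bond inside the first monomer
for count in range(3):                      # repeat the monomer three times
    print(tuple(move(i, count, n_atoms, offset) for i in bond))
# prints (1, 2), then (6, 7), then (11, 12)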
# coding=utf-8 from __future__ import unicode_literals from collections import OrderedDict from .. import Provider as PersonProvider class Provider(PersonProvider): formats = ( '{{first_name_male}} {{last_name}}', '{{first_name_male}} {{last_name}}', '{{first_name_male}} {{last_name}}', '{{first_name_male}} {{last_name}}', '{{first_name_male}} {{last_name}}-{{last_name}}', '{{first_name_female}} {{last_name}}', '{{first_name_female}} {{last_name}}', '{{first_name_female}} {{last_name}}', '{{first_name_female}} {{last_name}}', '{{first_name_female}} {{last_name}}-{{last_name}}', '{{prefix_male}} {{first_name_male}} {{last_name}}', '{{prefix_female}} {{first_name_female}} {{last_name}}', '{{prefix_male}} {{first_name_male}} {{last_name}}', '{{prefix_female}} {{first_name_female}} {{last_name}}' ) # Names from http://webarchive.nationalarchives.gov.uk/20160105160709/http://ons.gov.uk/ons/publications/re-reference-tables.html?edition=tcm%3A77-243767 first_names_male = ( 'David', 'Paul', 'Christopher', 'Thomas', 'John', 'Mark', 'James', 'Stephen', 'Andrew', 'Jack', 'Michael', 'Daniel', 'Peter', 'Richard', 'Matthew', 'Robert', 'Ryan', 'Joshua', 'Alan', 'Ian', 'Simon', 'Luke', 'Samuel', 'Jordan', 'Anthony', 'Adam', 'Lee', 'Alexander', 'William', 'Kevin', 'Darren', 'Benjamin', 'Philip', 'Gary', 'Joseph', 'Brian', 'Steven', 'Liam', 'Keith', 'Martin', 'Jason', 'Jonathan', 'Jake', 'Graham', 'Nicholas', 'Craig', 'George', 'Colin', 'Neil', 'Lewis', 'Nigel', 'Oliver', 'Timothy', 'Stuart', 'Kenneth', 'Raymond', 'Jamie', 'Nathan', 'Geoffrey', 'Connor', 'Terence', 'Trevor', 'Adrian', 'Harry', 'Malcolm', 'Scott', 'Callum', 'Wayne', 'Aaron', 'Barry', 'Ashley', 'Bradley', 'Patrick', 'Gareth', 'Jacob', 'Sean', 'Kieran', 'Derek', 'Carl', 'Dean', 'Charles', 'Sam', 'Shaun', 'Ben', 'Roger', 'Mohammed', 'Leslie', 'Ronald', 'Kyle', 'Clive', 'Edward', 'Antony', 'Jeremy', 'Justin', 'Jeffrey', 'Christian', 'Roy', 'Karl', 'Alex', 'Gordon', 'Dominic', 'Joe', 'Marc', 'Reece', 'Dennis', 'Russell', 'Gavin', 'Rhys', 'Phillip', 'Allan', 'Robin', 'Charlie', 'Gerald', 'Ross', 'Francis', 'Eric', 'Julian', 'Bernard', 'Dale', 'Donald', 'Damian', 'Frank', 'Shane', 'Cameron', 'Norman', 'Duncan', 'Louis', 'Frederick', 'Tony', 'Howard', 'Conor', 'Douglas', 'Garry', 'Elliot', 'Marcus', 'Arthur', 'Vincent', 'Max', 'Mathew', 'Abdul', 'Henry', 'Martyn', 'Ricky', 'Leonard', 'Lawrence', 'Glen', 'Mitchell', 'Gerard', 'Gregory', 'Iain', 'Billy', 'Bryan', 'Joel', 'Clifford', 'Josh', 'Leon', 'Stewart', 'Mohammad', 'Dylan', 'Graeme', 'Terry', 'Guy', 'Elliott', 'Stanley', 'Danny', 'Brandon', 'Victor', 'Toby', 'Hugh', 'Mohamed', 'Brett', 'Albert', 'Tom', 'Declan', 'Maurice', 'Glenn', 'Leigh', 'Denis', 'Damien', 'Bruce', 'Jay', 'Owen' ) first_names_female = ( 'Susan', 'Sarah', 'Rebecca', 'Linda', 'Julie', 'Claire', 'Laura', 'Lauren', 'Christine', 'Karen', 'Nicola', 'Gemma', 'Jessica', 'Margaret', 'Jacqueline', 'Emma', 'Charlotte', 'Janet', 'Deborah', 'Lisa', 'Hannah', 'Patricia', 'Tracey', 'Joanne', 'Sophie', 'Carol', 'Jane', 'Michelle', 'Victoria', 'Amy', 'Elizabeth', 'Helen', 'Samantha', 'Emily', 'Mary', 'Diane', 'Rachel', 'Anne', 'Sharon', 'Ann', 'Tracy', 'Amanda', 'Jennifer', 'Chloe', 'Angela', 'Louise', 'Katie', 'Lucy', 'Barbara', 'Alison', 'Sandra', 'Caroline', 'Clare', 'Kelly', 'Bethany', 'Gillian', 'Natalie', 'Jade', 'Pauline', 'Megan', 'Elaine', 'Alice', 'Lesley', 'Catherine', 'Hayley', 'Pamela', 'Danielle', 'Holly', 'Wendy', 'Abigail', 'Valerie', 'Olivia', 'Jean', 'Dawn', 'Donna', 'Stephanie', 'Leanne', 'Kathleen', 'Natasha', 'Denise', 
'Sally', 'Katherine', 'Georgia', 'Maureen', 'Maria', 'Zoe', 'Judith', 'Kerry', 'Debra', 'Melanie', 'Stacey', 'Eleanor', 'Paula', 'Shannon', 'Sheila', 'Joanna', 'Paige', 'Janice', 'Lorraine', 'Georgina', 'Lynn', 'Andrea', 'Suzanne', 'Nicole', 'Yvonne', 'Chelsea', 'Lynne', 'Anna', 'Kirsty', 'Shirley', 'Alexandra', 'Marion', 'Beverley', 'Melissa', 'Rosemary', 'Kimberley', 'Carole', 'Fiona', 'Kate', 'Joan', 'Marie', 'Jenna', 'Marilyn', 'Jodie', 'June', 'Grace', 'Mandy', 'Rachael', 'Lynda', 'Tina', 'Kathryn', 'Molly', 'Jayne', 'Amber', 'Marian', 'Jasmine', 'Brenda', 'Sara', 'Kayleigh', 'Teresa', 'Harriet', 'Julia', 'Ashleigh', 'Heather', 'Kim', 'Ruth', 'Jemma', 'Carly', 'Leah', 'Eileen', 'Francesca', 'Naomi', 'Hilary', 'Abbie', 'Sylvia', 'Katy', 'Irene', 'Cheryl', 'Rosie', 'Dorothy', 'Aimee', 'Vanessa', 'Ellie', 'Frances', 'Sian', 'Josephine', 'Gail', 'Jill', 'Lydia', 'Joyce', 'Charlene', 'Hollie', 'Hazel', 'Annette', 'Bethan', 'Amelia', 'Beth', 'Rita', 'Geraldine', 'Diana', 'Lindsey', 'Carolyn' ) first_names = first_names_male + first_names_female last_names = OrderedDict(( ('Savage', 0.04), ('Winter', 0.03), ('Metcalfe', 0.03), ('Harper', 0.06), ('Burgess', 0.06), ('Bailey', 0.15), ('Potts', 0.03), ('Boyle', 0.03), ('Brown', 0.51), ('Jennings', 0.05), ('Payne', 0.09), ('Day', 0.09), ('Holland', 0.07), ('Higgins', 0.05), ('Rhodes', 0.04), ('Hancock', 0.04), ('Howells', 0.03), ('Fowler', 0.04), ('Sims', 0.03), ('Thomas', 0.35), ('Parker', 0.17), ('Bentley', 0.04), ('Barnett', 0.05), ('Manning', 0.03), ('Collier', 0.03), ('Holloway', 0.03), ('Hartley', 0.04), ('George', 0.05), ('Tomlinson', 0.04), ('Howard', 0.09), ('Long', 0.06), ('Farmer', 0.03), ('Collins', 0.15), ('Rice', 0.03), ('Townsend', 0.04), ('Rees', 0.07), ('Bruce', 0.03), ('Hammond', 0.05), ('Ford', 0.09), ('Tucker', 0.05), ('Wallis', 0.03), ('Hamilton', 0.06), ('Ferguson', 0.04), ('Hooper', 0.03), ('Francis', 0.07), ('Reeves', 0.04), ('Barlow', 0.04), ('Short', 0.04), ('Cunningham', 0.05), ('Hopkins', 0.06), ('Nicholson', 0.06), ('Archer', 0.04), ('Green', 0.25), ('Glover', 0.04), ('Gibson', 0.09), ('Spencer', 0.08), ('Warner', 0.04), ('Webb', 0.12), ('Whitehouse', 0.03), ('Dean', 0.06), ('Griffiths', 0.16), ('Clark', 0.2), ('Hardy', 0.05), ('Iqbal', 0.03), ('Baldwin', 0.04), ('O\'Neill', 0.06), ('Blake', 0.05), ('Lees', 0.03), ('Harvey', 0.1), ('Clarke', 0.24), ('Daniels', 0.04), ('Browne', 0.03), ('Macdonald', 0.04), ('Kirk', 0.04), ('Khan', 0.14), ('Davidson', 0.05), ('Dale', 0.04), ('Sanders', 0.04), ('Wilkins', 0.04), ('Connor', 0.03), ('Daly', 0.03), ('Lane', 0.06), ('Kennedy', 0.06), ('Bray', 0.03), ('Burrows', 0.04), ('Hayes', 0.07), ('Wyatt', 0.03), ('Gould', 0.03), ('Dyer', 0.03), ('Nash', 0.05), ('Bryan', 0.03), ('Pope', 0.03), ('Fraser', 0.04), ('Steele', 0.03), ('Walsh', 0.09), ('Wade', 0.04), ('Marsden', 0.03), ('Humphries', 0.03), ('O\'Brien', 0.08), ('Thompson', 0.28), ('Lord', 0.03), ('Coleman', 0.06), ('Jarvis', 0.04), ('Noble', 0.03), ('Williamson', 0.06), ('Carpenter', 0.03), ('Gardner', 0.06), ('Farrell', 0.04), ('Clayton', 0.05), ('Akhtar', 0.05), ('Gallagher', 0.05), ('Skinner', 0.04), ('Birch', 0.04), ('Kay', 0.04), ('Barrett', 0.07), ('Bates', 0.06), ('Lucas', 0.04), ('O\'Connor', 0.06), ('Chamberlain', 0.03), ('Chapman', 0.12), ('Ryan', 0.08), ('Thorpe', 0.04), ('Lawson', 0.04), ('Howell', 0.04), ('Martin', 0.23), ('Kelly', 0.16), ('Dobson', 0.04), ('Stevens', 0.1), ('Brennan', 0.04), ('Lloyd', 0.11), ('Quinn', 0.05), ('Morton', 0.04), ('Wilson', 0.35), ('Barnes', 0.11), ('Henry', 0.03), ('Smith', 1.15), 
('Pritchard', 0.05), ('Phillips', 0.18), ('Dixon', 0.1), ('Sharpe', 0.03), ('Robertson', 0.07), ('White', 0.27), ('Bird', 0.06), ('Abbott', 0.04), ('Kirby', 0.04), ('Hussain', 0.11), ('Barber', 0.05), ('Harris', 0.25), ('Doyle', 0.05), ('Jordan', 0.05), ('Burns', 0.06), ('Hodgson', 0.06), ('Atkins', 0.04), ('Stokes', 0.05), ('Rogers', 0.12), ('Parkes', 0.03), ('Brookes', 0.04), ('Herbert', 0.03), ('Gordon', 0.05), ('Kemp', 0.05), ('Webster', 0.07), ('Sinclair', 0.03), ('McLean', 0.03), ('Saunders', 0.09), ('Stephens', 0.05), ('Newton', 0.07), ('Potter', 0.05), ('Storey', 0.03), ('Stanley', 0.04), ('Turnbull', 0.03), ('Duncan', 0.03), ('Rose', 0.08), ('Mills', 0.11), ('Sheppard', 0.03), ('Butcher', 0.03), ('Fry', 0.03), ('Ross', 0.06), ('Shepherd', 0.06), ('Goodwin', 0.05), ('Holt', 0.05), ('Haynes', 0.04), ('Cook', 0.15), ('Ward', 0.21), ('Godfrey', 0.03), ('Stone', 0.07), ('Dodd', 0.04), ('Parsons', 0.07), ('Ingram', 0.03), ('Nixon', 0.03), ('Evans', 0.39), ('Hargreaves', 0.03), ('Owen', 0.11), ('Chan', 0.03), ('Connolly', 0.03), ('Charlton', 0.03), ('Middleton', 0.04), ('Hyde', 0.03), ('Patel', 0.24), ('Owens', 0.03), ('Lamb', 0.04), ('Palmer', 0.11), ('Cooper', 0.22), ('McCarthy', 0.06), ('Black', 0.04), ('Dickinson', 0.04), ('Gilbert', 0.05), ('Leach', 0.03), ('North', 0.03), ('Byrne', 0.06), ('Frost', 0.05), ('Simmons', 0.04), ('Matthews', 0.11), ('Alexander', 0.04), ('Ahmed', 0.1), ('Gibbons', 0.03), ('Stevenson', 0.05), ('Rowley', 0.03), ('Miles', 0.05), ('Hanson', 0.03), ('Bolton', 0.03), ('Craig', 0.03), ('Ali', 0.12), ('Carroll', 0.04), ('Allan', 0.03), ('Sanderson', 0.03), ('Fletcher', 0.1), ('Burton', 0.08), ('Oliver', 0.07), ('Davison', 0.04), ('Douglas', 0.04), ('Field', 0.04), ('Pickering', 0.03), ('Pugh', 0.04), ('Rowe', 0.05), ('Mahmood', 0.03), ('Sykes', 0.03), ('Crawford', 0.03), ('Williams', 0.66), ('Parkin', 0.03), ('Patterson', 0.04), ('Power', 0.03), ('Price', 0.17), ('Murphy', 0.14), ('Hale', 0.03), ('Nicholls', 0.06), ('Hall', 0.25), ('Jones', 0.94), ('Hughes', 0.26), ('Stephenson', 0.05), ('Morley', 0.04), ('Knight', 0.11), ('Kerr', 0.03), ('Heath', 0.04), ('Pollard', 0.03), ('Lowe', 0.07), ('O\'Sullivan', 0.04), ('Buckley', 0.05), ('Bond', 0.05), ('Dennis', 0.03), ('Lewis', 0.25), ('Weston', 0.04), ('Joyce', 0.03), ('Reynolds', 0.09), ('Bishop', 0.06), ('Norris', 0.04), ('Barry', 0.03), ('Whittaker', 0.04), ('Carey', 0.03), ('Hill', 0.22), ('Kent', 0.04), ('Ashton', 0.04), ('Wilkinson', 0.13), ('Powell', 0.12), ('Henderson', 0.06), ('Freeman', 0.06), ('Dunn', 0.07), ('Kaur', 0.09), ('French', 0.04), ('Parry', 0.06), ('Walton', 0.06), ('Fisher', 0.1), ('Naylor', 0.03), ('Duffy', 0.04), ('Humphreys', 0.04), ('Randall', 0.03), ('Bevan', 0.03), ('Doherty', 0.03), ('Moore', 0.21), ('Armstrong', 0.07), ('Sullivan', 0.05), ('Swift', 0.03), ('Pearce', 0.09), ('Tyler', 0.03), ('Bradshaw', 0.04), ('Allen', 0.19), ('Mellor', 0.03), ('Whitehead', 0.05), ('Jackson', 0.24), ('Grant', 0.07), ('Fox', 0.09), ('Wright', 0.28), ('Anderson', 0.13), ('Foster', 0.13), ('Gibbs', 0.04), ('Butler', 0.11), ('Jenkins', 0.1), ('John', 0.04), ('Morrison', 0.04), ('Talbot', 0.03), ('Blackburn', 0.03), ('Osborne', 0.05), ('Flynn', 0.04), ('Richards', 0.14), ('Hurst', 0.03), ('Bibi', 0.05), ('Houghton', 0.03), ('Johnson', 0.34), ('Yates', 0.06), ('Mistry', 0.03), ('Donnelly', 0.03), ('Parkinson', 0.04), ('Thomson', 0.05), ('Woods', 0.07), ('Todd', 0.04), ('Dawson', 0.08), ('Hart', 0.07), ('Graham', 0.1), ('Berry', 0.07), ('Willis', 0.05), ('Miah', 0.04), ('Brooks', 0.09), ('Horton', 0.03), 
('Riley', 0.07), ('Lambert', 0.05), ('Waters', 0.04), ('Lynch', 0.05), ('Moss', 0.06), ('Slater', 0.05), ('Knowles', 0.04), ('Benson', 0.03), ('Adams', 0.13), ('King', 0.2), ('Davies', 0.48), ('Richardson', 0.15), ('Vincent', 0.03), ('Holmes', 0.11), ('Conway', 0.03), ('Marshall', 0.14), ('Faulkner', 0.03), ('Garner', 0.03), ('Booth', 0.08), ('Harrison', 0.2), ('Campbell', 0.11), ('Cole', 0.08), ('Goddard', 0.04), ('Walters', 0.05), ('Ellis', 0.13), ('Edwards', 0.27), ('Peters', 0.04), ('Atkinson', 0.08), ('Wood', 0.24), ('Briggs', 0.04), ('Elliott', 0.09), ('Chandler', 0.03), ('Hope', 0.03), ('Hunter', 0.07), ('Newman', 0.07), ('Pratt', 0.03), ('Rahman', 0.03), ('Hicks', 0.04), ('Cox', 0.14), ('Reid', 0.07), ('Morris', 0.21), ('Banks', 0.04), ('Myers', 0.03), ('Mitchell', 0.16), ('Davey', 0.04), ('Peacock', 0.03), ('Reed', 0.07), ('Carter', 0.15), ('Miller', 0.14), ('Perkins', 0.04), ('Read', 0.05), ('Hilton', 0.03), ('Moran', 0.03), ('Welch', 0.03), ('Vaughan', 0.04), ('Clements', 0.03), ('Griffin', 0.05), ('Russell', 0.1), ('O\'Donnell', 0.03), ('Hobbs', 0.03), ('Marsh', 0.07), ('Porter', 0.07), ('Gill', 0.08), ('Leonard', 0.03), ('McKenzie', 0.03), ('Thornton', 0.04), ('Fitzgerald', 0.03), ('Greenwood', 0.05), ('Pearson', 0.1), ('James', 0.19), ('Coles', 0.03), ('Roberts', 0.33), ('Nelson', 0.05), ('Forster', 0.03), ('Gough', 0.03), ('Mann', 0.05), ('Law', 0.03), ('Barker', 0.1), ('Cartwright', 0.04), ('Bradley', 0.08), ('Sharp', 0.05), ('Warren', 0.06), ('Summers', 0.03), ('Little', 0.04), ('Perry', 0.08), ('Fuller', 0.04), ('West', 0.09), ('Mason', 0.12), ('Finch', 0.03), ('Norton', 0.03), ('Burke', 0.05), ('Holden', 0.04), ('Lee', 0.2), ('Smart', 0.04), ('Bull', 0.04), ('Bryant', 0.04), ('Gray', 0.12), ('Watts', 0.08), ('Brady', 0.03), ('Baker', 0.2), ('Barton', 0.05), ('Davis', 0.17), ('Baxter', 0.05), ('Taylor', 0.53), ('Carr', 0.07), ('Wong', 0.04), ('Cameron', 0.03), ('Gardiner', 0.03), ('Hawkins', 0.07), ('Shaw', 0.15), ('Wallace', 0.05), ('Young', 0.16), ('Shah', 0.06), ('Gregory', 0.07), ('Ball', 0.08), ('Norman', 0.04), ('Lawrence', 0.09), ('Bowen', 0.04), ('Wheeler', 0.05), ('Bartlett', 0.04), ('Sutton', 0.06), ('Lyons', 0.03), ('Hutchinson', 0.05), ('Poole', 0.05), ('Cooke', 0.06), ('Franklin', 0.03), ('Howe', 0.04), ('Walker', 0.27), ('Johnston', 0.05), ('Austin', 0.05), ('Chadwick', 0.03), ('Bell', 0.15), ('Wall', 0.04), ('Woodward', 0.05), ('Preston', 0.04), ('Bennett', 0.16), ('Murray', 0.1), ('Begum', 0.13), ('McDonald', 0.06), ('Hudson', 0.07), ('Cross', 0.06), ('Singh', 0.13), ('Howarth', 0.03), ('Hewitt', 0.05), ('Curtis', 0.06), ('Harding', 0.07), ('May', 0.05), ('Wells', 0.07), ('Giles', 0.03), ('Watson', 0.17), ('Nolan', 0.03), ('Andrews', 0.09), ('Hayward', 0.04), ('Schofield', 0.04), ('Hunt', 0.12), ('Robson', 0.06), ('Arnold', 0.05), ('Morgan', 0.19), ('Coates', 0.03), ('Page', 0.07), ('Simpson', 0.13), ('Stewart', 0.09), ('Robinson', 0.29), ('Fleming', 0.03), ('Scott', 0.18), ('Chambers', 0.06), ('Turner', 0.23), ('Watkins', 0.06), )) prefixes_female = ('Mrs.', 'Ms.', 'Miss', 'Dr.') prefixes_male = ('Mr.', 'Dr.')
Nebucatnetzer/tamagotchi
pygame/lib/python3.4/site-packages/faker/providers/person/en_GB/__init__.py
Python
gpl-2.0
18,349
[ "Amber", "Brian" ]
152817814d03c281acde134d7e95b25f6a404bcc5bc2cfc89086ecc7a95a43a6
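A short usage sketch, assuming the faker package is installed: requesting the en_GB locale picks up this provider, so generated names draw on the first-name lists and weighted surname table defined above.

from faker import Faker

fake = Faker("en_GB")              # locale that loads providers.person.en_GB
print(fake.name())                 # e.g. "Dr. Oliver Bailey" (random each run)
print(fake.first_name_male())
print(fake.last_name())            # sampled using the OrderedDict weights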
import glob import inspect import logging import os from flask import jsonify, Blueprint, request from kalliope.core.ResourcesManager import ResourcesManager, ResourcesManagerException from kalliope.core.Utils import Utils logging.basicConfig() logger = logging.getLogger("kalliope") LIST_EXCLUDED_DIR_NAME = ["__pycache__"] KALLIOPE_WEBSITE_NEURON_URL = "https://raw.githubusercontent.com/kalliope-project/kalliope-project.github.io/" \ "sources/_data/community_neurons.yml" class NeuronsView(Blueprint): def __init__(self, name, import_name, app=None, brain=None, settings=None): self.brain = brain self.settings = settings self.app = app super(NeuronsView, self).__init__(name, import_name) # routes self.add_url_rule('/neurons', view_func=self.get_neurons, methods=['GET']) self.add_url_rule('/neurons/install', view_func=self.install_resource_by_git_url, methods=['POST']) def get_neurons(self): """ list all installed neuron curl -i --user admin:secret -X GET http://127.0.0.1:5000/neurons :return: """ data = { "core": self._get_list_core_neuron(), "community": self._get_list_installed_community_neuron() } logger.debug("[FlaskAPI] get_neurons: all") data = jsonify(data) return data, 200 @staticmethod def install_resource_by_git_url(): """ Install a new resource from the given git URL. Call the resource manager curl -i -H "Content-Type: application/json" \ --user admin:secret \ -X POST \ -d ' { "git_url": "https://github.com/kalliope-project/kalliope_neuron_wikipedia.git", "sudo_password": "azerty" # TODO add kalliope to sudoer file and remove this } ' \ http://127.0.0.1:5000/neurons/install """ if not request.get_json() or 'git_url' not in request.get_json(): data = { "Error": "Wrong parameters, 'git_url' not set" } return jsonify(error=data), 400 parameters = { "git_url": request.get_json()["git_url"], "sudo_password": request.get_json()["sudo_password"] } res_manager = ResourcesManager(**parameters) try: dna = res_manager.install() except ResourcesManagerException as e: data = { "error": "%s" % e } return jsonify(data), 400 if dna is not None: return jsonify(dna.serialize()), 200 else: data = { "Error": "Error during resource installation" } return jsonify(error=data), 400 def _get_list_core_neuron(self): current_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) kalliope_core_neuron_folder = Utils.get_current_file_parent_path(Utils.get_current_file_parent_parent_path(current_path)) + os.sep + "neurons/*" return self._get_neuron_name_list_from_path(kalliope_core_neuron_folder) def _get_list_installed_community_neuron(self): if self.settings.resources.neuron_folder is not None: return self._get_neuron_name_list_from_path(self.settings.resources.neuron_folder + os.sep + "/*") return list() @staticmethod def _get_neuron_name_list_from_path(neuron_path_list): """ From a given path, return all folder name as list. E.g: /home/pi/kalliope/neurons Will return [wikipedia_searcher, gmail] :param neuron_path_list: path to the community neurons folder :return: """ glob_neuron_path_list = glob.glob(neuron_path_list) neuron_name_list = list() for neuron_path in glob_neuron_path_list: if os.path.isdir(neuron_path): neuron_name = os.path.basename(os.path.normpath(neuron_path)) if neuron_name not in LIST_EXCLUDED_DIR_NAME: neuron_name_list.append(neuron_name) return neuron_name_list
kalliope-project/kalliope
kalliope/core/RestAPI/views/neurons_view.py
Python
gpl-3.0
4,175
[ "NEURON" ]
4ffd2ed78bc3559caaaeb41b8b6a896f1b6242b030608a8d1d7c99cd2c7586c4
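A standalone sketch of the folder-scanning logic behind _get_neuron_name_list_from_path; the example path and the folder layout it would find are assumptions.

import glob
import os

def list_neuron_dirs(path_glob, excluded=("__pycache__",)):
    # Return the name of every sub-directory matched by the glob, skipping
    # excluded folder names, mirroring the static helper above.
    names = []
    for path in glob.glob(path_glob):
        if os.path.isdir(path):
            name = os.path.basename(os.path.normpath(path))
            if name not in excluded:
                names.append(name)
    return names

print(list_neuron_dirs("/home/pi/kalliope/neurons/*"))  # e.g. ['wikipedia_searcher', 'gmail']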
"""Tests for visit tracking middleware.""" # pylint: disable=invalid-name from django.conf import settings from django.contrib.auth import get_user_model from django.core.urlresolvers import reverse from django.test import TestCase, RequestFactory from django.test.utils import override_settings from mock import Mock from model_mommy import mommy from open_connect.middleware.visit_tracking import VisitTrackingMiddleware from open_connect.accounts.models import Visit from open_connect.connect_core.utils.basetests import ConnectTestMixin User = get_user_model() middleware = list(settings.MIDDLEWARE_CLASSES) # pylint: disable=line-too-long if 'open_connect.middleware.visit_tracking.VisitTrackingMiddleware' not in middleware: middleware.insert( 0, 'open_connect.middleware.visit_tracking.VisitTrackingMiddleware') @override_settings(MIDDLEWARE_CLASSES=middleware) class VisitTrackingMiddlewareTest(ConnectTestMixin, TestCase): """Tests for visit tracking middleware.""" def test_no_user_attribute(self): """Test that a request without a user attr won't trigger an error""" user = mommy.make(User) request_factory = RequestFactory() visit_tracking_mw = VisitTrackingMiddleware() visit_count = Visit.objects.count() request1 = request_factory.get('/') response1 = Mock() self.assertFalse(hasattr(request1, 'user')) result1 = visit_tracking_mw.process_response(request1, response1) self.assertEqual(response1, result1) self.assertEqual(visit_count, Visit.objects.count()) request2 = request_factory.get('/') response2 = Mock() request2.user = user self.assertTrue(hasattr(request2, 'user')) visit_tracking_mw.process_response(request2, response2) self.assertEqual(Visit.objects.count(), visit_count + 1) def test_unauthenticated_requests_not_logged(self): """Anonymous users shouldn't have their visits logged.""" visit_count = Visit.objects.count() self.client.get('/') self.assertIsNone(self.client.cookies.get('visit_logged')) self.assertEqual(Visit.objects.count(), visit_count) def test_authenticated_requests_logged(self): """An authenticated user should have their visit logged.""" visit_count = Visit.objects.count() User.objects.create_user(username='a@b.local', password='moo') self.client.post( reverse('account_login'), {'login': 'a@b.local', 'password': 'moo'}) self.client.get('/') self.assertEqual(self.client.cookies.get('visit_logged').value, '1') self.assertEqual(Visit.objects.count(), visit_count + 1) def test_authenticated_requests_not_logged_twice_in_same_period(self): """An authenticated users hould only have their visit logged 1x/day.""" visit_count = Visit.objects.count() User.objects.create_user(username='a@b.local', password='moo') self.client.post( reverse('account_login'), {'login': 'a@b.local', 'password': 'moo'}) self.client.get('/') self.assertEqual(Visit.objects.count(), visit_count + 1) self.client.get('/') self.assertEqual(Visit.objects.count(), visit_count + 1)
ofa/connect
open_connect/middleware/tests/test_visit_tracking.py
Python
mit
3,298
[ "VisIt" ]
4480d4a7d94cdcc53b42736bcfa11f16c90dc8ce8afafc90c042206988739729
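A hypothetical settings fragment showing how the middleware under test would be enabled in a Django project; only the dotted path comes from the tests above, and the position and the rest of the stack are assumptions.

# settings.py (sketch): insert visit tracking at the front, as the tests do.
MIDDLEWARE_CLASSES = (
    'open_connect.middleware.visit_tracking.VisitTrackingMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',            # assumed remainder
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)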
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

from peacock.PeacockException import FileExistsException, BadExecutableException
import mooseutils
import subprocess
import os

def runExe(app_path, args, print_errors=True):
    """
    Convenience function to run an executable with arguments and return the output
    Input:
        app_path: str: Path to the executable
        args: either str or list: Arguments to pass to the executable
    Return:
        str: output of running the command
    Exceptions:
        FileExistsException: If there was a problem running the executable
        BadExecutableException: If the executable didn't exit cleanly
    """
    popen_args = [str(app_path)]
    if isinstance(args, str):
        popen_args.append(args)
    else:
        popen_args.extend(args)

    proc = None
    try:
        proc = subprocess.Popen(popen_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError as e:
        msg = "Problem running '%s'" % ' '.join(popen_args)
        if print_errors:
            mooseutils.mooseWarning(msg)
        msg += "\nError: %s" % e
        raise FileExistsException(msg)

    data = proc.communicate()
    stdout_data = data[0].decode("utf-8")
    if proc.returncode != 0:
        msg = "'%s' exited with non zero status %s.\n\n"\
              "Please make sure your application is built and able to execute the given arguments.\n"\
              "Working dir: %s\n"\
              "Output: %s" % (' '.join(popen_args), proc.returncode, os.getcwd(), stdout_data)
        if print_errors:
            mooseutils.mooseWarning(msg)
        raise BadExecutableException(msg)

    return stdout_data
nuclear-wizard/moose
python/peacock/utils/ExeLauncher.py
Python
lgpl-2.1
1,932
[ "MOOSE" ]
7500fc09fdfa32c29070759ee17eef0f4b43068936f7e094bfc9fe645be603cd
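A hedged usage sketch for runExe; the import path is inferred from the file location in the repo, and the binary path is an assumption.

from peacock.utils.ExeLauncher import runExe
from peacock.PeacockException import FileExistsException, BadExecutableException

try:
    # Ask a (hypothetical) application binary for its version string.
    output = runExe("/path/to/my_app-opt", ["--version"], print_errors=False)
    print(output)
except (FileExistsException, BadExecutableException) as err:
    print("Could not run the executable:", err)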
# coding: utf-8 import unittest import numpy as np import neurolab as nl import numpy.random as rand class TestNet(unittest.TestCase): def test_newp(self): # Logical & input = [[0, 0], [0, 1], [1, 0], [1, 1]] target = [[0], [0], [0], [1]] # Create net with 2 inputs and 1 neuron net = nl.net.newp([[0, 1], [0, 1]], 1) # train with delta rule # see net.trainf error = net.train(input, target, epochs=100, show=0, lr=0.1) self.assertEqual(error[-1], 0) def test_newc(self): centr = np.array([[0.2, 0.2], [0.4, 0.4], [0.7, 0.3], [0.2, 0.5]]) rand_norm = 0.05 * rand.randn(100, 4, 2) inp = np.array([centr + r for r in rand_norm]) inp.shape = (100 * 4, 2) rand.shuffle(inp) # Create net with 2 inputs and 4 neurons net = nl.net.newc([[0.0, 1.0], [0.0, 1.0]], 4) # train with rule: Conscience Winner Take All algoritm (CWTA) funcs = [nl.train.train_wta, nl.train.train_cwta] for func in funcs: net.init() error = net.train(inp, epochs=50, show=0) self.assertLess(error[-1], error[0]) self.assertLess(np.sum(centr) - np.sum(net.layers[0].np['w']), 0.1) def test_newlvq(self): # Create train samples input = np.array([[-3, 0], [-2, 1], [-2, -1], [0, 2], [0, 1], [0, -1], [0, -2], [2, 1], [2, -1], [3, 0]]) target = np.array([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1], [0, 1], [1, 0], [1, 0], [1, 0]]) # Create network with 2 layers:4 neurons in input layer(Competitive) # and 2 neurons in output layer(liner) net = nl.net.newlvq(nl.tool.minmax(input), 4, [.6, .4]) # Train network error = net.train(input, target, epochs=100, goal=-1, show=0) self.assertEqual(error[-1], 0) def test_newhop(self): # N E R O target = [[1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1], [0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0]] chars = ['N', 'E', 'R', 'O'] target = np.asfarray(target) target[target == 0] = -1 # Create and train network net = nl.net.newhop(target) output = net.sim(target) test = np.asfarray([0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1]) test[test == 0] = -1 out = net.sim([test]) self.assertEqual(out[0].tolist(), target[0].tolist()) def test_newelm(self): i1 = np.sin(np.arange(0, 20)) i2 = np.sin(np.arange(0, 20)) * 2 t1 = np.ones([1, 20]) t2 = np.ones([1, 20]) * 2 input = np.array([i1, i2, i1, i2]).reshape(20 * 4, 1) target = np.array([t1, t2, t1, t2]).reshape(20 * 4, 1) # Create network with 2 layers net = nl.net.newelm( [[-2, 2]], [10, 1], [nl.trans.TanSig(), nl.trans.PureLin()]) # Set initialized functions and init net.layers[0].initf = nl.init.InitRand([-0.1, 0.1], 'wb') net.layers[1].initf = nl.init.InitRand([-0.1, 0.1], 'wb') net.init() # Train network error = net.train(input, target, epochs=100, show=0, goal=0.1) self.assertLess(error[-1], error[0]) self.assertLess(error[-1], 0.5) def test_newhem(self): target = [[-1, 1, -1, -1, 1, -1, -1, 1, -1], [1, 1, 1, 1, -1, 1, 1, -1, 1], [1, -1, 1, 1, 1, 1, 1, -1, 1], [1, 1, 1, 1, -1, -1, 1, -1, -1], [-1, -1, -1, -1, 1, -1, -1, -1, -1]] input = [[-1, -1, 1, 1, 1, 1, 1, -1, 1], [-1, -1, 1, -1, 1, -1, -1, -1, -1], [-1, -1, -1, -1, 1, -1, -1, 1, -1]] # Create and train network net = nl.net.newhem(target) output = net.sim(target) # Test on train samples (must be [0, 1, 2, 3, 4])" self.assertEqual(np.argmax(output, axis=0).tolist(), [0, 1, 2, 3, 4])
blagasz/python-ann
neurolab/test/net.py
Python
gpl-2.0
4,705
[ "NEURON" ]
c2a6647ef4b21089e81f8a058af5834394f6089626d3bc22acf03d1996072934
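A minimal sketch mirroring test_newp above: a single-neuron perceptron trained on logical AND with the delta rule, then evaluated on two inputs.

import neurolab as nl

inp = [[0, 0], [0, 1], [1, 0], [1, 1]]
tgt = [[0], [0], [0], [1]]
net = nl.net.newp([[0, 1], [0, 1]], 1)          # 2 inputs, 1 neuron
error = net.train(inp, tgt, epochs=100, show=0, lr=0.1)
print(error[-1])                                # expected to reach 0
print(net.sim([[1, 1], [0, 1]]))                # approximately [[1], [0]]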
#!/usr/bin/env python
import re,sys

def get_mRNA(fold_data,speed_scores):
    rna_seq = []
    for datum in fold_data:
        #get rna for segment that was crystalised
        seg_start = int(datum.rna_aligned_start) - (int(datum.protein_aligned_start)-1)*3 - 1
        seg_end = seg_start + len(datum.protein_sequence)*3
        rna = datum.rna_sequence[seg_start:seg_end]
        for i,amino_acid in enumerate(datum.protein_sequence):
            #remove codons which have non-standard nucleotides
            if not rna[i*3:(i+1)*3] in speed_scores[datum.organism].keys():
                rna = rna[:i*3]+'---'+rna[(i+1)*3:]
        #extract fold segments
        for i,pfold in enumerate(datum.protein_domain_fold):
            if pfold == fold:
                seg = datum.protein_domain_residue[i].split(':')[0] # only use first part of fold
                res_seg = map(int,re.findall('\d+', seg))
                if seg[0] == '-': #begins in a negative region
                    res_seg[0] *= -1
                rna_start = (res_seg[0]-int(datum.protein_start))*3
                rna_end = (res_seg[1]-int(datum.protein_start) + 1 )*3
                rna_seq.append(rna[rna_start:rna_end])
    return rna_seq

def align_mRNA(protein_seq,rna_seq):
    aligned_rna = []
    for i,seq in enumerate(protein_seq):
        count = 0
        alignment = ''
        for j,aa in enumerate(seq):
            #skip codons of amino acids not present in crystal structure
            if aa == '-':
                continue
            #iterate until you find the next amino acid in alignment
            while alignments[i][count] == '-':
                alignment += '---'
                count += 1
            #sanity check
            if alignments[i][count] != aa:
                print "Error: Mismatch when assigning RNA alignment"
                print fnames[i]
                print sum([1 for a in alignments[i] if a != '-'])
                sys.exit()
            alignment += rna_seq[i][j*3:(j+1)*3]
            count += 1
        alignment += (len(alignments[0])-len(alignment)/3) * '---'
        aligned_rna.append(alignment)
    return aligned_rna
Alicimo/codon_optimality_code
working_directory/mRNA.py
Python
gpl-2.0
1,821
[ "CRYSTAL" ]
992f68f5f36be47ad7ce48d60932548175c479368f16293b27aa1e392156ee0c
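A self-contained sketch of the residue-to-codon indexing used in get_mRNA and align_mRNA; the sequences below are hypothetical.

rna = "ATGGCTAAA"            # hypothetical coding fragment: Met-Ala-Lys
protein = "MAK"
for i, aa in enumerate(protein):
    codon = rna[i * 3:(i + 1) * 3]   # residue i occupies nucleotides [3i, 3i+3)
    print(aa, codon)
# prints: M ATG, A GCT, K AAA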
#!/usr/bin/env python """ Script that facilitates the modification of a element through the command line. However, the usage of this script will set the element token to the command issuer with a duration of 1 day. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __RCSID__ = '$Id$' from datetime import datetime, timedelta from DIRAC import gLogger, exit as DIRACExit, S_OK, version from DIRAC.Core.Base import Script from DIRAC.Core.Utilities.DIRACScript import DIRACScript from DIRAC.Core.Security.ProxyInfo import getProxyInfo from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient from DIRAC.ResourceStatusSystem.PolicySystem import StateMachine from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations subLogger = None def registerSwitches(): ''' Registers all switches that can be used while calling the script from the command line interface. ''' switches = ( ('element=', 'Element family to be Synchronized ( Site, Resource or Node )'), ('name=', 'Name (or comma-separeted list of names) of the element where the change applies'), ('statusType=', 'StatusType (or comma-separeted list of names), if none applies to all possible statusTypes'), ('status=', 'Status to be changed'), ('reason=', 'Reason to set the Status'), ('VO=', 'VO to change a status for. Default: "all" ' 'VO=all sets the status for all VOs not explicitly listed in the RSS'), ) for switch in switches: Script.registerSwitch('', switch[0], switch[1]) def registerUsageMessage(): ''' Takes the script __doc__ and adds the DIRAC version to it ''' usageMessage = ' DIRAC %s\n' % version usageMessage += __doc__ Script.setUsageMessage(usageMessage) def parseSwitches(): ''' Parses the arguments passed by the user ''' Script.parseCommandLine(ignoreErrors=True) args = Script.getPositionalArgs() if args: subLogger.error("Found the following positional args '%s', but we only accept switches" % args) subLogger.error("Please, check documentation below") Script.showHelp(exitCode=1) switches = dict(Script.getUnprocessedSwitches()) switches.setdefault('statusType', None) switches.setdefault('VO', 'all') for key in ('element', 'name', 'status', 'reason'): if key not in switches: subLogger.error("%s Switch missing" % key) subLogger.error("Please, check documentation below") Script.showHelp(exitCode=1) if not switches['element'] in ('Site', 'Resource', 'Node'): subLogger.error("Found %s as element switch" % switches['element']) subLogger.error("Please, check documentation below") Script.showHelp(exitCode=1) statuses = StateMachine.RSSMachine(None).getStates() if not switches['status'] in statuses: subLogger.error("Found %s as element switch" % switches['element']) subLogger.error("Please, check documentation below") Script.showHelp(exitCode=1) subLogger.debug("The switches used are:") map(subLogger.debug, switches.items()) return switches def checkStatusTypes(statusTypes): ''' To check if values for 'statusType' are valid ''' opsH = Operations().getValue('ResourceStatus/Config/StatusTypes/StorageElement') acceptableStatusTypes = opsH.replace(',', '').split() for statusType in statusTypes: if statusType not in acceptableStatusTypes and statusType != 'all': acceptableStatusTypes.append('all') subLogger.error("'%s' is a wrong value for switch 'statusType'.\n\tThe acceptable values are:\n\t%s" % (statusType, str(acceptableStatusTypes))) if 'all' in statusType: return acceptableStatusTypes return statusTypes def unpack(switchDict): ''' To split and process 
comma-separated list of values for 'name' and 'statusType' ''' switchDictSet = [] names = [] statusTypes = [] if switchDict['name'] is not None: names = list(filter(None, switchDict['name'].split(','))) if switchDict['statusType'] is not None: statusTypes = list(filter(None, switchDict['statusType'].split(','))) statusTypes = checkStatusTypes(statusTypes) if len(names) > 0 and len(statusTypes) > 0: combinations = [(a, b) for a in names for b in statusTypes] for combination in combinations: n, s = combination switchDictClone = switchDict.copy() switchDictClone['name'] = n switchDictClone['statusType'] = s switchDictSet.append(switchDictClone) elif len(names) > 0 and len(statusTypes) == 0: for name in names: switchDictClone = switchDict.copy() switchDictClone['name'] = name switchDictSet.append(switchDictClone) elif len(names) == 0 and len(statusTypes) > 0: for statusType in statusTypes: switchDictClone = switchDict.copy() switchDictClone['statusType'] = statusType switchDictSet.append(switchDictClone) elif len(names) == 0 and len(statusTypes) == 0: switchDictClone = switchDict.copy() switchDictClone['name'] = None switchDictClone['statusType'] = None switchDictSet.append(switchDictClone) return switchDictSet def getTokenOwner(): ''' Function that gets the userName from the proxy ''' proxyInfo = getProxyInfo() if not proxyInfo['OK']: return proxyInfo userName = proxyInfo['Value']['username'] return S_OK(userName) def setStatus(switchDict, tokenOwner): ''' Function that gets the user token, sets the validity for it. Gets the elements in the database for a given name and statusType(s). Then updates the status of all them adding a reason and the token. ''' rssClient = ResourceStatusClient.ResourceStatusClient() elements = rssClient.selectStatusElement(switchDict['element'], 'Status', name=switchDict['name'], statusType=switchDict['statusType'], vO=switchDict['VO'], meta={'columns': ['Status', 'StatusType']}) if not elements['OK']: return elements elements = elements['Value'] if not elements: subLogger.warn('Nothing found for %s, %s, %s %s' % (switchDict['element'], switchDict['name'], switchDict['VO'], switchDict['statusType'])) return S_OK() tomorrow = datetime.utcnow().replace(microsecond=0) + timedelta(days=1) for status, statusType in elements: subLogger.debug('%s %s' % (status, statusType)) if switchDict['status'] == status: subLogger.notice('Status for %s (%s) is already %s. Ignoring..' 
% (switchDict['name'], statusType, status)) continue subLogger.debug('About to set status %s -> %s for %s, statusType: %s, VO: %s, reason: %s' % (status, switchDict['status'], switchDict['name'], statusType, switchDict['VO'], switchDict['reason'])) result = rssClient.modifyStatusElement(switchDict['element'], 'Status', name=switchDict['name'], statusType=statusType, status=switchDict['status'], reason=switchDict['reason'], vO=switchDict['VO'], tokenOwner=tokenOwner, tokenExpiration=tomorrow) if not result['OK']: return result return S_OK() def run(switchDict): ''' Main function of the script ''' tokenOwner = getTokenOwner() if not tokenOwner['OK']: subLogger.error(tokenOwner['Message']) DIRACExit(1) tokenOwner = tokenOwner['Value'] subLogger.notice('TokenOwner is %s' % tokenOwner) result = setStatus(switchDict, tokenOwner) if not result['OK']: subLogger.error(result['Message']) DIRACExit(1) @DIRACScript() def main(): global subLogger global registerUsageMessage subLogger = gLogger.getSubLogger(__file__) # Script initialization registerSwitches() registerUsageMessage() switchDict = parseSwitches() switchDictSets = unpack(switchDict) # Run script for switchDict in switchDictSets: run(switchDict) # Bye DIRACExit(0) if __name__ == "__main__": main()
yujikato/DIRAC
src/DIRAC/ResourceStatusSystem/scripts/dirac_rss_set_status.py
Python
gpl-3.0
8,531
[ "DIRAC" ]
ddf6632f963f6f2a831db264ca3abee2658588378ef8348b84cefc80d8bd2c9d
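A small sketch of how `unpack` fans out comma-separated --name/--statusType values into one status update per (name, statusType) pair; the switch values are hypothetical.

switches = {"element": "Resource", "name": "SE-1,SE-2",
            "statusType": "ReadAccess,WriteAccess", "status": "Banned",
            "reason": "scheduled maintenance", "VO": "all"}

names = [n for n in switches["name"].split(",") if n]
status_types = [s for s in switches["statusType"].split(",") if s]
work_items = [dict(switches, name=n, statusType=t) for n in names for t in status_types]
print(len(work_items))     # 4 combinations -> 4 separate status updates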
""" DataFrame --------- An efficient 2D container for potentially mixed-type time series or other labeled data series. Similar to its R counterpart, data.frame, except providing automatic data alignment and a host of useful data manipulation methods having to do with the labeling information """ from __future__ import annotations import collections from collections import abc import datetime import functools from io import StringIO import itertools import mmap from textwrap import dedent from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Hashable, Iterable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np import numpy.ma as ma from pandas._config import get_option from pandas._libs import ( algos as libalgos, lib, properties, ) from pandas._libs.hashtable import duplicated from pandas._libs.lib import no_default from pandas._typing import ( AggFuncType, AnyArrayLike, ArrayLike, Axes, Axis, ColspaceArgType, CompressionOptions, Dtype, FilePathOrBuffer, FillnaOptions, FloatFormatType, FormattersType, Frequency, IndexKeyFunc, IndexLabel, Level, PythonFuncType, Renamer, Scalar, StorageOptions, Suffixes, ValueKeyFunc, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import ( Appender, Substitution, deprecate_kwarg, deprecate_nonkeyword_arguments, doc, rewrite_axis_style_signature, ) from pandas.util._validators import ( validate_axis_style_args, validate_bool_kwarg, validate_percentile, ) from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, construct_2d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, maybe_box_native, maybe_downcast_to_dtype, validate_numeric_casting, ) from pandas.core.dtypes.common import ( ensure_platform_int, infer_dtype_from_object, is_1d_only_ea_dtype, is_1d_only_ea_obj, is_bool_dtype, is_dataclass, is_datetime64_any_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_object_dtype, is_scalar, is_sequence, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, common as com, generic, nanops, ops, ) from pandas.core.accessor import CachedAccessor from pandas.core.aggregation import ( reconstruct_func, relabel_result, ) from pandas.core.array_algos.take import take_2d_multi from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ( DatetimeArray, ExtensionArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( extract_array, sanitize_array, sanitize_masked_array, ) from pandas.core.generic import ( NDFrame, _shared_docs, ) from pandas.core.indexers import check_key_length from pandas.core.indexes import base as ibase from pandas.core.indexes.api import ( DatetimeIndex, Index, PeriodIndex, ensure_index, ensure_index_from_sequences, ) from pandas.core.indexes.multi import ( MultiIndex, maybe_droplevels, ) from pandas.core.indexing import ( check_bool_indexer, convert_to_index_sliceable, ) from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.core.internals.construction import ( arrays_to_mgr, dataclasses_to_dicts, dict_to_mgr, mgr_to_mgr, ndarray_to_mgr, nested_data_to_arrays, rec_array_to_mgr, reorder_arrays, to_arrays, treat_as_nested, ) from 
pandas.core.reshape.melt import melt from pandas.core.series import Series from pandas.core.sorting import ( get_group_index, lexsort_indexer, nargsort, ) from pandas.io.common import get_handle from pandas.io.formats import ( console, format as fmt, ) from pandas.io.formats.info import ( BaseInfo, DataFrameInfo, ) import pandas.plotting if TYPE_CHECKING: from pandas._typing import ( TimedeltaConvertibleTypes, TimestampConvertibleTypes, ) from pandas.core.groupby.generic import DataFrameGroupBy from pandas.core.resample import Resampler from pandas.io.formats.style import Styler # --------------------------------------------------------------------- # Docstring templates _shared_doc_kwargs = { "axes": "index, columns", "klass": "DataFrame", "axes_single_arg": "{0 or 'index', 1 or 'columns'}", "axis": """axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index': apply function to each column. If 1 or 'columns': apply function to each row.""", "inplace": """ inplace : bool, default False If True, performs operation inplace and returns None.""", "optional_by": """ by : str or list of str Name or list of names to sort by. - if `axis` is 0 or `'index'` then `by` may contain index levels and/or column labels. - if `axis` is 1 or `'columns'` then `by` may contain column levels and/or index labels.""", "optional_labels": """labels : array-like, optional New labels / index to conform the axis specified by 'axis' to.""", "optional_axis": """axis : int or str, optional Axis to target. Can be either the axis name ('index', 'columns') or number (0, 1).""", "replace_iloc": """ This differs from updating with ``.loc`` or ``.iloc``, which require you to specify a location to update with some value.""", } _numeric_only_doc = """numeric_only : bool or None, default None Include only float, int, boolean data. If None, will attempt to use everything, then use only numeric data """ _merge_doc = """ Merge DataFrame or named Series objects with a database-style join. A named Series object is treated as a DataFrame with a single named column. The join is done on columns or indexes. If joining columns on columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes on indexes or indexes on a column or columns, the index will be passed on. When performing a cross merge, no column specifications to merge on are allowed. Parameters ----------%s right : DataFrame or named Series Object to merge with. how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner' Type of merge to be performed. * left: use only keys from left frame, similar to a SQL left outer join; preserve key order. * right: use only keys from right frame, similar to a SQL right outer join; preserve key order. * outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. * inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. * cross: creates the cartesian product from both frames, preserves the order of the left keys. .. versionadded:: 1.2.0 on : label or list Column or index level names to join on. These must be found in both DataFrames. If `on` is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on : label or list, or array-like Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. 
right_on : label or list, or array-like Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index : bool, default False Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index : bool, default False Use the index from the right DataFrame as the join key. Same caveats as left_index. sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead of a string to indicate that the column name from `left` or `right` should be left as-is, with no suffix. At least one of the values must not be None. copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False If True, adds a column to the output DataFrame called "_merge" with information on the source of each row. The column can be given a different name by providing a string argument. The column will have a Categorical type with the value of "left_only" for observations whose merge key only appears in the left DataFrame, "right_only" for observations whose merge key only appears in the right DataFrame, and "both" if the observation's merge key is found in both DataFrames. validate : str, optional If specified, checks if merge is of specified type. * "one_to_one" or "1:1": check if merge keys are unique in both left and right datasets. * "one_to_many" or "1:m": check if merge keys are unique in left dataset. * "many_to_one" or "m:1": check if merge keys are unique in right dataset. * "many_to_many" or "m:m": allowed, but does not result in checks. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- merge_ordered : Merge with optional filling/interpolation. merge_asof : Merge on nearest keys. DataFrame.join : Similar method using indices. Notes ----- Support for specifying index levels as the `on`, `left_on`, and `right_on` parameters was added in version 0.23.0 Support for merging named Series objects was added in version 0.24.0 Examples -------- >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}) >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> df1.merge(df2, left_on='lkey', right_on='rkey') lkey value_x rkey value_y 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2 with specified left and right suffixes appended to any overlapping columns. >>> df1.merge(df2, left_on='lkey', right_on='rkey', ... suffixes=('_left', '_right')) lkey value_left rkey value_right 0 foo 1 foo 5 1 foo 1 foo 8 2 foo 5 foo 5 3 foo 5 foo 8 4 bar 2 bar 6 5 baz 3 baz 7 Merge DataFrames df1 and df2, but raise an exception if the DataFrames have any overlapping columns. 
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False)) Traceback (most recent call last): ... ValueError: columns overlap but no suffix specified: Index(['value'], dtype='object') >>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]}) >>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]}) >>> df1 a b 0 foo 1 1 bar 2 >>> df2 a c 0 foo 3 1 baz 4 >>> df1.merge(df2, how='inner', on='a') a b c 0 foo 1 3 >>> df1.merge(df2, how='left', on='a') a b c 0 foo 1 3.0 1 bar 2 NaN >>> df1 = pd.DataFrame({'left': ['foo', 'bar']}) >>> df2 = pd.DataFrame({'right': [7, 8]}) >>> df1 left 0 foo 1 bar >>> df2 right 0 7 1 8 >>> df1.merge(df2, how='cross') left right 0 foo 7 1 foo 8 2 bar 7 3 bar 8 """ # ----------------------------------------------------------------------- # DataFrame class class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. .. versionchanged:: 0.25.0 If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager @property def _constructor(self) -> type[DataFrame]: return DataFrame _constructor_sliced: type[Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ): if copy is None: if isinstance(data, dict) or data is None: # retain pre-GH#38939 default behavior copy = True else: copy = False if data is None: data = {} if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if isinstance(data, (BlockManager, ArrayManager)): # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) warnings.warn( "Support for MaskedRecords is deprecated and will be " "removed in a future version. Pass " "{name: data[name] for name in data.dtype.names} instead.", FutureWarning, stacklevel=2, ) # a masked array else: data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. 
Series/Index with non-None name mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, (abc.Sequence, ExtensionArray)): data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if treat_as_nested(data): if columns is not None: # error: Argument 1 to "ensure_index" has incompatible type # "Collection[Any]"; expected "Union[Union[Union[ExtensionArray, # ndarray], Index, Series], Sequence[Any]]" columns = ensure_index(columns) # type: ignore[arg-type] arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, columns, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns, dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; # expected "Union[Union[Union[ExtensionArray, ndarray], # Index, Series], Sequence[Any]]" index = ensure_index(index) # type: ignore[arg-type] # Argument 1 to "ensure_index" has incompatible type "Collection[Any]"; # expected "Union[Union[Union[ExtensionArray, ndarray], # Index, Series], Sequence[Any]]" columns = ensure_index(columns) # type: ignore[arg-type] if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr( values, columns, index, columns, dtype=None, typ=manager ) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- @property def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @property def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) @property def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type @property def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) # error: Return type "Union[ndarray, DatetimeArray, TimedeltaArray]" of # "_values" incompatible with return type "ndarray" in supertype "NDFrame" @property def _values( # type: ignore[override] self, ) -> np.ndarray | DatetimeArray | TimedeltaArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ self._consolidate_inplace() mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_obj(mgr.arrays[0]): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return self.values blocks = mgr.blocks if len(blocks) != 1: return self.values arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. 
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ buf = StringIO("") if self._info_repr(): self.info(buf=buf) return buf.getvalue() max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") max_colwidth = get_option("display.max_colwidth") show_dimensions = get_option("display.show_dimensions") if get_option("display.expand_frame_repr"): width, _ = console.get_console_size() else: width = None self.to_string( buf=buf, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, line_width=width, max_colwidth=max_colwidth, show_dimensions=show_dimensions, ) return buf.getvalue() def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return "<pre>" + val + "</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None @Substitution( header_type="bool or sequence", header="Write out the column names. 
If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column", ) @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string( self, buf: FilePathOrBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, min_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. .. versionadded:: 1.0.0 encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- @property def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... 
label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ @Appender(_shared_docs["items"]) def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) @Appender(_shared_docs["items"]) def iteritems(self) -> Iterable[tuple[Hashable, Series]]: yield from self.items() def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k) yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. On python versions < 3.7 regular tuples are returned for DataFrames with a large number of columns (>254). Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... 
print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) @overload def dot(self, other: Series) -> Series: ... @overload def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame | Series) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns ) elif isinstance(other, Series): return self._constructor_sliced(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return self._constructor_sliced(result, index=left.index) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") @overload def __matmul__(self, other: Series) -> Series: ... @overload def __matmul__( self, other: AnyArrayLike | DataFrame | Series ) -> DataFrame | Series: ... def __matmul__( self, other: AnyArrayLike | DataFrame | Series ) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) @classmethod def from_dict( cls, data, orient: str = "columns", dtype: Dtype | None = None, columns=None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... 
columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == "columns": if columns is not None: raise ValueError("cannot use columns parameter with orient='columns'") else: # pragma: no cover raise ValueError("only recognize index or columns for orient") return cls(data, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value=lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ self._consolidate_inplace() result = self._mgr.as_array( transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value ) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def to_dict(self, orient: str = "dict", into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. 
The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn( "DataFrame columns are not unique, some columns will be omitted.", UserWarning, stacklevel=2, ) # GH16122 into_c = com.standardize_mapping(into) orient = orient.lower() # GH32515 if orient.startswith(("d", "l", "s", "r", "i")) and orient not in { "dict", "list", "series", "split", "records", "index", }: warnings.warn( "Using short name for 'orient' is deprecated. Only the " "options: ('dict', list, 'series', 'split', 'records', 'index') " "will be used in a future version. 
Use one of the above " "to silence this warning.", FutureWarning, stacklevel=2, ) if orient.startswith("d"): orient = "dict" elif orient.startswith("l"): orient = "list" elif orient.startswith("sp"): orient = "split" elif orient.startswith("s"): orient = "series" elif orient.startswith("r"): orient = "records" elif orient.startswith("i"): orient = "index" if orient == "dict": return into_c((k, v.to_dict(into)) for k, v in self.items()) elif orient == "list": return into_c((k, v.tolist()) for k, v in self.items()) elif orient == "split": return into_c( ( ("index", self.index.tolist()), ("columns", self.columns.tolist()), ( "data", [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ], ), ) ) elif orient == "series": return into_c((k, v) for k, v in self.items()) elif orient == "records": columns = self.columns.tolist() rows = ( dict(zip(columns, row)) for row in self.itertuples(index=False, name=None) ) return [ into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows ] elif orient == "index": if not self.index.is_unique: raise ValueError("DataFrame index must be unique for orient='index'.") return into_c( (t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None) ) else: raise ValueError(f"orient '{orient}' not understood") def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = False, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. 
See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) @classmethod def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns if exclude is None: exclude = set() else: exclude = set(exclude) result_index = None if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns, typ=manager) return cls(mgr) def to_records( self, index=True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. 
This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. copy copy copy ix_vals = list(map(np.array, zip(*self.index._values))) else: # error: List item 0 has incompatible type "ArrayLike"; expected # "ndarray" ix_vals = [self.index.values] # type: ignore[list-item] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): count = 0 for i, n in enumerate(index_names): if n is None: index_names[i] = f"level_{count}" count += 1 elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. 
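            # Illustrative sketch (not part of the original implementation): with a
            # hypothetical call such as
            #
            #     df.to_records(column_dtypes={"A": "int32", 1: "<f4"})
            #
            # column "A" is matched by name and the second column by its
            # zero-indexed position; a column matching neither key falls through
            # with dtype_mapping = None and keeps its own dtype below.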
if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) @classmethod def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) mgr = arrays_to_mgr( arrays, columns, index, columns, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_stata( self, path: FilePathOrBuffer, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. .. versionchanged:: 1.0.0 Previously this was "fname" convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. 
Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. .. versionchanged:: 1.0.0 Added support for formats 118 and 119. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. compression : str or dict, default 'infer' For on-the-fly compression of the output dta. If string, specifies compression mode. If dict, value at key 'method' specifies compression mode. Compression mode must be one of {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and `fname` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). If dict and compression mode is one of {{'zip', 'gzip', 'bz2'}}, or inferred as one of the above, other entries passed as additional compression options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # mypy: Name 'statawriter' already defined (possibly by an import) from pandas.io.stata import ( # type: ignore[no-redef] StataWriter117 as statawriter, ) else: # versions 118 and 119 # mypy: Name 'statawriter' already defined (possibly by an import) from pandas.io.stata import ( # type: ignore[no-redef] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version # mypy: Too many arguments for "StataWriter" writer = statawriter( # type: ignore[call-arg] path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, **kwargs, ) writer.write_file() @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str or file-like object If a string, it will be used as Root Directory path. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) @doc( Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. >>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: IO[str] | str | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: warnings.warn( "'showindex' is deprecated. Only 'index' will be used " "in a future version. 
Use 'index' to silence this warning.", FutureWarning, stacklevel=2, ) kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: assert not isinstance(handles.handle, (str, mmap.mmap)) handles.handle.writelines(result) return None @doc(storage_options=generic._shared_docs["storage_options"]) @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, path: FilePathOrBuffer | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str or file-like object, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function) or io.BytesIO. The engine fastparquet does not accept file-like objects. If path is None, a bytes object is returned. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. 
>>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) @Substitution( header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.\n\n" " .. versionadded:: 0.25.0\n" " Ability to use str", ) @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html( self, buf: FilePathOrBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ): """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. %(returns)s See Also -------- to_string : Convert DataFrame to a string. 
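        Examples
        --------
        A minimal sketch of typical use (the exact markup varies with display
        options, so only the return type is checked here):

        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> html = df.to_html()
        >>> type(html)
        <class 'str'>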
""" if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) @doc(storage_options=generic._shared_docs["storage_options"]) def to_xml( self, path_or_buffer: FilePathOrBuffer | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: str | list[str] | None = None, elem_cols: str | list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePathOrBuffer | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object or file-like object, optional File to write output to. If None, the output is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. 
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buffer is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." ) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- @Substitution( klass="DataFrame", type_sub=" and columns", max_cols_sub=dedent( """\ max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. By default, the setting in ``pandas.options.display.max_info_columns`` is used.""" ), show_counts_sub=dedent( """\ show_counts : bool, optional Whether to show the non-null counts. 
By default, this is shown only if the DataFrame is smaller than ``pandas.options.display.max_info_rows`` and ``pandas.options.display.max_info_columns``. A value of True always shows the counts, and False never shows the counts. null_counts : bool, optional .. deprecated:: 1.2.0 Use show_counts instead.""" ), examples_sub=dedent( """\ >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values, ... "float_col": float_values}) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) <class 'pandas.core.frame.DataFrame'> RangeIndex: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open("df_info.txt", "w", ... encoding="utf-8") as f: # doctest: +SKIP ... f.write(s) 260 The `memory_usage` parameter allows deep introspection mode, specially useful for big DataFrames and fine-tune memory optimization: >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6) >>> df = pd.DataFrame({ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6), ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6) ... }) >>> df.info() <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 column_1 1000000 non-null object 1 column_2 1000000 non-null object 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB >>> df.info(memory_usage='deep') <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 column_1 1000000 non-null object 1 column_2 1000000 non-null object 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 165.9 MB""" ), see_also_sub=dedent( """\ DataFrame.describe: Generate descriptive statistics of DataFrame columns. DataFrame.memory_usage: Memory usage of DataFrame columns.""" ), version_added_sub="", ) @doc(BaseInfo.render) def info( self, verbose: bool | None = None, buf: IO[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, null_counts: bool | None = None, ) -> None: if null_counts is not None: if show_counts is not None: raise ValueError("null_counts used with show_counts. Use show_counts.") warnings.warn( "null_counts is deprecated. 
Use show_counts instead", FutureWarning, stacklevel=2, ) show_counts = null_counts info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, ) if index: result = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ).append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. 
Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy: new_vals = new_vals.copy() result = self._constructor(new_vals, index=self.columns, columns=self.index) elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy: new_arr = new_arr.copy() result = self._constructor(new_arr, index=self.columns, columns=self.index) return result.__finalize__(self, method="transpose") @property def T(self) -> DataFrame: return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: int = 0): """ Parameters ---------- i : int axis : int Notes ----- If slice passed, the resulting data will be a view. """ # irow if axis == 0: new_values = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_values, np.ndarray) and new_values.base is None result = self._constructor_sliced( new_values, index=self.columns, name=self.index[i], dtype=new_values.dtype, ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] values = self._mgr.iget(i) result = self._box_col_values(values, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). 
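        A minimal sketch of how this internal iterator might be used (private
        helper, shown purely for illustration; the example frame is an
        assumption, not part of the documented API):

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
        >>> [arr.dtype for arr in df._iter_column_arrays()]  # doctest: +SKIP
        [dtype('int64'), dtype('float64')]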
""" for i in range(len(self.columns)): yield self._get_column_array(i) def __getitem__(self, key): key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key): # shortcut if the key is in columns if self.columns.is_unique and key in self.columns: if isinstance(self.columns, MultiIndex): return self._getitem_multilevel(key) return self._get_item_cache(key) # Do we have a slicer (on rows)? indexer = convert_to_index_sliceable(self, key) if indexer is not None: if isinstance(indexer, np.ndarray): indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) # either we have a slice or we have a string that can be converted # to a slice for partial-string date indexing return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.loc._get_listlike_indexer(key, axis=1)[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=3, ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." 
) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self.values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns ) result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. 
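        For illustration only (this is an internal fast path; the public
        counterparts are ``DataFrame.at`` and ``DataFrame.iat``). The example
        frame below is a hedged sketch, not part of the documented API:

        >>> df = pd.DataFrame({"a": [10, 20]})
        >>> df._get_value(1, "a")  # doctest: +SKIP
        20
        >>> df.at[1, "a"]  # public equivalent
        20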
""" if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def __setitem__(self, key, value): key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) if indexer is not None: # either we have a slice or we have a string that can be converted # to a slice for partial-string date indexing return self._setitem_slice(indexer, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif is_list_like(value) and 1 < len( self.columns.get_indexer_for([key]) ) == len(value): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value): # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. 
def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict()) if key.size and not is_bool_dtype(key.values): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols = maybe_droplevels(cols, key) if len(cols) and not cols.equals(value.columns): value = value.reindex(cols, axis=1) # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) def _iset_item_mgr(self, loc: int | slice | np.ndarray, value) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
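        A rough illustration of the conforming behaviour, exercised through the
        public ``__setitem__`` path that dispatches here (hedged sketch; the
        example frame is an assumption):

        >>> df = pd.DataFrame({"a": [1, 2, 3]})
        >>> df["b"] = pd.Series([10, 30], index=[0, 2])
        >>> df  # doctest: +SKIP
           a     b
        0  1  10.0
        1  2   NaN
        2  3  30.0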
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: series = self._ixs(col, axis=1) series._set_value(index, value, takeable=True) return series = self._get_item_cache(col) loc = self.index.get_loc(index) validate_numeric_casting(series.dtype, value) series._values[loc] = value # Note: trying to use series._set_value breaks tests in # tests.frame.indexing.test_indexing and tests.indexing.test_partial except (KeyError, TypeError): # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. name = self.columns[loc] klass = self._constructor_sliced return klass(values, index=self.index, name=name, fastpath=True) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) values = self._mgr.iget(loc) res = self._box_col_values(values, loc).__finalize__(self) cache[item] = res res._set_as_cached(item, self) # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values self._mgr.iset(loc, arraylike) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, inplace: bool = False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. 
You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. .. versionadded:: 0.25.0 Backtick quoting introduced. .. versionadded:: 1.0.0 Expanding functionality of backtick quoting for more than only spaces. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. 
This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, inplace: bool = False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... 
) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") resolvers = kwargs.pop("resolvers", None) kwargs["level"] = kwargs.pop("level", 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) # error: Argument 1 to "append" of "list" has incompatible type # "Type[signedinteger[Any]]"; expected "Type[signedinteger[Any]]" converted_dtypes.append(np.int64) # type: ignore[arg-type] else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[dtype[Any], ExtensionDtype]"; expected # "Type[signedinteger[Any]]" converted_dtypes.append( infer_dtype_from_object(dtype) # type: ignore[arg-type] ) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") # We raise when both include and exclude are empty # Hence, we can just shrink the columns we want to keep keep_these = np.full(self.shape[1], True) def extract_unique_dtypes_from_dtypes_set( dtypes_set: frozenset[Dtype], unique_dtypes: np.ndarray ) -> list[Dtype]: extracted_dtypes = [ unique_dtype for unique_dtype in unique_dtypes if ( issubclass( # error: Argument 1 to "tuple" has incompatible type # "FrozenSet[Union[ExtensionDtype, Union[str, Any], Type[str], # Type[float], Type[int], Type[complex], Type[bool], # Type[object]]]"; expected "Iterable[Union[type, Tuple[Any, # ...]]]" unique_dtype.type, tuple(dtypes_set), # type: ignore[arg-type] ) or ( np.number in dtypes_set and getattr(unique_dtype, "_is_numeric", False) ) ) ] return extracted_dtypes unique_dtypes = self.dtypes.unique() if include: included_dtypes = extract_unique_dtypes_from_dtypes_set( include, unique_dtypes ) keep_these &= self.dtypes.isin(included_dtypes) if exclude: excluded_dtypes = extract_unique_dtypes_from_dtypes_set( exclude, unique_dtypes ) keep_these &= ~self.dtypes.isin(excluded_dtypes) # error: "ndarray" has no attribute "values" return self.iloc[:, keep_these.values] # type: ignore[attr-defined] def insert(self, loc, column, value, allow_duplicates: bool = False) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : int, Series, or array-like allow_duplicates : bool, optional See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We should never get here with DataFrame value if isinstance(value, Series): return _reindex_for_setitem(value, self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) @property def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } def lookup( self, row_labels: Sequence[IndexLabel], col_labels: Sequence[IndexLabel] ) -> np.ndarray: """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. .. deprecated:: 1.2.0 DataFrame.lookup is deprecated, use DataFrame.melt and DataFrame.loc instead. For further details see :ref:`Looking up values by index/column labels <indexing.lookup>`. Parameters ---------- row_labels : sequence The row labels to use for lookup. col_labels : sequence The column labels to use for lookup. Returns ------- numpy.ndarray The found values. """ msg = ( "The 'lookup' method is deprecated and will be" "removed in a future version." "You can use DataFrame.melt and DataFrame.loc" "as a substitute." ) warnings.warn(msg, FutureWarning, stacklevel=2) n = len(row_labels) if n != len(col_labels): raise ValueError("Row labels must have same size as column labels") if not (self.index.is_unique and self.columns.is_unique): # GH#33041 raise ValueError("DataFrame.lookup requires unique index and columns") thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError("One or more row labels was not found") if (cidx == -1).any(): raise KeyError("One or more column labels was not found") flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype="O") for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi(self, axes, copy: bool, fill_value) -> 
DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer # error: Argument 2 to "take_2d_multi" has incompatible type "Tuple[Any, # Any]"; expected "ndarray" new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) @doc(NDFrame.align, **_shared_doc_kwargs) def align( self, other, join: str = "outer", axis: Axis | None = None, level: Level | None = None, copy: bool = True, fill_value=None, method: str | None = None, limit=None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) @overload def set_axis( self, labels, axis: Axis = ..., inplace: Literal[False] = ... ) -> DataFrame: ... @overload def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... @overload def set_axis(self, labels, *, inplace: Literal[True]) -> None: ... @overload def set_axis( self, labels, axis: Axis = ..., inplace: bool = ... ) -> DataFrame | None: ... @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"]) @Appender( """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. >>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 Now, update the labels inplace. >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True) >>> df i ii 0 1 4 1 2 5 2 3 6 """ ) @Substitution( **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) @Appender(NDFrame.set_axis.__doc__) def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): return super().set_axis(labels, axis=axis, inplace=inplace) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.reindex.__doc__) @rewrite_axis_style_signature( "labels", [ ("method", None), ("copy", True), ("level", None), ("fill_value", np.nan), ("limit", None), ("tolerance", None), ], ) def reindex(self, *args, **kwargs) -> DataFrame: axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex") kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop("axis", None) kwargs.pop("labels", None) return super().reindex(**kwargs) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"]) def drop( self, labels=None, axis: Axis = 0, index=None, columns=None, level: Level | None = None, inplace: bool = False, errors: str = "raise", ): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. See the `user guide <advanced.shown_levels>` for more information about the now unused levels. Parameters ---------- labels : single label or list-like Index or column labels to drop. 
axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If False, return a copy. Otherwise, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame or None DataFrame without the removed index or column labels or None if ``inplace=True``. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop( labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors, ) @rewrite_axis_style_signature( "mapper", [("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")], ) def rename( self, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: str = "ignore", ) -> DataFrame | None: """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or function transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). 
columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame or None DataFrame with the renamed axis labels or None if ``inplace=True``. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Rename columns using a mapping: >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 Rename index using a mapping: >>> df.rename(index={0: "x", 1: "y", 2: "z"}) A B x 1 4 y 2 5 z 3 6 Cast index labels to a different type: >>> df.index RangeIndex(start=0, stop=3, step=1) >>> df.rename(index=str).index Index(['0', '1', '2'], dtype='object') >>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters: >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ return super().rename( mapper=mapper, index=index, columns=columns, axis=axis, copy=copy, inplace=inplace, level=level, errors=errors, ) @overload def fillna( self, value=..., method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit=..., downcast=..., ) -> DataFrame: ... @overload def fillna( self, value, method: FillnaOptions | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, *, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, value, *, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, *, method: FillnaOptions | None, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, *, method: FillnaOptions | None, axis: Axis | None, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, value, *, axis: Axis | None, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, value, method: FillnaOptions | None, *, inplace: Literal[True], limit=..., downcast=..., ) -> None: ... @overload def fillna( self, value=..., method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool = ..., limit=..., downcast=..., ) -> DataFrame | None: ... 
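    # The overloads above only refine static typing: calls with inplace=True
    # are typed as returning None, while inplace=False (the default) is typed
    # as returning a DataFrame. The runtime behaviour lives in NDFrame.fillna,
    # to which the concrete definition below delegates.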
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"]) @doc(NDFrame.fillna, **_shared_doc_kwargs) def fillna( self, value: object | ArrayLike | None = None, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool = False, limit=None, downcast=None, ) -> DataFrame | None: return super().fillna( value=value, method=method, axis=axis, inplace=inplace, limit=limit, downcast=downcast, ) def pop(self, item: Hashable) -> Series: """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : label Label of column to be popped. Returns ------- Series Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN """ return super().pop(item=item) @doc(NDFrame.replace, **_shared_doc_kwargs) def replace( self, to_replace=None, value=None, inplace: bool = False, limit=None, regex: bool = False, method: str = "pad", ): return super().replace( to_replace=to_replace, value=value, inplace=inplace, limit=limit, regex=regex, method=method, ) def _replace_columnwise( self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex ): """ Dispatch to Series.replace column-wise. Parameters ---------- mapping : dict of the form {col: (target, value)} inplace : bool regex : bool or same types as `to_replace` in DataFrame.replace Returns ------- DataFrame or None """ # Operate column-wise res = self if inplace else self.copy() ax = self.columns for i in range(len(ax)): if ax[i] in mapping: ser = self.iloc[:, i] target, value = mapping[ax[i]] newobj = ser.replace(target, value, regex=regex) res.iloc[:, i] = newobj if inplace: return return res.__finalize__(self) @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) def shift( self, periods=1, freq: Frequency | None = None, axis: Axis = 0, fill_value=lib.no_default, ) -> DataFrame: axis = self._get_axis_number(axis) ncols = len(self.columns) if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0: # We will infer fill_value to match the closest column # Use a column that we know is valid for our column's dtype GH#38434 label = self.columns[0] if periods > 0: result = self.iloc[:, :-periods] for col in range(min(ncols, abs(periods))): # TODO(EA2D): doing this in a loop unnecessary with 2D EAs # Define filler inside loop so we get a copy filler = self.iloc[:, 0].shift(len(self)) result.insert(0, label, filler, allow_duplicates=True) else: result = self.iloc[:, -periods:] for col in range(min(ncols, abs(periods))): # Define filler inside loop so we get a copy filler = self.iloc[:, -1].shift(len(self)) result.insert( len(result.columns), label, filler, allow_duplicates=True ) result.columns = self.columns.copy() return result return super().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value ) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "keys"]) def set_index( self, keys, drop: bool = True, append: bool = False, inplace: bool = False, verify_integrity: bool = False, ): """ Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). 
The index can replace the existing index or expand on it. Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`~collections.abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False If True, modifies the DataFrame in place (do not create a new object). verify_integrity : bool, default False Check the new index for duplicates. Otherwise defer the check until necessary. Setting to False will improve the performance of this method. Returns ------- DataFrame or None Changed row labels or None if ``inplace=True``. See Also -------- DataFrame.reset_index : Opposite of set_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 Create a MultiIndex using an Index and a column: >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 Create a MultiIndex using two Series: >>> s = pd.Series([1, 2, 3, 4]) >>> df.set_index([s, s**2]) month year sale 1 1 1 2012 55 2 4 4 2014 40 3 9 7 2013 84 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, "inplace") self._check_inplace_and_allows_duplicate_labels(inplace) if not isinstance(keys, list): keys = [keys] err_msg = ( 'The parameter "keys" may be a column key, one-dimensional ' "array, or a list containing only valid column keys and " "one-dimensional arrays." ) missing: list[Hashable] = [] for col in keys: if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, "ndim", 1) != 1: raise ValueError(err_msg) else: # everything else gets tried as a key; see GH 24969 try: found = col in self.columns except TypeError as err: raise TypeError( f"{err_msg}. 
Received column of type {type(col)}" ) from err else: if not found: missing.append(col) if missing: raise KeyError(f"None of {missing} are in the columns") if inplace: frame = self else: frame = self.copy() arrays = [] names: list[Hashable] = [] if append: names = list(self.index.names) if isinstance(self.index, MultiIndex): for i in range(self.index.nlevels): arrays.append(self.index._get_level_values(i)) else: arrays.append(self.index) to_remove: list[Hashable] = [] for col in keys: if isinstance(col, MultiIndex): for n in range(col.nlevels): arrays.append(col._get_level_values(n)) names.extend(col.names) elif isinstance(col, (Index, Series)): # if Index then not MultiIndex (treated above) # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Series]"; expected "Index" arrays.append(col) # type:ignore[arg-type] names.append(col.name) elif isinstance(col, (list, np.ndarray)): # error: Argument 1 to "append" of "list" has incompatible type # "Union[List[Any], ndarray]"; expected "Index" arrays.append(col) # type: ignore[arg-type] names.append(None) elif isinstance(col, abc.Iterator): # error: Argument 1 to "append" of "list" has incompatible type # "List[Any]"; expected "Index" arrays.append(list(col)) # type: ignore[arg-type] names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) names.append(col) if drop: to_remove.append(col) if len(arrays[-1]) != len(self): # check newest element against length of calling frame, since # ensure_index_from_sequences would not raise for append=False. raise ValueError( f"Length mismatch: Expected {len(self)} rows, " f"received array of length {len(arrays[-1])}" ) index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() raise ValueError(f"Index has duplicate keys: {duplicates}") # use set to handle duplicate column names gracefully in case of drop for c in set(to_remove): del frame[c] # clear up memory usage index._cleanup() frame.index = index if not inplace: return frame @overload def reset_index( self, level: Hashable | Sequence[Hashable] | None = ..., drop: bool = ..., inplace: Literal[False] = ..., col_level: Hashable = ..., col_fill: Hashable = ..., ) -> DataFrame: ... @overload def reset_index( self, level: Hashable | Sequence[Hashable] | None, drop: bool, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ..., ) -> None: ... @overload def reset_index( self, *, drop: bool, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ..., ) -> None: ... @overload def reset_index( self, level: Hashable | Sequence[Hashable] | None, *, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ..., ) -> None: ... @overload def reset_index( self, *, inplace: Literal[True], col_level: Hashable = ..., col_fill: Hashable = ..., ) -> None: ... @overload def reset_index( self, level: Hashable | Sequence[Hashable] | None = ..., drop: bool = ..., inplace: bool = ..., col_level: Hashable = ..., col_fill: Hashable = ..., ) -> DataFrame | None: ... @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"]) def reset_index( self, level: Hashable | Sequence[Hashable] | None = None, drop: bool = False, inplace: bool = False, col_level: Hashable = 0, col_fill: Hashable = "", ) -> DataFrame | None: """ Reset the index, or a level of it. Reset the index of the DataFrame, and use the default one instead. 
If the DataFrame has a MultiIndex, this method can remove one or more levels. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame or None DataFrame with the new index or None if ``inplace=True``. See Also -------- DataFrame.set_index : Opposite of reset_index. DataFrame.reindex : Change to new indices or expand indices. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- >>> df = pd.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... ('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column, and a new sequential index is used: >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = pd.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. 
We can place it in another level: >>> df.reset_index(level='class', col_level=1) speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, col_fill='species') species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, col_fill='genus') genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") self._check_inplace_and_allows_duplicate_labels(inplace) if inplace: new_obj = self else: new_obj = self.copy() new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: to_insert: Iterable[tuple[Any, Any | None]] if isinstance(self.index, MultiIndex): names = [ (n if n is not None else f"level_{i}") for i, n in enumerate(self.index.names) ] to_insert = zip(self.index.levels, self.index.codes) else: default = "index" if "index" not in self else "level_0" names = [default] if self.index.name is None else [self.index.name] to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for i, (lev, lab) in reversed(list(enumerate(to_insert))): if level is not None and i not in level: continue name = names[i] if multi_col: col_name = list(name) if isinstance(name, tuple) else [name] if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError( "col_fill=None is incompatible " f"with incomplete column name {name}" ) col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) # to ndarray and maybe infer different dtype level_values = lev._values if level_values.dtype == np.object_: level_values = lib.maybe_convert_objects(level_values) if lab is not None: # if we have the codes, extract the values with a mask level_values = algorithms.take( level_values, lab, allow_fill=True, fill_value=lev._na_value ) new_obj.insert(0, name, level_values) new_obj.index = new_index if not inplace: return new_obj return None # ---------------------------------------------------------------------- # Reindex-based selection methods @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isna(self) -> DataFrame: result = self._constructor(self._mgr.isna(func=isna)) return result.__finalize__(self, method="isna") @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) def isnull(self) -> DataFrame: return self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notna(self) -> DataFrame: return ~self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) def notnull(self) -> DataFrame: return ~self.isna() @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) def dropna( self, axis: Axis = 0, how: str = "any", thresh=None, subset=None, inplace: bool = False, ): """ Remove missing values. 
See the :ref:`User Guide <missing_data>` for more on which values are considered missing, and how to work with missing data. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. * 1, or 'columns' : Drop columns which contain missing value. .. versionchanged:: 1.0.0 Pass tuple or list to drop on multiple axes. Only a single axis is allowed. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame or None DataFrame with NA entries dropped from it or None if ``inplace=True``. See Also -------- DataFrame.isna: Indicate missing values. DataFrame.notna : Indicate existing (non-missing) values. DataFrame.fillna : Replace missing values. Series.dropna : Drop missing values. Index.dropna : Drop missing indices. Examples -------- >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [np.nan, 'Batmobile', 'Bullwhip'], ... "born": [pd.NaT, pd.Timestamp("1940-04-25"), ... pd.NaT]}) >>> df name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the columns where at least one element is missing. >>> df.dropna(axis='columns') name 0 Alfred 1 Batman 2 Catwoman Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred NaN NaT 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'toy']) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip NaT Keep the DataFrame with valid entries in the same variable. 
>>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(axis, (tuple, list)): # GH20987 raise TypeError("supplying multiple axes to axis is no longer supported.") axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(list(np.compress(check, subset))) agg_obj = self.take(indices, axis=agg_axis) count = agg_obj.count(axis=agg_axis) if thresh is not None: mask = count >= thresh elif how == "any": mask = count == len(agg_obj._get_axis(agg_axis)) elif how == "all": mask = count > 0 else: if how is not None: raise ValueError(f"invalid how option: {how}") else: raise TypeError("must specify how or thresh") result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) else: return result @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "subset"]) def drop_duplicates( self, subset: Hashable | Sequence[Hashable] | None = None, keep: Literal["first"] | Literal["last"] | Literal[False] = "first", inplace: bool = False, ignore_index: bool = False, ) -> DataFrame | None: """ Return DataFrame with duplicate rows removed. Considering certain columns is optional. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : bool, default False Whether to drop duplicates in place or to return a copy. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 Returns ------- DataFrame or None DataFrame with duplicates removed or None if ``inplace=True``. See Also -------- DataFrame.value_counts: Count unique combinations of columns. Examples -------- Consider dataset containing ramen rating. >>> df = pd.DataFrame({ ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], ... 'rating': [4, 4, 3.5, 15, 5] ... }) >>> df brand style rating 0 Yum Yum cup 4.0 1 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 By default, it removes duplicate rows based on all columns. >>> df.drop_duplicates() brand style rating 0 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 To remove duplicates on specific column(s), use ``subset``. >>> df.drop_duplicates(subset=['brand']) brand style rating 0 Yum Yum cup 4.0 2 Indomie cup 3.5 To remove duplicates and keep last occurrences, use ``keep``. 
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last') brand style rating 1 Yum Yum cup 4.0 2 Indomie cup 3.5 4 Indomie pack 5.0 """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, "inplace") ignore_index = validate_bool_kwarg(ignore_index, "ignore_index") duplicated = self.duplicated(subset, keep=keep) result = self[-duplicated] if ignore_index: result.index = ibase.default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated( self, subset: Hashable | Sequence[Hashable] | None = None, keep: Literal["first"] | Literal["last"] | Literal[False] = "first", ) -> Series: """ Return boolean Series denoting duplicate rows. Considering certain columns is optional. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to mark. - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series Boolean series for each duplicated rows. See Also -------- Index.duplicated : Equivalent method on index. Series.duplicated : Equivalent method on Series. Series.drop_duplicates : Remove duplicate values from Series. DataFrame.drop_duplicates : Remove duplicate values from DataFrame. Examples -------- Consider dataset containing ramen rating. >>> df = pd.DataFrame({ ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'], ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'], ... 'rating': [4, 4, 3.5, 15, 5] ... }) >>> df brand style rating 0 Yum Yum cup 4.0 1 Yum Yum cup 4.0 2 Indomie cup 3.5 3 Indomie pack 15.0 4 Indomie pack 5.0 By default, for each set of duplicated values, the first occurrence is set on False and all others on True. >>> df.duplicated() 0 False 1 True 2 False 3 False 4 False dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True. >>> df.duplicated(keep='last') 0 True 1 False 2 False 3 False 4 False dtype: bool By setting ``keep`` on False, all duplicates are True. >>> df.duplicated(keep=False) 0 True 1 True 2 False 3 False 4 False dtype: bool To find duplicates on specific column(s), use ``subset``. >>> df.duplicated(subset=['brand']) 0 False 1 True 2 False 3 True 4 True dtype: bool """ if self.empty: return self._constructor_sliced(dtype=bool) def f(vals) -> tuple[np.ndarray, int]: labels, shape = algorithms.factorize(vals, size_hint=len(self)) return labels.astype("i8", copy=False), len(shape) if subset is None: # https://github.com/pandas-dev/pandas/issues/28770 # Incompatible types in assignment (expression has type "Index", variable # has type "Sequence[Any]") subset = self.columns # type: ignore[assignment] elif ( not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns ): subset = (subset,) # needed for mypy since can't narrow types using np.iterable subset = cast(Sequence, subset) # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. 
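        # Duplicate detection below is column-wise: each subset column is
        # factorized to integer codes (via ``f`` above), the per-column codes
        # are combined into a single group id per row with ``get_group_index``,
        # and ``duplicated(ids, keep)`` then flags repeated ids according to
        # ``keep``.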
diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.items() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index( labels, # error: Argument 1 to "tuple" has incompatible type "List[_T]"; # expected "Iterable[int]" tuple(shape), # type: ignore[arg-type] sort=False, xnull=False, ) result = self._constructor_sliced(duplicated(ids, keep), index=self.index) return result.__finalize__(self, method="duplicated") # ---------------------------------------------------------------------- # Sorting # TODO: Just move the sort_values doc here. @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "by"]) @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) # error: Signature of "sort_values" incompatible with supertype "NDFrame" def sort_values( # type: ignore[override] self, by, axis: Axis = 0, ascending=True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ): inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError( f"Length of ascending ({len(ascending)}) != length of by ({len(by)})" ) if len(by) > 1: keys = [self._get_label_or_level_values(x, axis=axis) for x in by] # need to rewrap columns in Series to apply key function if key is not None: # error: List comprehension has incompatible type List[Series]; # expected List[ndarray] keys = [ Series(k, name=name) # type: ignore[misc] for (k, name) in zip(keys, by) ] indexer = lexsort_indexer( keys, orders=ascending, na_position=na_position, key=key ) elif len(by): # len(by) == 1 by = by[0] k = self._get_label_or_level_values(by, axis=axis) # need to rewrap column in Series to apply key function if key is not None: # error: Incompatible types in assignment (expression has type # "Series", variable has type "ndarray") k = Series(k, name=by) # type: ignore[assignment] if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort( k, kind=kind, ascending=ascending, na_position=na_position, key=key ) else: return self.copy() new_data = self._mgr.take( indexer, axis=self._get_block_manager_axis(axis), verify=False ) if ignore_index: new_data.set_axis( self._get_block_manager_axis(axis), ibase.default_index(len(indexer)) ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_values") @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) def sort_index( self, axis: Axis = 0, level: Level | None = None, ascending: bool | int | Sequence[bool | int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ): """ Sort object by labels (along an axis). Returns a new DataFrame sorted by label if `inplace` argument is ``False``, otherwise updates the original DataFrame and returns None. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis along which to sort. The value 0 identifies the rows, and 1 identifies the columns. level : int or level name or list of ints or list of level names If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. 
When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 1.0.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. For MultiIndex inputs, the key is applied *per level*. .. versionadded:: 1.1.0 Returns ------- DataFrame or None The original DataFrame sorted by the labels or None if ``inplace=True``. See Also -------- Series.sort_index : Sort Series by the index. DataFrame.sort_values : Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150], ... columns=['A']) >>> df.sort_index() A 1 4 29 2 100 1 150 5 234 3 By default, it sorts in ascending order, to sort in descending order, use ``ascending=False`` >>> df.sort_index(ascending=False) A 234 3 150 5 100 1 29 2 1 4 A key function can be specified which is applied to the index before sorting. For a ``MultiIndex`` this is applied to each level separately. >>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd']) >>> df.sort_index(key=lambda x: x.str.lower()) a A 1 b 2 C 3 d 4 """ return super().sort_index( axis, level, ascending, inplace, kind, na_position, sort_remaining, ignore_index, key, ) def value_counts( self, subset: Sequence[Hashable] | None = None, normalize: bool = False, sort: bool = True, ascending: bool = False, dropna: bool = True, ): """ Return a Series containing counts of unique rows in the DataFrame. .. versionadded:: 1.1.0 Parameters ---------- subset : list-like, optional Columns to use when counting unique combinations. normalize : bool, default False Return proportions rather than frequencies. sort : bool, default True Sort by frequencies. ascending : bool, default False Sort in ascending order. dropna : bool, default True Don’t include counts of rows that contain NA values. .. versionadded:: 1.3.0 Returns ------- Series See Also -------- Series.value_counts: Equivalent method on Series. Notes ----- The returned Series will have a MultiIndex with one level per input column. By default, rows that contain any NA values are omitted from the result. By default, the resulting Series will be in descending order so that the first element is the most frequently-occurring row. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6], ... 'num_wings': [2, 0, 0, 0]}, ... 
index=['falcon', 'dog', 'cat', 'ant']) >>> df num_legs num_wings falcon 2 2 dog 4 0 cat 4 0 ant 6 0 >>> df.value_counts() num_legs num_wings 4 0 2 2 2 1 6 0 1 dtype: int64 >>> df.value_counts(sort=False) num_legs num_wings 2 2 1 4 0 2 6 0 1 dtype: int64 >>> df.value_counts(ascending=True) num_legs num_wings 2 2 1 6 0 1 4 0 2 dtype: int64 >>> df.value_counts(normalize=True) num_legs num_wings 4 0 0.50 2 2 0.25 6 0 0.25 dtype: float64 With `dropna` set to `False` we can also count rows with NA values. >>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'], ... 'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']}) >>> df first_name middle_name 0 John Smith 1 Anne <NA> 2 John <NA> 3 Beth Louise >>> df.value_counts() first_name middle_name Beth Louise 1 John Smith 1 dtype: int64 >>> df.value_counts(dropna=False) first_name middle_name Anne NaN 1 Beth Louise 1 John Smith 1 NaN 1 dtype: int64 """ if subset is None: subset = self.columns.tolist() counts = self.groupby(subset, dropna=dropna).grouper.size() if sort: counts = counts.sort_values(ascending=ascending) if normalize: counts /= counts.sum() # Force MultiIndex for single column if len(subset) == 1: counts.index = MultiIndex.from_arrays( [counts.index], names=[counts.index.name] ) return counts def nlargest(self, n, columns, keep: str = "first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". 
>>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n, columns, keep: str = "first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 337000, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 337000 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "population". >>> df.nsmallest(3, 'population') population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Iceland 337000 17036 IS When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 337000 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Iceland 337000 17036 IS Nauru 337000 182 NR To order by the smallest values in column "population" and then "GDP", we can specify multiple columns like in the next example. 
>>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Anguilla 11300 311 AI Nauru 337000 182 NR """ return algorithms.SelectNFrame( self, n=n, keep=keep, columns=columns ).nsmallest() @doc( Series.swaplevel, klass=_shared_doc_kwargs["klass"], extra_params=dedent( """axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to swap levels on. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.""" ), examples=dedent( """Examples -------- >>> df = pd.DataFrame( ... {"Grade": ["A", "B", "A", "C"]}, ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> df Grade Final exam History January A Geography February B Coursework History March A Geography April C In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> df.swaplevel() Grade Final exam January History A February Geography B Coursework March History A April Geography C By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> df.swaplevel(0) Grade January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> df.swaplevel(0, 1) Grade History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C""" ), ) def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame: result = self.copy() axis = self._get_axis_number(axis) if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only swap levels on a hierarchical axis.") if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.swaplevel(i, j) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : {0 or 'index', 1 or 'columns'}, default 0 Where to reorder levels. 
Returns ------- DataFrame """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError("Can only reorder levels on a hierarchical axis.") result = self.copy() if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.reorder_levels(order) return result # ---------------------------------------------------------------------- # Arithmetic Methods def _cmp_method(self, other, op): axis = 1 # only relevant for Series other case self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None) # See GH#4537 for discussion of scalar op behavior new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) def _arith_method(self, other, op): if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None): return ops.frame_arith_method_with_reindex(self, other, op) axis = 1 # only relevant for Series other case other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None) new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) _logical_method = _arith_method def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None): """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. Parameters ---------- right : scalar, Series, or DataFrame func : arithmetic or comparison operator axis : {None, 0, 1} Returns ------- DataFrame """ # Get the appropriate array-op to apply to each column/block's values. array_op = ops.get_array_op(func) right = lib.item_from_zerodim(right) if not is_list_like(right): # i.e. 
scalar, faster than checking np.ndim(right) == 0 with np.errstate(all="ignore"): bm = self._mgr.apply(array_op, right=right) return type(self)(bm) elif isinstance(right, DataFrame): assert self.index.equals(right.index) assert self.columns.equals(right.columns) # TODO: The previous assertion `assert right._indexed_same(self)` # fails in cases with empty columns reached via # _frame_arith_method_with_reindex # TODO operate_blockwise expects a manager of the same type with np.errstate(all="ignore"): bm = self._mgr.operate_blockwise( # error: Argument 1 to "operate_blockwise" of "ArrayManager" has # incompatible type "Union[ArrayManager, BlockManager]"; expected # "ArrayManager" # error: Argument 1 to "operate_blockwise" of "BlockManager" has # incompatible type "Union[ArrayManager, BlockManager]"; expected # "BlockManager" right._mgr, # type: ignore[arg-type] array_op, ) return type(self)(bm) elif isinstance(right, Series) and axis == 1: # axis=1 means we want to operate row-by-row assert right.index.equals(self.columns) right = right._values # maybe_align_as_frame ensures we do not have an ndarray here assert not isinstance(right, np.ndarray) with np.errstate(all="ignore"): arrays = [ array_op(_left, _right) for _left, _right in zip(self._iter_column_arrays(), right) ] elif isinstance(right, Series): assert right.index.equals(self.index) # Handle other cases later right = right._values with np.errstate(all="ignore"): arrays = [array_op(left, right) for left in self._iter_column_arrays()] else: # Remaining cases have less-obvious dispatch rules raise NotImplementedError(right) return type(self)._from_arrays( arrays, self.columns, self.index, verify_integrity=False ) def _combine_frame(self, other: DataFrame, func, fill_value=None): # at this point we have `self._indexed_same(other)` if fill_value is None: # since _arith_op may be called in a loop, avoid function call # overhead if possible by doing this check once _arith_op = func else: def _arith_op(left, right): # for the mixed_type case where we iterate over columns, # _arith_op(left, right) is equivalent to # left._binop(right, func, fill_value=fill_value) left, right = ops.fill_binop(left, right, fill_value) return func(left, right) new_data = self._dispatch_frame_op(other, _arith_op) return new_data def _construct_result(self, result) -> DataFrame: """ Wrap the result of an arithmetic, comparison, or logical operation. Parameters ---------- result : DataFrame Returns ------- DataFrame """ out = self._constructor(result, copy=False) # Pin columns instead of passing to constructor for compat with # non-unique columns case out.columns = self.columns out.index = self.index return out def __divmod__(self, other) -> tuple[DataFrame, DataFrame]: # Naive implementation, room for optimization div = self // other mod = self - div * other return div, mod def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]: # Naive implementation, room for optimization div = other // self mod = other - div * self return div, mod # ---------------------------------------------------------------------- # Combination-Related @doc( _shared_docs["compare"], """ Returns ------- DataFrame DataFrame that shows the differences stacked side by side. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. Raises ------ ValueError When the two DataFrames don't have identical labels or shape. See Also -------- Series.compare : Compare with another Series and show differences. 
DataFrame.equals : Test whether two objects contain the same elements. Notes ----- Matching NaNs will not appear as a difference. Can only compare identically-labeled (i.e. same shape, identical row and column labels) DataFrames Examples -------- >>> df = pd.DataFrame( ... {{ ... "col1": ["a", "a", "b", "b", "a"], ... "col2": [1.0, 2.0, 3.0, np.nan, 5.0], ... "col3": [1.0, 2.0, 3.0, 4.0, 5.0] ... }}, ... columns=["col1", "col2", "col3"], ... ) >>> df col1 col2 col3 0 a 1.0 1.0 1 a 2.0 2.0 2 b 3.0 3.0 3 b NaN 4.0 4 a 5.0 5.0 >>> df2 = df.copy() >>> df2.loc[0, 'col1'] = 'c' >>> df2.loc[2, 'col3'] = 4.0 >>> df2 col1 col2 col3 0 c 1.0 1.0 1 a 2.0 2.0 2 b 3.0 4.0 3 b NaN 4.0 4 a 5.0 5.0 Align the differences on columns >>> df.compare(df2) col1 col3 self other self other 0 a c NaN NaN 2 NaN NaN 3.0 4.0 Stack the differences on rows >>> df.compare(df2, align_axis=0) col1 col3 0 self a NaN other c NaN 2 self NaN 3.0 other NaN 4.0 Keep the equal values >>> df.compare(df2, keep_equal=True) col1 col3 self other self other 0 a c 1.0 1.0 2 b b 3.0 4.0 Keep all original rows and columns >>> df.compare(df2, keep_shape=True) col1 col2 col3 self other self other self other 0 a c NaN NaN NaN NaN 1 NaN NaN NaN NaN NaN NaN 2 NaN NaN NaN NaN 3.0 4.0 3 NaN NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN NaN Keep all original rows and columns and also all original values >>> df.compare(df2, keep_shape=True, keep_equal=True) col1 col2 col3 self other self other self other 0 a c 1.0 1.0 1.0 1.0 1 a a 2.0 2.0 2.0 2.0 2 b b 3.0 3.0 3.0 4.0 3 b b NaN NaN 4.0 4.0 4 a a 5.0 5.0 5.0 5.0 """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: DataFrame, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, ) def combine( self, other: DataFrame, func, fill_value=None, overwrite: bool = True ) -> DataFrame: """ Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two series as inputs and return a Series or a scalar. Used to merge the two dataframes column by columns. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. 
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unnecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) series = series.astype(new_dtype, copy=False) otherSeries = otherSeries.astype(new_dtype, copy=False) arr = func(series, otherSeries) if isinstance(new_dtype, np.dtype): # if new_dtype is an EA Dtype, then `func` is expected to return # the correct dtype without any additional casting arr = maybe_downcast_to_dtype(arr, new_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) def combine_first(self, other: DataFrame) -> DataFrame: """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame The result of combining the provided DataFrame with the other object. See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. 
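        Notes
        -----
        Conceptually (a simplified sketch that ignores the dtype promotion
        performed internally), the fill is column-wise on the aligned frames:
        values from the calling frame are kept where they are non-null and
        taken from `other` otherwise, roughly
        ``df1[col].where(df1[col].notna(), df2[col])`` for each shared column.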
Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def combiner(x, y): mask = extract_array(isna(x)) x_values = extract_array(x, extract_numpy=True) y_values = extract_array(y, extract_numpy=True) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) combined = self.combine(other, combiner, overwrite=False) dtypes = { col: find_common_type([self.dtypes[col], other.dtypes[col]]) for col in self.columns.intersection(other.columns) if not is_dtype_equal(combined.dtypes[col], self.dtypes[col]) } if dtypes: combined = combined.astype(dtypes) return combined def update( self, other, join: str = "left", overwrite: bool = True, filter_func=None, errors: str = "ignore", ) -> None: """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-column(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, its name attribute must be set. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 
'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != "left": # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ["ignore", "raise"]: raise ValueError("The parameter errors must be either 'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all="ignore"): mask = ~filter_func(this) | isna(that) else: if errors == "raise": mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unnecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that) # ---------------------------------------------------------------------- # Data reshaping @Appender( """ Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level="Type").mean() Max Speed Type Captive 210.0 Wild 185.0 We can also choose to include NA in group keys or not by setting `dropna` parameter, the default setting is `True`: >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) >>> df.groupby(by=["b"]).sum() a c b 1.0 2 3 2.0 2 5 >>> df.groupby(by=["b"], dropna=False).sum() a c b 1.0 2 3 2.0 2 5 NaN 1 4 >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]] >>> df = pd.DataFrame(l, columns=["a", "b", "c"]) >>> df.groupby(by="a").sum() b c a a 13.0 13.0 b 12.3 123.0 >>> df.groupby(by="a", dropna=False).sum() b c a a 13.0 13.0 b 12.3 123.0 NaN 12.3 33.0 """ ) @Appender(_shared_docs["groupby"] % _shared_doc_kwargs) def groupby( self, by=None, axis: Axis = 0, level: Level | None = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, squeeze: bool | lib.NoDefault = no_default, observed: bool = False, dropna: bool = True, ) -> DataFrameGroupBy: from pandas.core.groupby.generic import DataFrameGroupBy if squeeze is not no_default: warnings.warn( ( "The `squeeze` parameter is deprecated and " "will be removed in a future version." 
), FutureWarning, stacklevel=2, ) else: squeeze = False if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) # https://github.com/python/mypy/issues/7642 # error: Argument "squeeze" to "DataFrameGroupBy" has incompatible type # "Union[bool, NoDefault]"; expected "bool" return DataFrameGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, # type: ignore[arg-type] observed=observed, dropna=dropna, ) _shared_docs[ "pivot" ] = """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation, multiple values will result in a MultiIndex in the columns. See the :ref:`User Guide <reshaping>` for more on reshaping. Parameters ----------%s index : str or object or a list of str, optional Column to use to make new frame's index. If None, uses existing index. .. versionchanged:: 1.1.0 Also accept list of index names. columns : str or object or a list of str Column to use to make new frame's columns. .. versionchanged:: 1.1.0 Also accept list of columns names. values : str, object or a list of the previous, optional Column(s) to use for populating new frame's values. If not specified, all remaining columns will be used and the result will have hierarchically indexed columns. Returns ------- DataFrame Returns reshaped DataFrame. Raises ------ ValueError: When there are any `index`, `columns` combinations with multiple values. `DataFrame.pivot_table` when you need to aggregate. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Notes ----- For finer-tuned control, see hierarchical indexing documentation along with the related stack/unstack methods. Examples -------- >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz') bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar')['baz'] bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo']) baz zoo bar A B C A B C foo one 1 2 3 x y z two 4 5 6 q w t You could also assign a list of column names or a list of index names. >>> df = pd.DataFrame({ ... "lev1": [1, 1, 1, 2, 2, 2], ... "lev2": [1, 1, 2, 1, 1, 2], ... "lev3": [1, 2, 1, 2, 1, 2], ... "lev4": [1, 2, 3, 4, 5, 6], ... "values": [0, 1, 2, 3, 4, 5]}) >>> df lev1 lev2 lev3 lev4 values 0 1 1 1 1 0 1 1 1 2 2 1 2 1 2 1 3 2 3 2 1 2 4 3 4 2 1 1 5 4 5 2 2 2 6 5 >>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values") lev2 1 2 lev3 1 2 1 2 lev1 1 0.0 1.0 2.0 NaN 2 4.0 3.0 NaN 5.0 >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values") lev3 1 2 lev1 lev2 1 1 0.0 1.0 2 2.0 NaN 2 1 4.0 3.0 2 NaN 5.0 A ValueError is raised if there are any duplicates. >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... 
"baz": [1, 2, 3, 4]}) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 Notice that the first two rows are the same for our `index` and `columns` arguments. >>> df.pivot(index='foo', columns='bar', values='baz') Traceback (most recent call last): ... ValueError: Index contains duplicate entries, cannot reshape """ @Substitution("") @Appender(_shared_docs["pivot"]) def pivot(self, index=None, columns=None, values=None) -> DataFrame: from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs[ "pivot_table" ] = """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ----------%s values : column to aggregate, optional index : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. columns : column, Grouper, array, or list of the previous If an array is passed, it must be the same length as the data. The list can contain any of the other types (except list). Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. aggfunc : function, list of functions, dict, default numpy.mean If list of functions passed, the resulting pivot table will have hierarchical columns whose top level are the function names (inferred from the function objects themselves) If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with (in the resulting pivot table, after aggregation). margins : bool, default False Add all row / columns (e.g. for subtotal / grand totals). dropna : bool, default True Do not include columns whose entries are all NaN. margins_name : str, default 'All' Name of the row / column that will contain the totals when margins is True. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionchanged:: 0.25.0 sort : bool, default True Specifies if the result should be sorted. .. versionadded:: 1.3.0 Returns ------- DataFrame An Excel style pivot table. See Also -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. DataFrame.melt: Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. wide_to_long : Wide panel to long format. Less flexible but more user-friendly than melt. Examples -------- >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... "B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... 
columns=['C'], aggfunc=np.sum) >>> table C large small A B bar one 4.0 5.0 two 7.0 6.0 foo one 4.0 1.0 two NaN 6.0 We can also fill missing values using the `fill_value` parameter. >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 The next example aggregates by taking the mean across multiple columns. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 foo large 2.000000 4.500000 small 2.333333 4.333333 We can also calculate multiple types of aggregations for any given value column. >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': [min, max, np.mean]}) >>> table D E mean max mean min A C bar large 5.500000 9.0 7.500000 6.0 small 5.500000 9.0 8.500000 8.0 foo large 2.000000 5.0 4.500000 4.0 small 2.333333 6.0 4.333333 2.0 """ @Substitution("") @Appender(_shared_docs["pivot_table"]) def pivot_table( self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None, margins=False, dropna=True, margins_name="All", observed=False, sort=True, ) -> DataFrame: from pandas.core.reshape.pivot import pivot_table return pivot_table( self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, sort=sort, ) def stack(self, level: Level = -1, dropna: bool = True): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. Parameters ---------- level : int, str, list, default -1 Level(s) to stack from the column axis onto the index axis, defined as one index or label, or a list of indices or labels. dropna : bool, default True Whether to drop rows in the resulting Frame/Series with missing values. Stacking a column level onto the index axis can create combinations of index and column values that are missing from the original dataframe. See Examples section. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... 
columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack() cat weight 0 height 1 dog weight 2 height 3 dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack() height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN **Prescribing the level(s) to be stacked** The first parameter controls which level or levels are stacked: >>> df_multi_level_cols2.stack(0) kg m cat height NaN 2.0 weight 1.0 NaN dog height NaN 4.0 weight 3.0 NaN >>> df_multi_level_cols2.stack([0, 1]) cat height m 2.0 weight kg 1.0 dog height m 4.0 weight kg 3.0 dtype: float64 **Dropping missing values** >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]], ... index=['cat', 'dog'], ... columns=multicol2) Note that rows where all values are missing are dropped by default but this behaviour can be controlled via the dropna keyword parameter: >>> df_multi_level_cols3 weight height kg m cat NaN 1.0 dog 2.0 3.0 >>> df_multi_level_cols3.stack(dropna=False) height weight cat kg NaN NaN m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN >>> df_multi_level_cols3.stack(dropna=True) height weight cat m 1.0 NaN dog kg NaN 2.0 m 3.0 NaN """ from pandas.core.reshape.reshape import ( stack, stack_multiple, ) if isinstance(level, (tuple, list)): result = stack_multiple(self, level, dropna=dropna) else: result = stack(self, level, dropna=dropna) return result.__finalize__(self, method="stack") def explode( self, column: str | tuple | list[str | tuple], ignore_index: bool = False, ) -> DataFrame: """ Transform each element of a list-like to a row, replicating index values. .. versionadded:: 0.25.0 Parameters ---------- column : str or tuple or list thereof Column(s) to explode. For multiple columns, specify a non-empty list with each element be str or tuple, and all specified columns their list-like data on same row of the frame must have matching length. .. versionadded:: 1.3.0 Multi-column explode ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- DataFrame Exploded lists to rows of the subset columns; index will be duplicated for these rows. Raises ------ ValueError : * If columns of the frame are not unique. * If specified columns to explode is empty list. * If specified columns to explode have not matching count of elements rowwise in the frame. See Also -------- DataFrame.unstack : Pivot a level of the (necessarily hierarchical) index labels. DataFrame.melt : Unpivot a DataFrame from wide format to long format. 
Series.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of rows in the output will be non-deterministic when exploding sets. Examples -------- >>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]], ... 'B': 1, ... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]}) >>> df A B C 0 [0, 1, 2] 1 [a, b, c] 1 foo 1 NaN 2 [] 1 [] 3 [3, 4] 1 [d, e] Single-column explode. >>> df.explode('A') A B C 0 0 1 [a, b, c] 0 1 1 [a, b, c] 0 2 1 [a, b, c] 1 foo 1 NaN 2 NaN 1 [] 3 3 1 [d, e] 3 4 1 [d, e] Multi-column explode. >>> df.explode(list('AC')) A B C 0 0 1 a 0 1 1 b 0 2 1 c 1 foo 1 NaN 2 NaN 1 NaN 3 3 1 d 3 4 1 e """ if not self.columns.is_unique: raise ValueError("columns must be unique") columns: list[str | tuple] if is_scalar(column) or isinstance(column, tuple): assert isinstance(column, (str, tuple)) columns = [column] elif isinstance(column, list) and all( map(lambda c: is_scalar(c) or isinstance(c, tuple), column) ): if not column: raise ValueError("column must be nonempty") if len(column) > len(set(column)): raise ValueError("column must be unique") columns = column else: raise ValueError("column must be a scalar, tuple, or list thereof") df = self.reset_index(drop=True) if len(columns) == 1: result = df[columns[0]].explode() else: mylen = lambda x: len(x) if is_list_like(x) else -1 counts0 = self[columns[0]].apply(mylen) for c in columns[1:]: if not all(counts0 == self[c].apply(mylen)): raise ValueError("columns must have matching element counts") result = DataFrame({c: df[c].explode() for c in columns}) result = df.drop(columns, axis=1).join(result) if ignore_index: result.index = ibase.default_index(len(result)) else: result.index = self.index.take(result.index) result = result.reindex(columns=self.columns, copy=False) return result def unstack(self, level: Level = -1, fill_value=None): """ Pivot a level of the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series (the analogue of stack when the columns are not a MultiIndex). Parameters ---------- level : int, str, or list of these, default -1 (last level) Level(s) of index to unstack, can pass level name. fill_value : int, str or dict Replace NaN with this value if the unstack produces missing values. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from `unstack`). Examples -------- >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ... 
('two', 'a'), ('two', 'b')]) >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 >>> s.unstack(level=-1) a b one 1.0 2.0 two 3.0 4.0 >>> s.unstack(level=0) one two a 1.0 3.0 b 2.0 4.0 >>> df = s.unstack(level=0) >>> df.unstack() one a 1.0 b 2.0 two a 3.0 b 4.0 dtype: float64 """ from pandas.core.reshape.reshape import unstack result = unstack(self, level, fill_value) return result.__finalize__(self, method="unstack") @Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"}) def melt( self, id_vars=None, value_vars=None, var_name=None, value_name="value", col_level: Level | None = None, ignore_index: bool = True, ) -> DataFrame: return melt( self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level, ignore_index=ignore_index, ) # ---------------------------------------------------------------------- # Time series-related @doc( Series.diff, klass="Dataframe", extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n " "Take difference over rows (0) or columns (1).\n", other_klass="Series", examples=dedent( """ Difference with previous row >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 'c': [1, 4, 9, 16, 25, 36]}) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(axis=1) a b c 0 NaN 0 0 1 NaN -1 3 2 NaN -1 7 3 NaN -1 13 4 NaN 0 20 5 NaN 2 28 Difference with 3rd previous row >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN Overflow in input dtype >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8) >>> df.diff() a 0 NaN 1 255.0""" ), ) def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: if not isinstance(periods, int): if not (is_float(periods) and periods.is_integer()): raise ValueError("periods must be an integer") periods = int(periods) axis = self._get_axis_number(axis) if axis == 1 and periods != 0: return self - self.shift(periods, axis=axis) new_data = self._mgr.diff(n=periods, axis=axis) return self._constructor(new_data).__finalize__(self, "diff") # ---------------------------------------------------------------------- # Function application def _gotitem( self, key: IndexLabel, ndim: int, subset: DataFrame | Series | None = None, ) -> DataFrame | Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ if subset is None: subset = self elif subset.ndim == 1: # is Series return subset # TODO: _shallow_copy(subset)? return subset[key] _agg_summary_and_see_also_doc = dedent( """ The aggregation operations are always performed over an axis, either the index (default) or the column axis. This behavior is different from `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`, `var`), where the default is to compute the aggregation of the flattened array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d, axis=0)``. `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Perform any type of operations. 
DataFrame.transform : Perform transformation type operations. core.groupby.GroupBy : Perform operations over groups. core.resample.Resampler : Perform operations over resampled bins. core.window.Rolling : Perform operations over rolling window. core.window.Expanding : Perform operations over expanding window. core.window.ExponentialMovingWindow : Perform operation over exponential weighted window. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> df = pd.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) Aggregate these functions over the rows. >>> df.agg(['sum', 'min']) A B C sum 12.0 15.0 18.0 min 1.0 2.0 3.0 Different aggregations per column. >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']}) A B sum 12.0 NaN min 1.0 2.0 max NaN 8.0 Aggregate different functions over the columns and rename the index of the resulting DataFrame. >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean)) A B C x 7.0 NaN NaN y NaN 2.0 NaN z NaN NaN 6.0 Aggregate over the columns. >>> df.agg("mean", axis="columns") 0 2.0 1 5.0 2 8.0 3 NaN dtype: float64 """ ) @doc( _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): from pandas.core.apply import frame_apply axis = self._get_axis_number(axis) relabeling, func, columns, order = reconstruct_func(func, **kwargs) op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) result = op.agg() if relabeling: # This is to keep the order to columns occurrence unchanged, and also # keep the order of new columns occurrence unchanged # For the return values of reconstruct_func, if relabeling is # False, columns and order will be None. assert columns is not None assert order is not None result_in_dict = relabel_result(result, func, columns, order) result = DataFrame(result_in_dict, index=columns) return result agg = aggregate @doc( _shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame: from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) result = op.transform() assert isinstance(result, DataFrame) return result def apply( self, func: AggFuncType, axis: Axis = 0, raw: bool = False, result_type=None, args=(), **kwargs, ): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. raw : bool, default False Determines if row or column is passed as a Series or ndarray object: * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. 
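For example, assuming a numeric frame ``df``, ``df.apply(np.sum, raw=True)`` passes each column (with the default ``axis=0``) to ``np.sum`` as a plain ndarray rather than a Series.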
result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwargs Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing ``result_type='expand'`` will expand list-like results to columns of a Dataframe >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply( self, func=func, axis=axis, raw=raw, result_type=result_type, args=args, kwargs=kwargs, ) return op.apply() def applymap( self, func: PythonFuncType, na_action: str | None = None, **kwargs ) -> DataFrame: """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. na_action : {None, 'ignore'}, default None If ‘ignore’, propagate NaN values, without passing them to func. .. versionadded:: 1.2 **kwargs Additional keyword arguments to pass as keywords arguments to `func`. .. versionadded:: 1.3.0 Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. 
Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Like Series.map, NA values can be ignored: >>> df_copy = df.copy() >>> df_copy.iloc[0, 0] = pd.NA >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore') 0 1 0 <NA> 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. >>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ if na_action not in {"ignore", None}: raise ValueError( f"na_action must be 'ignore' or None. Got {repr(na_action)}" ) ignore_na = na_action == "ignore" func = functools.partial(func, **kwargs) # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func, ignore_na=ignore_na) return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na) return self.apply(infer).__finalize__(self, "applymap") # ---------------------------------------------------------------------- # Merging / joining methods def append( self, other, ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> DataFrame: """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. verify_integrity : bool, default False If True, raise ValueError on creating index with duplicates. sort : bool, default False Sort columns if the columns of `self` and `other` are not aligned. .. versionchanged:: 1.0.0 Changed to not sort by default. Returns ------- DataFrame A new DataFrame consisting of the rows of caller and the rows of `other`. See Also -------- concat : General function to concatenate DataFrame or Series objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=['x', 'y']) >>> df A B x 1 2 y 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'), index=['x', 'y']) >>> df.append(df2) A B x 1 2 y 3 4 x 5 6 y 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... 
ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): if not ignore_index: raise TypeError("Can only append a dict if ignore_index=True") other = Series(other) if other.name is None and not ignore_index: raise TypeError( "Can only append a Series if ignore_index=True " "or if the Series has a name" ) index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) combined_columns = self.columns.append(idx_diff) other = ( other.reindex(combined_columns, copy=False) .to_frame() .T.infer_objects() .rename_axis(index.names, copy=False) ) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list): if not other: pass elif not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self, *other] else: to_concat = [self, other] return ( concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort, ) ).__finalize__(self, method="append") def join( self, other: DataFrame | Series, on: IndexLabel | None = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", sort: bool = False, ) -> DataFrame: """ Join columns of another DataFrame. Join columns with `other` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- other : DataFrame, Series, or list of DataFrame Index should be similar to one of the columns in this one. If a Series is passed, its name attribute must be set, and that will be used as the column name in the resulting joined DataFrame. on : str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `other`, otherwise joins index-on-index. If multiple values given, the `other` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how : {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use calling frame's index (or column if on is specified) * right: use `other`'s index. * outer: form union of calling frame's index (or column if on is specified) with `other`'s index, and sort it. lexicographically. * inner: form intersection of calling frame's index (or column if on is specified) with `other`'s index, preserving the order of the calling's one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from right frame's overlapping columns. sort : bool, default False Order result DataFrame lexicographically by the join key. If False, the order of the join key depends on the join type (how keyword). Returns ------- DataFrame A dataframe containing columns from both the caller and `other`. See Also -------- DataFrame.merge : For column(s)-on-column(s) operations. Notes ----- Parameters `on`, `lsuffix`, and `rsuffix` are not supported when passing a list of `DataFrame` objects. Support for specifying index levels as the `on` parameter was added in version 0.23.0. Examples -------- >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'], ... 
'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']}) >>> df key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 4 K4 A4 5 K5 A5 >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}) >>> other key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> df.join(other, lsuffix='_caller', rsuffix='_other') key_caller A key_other B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 NaN NaN 4 K4 A4 NaN NaN 5 K5 A5 NaN NaN If we want to join using the key columns, we need to set key to be the index in both `df` and `other`. The joined DataFrame will have key as its index. >>> df.set_index('key').join(other.set_index('key')) A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 NaN K4 A4 NaN K5 A5 NaN Another option to join using the key columns is to use the `on` parameter. DataFrame.join always uses `other`'s index but we can use any column in `df`. This method preserves the original DataFrame's index in the result. >>> df.join(other.set_index('key'), on='key') key A B 0 K0 A0 B0 1 K1 A1 B1 2 K2 A2 B2 3 K3 A3 NaN 4 K4 A4 NaN 5 K5 A5 NaN """ return self._join_compat( other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort ) def _join_compat( self, other: DataFrame | Series, on: IndexLabel | None = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", sort: bool = False, ): from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge if isinstance(other, Series): if other.name is None: raise ValueError("Other Series must have a name") other = DataFrame({other.name: other}) if isinstance(other, DataFrame): if how == "cross": return merge( self, other, how=how, on=on, suffixes=(lsuffix, rsuffix), sort=sort, ) return merge( self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort, ) else: if on is not None: raise ValueError( "Joining multiple DataFrames only supported for joining on index" ) frames = [self] + list(other) can_concat = all(df.index.is_unique for df in frames) # join indexes only using concat if can_concat: if how == "left": res = concat( frames, axis=1, join="outer", verify_integrity=True, sort=sort ) return res.reindex(self.index, copy=False) else: return concat( frames, axis=1, join=how, verify_integrity=True, sort=sort ) joined = frames[0] for frame in frames[1:]: joined = merge( joined, frame, how=how, left_index=True, right_index=True ) return joined @Substitution("") @Appender(_merge_doc, indents=2) def merge( self, right: DataFrame | Series, how: str = "inner", on: IndexLabel | None = None, left_on: IndexLabel | None = None, right_on: IndexLabel | None = None, left_index: bool = False, right_index: bool = False, sort: bool = False, suffixes: Suffixes = ("_x", "_y"), copy: bool = True, indicator: bool = False, validate: str | None = None, ) -> DataFrame: from pandas.core.reshape.merge import merge return merge( self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate, ) def round( self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs ) -> DataFrame: """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. 
Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. *args Additional keywords have no effect but might be accepted for compatibility with numpy. **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame A DataFrame with the affected columns rounded to the specified number of decimal places. See Also -------- numpy.around : Round a numpy array to the given number of decimals. Series.round : Round a Series to the given number of decimals. Examples -------- >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.21 0.32 1 0.01 0.67 2 0.66 0.03 3 0.21 0.18 By providing an integer each column is rounded to the same number of decimal places >>> df.round(1) dogs cats 0 0.2 0.3 1 0.0 0.7 2 0.7 0.0 3 0.2 0.2 With a dict, the number of places for specific columns can be specified with the column names as key and the number of decimal places as value >>> df.round({'dogs': 1, 'cats': 0}) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 Using a Series, the number of places for specific columns can be specified with the column names as index and the number of decimal places as value >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) dogs cats 0 0.2 0.0 1 0.0 1.0 2 0.7 0.0 3 0.2 0.0 """ from pandas.core.reshape.concat import concat def _dict_round(df, decimals): for col, vals in df.items(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(s, decimals): if is_integer_dtype(s) or is_float_dtype(s): return s.round(decimals) return s nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series) and not decimals.index.is_unique: raise ValueError("Index of decimals must be unique") if is_dict_like(decimals) and not all( is_integer(value) for _, value in decimals.items() ): raise TypeError("Values in decimals must be integers") new_cols = list(_dict_round(self, decimals)) elif is_integer(decimals): # Dispatch to Series.round new_cols = [_series_round(v, decimals) for _, v in self.items()] else: raise TypeError("decimals must be an integer, a dict-like or a Series") if len(new_cols) > 0: return self._constructor( concat(new_cols, axis=1), index=self.index, columns=self.columns ) else: return self # ---------------------------------------------------------------------- # Statistical methods, etc. def corr( self, method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson", min_periods: int = 1, ) -> DataFrame: """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'kendall', 'spearman'} or callable Method of correlation: * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. Returns ------- DataFrame Correlation matrix. See Also -------- DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. 
Series.corr : Compute the correlation between two Series. Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) dogs cats dogs 1.0 0.3 cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False) if method == "pearson": correl = libalgos.nancorr(mat, minp=min_periods) elif method == "spearman": correl = libalgos.nancorr_spearman(mat, minp=min_periods) elif method == "kendall": correl = libalgos.nancorr_kendall(mat, minp=min_periods) elif callable(method): if min_periods is None: min_periods = 1 mat = mat.T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for i, ac in enumerate(mat): for j, bc in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1.0 elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) return self._constructor(correl, index=idx, columns=cols) def cov(self, min_periods: int | None = None, ddof: int | None = 1) -> DataFrame: """ Compute pairwise covariance of columns, excluding NA/null values. Compute the pairwise covariance among the series of a DataFrame. The returned data frame is the `covariance matrix <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns of the DataFrame. Both NA and null values are automatically excluded from the calculation. (See the note below about bias from missing values.) A threshold can be set for the minimum number of observations for each value created. Comparisons with observations below this threshold will be returned as ``NaN``. This method is generally used for the analysis of time series data to understand the relationship between different measures across time. Parameters ---------- min_periods : int, optional Minimum number of observations required per pair of columns to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- DataFrame The covariance matrix of the series of the DataFrame. See Also -------- Series.cov : Compute covariance with another Series. core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance. core.window.Expanding.cov : Expanding sample covariance. core.window.Rolling.cov : Rolling sample covariance. Notes ----- Returns the covariance matrix of the DataFrame's time series. The covariance is normalized by N-ddof. For DataFrames that have Series that are missing data (assuming that data is `missing at random <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__) the returned covariance matrix will be an unbiased estimate of the variance and covariance between the member Series. However, for many applications this estimate may not be acceptable because the estimate covariance matrix is not guaranteed to be positive semi-definite. This could lead to estimate correlations having absolute values which are greater than one, and/or a non-invertible covariance matrix. 
See `Estimation of covariance matrices <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_ matrices>`__ for more details. Examples -------- >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)], ... columns=['dogs', 'cats']) >>> df.cov() dogs cats dogs 0.666667 -1.000000 cats -1.000000 1.666667 >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(1000, 5), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df.cov() a b c d e a 0.998438 -0.020161 0.059277 -0.008943 0.014144 b -0.020161 1.059352 -0.008543 -0.024738 0.009826 c 0.059277 -0.008543 1.010670 -0.001486 -0.000271 d -0.008943 -0.024738 -0.001486 0.921297 -0.013692 e 0.014144 0.009826 -0.000271 -0.013692 0.977795 **Minimum number of periods** This method also supports an optional ``min_periods`` keyword that specifies the required minimum number of non-NA observations for each column pair in order to have a valid result: >>> np.random.seed(42) >>> df = pd.DataFrame(np.random.randn(20, 3), ... columns=['a', 'b', 'c']) >>> df.loc[df.index[:5], 'a'] = np.nan >>> df.loc[df.index[5:10], 'b'] = np.nan >>> df.cov(min_periods=12) a b c a 0.316741 NaN -0.150812 b NaN 1.248003 0.191417 c -0.150812 0.191417 0.895202 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns idx = cols.copy() mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False) if notna(mat).all(): if min_periods is not None and min_periods > len(mat): base_cov = np.empty((mat.shape[1], mat.shape[1])) base_cov.fill(np.nan) else: base_cov = np.cov(mat.T, ddof=ddof) base_cov = base_cov.reshape((len(cols), len(cols))) else: base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) return self._constructor(base_cov, index=idx, columns=cols) def corrwith(self, other, axis: Axis = 0, drop=False, method="pearson") -> Series: """ Compute pairwise correlation. Pairwise correlation is computed between rows or columns of DataFrame with rows or columns of Series or DataFrame. DataFrames are first aligned along both axes before computing the correlations. Parameters ---------- other : DataFrame, Series Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. drop : bool, default False Drop missing indices from result. method : {'pearson', 'kendall', 'spearman'} or callable Method of correlation: * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Returns ------- Series Pairwise correlations. See Also -------- DataFrame.corr : Compute pairwise correlation of columns. 
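Examples
--------
A minimal, illustrative pair of frames (hypothetical data):

>>> df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
>>> df2 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1, 2, 3, 4]})
>>> df1.corrwith(df2)
a    1.0
b   -1.0
dtype: float64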
""" axis = self._get_axis_number(axis) this = self._get_numeric_data() if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method), axis=axis) other = other._get_numeric_data() left, right = this.align(other, join="inner", copy=False) if axis == 1: left = left.T right = right.T if method == "pearson": # mask missing values left = left + right * 0 right = right + left * 0 # demeaned data ldem = left - left.mean() rdem = right - right.mean() num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std() * right.std() correl = num / dom elif method in ["kendall", "spearman"] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = self._constructor_sliced( map(c, zip(left.values.T, right.values.T)), index=left.columns ) else: raise ValueError( f"Invalid method {method} was passed, " "valid methods are: 'pearson', 'kendall', " "'spearman', or callable" ) if not drop: # Find non-matching labels along the given axis # and append missing correlations (GH 22375) raxis = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl # ---------------------------------------------------------------------- # ndarray-like stats methods def count( self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False ): """ Count non-NA cells for each column or row. The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending on `pandas.options.mode.use_inf_as_na`) are considered NA. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are generated for each row. level : int or str, optional If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns ------- Series or DataFrame For each column/row the number of non-NA/null entries. If `level` is specified returns a `DataFrame`. See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.value_counts: Count unique combinations of columns. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = pd.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 dtype: int64 Counts for each **row**: >>> df.count(axis='columns') 0 3 1 2 2 3 3 3 4 3 dtype: int64 """ axis = self._get_axis_number(axis) if level is not None: warnings.warn( "Using the level keyword in DataFrame and Series aggregations is " "deprecated and will be removed in a future version. Use groupby " "instead. 
df.count(level=1) should use df.groupby(level=1).count().", FutureWarning, stacklevel=2, ) return self._count_level(level, axis=axis, numeric_only=numeric_only) if numeric_only: frame = self._get_numeric_data() else: frame = self # GH #423 if len(frame._get_axis(axis)) == 0: result = self._constructor_sliced(0, index=frame._get_agg_axis(axis)) else: if frame._is_mixed_type or frame._mgr.any_extension_types: # the or any_extension_types is really only hit for single- # column frames with an extension array result = notna(frame).sum(axis=axis) else: # GH13407 series_counts = notna(frame).sum(axis=axis) counts = series_counts.values result = self._constructor_sliced( counts, index=frame._get_agg_axis(axis) ) return result.astype("int64") def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False): if numeric_only: frame = self._get_numeric_data() else: frame = self count_axis = frame._get_axis(axis) agg_axis = frame._get_agg_axis(axis) if not isinstance(count_axis, MultiIndex): raise TypeError( f"Can only count levels on hierarchical {self._get_axis_name(axis)}." ) # Mask NaNs: Mask rows or columns where the index level is NaN, and all # values in the DataFrame that are NaN if frame._is_mixed_type: # Since we have mixed types, calling notna(frame.values) might # upcast everything to object values_mask = notna(frame).values else: # But use the speedup when we have homogeneous dtypes values_mask = notna(frame.values) index_mask = notna(count_axis.get_level_values(level=level)) if axis == 1: mask = index_mask & values_mask else: mask = index_mask.reshape(-1, 1) & values_mask if isinstance(level, str): level = count_axis._get_level_number(level) level_name = count_axis._names[level] level_index = count_axis.levels[level]._rename(name=level_name) level_codes = ensure_platform_int(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis) if axis == 1: result = self._constructor(counts, index=agg_axis, columns=level_index) else: result = self._constructor(counts, index=level_index, columns=agg_axis) return result def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool | None = None, filter_type=None, **kwds, ): assert filter_type is None or filter_type == "bool", filter_type out_dtype = "bool" if filter_type == "bool" else None own_dtypes = [arr.dtype for arr in self._iter_column_arrays()] dtype_is_dt = np.array( [is_datetime64_any_dtype(dtype) for dtype in own_dtypes], dtype=bool, ) if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any(): warnings.warn( "DataFrame.mean and DataFrame.median with numeric_only=None " "will include datetime64 and datetime64tz columns in a " "future version.", FutureWarning, stacklevel=5, ) # Non-copy equivalent to # cols = self.columns[~dtype_is_dt] # self = self[cols] predicate = lambda x: not is_datetime64_any_dtype(x.dtype) mgr = self._mgr._get_data_subset(predicate) self = type(self)(mgr) # TODO: Make other agg func handle axis=None properly GH#21597 axis = self._get_axis_number(axis) labels = self._get_agg_axis(axis) assert axis in [0, 1] def func(values: np.ndarray): # We only use this in the case that operates on self.values return op(values, axis=axis, skipna=skipna, **kwds) def blk_func(values, axis=1): if isinstance(values, ExtensionArray): if not is_1d_only_ea_obj(values) and not isinstance( self._mgr, ArrayManager ): return values._reduce(name, axis=1, skipna=skipna, **kwds) return values._reduce(name, skipna=skipna, **kwds) else: return 
op(values, axis=axis, skipna=skipna, **kwds) def _get_data() -> DataFrame: if filter_type is None: data = self._get_numeric_data() else: # GH#25101, GH#24434 assert filter_type == "bool" data = self._get_bool_data() return data if numeric_only is not None or axis == 0: # For numeric_only non-None and axis non-None, we know # which blocks to use and no try/except is needed. # For numeric_only=None only the case with axis==0 and no object # dtypes are unambiguous can be handled with BlockManager.reduce # Case with EAs see GH#35881 df = self if numeric_only is True: df = _get_data() if axis == 1: df = df.T axis = 0 ignore_failures = numeric_only is None # After possibly _get_data and transposing, we are now in the # simple case where we can use BlockManager.reduce res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures) out = df._constructor(res).iloc[0] if out_dtype is not None: out = out.astype(out_dtype) if axis == 0 and len(self) == 0 and name in ["sum", "prod"]: # Even if we are object dtype, follow numpy and return # float64, see test_apply_funcs_over_empty out = out.astype(np.float64) if numeric_only is None and out.shape[0] != df.shape[1]: # columns have been dropped GH#41480 arg_name = "numeric_only" if name in ["all", "any"]: arg_name = "bool_only" warnings.warn( "Dropping of nuisance columns in DataFrame reductions " f"(with '{arg_name}=None') is deprecated; in a future " "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, stacklevel=5, ) return out assert numeric_only is None data = self values = data.values try: result = func(values) except TypeError: # e.g. in nanops trying to convert strs to float data = _get_data() labels = data._get_agg_axis(axis) values = data.values with np.errstate(all="ignore"): result = func(values) # columns have been dropped GH#41480 arg_name = "numeric_only" if name in ["all", "any"]: arg_name = "bool_only" warnings.warn( "Dropping of nuisance columns in DataFrame reductions " f"(with '{arg_name}=None') is deprecated; in a future " "version this will raise TypeError. Select only valid " "columns before calling the reduction.", FutureWarning, stacklevel=5, ) if hasattr(result, "dtype"): if filter_type == "bool" and notna(result).all(): result = result.astype(np.bool_) elif filter_type is None and is_object_dtype(result.dtype): try: result = result.astype(np.float64) except (ValueError, TypeError): # try to coerce to the original dtypes item by item if we can pass result = self._constructor_sliced(result, index=labels) return result def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series: """ Count number of distinct elements in specified axis. Return Series with number of distinct elements. Can ignore NaN values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. dropna : bool, default True Don't include NaN in the counts. Returns ------- Series See Also -------- Series.nunique: Method nunique for Series. DataFrame.count: Count non-NA cells for each column or row. Examples -------- >>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]}) >>> df.nunique() A 3 B 2 dtype: int64 >>> df.nunique(axis=1) 0 1 1 2 2 2 dtype: int64 """ return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series: """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. 
Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of minima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmin : Return index of the minimum element. Notes ----- This method is the DataFrame version of ``ndarray.argmin``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the minimum value in each column. >>> df.idxmin() consumption Pork co2_emissions Wheat Products dtype: object To return the index for the minimum value in each row, use ``axis="columns"``. >>> df.idxmin(axis="columns") Pork consumption Wheat Products co2_emissions Beef consumption dtype: object """ axis = self._get_axis_number(axis) res = self._reduce( nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False ) indices = res._values # indices will always be np.ndarray since axis is not None and # values is a 2d array for DataFrame # error: Item "int" of "Union[int, Any]" has no attribute "__iter__" assert isinstance(indices, np.ndarray) # for mypy index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return self._constructor_sliced(result, index=self._get_agg_axis(axis)) def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series: """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. Returns ------- Series Indexes of maxima along the specified axis. Raises ------ ValueError * If the row/column is empty See Also -------- Series.idxmax : Return index of the maximum element. Notes ----- This method is the DataFrame version of ``ndarray.argmax``. Examples -------- Consider a dataset containing food consumption in Argentina. >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], ... 'co2_emissions': [37.2, 19.66, 1712]}, ... index=['Pork', 'Wheat Products', 'Beef']) >>> df consumption co2_emissions Pork 10.51 37.20 Wheat Products 103.11 19.66 Beef 55.48 1712.00 By default, it returns the index for the maximum value in each column. >>> df.idxmax() consumption Wheat Products co2_emissions Beef dtype: object To return the index for the maximum value in each row, use ``axis="columns"``. 
>>> df.idxmax(axis="columns") Pork co2_emissions Wheat Products consumption Beef co2_emissions dtype: object """ axis = self._get_axis_number(axis) res = self._reduce( nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False ) indices = res._values # indices will always be np.ndarray since axis is not None and # values is a 2d array for DataFrame # error: Item "int" of "Union[int, Any]" has no attribute "__iter__" assert isinstance(indices, np.ndarray) # for mypy index = self._get_axis(axis) result = [index[i] if i >= 0 else np.nan for i in indices] return self._constructor_sliced(result, index=self._get_agg_axis(axis)) def _get_agg_axis(self, axis_num: int) -> Index: """ Let's be explicit about this. """ if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") def mode( self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True ) -> DataFrame: """ Get the mode(s) of each element along the selected axis. The mode of a set of values is the value that appears most often. It can be multiple values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to iterate over while searching for the mode: * 0 or 'index' : get mode of each column * 1 or 'columns' : get mode of each row. numeric_only : bool, default False If True, only apply to numeric columns. dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- DataFrame The modes of each column or row. See Also -------- Series.mode : Return the highest frequency value in a Series. Series.value_counts : Return the counts of values in a Series. Examples -------- >>> df = pd.DataFrame([('bird', 2, 2), ... ('mammal', 4, np.nan), ... ('arthropod', 8, 0), ... ('bird', 2, np.nan)], ... index=('falcon', 'horse', 'spider', 'ostrich'), ... columns=('species', 'legs', 'wings')) >>> df species legs wings falcon bird 2 2.0 horse mammal 4 NaN spider arthropod 8 0.0 ostrich bird 2 NaN By default, missing values are not considered, and the mode of wings are both 0 and 2. Because the resulting DataFrame has two rows, the second row of ``species`` and ``legs`` contains ``NaN``. >>> df.mode() species legs wings 0 bird 2.0 0.0 1 NaN NaN 2.0 Setting ``dropna=False`` ``NaN`` values are considered and they can be the mode (like for wings). >>> df.mode(dropna=False) species legs wings 0 bird 2 NaN Setting ``numeric_only=True``, only the mode of numeric columns is computed, and columns of other types are ignored. >>> df.mode(numeric_only=True) legs wings 0 2.0 0.0 1 NaN 2.0 To compute the mode over columns and not rows, use the axis parameter: >>> df.mode(axis='columns', numeric_only=True) 0 1 falcon 2.0 NaN horse 4.0 NaN spider 0.0 8.0 ostrich 2.0 NaN """ data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) data = data.apply(f, axis=axis) # Ensure index is type stable (should always use int index) if data.empty: data.index = ibase.default_index(0) return data def quantile( self, q=0.5, axis: Axis = 0, numeric_only: bool = True, interpolation: str = "linear", ): """ Return values at the given quantile over requested axis. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value between 0 <= q <= 1, the quantile(s) to compute. axis : {0, 1, 'index', 'columns'}, default 0 Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise. 
numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- Series or DataFrame If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also -------- core.window.Rolling.quantile: Rolling quantile. numpy.percentile: Numpy function to compute the percentile. Examples -------- >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), ... columns=['a', 'b']) >>> df.quantile(.1) a 1.3 b 3.7 Name: 0.1, dtype: float64 >>> df.quantile([.1, .5]) a b 0.1 1.3 3.7 0.5 2.5 55.0 Specifying `numeric_only=False` will also compute the quantile of datetime and timedelta data. >>> df = pd.DataFrame({'A': [1, 2], ... 'B': [pd.Timestamp('2010'), ... pd.Timestamp('2011')], ... 'C': [pd.Timedelta('1 days'), ... pd.Timedelta('2 days')]}) >>> df.quantile(0.5, numeric_only=False) A 1.5 B 2010-07-02 12:00:00 C 1 days 12:00:00 Name: 0.5, dtype: object """ validate_percentile(q) if not is_list_like(q): # BlockManager.quantile expects listlike, so we wrap and unwrap here res = self.quantile( [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation ) return res.iloc[0] q = Index(q, dtype=np.float64) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) if axis == 1: data = data.T if len(data.columns) == 0: # GH#23925 _get_numeric_data may have dropped all columns cols = Index([], name=self.columns.name) if is_list_like(q): return self._constructor([], index=q, columns=cols) return self._constructor_sliced([], index=cols, name=q, dtype=np.float64) res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation) result = self._constructor(res) return result @doc(NDFrame.asfreq, **_shared_doc_kwargs) def asfreq( self, freq: Frequency, method=None, how: str | None = None, normalize: bool = False, fill_value=None, ) -> DataFrame: return super().asfreq( freq=freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) @doc(NDFrame.resample, **_shared_doc_kwargs) def resample( self, rule, axis=0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, loffset=None, base: int | None = None, on=None, level=None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, ) -> Resampler: return super().resample( rule=rule, axis=axis, closed=closed, label=label, convention=convention, kind=kind, loffset=loffset, base=base, on=on, level=level, origin=origin, offset=offset, ) def to_timestamp( self, freq: Frequency | None = None, how: str = "start", axis: Axis = 0, copy: bool = True, ) -> DataFrame: """ Cast to DatetimeIndex of timestamps, at *beginning* of period. Parameters ---------- freq : str, default frequency of PeriodIndex Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period vs. end. 
axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with DatetimeIndex """ new_obj = self.copy(deep=copy) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) if not isinstance(old_ax, PeriodIndex): raise TypeError(f"unsupported Type {type(old_ax).__name__}") new_ax = old_ax.to_timestamp(freq=freq, how=how) setattr(new_obj, axis_name, new_ax) return new_obj def to_period( self, freq: Frequency | None = None, axis: Axis = 0, copy: bool = True ) -> DataFrame: """ Convert DataFrame from DatetimeIndex to PeriodIndex. Convert DataFrame from DatetimeIndex to PeriodIndex with desired frequency (inferred from index if not passed). Parameters ---------- freq : str, default Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to convert (the index by default). copy : bool, default True If False then underlying input data is not copied. Returns ------- DataFrame with PeriodIndex """ new_obj = self.copy(deep=copy) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) if not isinstance(old_ax, DatetimeIndex): raise TypeError(f"unsupported Type {type(old_ax).__name__}") new_ax = old_ax.to_period(freq=freq) setattr(new_obj, axis_name, new_ax) return new_obj def isin(self, values) -> DataFrame: """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable, Series, DataFrame or dict The result will only be true at a location if all the labels match. If `values` is a Series, that's the index. If `values` is a dict, the keys must be the column names, which must match. If `values` is a DataFrame, then both the index and column labels must match. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. See Also -------- DataFrame.eq: Equality test for DataFrame. Series.isin: Equivalent method on Series. Series.str.contains: Test if pattern or regex is contained within a string of a Series or Index. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True When ``values`` is a Series or DataFrame the index and column must match. Note that 'falcon' does not match based on the number of legs in df2. >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... 
index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings falcon True True dog False False """ if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) return concat( ( self.iloc[:, [i]].isin(values[col]) for i, col in enumerate(self.columns) ), axis=1, ) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self), axis="index") elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError("cannot compute isin with a duplicate axis.") return self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError( "only list-like or dict-like objects are allowed " "to be passed to DataFrame.isin(), " f"you passed a '{type(values).__name__}'" ) return self._constructor( algorithms.isin(self.values.ravel(), values).reshape(self.shape), self.index, self.columns, ) # ---------------------------------------------------------------------- # Add index and columns _AXIS_ORDERS = ["index", "columns"] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = { **NDFrame._AXIS_TO_AXIS_NUMBER, 1: 1, "columns": 1, } _AXIS_REVERSED = True _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number = 1 _info_axis_name = "columns" index: Index = properties.AxisProperty( axis=1, doc="The index (row labels) of the DataFrame." ) columns: Index = properties.AxisProperty( axis=0, doc="The column labels of the DataFrame." ) @property def _AXIS_NUMBERS(self) -> dict[str, int]: """.. deprecated:: 1.1.0""" super()._AXIS_NUMBERS return {"index": 0, "columns": 1} @property def _AXIS_NAMES(self) -> dict[int, str]: """.. deprecated:: 1.1.0""" super()._AXIS_NAMES return {0: "index", 1: "columns"} # ---------------------------------------------------------------------- # Add plotting methods to DataFrame plot = CachedAccessor("plot", pandas.plotting.PlotAccessor) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = CachedAccessor("sparse", SparseFrameAccessor) # ---------------------------------------------------------------------- # Internal Interface Methods def _to_dict_of_blocks(self, copy: bool = True): """ Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. Internal ONLY - only works for BlockManager """ mgr = self._mgr # convert to BlockManager if needed -> this way support ArrayManager as well mgr = mgr_to_mgr(mgr, "block") mgr = cast(BlockManager, mgr) return { k: self._constructor(v).__finalize__(self) for k, v, in mgr.to_dict(copy=copy).items() } @property def values(self) -> np.ndarray: """ Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. DataFrame.index : Retrieve the index labels. DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. 
By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]]) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object) """ self._consolidate_inplace() return self._mgr.as_array(transpose=True) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) def ffill( self: DataFrame, axis: None | Axis = None, inplace: bool = False, limit: None | int = None, downcast=None, ) -> DataFrame | None: return super().ffill(axis, inplace, limit, downcast) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) def bfill( self: DataFrame, axis: None | Axis = None, inplace: bool = False, limit: None | int = None, downcast=None, ) -> DataFrame | None: return super().bfill(axis, inplace, limit, downcast) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "lower", "upper"] ) def clip( self: DataFrame, lower=None, upper=None, axis: Axis | None = None, inplace: bool = False, *args, **kwargs, ) -> DataFrame | None: return super().clip(lower, upper, axis, inplace, *args, **kwargs) @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"]) def interpolate( self: DataFrame, method: str = "linear", axis: Axis = 0, limit: int | None = None, inplace: bool = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> DataFrame | None: return super().interpolate( method, axis, limit, inplace, limit_direction, limit_area, downcast, **kwargs, ) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) def where( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): return super().where(cond, other, inplace, axis, level, errors, try_cast) @deprecate_nonkeyword_arguments( version=None, allowed_args=["self", "cond", "other"] ) def mask( self, cond, other=np.nan, inplace=False, axis=None, level=None, errors="raise", try_cast=lib.no_default, ): return super().mask(cond, other, inplace, axis, level, errors, try_cast) DataFrame._add_numeric_operations() ops.add_flex_arithmetic_methods(DataFrame) def _from_nested_dict(data) -> collections.defaultdict: new_data: collections.defaultdict = collections.defaultdict(dict) for index, s in data.items(): for col, v in s.items(): new_data[col][index] = v return new_data def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike: # reindex if necessary if value.index.equals(index) or not len(index): return value._values.copy() # GH#4107 try: reindexed_value = value.reindex(index)._values except ValueError as err: # raised in MultiIndex.from_tuples, see test_insert_error_msmgs if not value.index.is_unique: # duplicate axis raise err raise 
TypeError( "incompatible index of inserted column with frame index" ) from err return reindexed_value
gfyoung/pandas
pandas/core/frame.py
Python
bsd-3-clause
369,149
[ "Elk" ]
3a2458d5bc79666dde7b1d06098b40069337dd441fa91a7ed60aae3780a88881
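As a quick orientation to the reduction and membership methods documented in the frame.py excerpt above (idxmin/idxmax, quantile, isin), here is a minimal usage sketch; the sample data simply mirrors the docstring examples and is not part of the source file.

import pandas as pd

# Illustrative frame mirroring the food-consumption data used in the docstrings above.
df = pd.DataFrame(
    {"consumption": [10.51, 103.11, 55.48],
     "co2_emissions": [37.2, 19.66, 1712.0]},
    index=["Pork", "Wheat Products", "Beef"],
)

print(df.idxmin())                # row label of the minimum value in each column
print(df.idxmax(axis="columns"))  # column label of the maximum value in each row
print(df.quantile([0.25, 0.75]))  # DataFrame indexed by the requested quantiles
print(df.isin([10.51, 19.66]))    # element-wise membership test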
#!/usr/bin/python """ Copyright 2012 Paul Willworth <ioscode@gmail.com> This file is part of Galaxy Harvester. Galaxy Harvester is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Galaxy Harvester is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>. """ import os import sys import re import Cookie import dbSession import dbShared import cgi import MySQLdb from xml.dom import minidom import resource import recipe # def n2n(inVal): if (inVal == '' or inVal == None or inVal == 'undefined' or inVal == 'None'): return 'NULL' else: return str(inVal) def addRecipe(conn, schematicID, recipeName, user): # Add new Recipe returnStr = "" cursor = conn.cursor() chkcursor = conn.cursor() # Make sure schematic ID is valid tempSQL = "SELECT schematicName FROM tSchematic WHERE schematicID='" + schematicID + "';" try: chkcursor.execute(tempSQL) row = chkcursor.fetchone() if row == None: returnStr = "Error: That is not a valid schematic." except Exception as e: returnStr = 'Error: Add Failed.' sys.stderr.write(str(e)) chkcursor.close() # insert new recipe if (returnStr.find("Error:") == -1): tempSQL = "INSERT INTO tRecipe (recipeName, userID, schematicID) VALUES ('" + recipeName + "','" + user + "','" + schematicID + "');" try: cursor.execute(tempSQL) result = cursor.rowcount if (result < 1): returnStr = "Error: recipe not added." else: returnStr = "Recipe added.ID" + str(cursor.lastrowid) except Exception as e: returnStr = 'Error: Add Failed.' sys.stderr.write(str(e)) conn.commit() cursor.close() return returnStr def updateRecipe(conn, recipeID, newName): # Update recipe information returnStr = "" cursor = conn.cursor() tempSQL = "UPDATE tRecipe SET recipeName='" + newName + "' WHERE recipeID=" + str(recipeID) + ";" cursor.execute(tempSQL) result = cursor.rowcount if (result < 1): returnStr = "No change in name." else: returnStr = " Recipe name updated." conn.commit() cursor.close() return returnStr def deleteRecipe(conn, recipeID, user): # Remove recipe returnStr = "" cursor = conn.cursor() tempSQL = "SELECT userID FROM tRecipe WHERE recipeID=" + recipeID + ";" cursor.execute(tempSQL) row = cursor.fetchone() if row != None: if row[0] != user: returnStr = "Error: You cannot delete that recipe because it is not yours." else: tempSQL = "DELETE FROM tRecipe WHERE recipeID=" + recipeID + ";" cursor.execute(tempSQL) tempSQL = "DELETE FROM tRecipeIngredients WHERE recipeID=" + recipeID + ";" cursor.execute(tempSQL) returnStr = "Recipe deleted." else: returnStr = "Error: That recipe could not be found." 
cursor.close() conn.commit() return returnStr def updateIngredients(conn, recipeID, ingredients, schematicID, user): # Update recipe ingredient slots returnStr = "" cursor = conn.cursor() ingredients = ingredients.split(",") ingredientsUpdated = 0 ingredientNames = "" for ingredient in ingredients: if ingredient != "" and ingredient != "clear": ingredientVals = ingredient.split(":") if len(ingredientVals) == 2 and ingredientVals[0] != "" and ingredientVals[1] != "": ingredientNames += "'" + ingredientVals[0] + "'," ingredientQuality = None # See if ingredient slot already exists tempSQL = "SELECT ingredientResource FROM tRecipeIngredients WHERE recipeID=" + str(recipeID) + " AND ingredientName='" + ingredientVals[0] + "';" cursor.execute(tempSQL) row = cursor.fetchone() if row != None: # Updated existing ingredient slot if row[0] != ingredientVals[1]: ingredientQuality = recipe.calcIngredientQuality(conn, ingredientVals[1], schematicID, user) tempSQL = "UPDATE tRecipeIngredients SET ingredientResource=" + ingredientVals[1] + ", ingredientQuality=" + n2n(ingredientQuality) + " WHERE recipeID=" + str(recipeID) + " AND ingredientName='" + ingredientVals[0] + "';" cursor.execute(tempSQL) ingredientsUpdated += cursor.rowcount else: # create new record for ingredient slot ingredientQuality = recipe.calcIngredientQuality(conn, ingredientVals[1], schematicID, user) tempSQL = "INSERT INTO tRecipeIngredients (recipeID, ingredientName, ingredientResource, ingredientQuality) VALUES (" + str(recipeID) + ",'" + ingredientVals[0] + "'," + str(ingredientVals[1]) + "," + n2n(ingredientQuality) + ");" cursor.execute(tempSQL) ingredientsUpdated += cursor.rowcount # Delete any ingredients that were not included in the current list ingredientNames += "''" tempSQL = "DELETE FROM tRecipeIngredients WHERE recipeID=" + str(recipeID) + " AND ingredientName NOT IN (" + ingredientNames + ");" cursor.execute(tempSQL) ingredientsDeleted = cursor.rowcount returnStr = " Updated " + str(ingredientsUpdated) + " ingredient slots." if ingredientsDeleted > 0: returnStr += " Cleared " + str(ingredientsDeleted) + " ingredient slots." 
conn.commit() cursor.close() return returnStr def addIngredient(conn, recipeID, spawnID, schematicID, user): # Try to add a spawn to an available ingredient slot returnStr = "" filledCandidates = 0 ingredientsUpdated = 0 s = resource.getResource(conn, 1, user, spawnID, None, None) ingCursor = conn.cursor() ingSql = "SELECT si.ingredientName, ingredientObject, ingredientResource FROM tSchematicIngredients si LEFT JOIN (SELECT ingredientName, ingredientResource FROM tRecipeIngredients WHERE recipeID=" + str(recipeID) + ") ri ON si.ingredientName = ri.ingredientName WHERE schematicID='" + schematicID + "' ORDER BY ingredientQuantity DESC, si.ingredientName;" ingCursor.execute(ingSql) ingRow = ingCursor.fetchone() while ingRow != None: if s.groupList.find("," + ingRow[1] + ",") > -1: if ingRow[2] == None: cursor = conn.cursor() # create new record for ingredient slot ingredientQuality = recipe.calcIngredientQuality(conn, spawnID, schematicID, user) tempSQL = "INSERT INTO tRecipeIngredients (recipeID, ingredientName, ingredientResource, ingredientQuality) VALUES (" + str(recipeID) + ",'" + ingRow[0] + "'," + str(spawnID) + "," + n2n(ingredientQuality) + ");" cursor.execute(tempSQL) ingredientsUpdated += cursor.rowcount cursor.close() else: filledCandidates += 1 ingRow = ingCursor.fetchone() if ingredientsUpdated > 0: returnStr = "Added " + s.spawnName + " to " + str(ingredientsUpdated) + " available slot(s)." else: if filledCandidates > 0: returnStr = "Any slots that could take the resource are already filled." else: returnStr = "That resource cannot be used in any of the slots of that schematic." conn.commit() ingCursor.close() return returnStr def main(): # Get current url try: url = os.environ['SCRIPT_NAME'] except KeyError: url = '' form = cgi.FieldStorage() # Get Cookies useCookies = 1 cookies = Cookie.SimpleCookie() try: cookies.load(os.environ['HTTP_COOKIE']) except KeyError: useCookies = 0 if useCookies: try: currentUser = cookies['userID'].value except KeyError: currentUser = '' try: loginResult = cookies['loginAttempt'].value except KeyError: loginResult = 'success' try: sid = cookies['gh_sid'].value except KeyError: sid = form.getfirst('gh_sid', '') else: currentUser = '' loginResult = 'success' sid = form.getfirst('gh_sid', '') # Get form info schematic = form.getfirst("schematic", "") recipeName = form.getfirst("recipeName", "") recipeID = form.getfirst("recipeID", "") ingredients = form.getfirst("ingredients", "") operation = form.getfirst("op", "") spawnID = form.getfirst("spawnID", "") # escape input to prevent sql injection sid = dbShared.dbInsertSafe(sid) schematic = dbShared.dbInsertSafe(schematic) recipeName = dbShared.dbInsertSafe(recipeName) recipeID = dbShared.dbInsertSafe(recipeID) ingredients = dbShared.dbInsertSafe(ingredients) spawnID = dbShared.dbInsertSafe(spawnID) result = "" # Get a session logged_state = 0 sess = dbSession.getSession(sid, 2592000) if (sess != ''): logged_state = 1 currentUser = sess # Check for errors errstr = "" if recipeName == "" and operation == "": errstr = "Error: You must provide a name for the recipe." if schematic == "" and recipeID == "": errstr = "Error: You must select a schematic to base the recipe on." if logged_state != 1: errstr = "Error: You must be logged in to do that." 
# Only process if no errors if (errstr == ""): result = "" if (logged_state > 0): conn = dbShared.ghConn() if schematic == "": # Make sure user owns recipe chkcursor = conn.cursor() tempSQL = "SELECT userID, schematicID FROM tRecipe WHERE recipeID=" + recipeID + ";" chkcursor.execute(tempSQL) row = chkcursor.fetchone() if row != None: if row[0] == currentUser: if operation == "delete": result = deleteRecipe(conn, recipeID, currentUser) elif operation == "addspawn": result = addIngredient(conn, recipeID, spawnID, row[1], currentUser) else: result = updateRecipe(conn, recipeID, recipeName) if ingredients != "": result += updateIngredients(conn, recipeID, ingredients, row[1], currentUser) else: result = "Error: You do not own that recipe." else: result = "Error: That recipe does not exist." chkcursor.close() else: result = addRecipe(conn, schematic, recipeName, currentUser) tmpPos = result.find("ID") # Save and strip ID on successful add if tmpPos > -1: recipeID = result[tmpPos+2:] result = result[:tmpPos] # Update ingredients if they were provided (saving suggestion) if ingredients != '': result += updateIngredients(conn, recipeID, ingredients, schematic, currentUser) conn.close() else: result = "Error: must be logged in to do that." else: result = errstr print 'Content-type: text/xml\n' doc = minidom.Document() eRoot = doc.createElement("result") doc.appendChild(eRoot) eName = doc.createElement("recipeID") tName = doc.createTextNode(str(recipeID)) eName.appendChild(tName) eRoot.appendChild(eName) eText = doc.createElement("resultText") tText = doc.createTextNode(result) eText.appendChild(tText) eRoot.appendChild(eText) print doc.toxml() if (result.find("Error:") > -1): sys.exit(500) else: sys.exit(200) if __name__ == "__main__": main()
clreinki/GalaxyHarvester
postRecipe.py
Python
agpl-3.0
10,852
[ "Galaxy" ]
4854eae057079c6e11aed43feee0a154f3c33168d8d89a58e9823a76ca77d36b
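The postRecipe.py script above assembles its SQL by string concatenation and relies on dbShared.dbInsertSafe for escaping. Purely as an illustrative alternative (not part of Galaxy Harvester), the hypothetical helper below sketches the same tRecipe insert using MySQLdb parameterized queries, so the driver handles quoting itself; the table and column names are taken from the script, everything else is assumed.

import MySQLdb

def add_recipe_parameterized(conn, schematic_id, recipe_name, user_id):
    # Hypothetical variant of addRecipe(): the %s placeholders are filled in by
    # the driver, so user-supplied values never need manual escaping.
    cursor = conn.cursor()
    cursor.execute("SELECT schematicName FROM tSchematic WHERE schematicID=%s",
                   (schematic_id,))
    if cursor.fetchone() is None:
        cursor.close()
        return "Error: That is not a valid schematic."
    cursor.execute(
        "INSERT INTO tRecipe (recipeName, userID, schematicID) VALUES (%s, %s, %s)",
        (recipe_name, user_id, schematic_id))
    new_id = cursor.lastrowid
    conn.commit()
    cursor.close()
    return "Recipe added.ID" + str(new_id)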
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Code snippets used in webdocs. The examples here are written specifically to read well with the accompanying web docs. Do not rewrite them until you make sure the webdocs still read well and the rewritten code supports the concept being described. For example, there are snippets that could be shorter but they are written like this to make a specific point in the docs. The code snippets are all organized as self contained functions. Parts of the function body delimited by [START tag] and [END tag] will be included automatically in the web docs. The naming convention for the tags is to have as prefix the PATH_TO_HTML where they are included followed by a descriptive string. The tags can contain only letters, digits and _. """ import apache_beam as beam from apache_beam.io import iobase from apache_beam.io.range_trackers import OffsetRangeTracker from apache_beam.metrics import Metrics from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.testing.test_pipeline import TestPipeline from apache_beam.testing.util import assert_that from apache_beam.testing.util import equal_to from apache_beam.transforms.core import PTransform # Quiet some pylint warnings that happen because of the somewhat special # format for the code snippets. # pylint:disable=invalid-name # pylint:disable=expression-not-assigned # pylint:disable=redefined-outer-name # pylint:disable=reimported # pylint:disable=unused-variable # pylint:disable=wrong-import-order, wrong-import-position class SnippetUtils(object): from apache_beam.pipeline import PipelineVisitor class RenameFiles(PipelineVisitor): """RenameFiles will rewire read/write paths for unit testing. RenameFiles will replace the GCS files specified in the read and write transforms to local files so the pipeline can be run as a unit test. This assumes that read and write transforms defined in snippets have already been replaced by transforms 'DummyReadForTesting' and 'DummyReadForTesting' (see snippets_test.py). This is as close as we can get to have code snippets that are executed and are also ready to presented in webdocs. 
""" def __init__(self, renames): self.renames = renames def visit_transform(self, transform_node): if transform_node.full_label.find('DummyReadForTesting') >= 0: transform_node.transform.fn.file_to_read = self.renames['read'] elif transform_node.full_label.find('DummyWriteForTesting') >= 0: transform_node.transform.fn.file_to_write = self.renames['write'] def construct_pipeline(renames): """A reverse words snippet as an example for constructing a pipeline.""" import re # This is duplicate of the import statement in # pipelines_constructing_creating tag below, but required to avoid # Unresolved reference in ReverseWords class import apache_beam as beam class ReverseWords(beam.PTransform): """A PTransform that reverses individual elements in a PCollection.""" def expand(self, pcoll): return pcoll | beam.Map(lambda e: e[::-1]) def filter_words(unused_x): """Pass through filter to select everything.""" return True # [START pipelines_constructing_creating] import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions p = beam.Pipeline(options=PipelineOptions()) # [END pipelines_constructing_creating] p = TestPipeline() # Use TestPipeline for testing. # [START pipelines_constructing_reading] lines = p | 'ReadMyFile' >> beam.io.ReadFromText('gs://some/inputData.txt') # [END pipelines_constructing_reading] # [START pipelines_constructing_applying] words = lines | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x)) reversed_words = words | ReverseWords() # [END pipelines_constructing_applying] # [START pipelines_constructing_writing] filtered_words = reversed_words | 'FilterWords' >> beam.Filter(filter_words) filtered_words | 'WriteMyFile' >> beam.io.WriteToText( 'gs://some/outputData.txt') # [END pipelines_constructing_writing] p.visit(SnippetUtils.RenameFiles(renames)) # [START pipelines_constructing_running] p.run() # [END pipelines_constructing_running] def model_pipelines(argv): """A wordcount snippet as a simple pipeline example.""" # [START model_pipelines] import re import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions class MyOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--input', dest='input', default='gs://dataflow-samples/shakespeare/kinglear' '.txt', help='Input file to process.') parser.add_argument('--output', dest='output', required=True, help='Output file to write results to.') pipeline_options = PipelineOptions(argv) my_options = pipeline_options.view_as(MyOptions) with beam.Pipeline(options=pipeline_options) as p: (p | beam.io.ReadFromText(my_options.input) | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x)) | beam.Map(lambda x: (x, 1)) | beam.combiners.Count.PerKey() | beam.io.WriteToText(my_options.output)) # [END model_pipelines] def model_pcollection(argv): """Creating a PCollection from data in local memory.""" from apache_beam.options.pipeline_options import PipelineOptions class MyOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--output', dest='output', required=True, help='Output file to write results to.') pipeline_options = PipelineOptions(argv) my_options = pipeline_options.view_as(MyOptions) # [START model_pcollection] with beam.Pipeline(options=pipeline_options) as p: lines = (p | beam.Create([ 'To be, or not to be: that is the question: ', 'Whether \'tis nobler in the mind to suffer ', 'The slings and arrows of outrageous fortune, ', 'Or to take arms against a sea of troubles, '])) # [END 
model_pcollection] (lines | beam.io.WriteToText(my_options.output)) def pipeline_options_remote(argv): """Creating a Pipeline using a PipelineOptions object for remote execution.""" from apache_beam import Pipeline from apache_beam.options.pipeline_options import PipelineOptions # [START pipeline_options_create] options = PipelineOptions(flags=argv) # [END pipeline_options_create] # [START pipeline_options_define_custom] class MyOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--input') parser.add_argument('--output') # [END pipeline_options_define_custom] from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.options.pipeline_options import StandardOptions # [START pipeline_options_dataflow_service] # Create and set your PipelineOptions. options = PipelineOptions(flags=argv) # For Cloud execution, set the Cloud Platform project, job_name, # staging location, temp_location and specify DataflowRunner. google_cloud_options = options.view_as(GoogleCloudOptions) google_cloud_options.project = 'my-project-id' google_cloud_options.job_name = 'myjob' google_cloud_options.staging_location = 'gs://my-bucket/binaries' google_cloud_options.temp_location = 'gs://my-bucket/temp' options.view_as(StandardOptions).runner = 'DataflowRunner' # Create the Pipeline with the specified options. p = Pipeline(options=options) # [END pipeline_options_dataflow_service] my_options = options.view_as(MyOptions) my_input = my_options.input my_output = my_options.output p = TestPipeline() # Use TestPipeline for testing. lines = p | beam.io.ReadFromText(my_input) lines | beam.io.WriteToText(my_output) p.run() def pipeline_options_local(argv): """Creating a Pipeline using a PipelineOptions object for local execution.""" from apache_beam import Pipeline from apache_beam.options.pipeline_options import PipelineOptions options = PipelineOptions(flags=argv) # [START pipeline_options_define_custom_with_help_and_default] class MyOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--input', help='Input for the pipeline', default='gs://my-bucket/input') parser.add_argument('--output', help='Output for the pipeline', default='gs://my-bucket/output') # [END pipeline_options_define_custom_with_help_and_default] my_options = options.view_as(MyOptions) my_input = my_options.input my_output = my_options.output # [START pipeline_options_local] # Create and set your Pipeline Options. options = PipelineOptions() p = Pipeline(options=options) # [END pipeline_options_local] p = TestPipeline() # Use TestPipeline for testing. lines = p | beam.io.ReadFromText(my_input) lines | beam.io.WriteToText(my_output) p.run() def pipeline_options_command_line(argv): """Creating a Pipeline by passing a list of arguments.""" # [START pipeline_options_command_line] # Use Python argparse module to parse custom arguments import argparse parser = argparse.ArgumentParser() parser.add_argument('--input') parser.add_argument('--output') known_args, pipeline_args = parser.parse_known_args(argv) # Create the Pipeline with remaining arguments. with beam.Pipeline(argv=pipeline_args) as p: lines = p | 'ReadFromText' >> beam.io.ReadFromText(known_args.input) lines | 'WriteToText' >> beam.io.WriteToText(known_args.output) # [END pipeline_options_command_line] def pipeline_logging(lines, output): """Logging Pipeline Messages.""" import re import apache_beam as beam # [START pipeline_logging] # import Python logging module. 
import logging class ExtractWordsFn(beam.DoFn): def process(self, element): words = re.findall(r'[A-Za-z\']+', element) for word in words: yield word if word.lower() == 'love': # Log using the root logger at info or higher levels logging.info('Found : %s', word.lower()) # Remaining WordCount example code ... # [END pipeline_logging] with TestPipeline() as p: # Use TestPipeline for testing. (p | beam.Create(lines) | beam.ParDo(ExtractWordsFn()) | beam.io.WriteToText(output)) def pipeline_monitoring(renames): """Using monitoring interface snippets.""" import re import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions class WordCountOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--input', help='Input for the pipeline', default='gs://my-bucket/input') parser.add_argument('--output', help='output for the pipeline', default='gs://my-bucket/output') class ExtractWordsFn(beam.DoFn): def process(self, element): words = re.findall(r'[A-Za-z\']+', element) for word in words: yield word class FormatCountsFn(beam.DoFn): def process(self, element): word, count = element yield '%s: %s' % (word, count) # [START pipeline_monitoring_composite] # The CountWords Composite Transform inside the WordCount pipeline. class CountWords(beam.PTransform): def expand(self, pcoll): return (pcoll # Convert lines of text into individual words. | 'ExtractWords' >> beam.ParDo(ExtractWordsFn()) # Count the number of times each word occurs. | beam.combiners.Count.PerElement() # Format each word and count into a printable string. | 'FormatCounts' >> beam.ParDo(FormatCountsFn())) # [END pipeline_monitoring_composite] pipeline_options = PipelineOptions() options = pipeline_options.view_as(WordCountOptions) with TestPipeline() as p: # Use TestPipeline for testing. # [START pipeline_monitoring_execution] (p # Read the lines of the input text. | 'ReadLines' >> beam.io.ReadFromText(options.input) # Count the words. | CountWords() # Write the formatted word counts to output. | 'WriteCounts' >> beam.io.WriteToText(options.output)) # [END pipeline_monitoring_execution] p.visit(SnippetUtils.RenameFiles(renames)) def examples_wordcount_minimal(renames): """MinimalWordCount example snippets.""" import re import apache_beam as beam from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.options.pipeline_options import StandardOptions from apache_beam.options.pipeline_options import PipelineOptions # [START examples_wordcount_minimal_options] options = PipelineOptions() google_cloud_options = options.view_as(GoogleCloudOptions) google_cloud_options.project = 'my-project-id' google_cloud_options.job_name = 'myjob' google_cloud_options.staging_location = 'gs://your-bucket-name-here/staging' google_cloud_options.temp_location = 'gs://your-bucket-name-here/temp' options.view_as(StandardOptions).runner = 'DataflowRunner' # [END examples_wordcount_minimal_options] # Run it locally for testing. 
options = PipelineOptions() # [START examples_wordcount_minimal_create] p = beam.Pipeline(options=options) # [END examples_wordcount_minimal_create] ( # [START examples_wordcount_minimal_read] p | beam.io.ReadFromText( 'gs://dataflow-samples/shakespeare/kinglear.txt') # [END examples_wordcount_minimal_read] # [START examples_wordcount_minimal_pardo] | 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x)) # [END examples_wordcount_minimal_pardo] # [START examples_wordcount_minimal_count] | beam.combiners.Count.PerElement() # [END examples_wordcount_minimal_count] # [START examples_wordcount_minimal_map] | beam.Map(lambda word_count: '%s: %s' % (word_count[0], word_count[1])) # [END examples_wordcount_minimal_map] # [START examples_wordcount_minimal_write] | beam.io.WriteToText('gs://my-bucket/counts.txt') # [END examples_wordcount_minimal_write] ) p.visit(SnippetUtils.RenameFiles(renames)) # [START examples_wordcount_minimal_run] result = p.run() # [END examples_wordcount_minimal_run] result.wait_until_finish() def examples_wordcount_wordcount(renames): """WordCount example snippets.""" import re import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions argv = [] # [START examples_wordcount_wordcount_options] class WordCountOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument('--input', help='Input for the pipeline', default='gs://my-bucket/input') options = PipelineOptions(argv) with beam.Pipeline(options=options) as p: # [END examples_wordcount_wordcount_options] lines = p | beam.io.ReadFromText( 'gs://dataflow-samples/shakespeare/kinglear.txt') # [START examples_wordcount_wordcount_composite] class CountWords(beam.PTransform): def expand(self, pcoll): return (pcoll # Convert lines of text into individual words. | 'ExtractWords' >> beam.FlatMap( lambda x: re.findall(r'[A-Za-z\']+', x)) # Count the number of times each word occurs. 
| beam.combiners.Count.PerElement()) counts = lines | CountWords() # [END examples_wordcount_wordcount_composite] # [START examples_wordcount_wordcount_dofn] class FormatAsTextFn(beam.DoFn): def process(self, element): word, count = element yield '%s: %s' % (word, count) formatted = counts | beam.ParDo(FormatAsTextFn()) # [END examples_wordcount_wordcount_dofn] formatted | beam.io.WriteToText('gs://my-bucket/counts.txt') p.visit(SnippetUtils.RenameFiles(renames)) def examples_wordcount_templated(renames): """Templated WordCount example snippet.""" import re import apache_beam as beam from apache_beam.io import ReadFromText from apache_beam.io import WriteToText from apache_beam.options.pipeline_options import PipelineOptions # [START example_wordcount_templated] class WordcountTemplatedOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): # Use add_value_provider_argument for arguments to be templatable # Use add_argument as usual for non-templatable arguments parser.add_value_provider_argument( '--input', help='Path of the file to read from') parser.add_argument( '--output', required=True, help='Output file to write results to.') pipeline_options = PipelineOptions(['--output', 'some/output_path']) p = beam.Pipeline(options=pipeline_options) wordcount_options = pipeline_options.view_as(WordcountTemplatedOptions) lines = p | 'Read' >> ReadFromText(wordcount_options.input) # [END example_wordcount_templated] def format_result(word_count): (word, count) = word_count return '%s: %s' % (word, count) ( lines | 'ExtractWords' >> beam.FlatMap( lambda x: re.findall(r'[A-Za-z\']+', x)) | 'PairWithOnes' >> beam.Map(lambda x: (x, 1)) | 'Group' >> beam.GroupByKey() | 'Sum' >> beam.Map(lambda word_ones: (word_ones[0], sum(word_ones[1]))) | 'Format' >> beam.Map(format_result) | 'Write' >> WriteToText(wordcount_options.output) ) p.visit(SnippetUtils.RenameFiles(renames)) result = p.run() result.wait_until_finish() def examples_wordcount_debugging(renames): """DebuggingWordCount example snippets.""" import re import apache_beam as beam # [START example_wordcount_debugging_logging] # [START example_wordcount_debugging_aggregators] import logging class FilterTextFn(beam.DoFn): """A DoFn that filters for a specific key based on a regular expression.""" def __init__(self, pattern): self.pattern = pattern # A custom metric can track values in your pipeline as it runs. Create # custom metrics matched_word and unmatched_words. self.matched_words = Metrics.counter(self.__class__, 'matched_words') self.umatched_words = Metrics.counter(self.__class__, 'umatched_words') def process(self, element): word, _ = element if re.match(self.pattern, word): # Log at INFO level each element we match. When executing this pipeline # using the Dataflow service, these log lines will appear in the Cloud # Logging UI. logging.info('Matched %s', word) # Add 1 to the custom metric counter matched_words self.matched_words.inc() yield element else: # Log at the "DEBUG" level each element that is not matched. Different # log levels can be used to control the verbosity of logging providing # an effective mechanism to filter less important information. Note # currently only "INFO" and higher level logs are emitted to the Cloud # Logger. This log message will not be visible in the Cloud Logger. 
logging.debug('Did not match %s', word) # Add 1 to the custom metric counter umatched_words self.umatched_words.inc() # [END example_wordcount_debugging_logging] # [END example_wordcount_debugging_aggregators] with TestPipeline() as p: # Use TestPipeline for testing. filtered_words = ( p | beam.io.ReadFromText( 'gs://dataflow-samples/shakespeare/kinglear.txt') | 'ExtractWords' >> beam.FlatMap( lambda x: re.findall(r'[A-Za-z\']+', x)) | beam.combiners.Count.PerElement() | 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach'))) # [START example_wordcount_debugging_assert] beam.testing.util.assert_that( filtered_words, beam.testing.util.equal_to( [('Flourish', 3), ('stomach', 1)])) # [END example_wordcount_debugging_assert] def format_result(word_count): (word, count) = word_count return '%s: %s' % (word, count) output = (filtered_words | 'format' >> beam.Map(format_result) | 'Write' >> beam.io.WriteToText('gs://my-bucket/counts.txt')) p.visit(SnippetUtils.RenameFiles(renames)) def examples_ptransforms_templated(renames): # [START examples_ptransforms_templated] import apache_beam as beam from apache_beam.io import WriteToText from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.value_provider import StaticValueProvider class TemplatedUserOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_value_provider_argument('--templated_int', type=int) class MySumFn(beam.DoFn): def __init__(self, templated_int): self.templated_int = templated_int def process(self, an_int): yield self.templated_int.get() + an_int pipeline_options = PipelineOptions() p = beam.Pipeline(options=pipeline_options) user_options = pipeline_options.view_as(TemplatedUserOptions) my_sum_fn = MySumFn(user_options.templated_int) sum = (p | 'ReadCollection' >> beam.io.ReadFromText( 'gs://some/integer_collection') | 'StringToInt' >> beam.Map(lambda w: int(w)) | 'AddGivenInt' >> beam.ParDo(my_sum_fn) | 'WriteResultingCollection' >> WriteToText('some/output_path')) # [END examples_ptransforms_templated] # Templates are not supported by DirectRunner (only by DataflowRunner) # so a value must be provided at graph-construction time my_sum_fn.templated_int = StaticValueProvider(int, 10) p.visit(SnippetUtils.RenameFiles(renames)) result = p.run() result.wait_until_finish() # Defining a new source. 
# [START model_custom_source_new_source] class CountingSource(iobase.BoundedSource): def __init__(self, count): self.records_read = Metrics.counter(self.__class__, 'recordsRead') self._count = count def estimate_size(self): return self._count def get_range_tracker(self, start_position, stop_position): if start_position is None: start_position = 0 if stop_position is None: stop_position = self._count return OffsetRangeTracker(start_position, stop_position) def read(self, range_tracker): for i in range(self._count): if not range_tracker.try_claim(i): return self.records_read.inc() yield i def split(self, desired_bundle_size, start_position=None, stop_position=None): if start_position is None: start_position = 0 if stop_position is None: stop_position = self._count bundle_start = start_position while bundle_start < self._count: bundle_stop = max(self._count, bundle_start + desired_bundle_size) yield iobase.SourceBundle(weight=(bundle_stop - bundle_start), source=self, start_position=bundle_start, stop_position=bundle_stop) bundle_start = bundle_stop # [END model_custom_source_new_source] def model_custom_source(count): """Demonstrates creating a new custom source and using it in a pipeline. Defines a new source ``CountingSource`` that produces integers starting from 0 up to a given size. Uses the new source in an example pipeline. Additionally demonstrates how a source should be implemented using a ``PTransform``. This is the recommended way to develop sources that are to distributed to a large number of end users. This method runs two pipelines. (1) A pipeline that uses ``CountingSource`` directly using the ``df.Read`` transform. (2) A pipeline that uses a custom ``PTransform`` that wraps ``CountingSource``. Args: count: the size of the counting source to be used in the pipeline demonstrated in this method. """ # Using the source in an example pipeline. # [START model_custom_source_use_new_source] with beam.Pipeline(options=PipelineOptions()) as p: numbers = p | 'ProduceNumbers' >> beam.io.Read(CountingSource(count)) # [END model_custom_source_use_new_source] lines = numbers | beam.core.Map(lambda number: 'line %d' % number) assert_that( lines, equal_to( ['line ' + str(number) for number in range(0, count)])) # We recommend users to start Source classes with an underscore to discourage # using the Source class directly when a PTransform for the source is # available. We simulate that here by simply extending the previous Source # class. class _CountingSource(CountingSource): pass # [START model_custom_source_new_ptransform] class ReadFromCountingSource(PTransform): def __init__(self, count, **kwargs): super(ReadFromCountingSource, self).__init__(**kwargs) self._count = count def expand(self, pcoll): return pcoll | iobase.Read(_CountingSource(count)) # [END model_custom_source_new_ptransform] # [START model_custom_source_use_ptransform] p = beam.Pipeline(options=PipelineOptions()) numbers = p | 'ProduceNumbers' >> ReadFromCountingSource(count) # [END model_custom_source_use_ptransform] lines = numbers | beam.core.Map(lambda number: 'line %d' % number) assert_that( lines, equal_to( ['line ' + str(number) for number in range(0, count)])) # Don't test runner api due to pickling errors. p.run(test_runner_api=False).wait_until_finish() def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform, final_table_name_with_ptransform): """Demonstrates creating a new custom sink and using it in a pipeline. 
Defines a new sink ``SimpleKVSink`` that demonstrates writing to a simple key-value based storage system which has following API. simplekv.connect(url) - connects to the storage system and returns an access token which can be used to perform further operations simplekv.open_table(access_token, table_name) - creates a table named 'table_name'. Returns a table object. simplekv.write_to_table(access_token, table, key, value) - writes a key-value pair to the given table. simplekv.rename_table(access_token, old_name, new_name) - renames the table named 'old_name' to 'new_name'. Uses the new sink in an example pipeline. Additionally demonstrates how a sink should be implemented using a ``PTransform``. This is the recommended way to develop sinks that are to be distributed to a large number of end users. This method runs two pipelines. (1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write`` transform. (2) A pipeline that uses a custom ``PTransform`` that wraps ``SimpleKVSink``. Args: simplekv: an object that mocks the key-value storage. KVs: the set of key-value pairs to be written in the example pipeline. final_table_name_no_ptransform: the prefix of final set of tables to be created by the example pipeline that uses ``SimpleKVSink`` directly. final_table_name_with_ptransform: the prefix of final set of tables to be created by the example pipeline that uses a ``PTransform`` that wraps ``SimpleKVSink``. """ import apache_beam as beam from apache_beam.io import iobase from apache_beam.transforms.core import PTransform from apache_beam.options.pipeline_options import PipelineOptions # Defining the new sink. # [START model_custom_sink_new_sink] class SimpleKVSink(iobase.Sink): def __init__(self, url, final_table_name): self._url = url self._final_table_name = final_table_name def initialize_write(self): access_token = simplekv.connect(self._url) return access_token def open_writer(self, access_token, uid): table_name = 'table' + uid return SimpleKVWriter(access_token, table_name) def finalize_write(self, access_token, table_names): for i, table_name in enumerate(table_names): simplekv.rename_table( access_token, table_name, self._final_table_name + str(i)) # [END model_custom_sink_new_sink] # Defining a writer for the new sink. # [START model_custom_sink_new_writer] class SimpleKVWriter(iobase.Writer): def __init__(self, access_token, table_name): self._access_token = access_token self._table_name = table_name self._table = simplekv.open_table(access_token, table_name) def write(self, record): key, value = record simplekv.write_to_table(self._access_token, self._table, key, value) def close(self): return self._table_name # [END model_custom_sink_new_writer] final_table_name = final_table_name_no_ptransform # Using the new sink in an example pipeline. # [START model_custom_sink_use_new_sink] with beam.Pipeline(options=PipelineOptions()) as p: kvs = p | 'CreateKVs' >> beam.Create(KVs) kvs | 'WriteToSimpleKV' >> beam.io.Write( SimpleKVSink('http://url_to_simple_kv/', final_table_name)) # [END model_custom_sink_use_new_sink] # We recommend users to start Sink class names with an underscore to # discourage using the Sink class directly when a PTransform for the sink is # available. We simulate that here by simply extending the previous Sink # class. 
class _SimpleKVSink(SimpleKVSink): pass # [START model_custom_sink_new_ptransform] class WriteToKVSink(PTransform): def __init__(self, url, final_table_name, **kwargs): super(WriteToKVSink, self).__init__(**kwargs) self._url = url self._final_table_name = final_table_name def expand(self, pcoll): return pcoll | iobase.Write(_SimpleKVSink(self._url, self._final_table_name)) # [END model_custom_sink_new_ptransform] final_table_name = final_table_name_with_ptransform # [START model_custom_sink_use_ptransform] with beam.Pipeline(options=PipelineOptions()) as p: kvs = p | 'CreateKVs' >> beam.core.Create(KVs) kvs | 'WriteToSimpleKV' >> WriteToKVSink( 'http://url_to_simple_kv/', final_table_name) # [END model_custom_sink_use_ptransform] def model_textio(renames): """Using a Read and Write transform to read/write text files.""" def filter_words(x): import re return re.findall(r'[A-Za-z\']+', x) import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions # [START model_textio_read] with beam.Pipeline(options=PipelineOptions()) as p: # [START model_pipelineio_read] lines = p | 'ReadFromText' >> beam.io.ReadFromText('path/to/input-*.csv') # [END model_pipelineio_read] # [END model_textio_read] # [START model_textio_write] filtered_words = lines | 'FilterWords' >> beam.FlatMap(filter_words) # [START model_pipelineio_write] filtered_words | 'WriteToText' >> beam.io.WriteToText( '/path/to/numbers', file_name_suffix='.csv') # [END model_pipelineio_write] # [END model_textio_write] p.visit(SnippetUtils.RenameFiles(renames)) def model_textio_compressed(renames, expected): """Using a Read Transform to read compressed text files.""" with TestPipeline() as p: # [START model_textio_write_compressed] lines = p | 'ReadFromText' >> beam.io.ReadFromText( '/path/to/input-*.csv.gz', compression_type=beam.io.filesystem.CompressionTypes.GZIP) # [END model_textio_write_compressed] assert_that(lines, equal_to(expected)) p.visit(SnippetUtils.RenameFiles(renames)) def model_datastoreio(): """Using a Read and Write transform to read/write to Cloud Datastore.""" import uuid from google.cloud.proto.datastore.v1 import entity_pb2 from google.cloud.proto.datastore.v1 import query_pb2 import googledatastore import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore project = 'my_project' kind = 'my_kind' query = query_pb2.Query() query.kind.add().name = kind # [START model_datastoreio_read] p = beam.Pipeline(options=PipelineOptions()) entities = p | 'Read From Datastore' >> ReadFromDatastore(project, query) # [END model_datastoreio_read] # [START model_datastoreio_write] p = beam.Pipeline(options=PipelineOptions()) musicians = p | 'Musicians' >> beam.Create( ['Mozart', 'Chopin', 'Beethoven', 'Vivaldi']) def to_entity(content): entity = entity_pb2.Entity() googledatastore.helper.add_key_path(entity.key, kind, str(uuid.uuid4())) googledatastore.helper.add_properties(entity, {'content': unicode(content)}) return entity entities = musicians | 'To Entity' >> beam.Map(to_entity) entities | 'Write To Datastore' >> WriteToDatastore(project) # [END model_datastoreio_write] def model_bigqueryio(): """Using a Read and Write transform to read/write to BigQuery.""" import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions # [START model_bigqueryio_read] p = beam.Pipeline(options=PipelineOptions()) 
weather_data = p | 'ReadWeatherStations' >> beam.io.Read( beam.io.BigQuerySource( 'clouddataflow-readonly:samples.weather_stations')) # [END model_bigqueryio_read] # [START model_bigqueryio_query] p = beam.Pipeline(options=PipelineOptions()) weather_data = p | 'ReadYearAndTemp' >> beam.io.Read( beam.io.BigQuerySource( query='SELECT year, mean_temp FROM samples.weather_stations')) # [END model_bigqueryio_query] # [START model_bigqueryio_query_standard_sql] p = beam.Pipeline(options=PipelineOptions()) weather_data = p | 'ReadYearAndTemp' >> beam.io.Read( beam.io.BigQuerySource( query='SELECT year, mean_temp FROM `samples.weather_stations`', use_standard_sql=True)) # [END model_bigqueryio_query_standard_sql] # [START model_bigqueryio_schema] schema = 'source:STRING, quote:STRING' # [END model_bigqueryio_schema] # [START model_bigqueryio_write] quotes = p | beam.Create( [{'source': 'Mahatma Ghandi', 'quote': 'My life is my message.'}]) quotes | 'Write' >> beam.io.Write( beam.io.BigQuerySink( 'my-project:output.output_table', schema=schema, write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE, create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)) # [END model_bigqueryio_write] def model_composite_transform_example(contents, output_path): """Example of a composite transform. To declare a composite transform, define a subclass of PTransform. To override the apply method, define a method "apply" that takes a PCollection as its only parameter and returns a PCollection. """ import re import apache_beam as beam # [START composite_transform_example] # [START composite_ptransform_apply_method] # [START composite_ptransform_declare] class CountWords(beam.PTransform): # [END composite_ptransform_declare] def expand(self, pcoll): return (pcoll | beam.FlatMap(lambda x: re.findall(r'\w+', x)) | beam.combiners.Count.PerElement() | beam.Map(lambda word_c: '%s: %s' % (word_c[0], word_c[1]))) # [END composite_ptransform_apply_method] # [END composite_transform_example] with TestPipeline() as p: # Use TestPipeline for testing. (p | beam.Create(contents) | CountWords() | beam.io.WriteToText(output_path)) def model_multiple_pcollections_flatten(contents, output_path): """Merging a PCollection with Flatten.""" some_hash_fn = lambda s: ord(s[0]) partition_fn = lambda element, partitions: some_hash_fn(element) % partitions import apache_beam as beam with TestPipeline() as p: # Use TestPipeline for testing. # Partition into deciles partitioned = p | beam.Create(contents) | beam.Partition(partition_fn, 3) pcoll1 = partitioned[0] pcoll2 = partitioned[1] pcoll3 = partitioned[2] # Flatten them back into 1 # A collection of PCollection objects can be represented simply # as a tuple (or list) of PCollections. # (The SDK for Python has no separate type to store multiple # PCollection objects, whether containing the same or different # types.) # [START model_multiple_pcollections_flatten] merged = ( (pcoll1, pcoll2, pcoll3) # A list of tuples can be "piped" directly into a Flatten transform. | beam.Flatten()) # [END model_multiple_pcollections_flatten] merged | beam.io.WriteToText(output_path) def model_multiple_pcollections_partition(contents, output_path): """Splitting a PCollection with Partition.""" some_hash_fn = lambda s: ord(s[0]) def get_percentile(i): """Assume i in [0,100).""" return i import apache_beam as beam with TestPipeline() as p: # Use TestPipeline for testing. 
students = p | beam.Create(contents) # [START model_multiple_pcollections_partition] def partition_fn(student, num_partitions): return int(get_percentile(student) * num_partitions / 100) by_decile = students | beam.Partition(partition_fn, 10) # [END model_multiple_pcollections_partition] # [START model_multiple_pcollections_partition_40th] fortieth_percentile = by_decile[4] # [END model_multiple_pcollections_partition_40th] ([by_decile[d] for d in xrange(10) if d != 4] + [fortieth_percentile] | beam.Flatten() | beam.io.WriteToText(output_path)) def model_group_by_key(contents, output_path): """Applying a GroupByKey Transform.""" import re import apache_beam as beam with TestPipeline() as p: # Use TestPipeline for testing. def count_ones(word_ones): (word, ones) = word_ones return (word, sum(ones)) words_and_counts = ( p | beam.Create(contents) | beam.FlatMap(lambda x: re.findall(r'\w+', x)) | 'one word' >> beam.Map(lambda w: (w, 1))) # GroupByKey accepts a PCollection of (w, 1) and # outputs a PCollection of (w, (1, 1, ...)). # (A key/value pair is just a tuple in Python.) # This is a somewhat forced example, since one could # simply use beam.combiners.Count.PerElement here. # [START model_group_by_key_transform] grouped_words = words_and_counts | beam.GroupByKey() # [END model_group_by_key_transform] (grouped_words | 'count words' >> beam.Map(count_ones) | beam.io.WriteToText(output_path)) def model_co_group_by_key_tuple(emails, phones, output_path): """Applying a CoGroupByKey Transform to a tuple.""" import apache_beam as beam # [START model_group_by_key_cogroupbykey_tuple] # The result PCollection contains one key-value element for each key in the # input PCollections. The key of the pair will be the key from the input and # the value will be a dictionary with two entries: 'emails' - an iterable of # all values for the current key in the emails PCollection and 'phones': an # iterable of all values for the current key in the phones PCollection. results = ({'emails': emails, 'phones': phones} | beam.CoGroupByKey()) def join_info(name_info): (name, info) = name_info return '%s; %s; %s' %\ (name, sorted(info['emails']), sorted(info['phones'])) contact_lines = results | beam.Map(join_info) # [END model_group_by_key_cogroupbykey_tuple] contact_lines | beam.io.WriteToText(output_path) def model_join_using_side_inputs( name_list, email_list, phone_list, output_path): """Joining PCollections using side inputs.""" import apache_beam as beam from apache_beam.pvalue import AsIter with TestPipeline() as p: # Use TestPipeline for testing. # [START model_join_using_side_inputs] # This code performs a join by receiving the set of names as an input and # passing PCollections that contain emails and phone numbers as side inputs # instead of using CoGroupByKey. 
names = p | 'names' >> beam.Create(name_list) emails = p | 'email' >> beam.Create(email_list) phones = p | 'phone' >> beam.Create(phone_list) def join_info(name, emails, phone_numbers): filtered_emails = [] for name_in_list, email in emails: if name_in_list == name: filtered_emails.append(email) filtered_phone_numbers = [] for name_in_list, phone_number in phone_numbers: if name_in_list == name: filtered_phone_numbers.append(phone_number) return '; '.join(['%s' % name, '%s' % ','.join(filtered_emails), '%s' % ','.join(filtered_phone_numbers)]) contact_lines = names | 'CreateContacts' >> beam.core.Map( join_info, AsIter(emails), AsIter(phones)) # [END model_join_using_side_inputs] contact_lines | beam.io.WriteToText(output_path) # [START model_library_transforms_keys] class Keys(beam.PTransform): def expand(self, pcoll): return pcoll | 'Keys' >> beam.Map(lambda k_v: k_v[0]) # [END model_library_transforms_keys] # pylint: enable=invalid-name # [START model_library_transforms_count] class Count(beam.PTransform): def expand(self, pcoll): return ( pcoll | 'PairWithOne' >> beam.Map(lambda v: (v, 1)) | beam.CombinePerKey(sum)) # [END model_library_transforms_count]
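# A minimal usage sketch, not part of the original snippets.py: how the Keys and
# Count library transforms defined above could be applied. The pipeline and the
# example key/value pairs are invented for illustration; both classes are used
# like any other composite PTransform.
def model_library_transforms_usage_sketch():
  import apache_beam as beam
  with TestPipeline() as p:  # Use TestPipeline for testing, as elsewhere in this file.
    pairs = p | beam.Create([('a', 1), ('b', 2), ('a', 3)])
    keys = pairs | Keys()      # -> 'a', 'b', 'a'
    counts = keys | Count()    # -> ('a', 2), ('b', 1)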
jbonofre/beam
sdks/python/apache_beam/examples/snippets/snippets.py
Python
apache-2.0
43,029
[ "VisIt" ]
88f7234e758d1a68b644292fa8dcb9ce2c2538b54ca1dca5a7581ef96f210ca3
########################################################################
# File: FTSFileTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/16 06:45:16
########################################################################
""" :mod: FTSFileTests
    ==================

    .. module: FTSFileTests
    :synopsis: unittests for FTSFile

    .. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com

    unittests for FTSFile
"""
# #
# @file FTSFileTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/16 06:45:31
# @brief Definition of FTSFileTests class.

# # imports
import unittest
# # SUT
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile

########################################################################
class FTSFileTests( unittest.TestCase ):
  """
  .. class:: FTSFileTests
  """

  def setUp( self ):
    """ test set up """
    self.fromDict = { "FileID": 123456,
                      "OperationID": 123,
                      "LFN": "/a/b/c",
                      "ChecksumType": "ADLER32",
                      "Checksum": "aaabbbccc",
                      "Size": 10,
                      "SourceSE": "CERN-DST",
                      "TargetSE": "RAL-DST" }

  def tearDown( self ):
    """ test tear down """
    del self.fromDict

  def test01Ctor( self ):
    """ test ctor and (de-)serialization """
    ftsFile = FTSFile( self.fromDict )
    self.assertEqual( isinstance( ftsFile, FTSFile ), True )
    for k, v in self.fromDict.items():
      self.assertEqual( getattr( ftsFile, k ), v )

    json = ftsFile.toJSON()
    ftsFileJSON = FTSFile( json["Value"] )
    self.assertEqual( isinstance( ftsFileJSON, FTSFile ), True )
    for k, v in self.fromDict.items():
      self.assertEqual( getattr( ftsFileJSON, k ), v )

# # test execution
if __name__ == "__main__":
  testLoader = unittest.TestLoader()
  suite = testLoader.loadTestsFromTestCase( FTSFileTests )
  suite = unittest.TestSuite( [ suite ] )
  unittest.TextTestRunner( verbosity = 3 ).run( suite )
andresailer/DIRAC
DataManagementSystem/Client/test/Test_FTSFile.py
Python
gpl-3.0
2,034
[ "DIRAC" ]
8ac18c79264f45a3085a1013565242d83d9c4f29e050dc841de78727f12fc327
# HsolveInstability.py --- # Commentary: # # A toy compartmental neuronal + chemical model in just a cubic volume # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, Fifth # Floor, Boston, MA 02110-1301, USA. # Code: import sys sys.path.append('../../python') import os os.environ['NUMPTHREADS'] = '1' import math import numpy import pylab import moose EREST_ACT = -70e-3 # Gate equations have the form: # # y(x) = (A + B * x) / (C + exp((x + D) / F)) # # where x is membrane voltage and y is the rate constant for gate # closing or opening Na_m_params = [1e5 * (25e-3 + EREST_ACT), # 'A_A': -1e5, # 'A_B': -1.0, # 'A_C': -25e-3 - EREST_ACT, # 'A_D': -10e-3, # 'A_F': 4e3, # 'B_A': 0.0, # 'B_B': 0.0, # 'B_C': 0.0 - EREST_ACT, # 'B_D': 18e-3 # 'B_F': ] Na_h_params = [ 70.0, # 'A_A': 0.0, # 'A_B': 0.0, # 'A_C': 0.0 - EREST_ACT, # 'A_D': 0.02, # 'A_F': 1000.0, # 'B_A': 0.0, # 'B_B': 1.0, # 'B_C': -30e-3 - EREST_ACT, # 'B_D': -0.01 # 'B_F': ] K_n_params = [ 1e4 * (10e-3 + EREST_ACT), # 'A_A': -1e4, # 'A_B': -1.0, # 'A_C': -10e-3 - EREST_ACT, # 'A_D': -10e-3, # 'A_F': 0.125e3, # 'B_A': 0.0, # 'B_B': 0.0, # 'B_C': 0.0 - EREST_ACT, # 'B_D': 80e-3 # 'B_F': ] VMIN = -30e-3 + EREST_ACT VMAX = 120e-3 + EREST_ACT VDIVS = 3000 def createSquid(): """Create a single compartment squid model.""" parent = moose.Neutral ('/n' ) elec = moose.Neutral ('/n/elec' ) compt = moose.SymCompartment( '/n/elec/compt' ) Em = EREST_ACT + 10.613e-3 compt.Em = Em compt.initVm = EREST_ACT compt.Cm = 7.85e-9 * 0.5 compt.Rm = 4.2e5 * 5.0 compt.Ra = 7639.44e3 compt.length = 100e-6 compt.diameter = 4e-6 nachan = moose.HHChannel( '/n/elec/compt/Na' ) nachan.Xpower = 3 xGate = moose.HHGate(nachan.path + '/gateX') xGate.setupAlpha(Na_m_params + [VDIVS, VMIN, VMAX]) xGate.useInterpolation = 1 nachan.Ypower = 1 yGate = moose.HHGate(nachan.path + '/gateY') yGate.setupAlpha(Na_h_params + [VDIVS, VMIN, VMAX]) yGate.useInterpolation = 1 nachan.Gbar = 0.942e-3 nachan.Ek = 115e-3+EREST_ACT moose.connect(nachan, 'channel', compt, 'channel', 'OneToOne') kchan = moose.HHChannel( '/n/elec/compt/K' ) kchan.Xpower = 4.0 xGate = moose.HHGate(kchan.path + '/gateX') xGate.setupAlpha(K_n_params + [VDIVS, VMIN, VMAX]) xGate.useInterpolation = 1 kchan.Gbar = 0.2836e-3 kchan.Ek = -12e-3+EREST_ACT moose.connect(kchan, 'channel', compt, 'channel', 'OneToOne') return compt def createSynapseOnCompartment( compt ): FaradayConst = 96485.3415 # s A / mol length = compt.length dia = compt.diameter gluR = moose.SynChan( compt.path + '/gluR' ) gluR.tau1 = 4e-3 gluR.tau2 = 4e-3 gluR.Gbar = 1e-6 gluR.Ek= 10.0e-3 moose.connect( compt, 'channel', gluR, 'channel', 'Single' ) gluSyn = moose.SimpleSynHandler( compt.path + '/gluR/sh' ) moose.connect( gluSyn, 'activationOut', gluR, 'activation' ) gluSyn.synapse.num = 1 # Ca comes in through this channel, at least for this example. 
caPool = moose.CaConc( compt.path + '/ca' ) caPool.CaBasal = 1e-4 # 0.1 micromolar caPool.tau = 0.01 B = 1.0 / ( FaradayConst * length * dia * dia * math.pi / 4) B = B / 20.0 # scaling factor for Ca buffering caPool.B = B moose.connect( gluR, 'IkOut', caPool, 'current', 'Single' ) # Provide a regular synaptic input. synInput = moose.SpikeGen( '/n/elec/compt/synInput' ) synInput.refractT = 47e-3 synInput.threshold = -1.0 synInput.edgeTriggered = 0 synInput.Vm( 0 ) syn = moose.element( gluSyn.path + '/synapse' ) moose.connect( synInput, 'spikeOut', syn, 'addSpike', 'Single' ) syn.weight = 0.2 syn.delay = 1.0e-3 return gluR def createPool( compt, name, concInit ): pool = moose.Pool( compt.path + '/' + name ) pool.concInit = concInit pool.diffConst = 5e-11 return pool # This is a Ca-activated enzyme that phosphorylates and inactivates kChan # as per the following scheme: # Ca + inact_kinase <===> Ca.kinase # kChan ----- Ca.kinase -----> kChan_p # kChan_p -------> kChan def createChemModel( neuroCompt ): dendCa = createPool( neuroCompt, 'Ca', 1e-4 ) dendKinaseInact = createPool( neuroCompt, 'inact_kinase', 1e-4 ) dendKinase = createPool( neuroCompt, 'Ca.kinase', 0.0 ) dendTurnOnKinase = moose.Reac( neuroCompt.path + '/turnOnKinase' ) moose.connect( dendTurnOnKinase, 'sub', dendCa, 'reac' ) moose.connect( dendTurnOnKinase, 'sub', dendKinaseInact, 'reac' ) moose.connect( dendTurnOnKinase, 'prd', dendKinase, 'reac' ) dendTurnOnKinase.Kf = 50000 dendTurnOnKinase.Kb = 1 dendKinaseEnz = moose.Enz( dendKinase.path + '/enz' ) dendKinaseEnzCplx = moose.Pool( dendKinase.path + '/enz/cplx' ) kChan = createPool( neuroCompt, 'kChan', 1e-3 ) kChan_p = createPool( neuroCompt, 'kChan_p', 0.0 ) moose.connect( dendKinaseEnz, 'enz', dendKinase, 'reac', 'OneToOne' ) moose.connect( dendKinaseEnz, 'sub', kChan, 'reac', 'OneToOne' ) moose.connect( dendKinaseEnz, 'prd', kChan_p, 'reac', 'OneToOne' ) moose.connect( dendKinaseEnz, 'cplx', dendKinaseEnzCplx, 'reac', 'OneToOne' ) dendKinaseEnz.Km = 1e-4 dendKinaseEnz.kcat = 20 dendPhosphatase = moose.Reac( neuroCompt.path + '/phosphatase' ) moose.connect( dendPhosphatase, 'sub', kChan_p, 'reac' ) moose.connect( dendPhosphatase, 'prd', kChan, 'reac' ) dendPhosphatase.Kf = 1 dendPhosphatase.Kb = 0.0 def makeModelInCubeMesh(): compt = createSquid() createSynapseOnCompartment( compt ) chem = moose.Neutral( '/n/chem' ) neuroMesh = moose.CubeMesh( '/n/chem/neuroMesh' ) coords = [0] * 9 coords[3] = compt.length coords[4] = compt.diameter coords[5] = compt.diameter coords[6] = compt.length coords[7] = compt.diameter coords[8] = compt.diameter neuroMesh.coords = coords neuroMesh.preserveNumEntries = 1 createChemModel( neuroMesh ) dendCa = moose.element( '/n/chem/neuroMesh/Ca' ) assert dendCa.volume == compt.length * compt.diameter * compt.diameter dendKinaseEnzCplx = moose.element( '/n/chem/neuroMesh/Ca.kinase/enz/cplx' ) assert dendKinaseEnzCplx.volume == dendCa.volume # Make adaptors # Note that we can do this two ways: We can use an existing output # msg from the object, which will come whenever the object processes, # or the adapator can request the object for the field, which happens # whenever the adaptor processes. Here we illustrate both alternatives. 
adaptK = moose.Adaptor( '/n/chem/neuroMesh/adaptK' ) chemK = moose.element( '/n/chem/neuroMesh/kChan' ) elecK = moose.element( '/n/elec/compt/K' ) moose.connect( adaptK, 'requestOut', chemK, 'getConc', 'OneToAll' ) moose.connect( adaptK, 'output', elecK, 'setGbar', 'OneToAll' ) adaptK.scale = 0.3 # from mM to Siemens adaptCa = moose.Adaptor( '/n/chem/neuroMesh/adaptCa' ) chemCa = moose.element( '/n/chem/neuroMesh/Ca' ) elecCa = moose.element( '/n/elec/compt/ca' ) moose.connect( elecCa, 'concOut', adaptCa, 'input', 'OneToAll' ) moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToAll' ) adaptCa.outputOffset = 0.0001 # 100 nM offset in chem conc adaptCa.scale = 0.05 # Empirical: 0.06 max to 0.003 mM def addPlot( objpath, field, plot ): assert moose.exists( objpath ) tab = moose.Table( '/graphs/' + plot ) obj = moose.element( objpath ) moose.connect( tab, 'requestOut', obj, field ) return tab def displayPlots(): for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ): t = numpy.arange( 0, x.vector.size, 1 ) * x.dt pylab.plot( t, x.vector, label=x.name ) pylab.legend() pylab.show() def makeElecPlots(): graphs = moose.Neutral( '/graphs' ) elec = moose.Neutral( '/graphs/elec' ) addPlot( '/n/elec/compt', 'getVm', 'elec/Vm' ) addPlot( '/n/elec/compt/ca', 'getCa', 'elec/Ca' ) addPlot( '/n/elec/compt/K', 'getGk', 'elec/K_Gk' ) def makeChemPlots(): graphs = moose.Neutral( '/graphs' ) addPlot( '/n/chem/neuroMesh/Ca', 'getConc', 'chemCa' ) addPlot( '/n/chem/neuroMesh/kChan_p', 'getConc', 'chemkChan_p' ) addPlot( '/n/chem/neuroMesh/kChan', 'getConc', 'chemkChan' ) addPlot( '/n/chem/neuroMesh/Ca.kinase', 'getConc', 'activeKinase' ) def testCubeMultiscale( useSolver ): elecDt = 10e-6 chemDt = 1e-4 plotDt = 5e-4 plotName = 'cm.plot' if ( useSolver ): elecDt = 50e-6 chemDt = 2e-3 plotName = 'solve_cm.plot' makeModelInCubeMesh() makeChemPlots() makeElecPlots() ''' moose.setClock( 0, elecDt ) moose.setClock( 1, elecDt ) moose.setClock( 2, elecDt ) moose.setClock( 5, chemDt ) moose.setClock( 6, chemDt ) moose.setClock( 7, plotDt ) moose.setClock( 8, plotDt ) moose.useClock( 1, '/n/##[ISA=SpikeGen]', 'process' ) moose.useClock( 2, '/n/##[ISA=SynBase]','process') moose.useClock( 6, '/n/##[ISA=Adaptor]', 'process' ) moose.useClock( 7, '/graphs/#', 'process' ) moose.useClock( 8, '/graphs/elec/#', 'process' ) moose.useClock( 0, '/n/##[ISA=Compartment]', 'init' ) moose.useClock( 1, '/n/##[ISA=Compartment]', 'process' ) moose.useClock( 2, '/n/##[ISA=ChanBase],/n/##[ISA=SynBase],/n/##[ISA=CaConc]','process') moose.useClock( 5, '/n/##[ISA=PoolBase],/n/##[ISA=ReacBase],/n/##[ISA=EnzBase]', 'process' ) ''' if ( useSolver ): ksolve = moose.Ksolve( '/n/ksolve' ) stoich = moose.Stoich( '/n/stoich' ) stoich.compartment = moose.element( '/n/chem/neuroMesh' ) stoich.ksolve = ksolve stoich.path = '/n/##' ksolve.method = 'rk5' #moose.useClock( 5, '/n/ksolve', 'process' ) hsolve = moose.HSolve( '/n/hsolve' ) #moose.useClock( 1, '/n/hsolve', 'process' ) hsolve.dt = elecDt hsolve.target = '/n/compt' moose.reinit() moose.start( 1 ) displayPlots() def main(): testCubeMultiscale( 1 ) # change argument to 0 to run without solver. if __name__ == '__main__': main() # cubeMeshSigNeur.py ends here
dilawar/moose-full
moose-examples/snippets/cubeMeshSigNeur.py
Python
gpl-2.0
11,878
[ "MOOSE" ]
c7de3684afe6793d8792648dcead33533570d8b834937215ebd430b87a0c585e
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html

import os
import sys
import numpy as np
import matplotlib.pyplot as plt

def expected(fn, mi, ma):
    f = open(fn, "r")
    data = sorted([map(float, line.strip().split(",")[mi:ma]) for line in f.readlines()[1:]])
    data = [d for d in data if d[0] >= d[1]]
    return zip(*data)

def moose(fn, mi, ma):
    f = open(fn, "r")
    data = [map(float, line.strip().split(",")[mi:ma]) for line in f.readlines()[3:-1]]
    return zip(*data)

plt.figure()

e15 = expected("gold/expected_small_deform_15.csv", 4, 6)
e16 = expected("gold/expected_small_deform_16.csv", 4, 6)
e17 = expected("gold/expected_small_deform_17.csv", 3, 5)
m15 = moose("gold/small_deform15.csv", 3, 5)
m16 = moose("gold/small_deform16.csv", 3, 5)
m17 = moose("gold/small_deform17.csv", 2, 4)

plt.plot(e15[0], e15[1], 'k-', linewidth = 3.0, label = 'expected (Smax=0)')
plt.plot(m15[0], m15[1], 'ks', label = 'MOOSE (Smax=0)')
plt.plot(e16[0], e16[1], 'r-', linewidth = 3.0, label = 'expected (Smax = Smid)')
plt.plot(m16[0], m16[1], 'r^', label = 'MOOSE (Smax approx Smid)')
plt.plot(e17[0], e17[1], 'b-', linewidth = 3.0, label = 'expected (Smid = Smin)')
plt.plot(m17[0], m17[1], 'bo', label = 'MOOSE (Smid = Smin)')
plt.xlim([-1,0])
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
plt.legend(loc = 'lower left')
plt.xlabel("Smid or Smax")
plt.ylabel("Smin")
plt.title("Compressive yield surface")
plt.savefig("figures/small_deform_15_16_17.eps")
sys.exit(0)
nuclear-wizard/moose
modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_15_16_17.py
Python
lgpl-2.1
1,765
[ "MOOSE" ]
0874ad2cd7c93b0262363f15ed3f9ec07f600845f6ad0803f120ea8ec5cfb5b1
import logging import numpy as np import mdtraj as mdj from wepy.reporter.reporter import ProgressiveFileReporter from wepy.util.mdtraj import json_to_mdtraj_topology, mdtraj_to_json_topology from wepy.util.json_top import json_top_subset class WExploreAtomImageReporter(ProgressiveFileReporter): """Reporter for generating 3D molecular structures from WExplore region images. This will only be meaningful for WExplore simulations where the region images are actually 3D coordinates. """ FILE_ORDER = ("init_state_path", "image_path") SUGGESTED_EXTENSIONS = ("image_top.pdb", "wexplore_images.dcd") def __init__(self, init_image=None, image_atom_idxs=None, json_topology=None, **kwargs): """Constructor for the WExploreAtomImageReporter. Parameters ---------- init_image : numpy.array, optional The initial region image. Used for generating the topology as well. If not given will be eventually generated. (Default = None) image_atom_idxs : list of int The indices of the atoms that are part of the topology subset that comprises the image. json_topology : str JSON format topology for the whole system. A subset of the atoms will be taken using the image_atom_idxs. """ super().__init__(**kwargs) assert json_topology is not None, "must give a JSON format topology" assert image_atom_idxs is not None, \ "must give the indices of the atoms for the subset of the topology that is the image" self.image_atom_idxs = image_atom_idxs self.json_main_rep_top = json_top_subset(json_topology, self.image_atom_idxs) self.init_image = None self._top_pdb_written = False self.image_traj_positions = [] # if an initial image was given use it, otherwise just don't # worry about it, the reason for this is that there is no # interface for getting image indices from distance metrics as # of now. if init_image is not None: self.init_image = init_image self.image_traj_positions.append(self.init_image) # and times self.times = [0] def init(self, **kwargs): super().init(**kwargs) if self.init_image is not None: image_mdj_topology = json_to_mdtraj_topology(self.json_main_rep_top) # initialize the initial image into the image traj init_image_traj = mdj.Trajectory([self.init_image], time=self.times, topology=image_mdj_topology) # save this as a PDB for a topology to view in VMD etc. to go # along with the trajectory we will make logging.info("Writing initial image to {}".format(self.init_state_path)) init_image_traj.save_pdb(self.init_state_path) self._top_pdb_written = True def report(self, cycle_idx=None, resampler_data=None, **kwargs): # load the json topology as an mdtraj one image_mdj_topology = json_to_mdtraj_topology(self.json_main_rep_top) # collect the new images defined new_images = [] for resampler_rec in resampler_data: image = resampler_rec['image'] new_images.append(image) times = np.array([cycle_idx + 1 for _ in range(len(new_images))]) # combine the new image positions and times with the old self.image_traj_positions.extend(new_images) self.times.extend(times) # only save if we have an image yet if len(self.image_traj_positions) > 0: # make a trajectory of the new images, using the cycle_idx as the time new_image_traj = mdj.Trajectory(self.image_traj_positions, time=self.times, topology=image_mdj_topology) # if we haven't already written a topology PDB write it now if not self._top_pdb_written: new_image_traj[0].save_pdb(self.init_state_path) self._top_pdb_written = True # then the images to the trajectory file new_image_traj.save_dcd(self.image_path)
ADicksonLab/wepy
src/wepy/reporter/wexplore/image.py
Python
mit
4,448
[ "MDTraj", "VMD" ]
9bb34f13731337cf65dac5e26eac77cc390291e967cc03d942e2f1bb0a1a4d46
#This is a program that handles the actual image processing and #answer detection. We are going to build mobile apps based on this #program and hopefully we will be able to add more functionality to it. import cv2 import numpy as np def rectify(h): h = h.reshape((4,2)) hnew = np.zeros((4,2),dtype = np.float32) add = h.sum(1) hnew[0] = h[np.argmin(add)] hnew[2] = h[np.argmax(add)] diff = np.diff(h,axis = 1) hnew[1] = h[np.argmin(diff)] hnew[3] = h[np.argmax(diff)] return hnew def check_include(centre_list, x_centre, y_centre): for point in centre_list: x_difference = point[0] - x_centre y_difference = point[1] - y_centre if abs(x_difference) < 10 and abs(y_difference) < 10: return False return True def find_centre(cnts): # x_axis is a list, store all the x_axis data of one contour # y_axis is a list, store all the y_axis data of same contour # cnts[0] is a list of point, which is one rectangle centre_list = [] for cnt in cnts: x_axis = [] y_axis = [] for point in cnt: x_axis.append(point[0][0]) y_axis.append(point[0][1]) # print cnts[0][0][0][0] x_axis = sorted(x_axis) y_axis = sorted(y_axis) x_centre = int((x_axis[0] + x_axis[-1]) / 2) y_centre = int((y_axis[0] + y_axis[-1]) / 2) # print "The smallest x coordinate is",x_axis[0] # print "The smallest y coordinate is",y_axis[0] # print "The biggest x coordinate is",x_axis[-1] # print "The biggest y coordinate is",y_axis[-1] # print "The centre of this rectangle is (%d,%d)" %(x_centre, y_centre) if (check_include(centre_list, x_centre, y_centre)): centre_list.append((x_centre, y_centre)) # print "The centre of this rectangle is (%d,%d)" %(x_centre, y_centre) return centre_list def process_centre_list(centre_list): # this function loop want to put same rows of answer area into same list. # And use a list to hold all of rows. So it is a 2D list. # the centre_list is in the order of y-axis from small to large. # In this particular case, every row has three question and each question has 4 rectangles. # In each line, the y-axis is almost same, so we can calculate the difference between different # y-axis to determine whether the two rectangle is in same line. # current_total_delta is total difference of y-axis in one row. # current_total_delta_copy tries to store the old data in for loop. # current_average_number is number of rectangles we calculate current_total_delta = 0 current_total_delta_copy = 0 current_average_number = 1 # current_average_delta = current_total_delta/current_average_number # current_average_delta_copy tries to store the old data. 
current_average_delta = 0 current_average_delta_copy = 0 # row_list is a list of column_list # column_list is a list of point of every line of answer area row_list = [] column_list = [] for i in range(len(centre_list) - 1): delta_y1 = (centre_list[i + 1][1] - centre_list[i][1]) # print delta_y1 current_total_delta_copy = current_total_delta current_total_delta += delta_y1 current_average_delta = 1.0 * current_total_delta / current_average_number current_average_number += 1 if current_average_delta > current_average_delta_copy * 3 and current_average_delta_copy != 0: # print "this is average number ",current_average_number # print "This is current_average_delta " , current_average_delta # print "This is current_average_delta_copy " , current_average_delta_copy current_total_delta = current_total_delta_copy # restore total delta from copy column_list.append(centre_list[i]) row_list.append(column_list) column_list = [] current_total_delta = 0 current_total_delta_copy = 0 current_average_number = 1 continue column_list.append(centre_list[i]) current_average_delta_copy = current_average_delta return row_list # This function want to find the answer student choose. # centre_list: list. Hold all the coordinate of centre of rectangle. # thresh1: image object. The image after threshold. def find_answer(centre_list, thresh1): # the point is the centre of rectangle. # We choose a 80*80 square, to detect whether there is black pixel in this square. for point in centre_list: px = 0 x_start, x_end = point[0] - 40, point[0] + 40 y_start, y_end = point[1] - 40, point[1] + 40 for x in range(x_start, x_end): for y in range(y_start, y_end): px += thresh1[y, x] # print "this is pixel " , px # 1532000 is a threshold. The value under the 1532000 means student has handwriting # in this region. if px < 1532000: cv2.circle(thresh1, (x - 40, y - 40), 40, (0, 0, 0)) # this function want to find the answer rectangle which are not found by findContours # function def find_missing_rectangle(centre_list, centre_list_col, x_uncertainty, y_uncertainty): row_list = [] total_list = [] # print centre_list_col base = centre_list_col[0][1] # use column point as the base y_max = base + y_uncertainty # add base and y_uncertainty for i in range(len(centre_list_col)): if centre_list_col[i][1] < y_max: row_list.append(centre_list_col[i]) else: # in this case, we end up one line, and change to another line # so I set a new base. y_max = centre_list_col[i][1] + y_uncertainty total_list.append(row_list) row_list = [] # renew the row_list # add the first element of next line into new row_list row_list.append(centre_list_col[i]) # add final row list into total list. total_list.append(row_list) # ============================================================ # for test # ============================================================ # sum = 0 # for i in range(len(total_list)): # # pass # print sorted(total_list[i]) # print "length is ", len(total_list[i]) # sum += len(total_list[i]) # print("\n") # # print "\n" # # print(total_list) # print sum # ============================================================ # end test # ============================================================ # to get the max_length of a row of question. 
# and then get a base_list of row_list max_length = len(total_list[0]) base_list = [] for row_list in total_list: if len(row_list) > max_length: max_length = len(row_list) base_list = row_list # print "length of half rectangle is ", x_uncertainty total_list_copy = [] # sort base list base_list = sorted(base_list) for row_list in total_list: # print "this is row_list" , row_list # print '\n' row_list = sorted(row_list) if len(row_list) == max_length: total_list_copy.append(row_list) continue for i in range(max_length): try: base = base_list[i][0] - x_uncertainty if row_list[i][0] > base: x_axis = base_list[i][0] y_axis = row_list[0][1] row_list.insert(i, (x_axis, y_axis)) centre_list.append((x_axis, y_axis)) print "length of row list is ", len(row_list) if len(row_list) == max_length: total_list_copy.append(row_list) break except: x_axis = base_list[i][0] y_axis = row_list[0][1] row_list.insert(i, (x_axis, y_axis)) centre_list.append((x_axis, y_axis)) if len(row_list) == max_length: total_list_copy.append(row_list) break return total_list_copy # answer_list is a list. It contains x elements, x is rows of the answer sheet. It is also list # every row_list contains also list which are centre points of rectangle. def find_answer2(answer_list,number_of_choice,thresh1,pixel=40, number_of_question=40): column = len(answer_list[0])/number_of_choice assert(column == 3) answer = "" number_of_question = 0 for i in range(column): for j in range(len(answer_list)): boundary = 1532000 number_of_answer = 0 while(True): # print boundary # print number_of_answer # print "i j k" , i ,j for k in range(i*4,i*4+number_of_choice): point = answer_list[j][k] px = 0 x_start, x_end = point[0] - pixel, point[0] + pixel y_start, y_end = point[1] - pixel, point[1] + pixel for x in range(x_start, x_end): for y in range(y_start, y_end): px += thresh1[y, x] # print "this is pixel " , px # 1532000 is a threshold. The value under the 1532000 means student has handwriting # in this region. # print px if px < boundary: cv2.circle(thresh1, (x - pixel, y - pixel), 40, (0, 0, 0)) number_of_answer += 1 choice = str(k) if number_of_answer == 1: number_of_question += 1 answer += choice break if number_of_question==40: break if number_of_answer == 0: boundary = boundary * (1.01) number_of_answer = 0 else: boundary = boundary / 1.01 number_of_answer = 0 if number_of_question==40: break return answer if __name__ == '__main__': image = cv2.imread("sheet.jpg") # ratio = 1000.0 / image.shape[1] # # new dimension for image # dim = (1000, int(image.shape[0] * ratio)) # # perform the actual resizing of the image and show it # # interpolation = cv2.INTER_AREA this is the algorithm we used. Do worry now # image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA) ratio = image.shape[0] / 500.0 #orig = image.copy() res = cv2.resize(image,None,fx=0.4, fy=0.4, interpolation = cv2.INTER_LANCZOS4) # res = cv2.resize(image, dst, interpolation=CV_INTER_LINEAR) # convert image to grayscale gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY) # blur the image slightly to remove noise. #gray = cv2.bilateralFilter(gray, 11, 17, 17) gray = cv2.GaussianBlur(gray, (5, 5), 0) #is an alternative way to blur the image # canny edge detection edged = cv2.Canny(gray, 30, 200) # two threshold method. # The first one is normal threshold method # The second one is use Gaussian method which has better effect. 
# ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY) # thresh1= cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2) cv2.imshow("Outline", res) (_, cnts, _) =cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) #Now we're only trying to get the largest contour so we only keep the first 10 elements cnts = sorted(cnts, key = cv2.contourArea,reverse=True)[:10] for c in cnts: # approximate the contour peri = cv2.arcLength(c, True) approx = cv2.approxPolyDP(c, 0.005* peri, True) #break when we find the first rectangle if len(approx) == 4: screenCnt = approx break #draw out the contour cv2.drawContours(res, [screenCnt], -1, (0, 255, 0), 2) cv2.imshow("Contours",res) #warped = four_point_transform(res, screenCnt.reshape(4, 2) * ratio) lel = rectify(screenCnt) pts2 = np.float32([[0,0],[840,0],[840,1164],[0,1164]]) M = cv2.getPerspectiveTransform(lel,pts2) dst = cv2.warpPerspective(res,M,(840,1164)) crop_img = dst[440:945,130:700] #dst = cv2.resize(dst, (1050, 1455)) cv2.imshow("Warped",dst) #print len(screenCnt) # convert the warped image to grayscale, then threshold it # to give it that 'black and white' paper effect gray2=cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) cv2.imshow("Answer area",gray2) cv2.imshow("Answer area",crop_img) #reset the image to the answer area and redo the whole contour detecting process image = crop_img orig = image.copy() # convert image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # blur the image slightly to remove noise. gray = cv2.bilateralFilter(gray, 11, 17, 17) # gray = cv2.GaussianBlur(gray, (5, 5), 0) is an alternative way to blur the image # canny edge detection edged = cv2.Canny(gray, 30, 200) # two threshold method. # The first one is normal threshold method # The second one is use Gaussian method which has better effect. # ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY) thresh1 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2) # find contours in the edged image, keep only the largest ones, and initialize # our screen contour # findContours takes three parameter: # First parameter: the image we want to find counter. Need to copy since this method will # destroy the image. # Second parameter: cv2.RETR_TREE tells OpenCV to compute the hierarchy (relationship) # between contours # Third parameter: compress the contours to save space using cv2.CV_CHAIN_APPROX_SIMPLE try: (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) except: (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # the number of returned parameter is different depending on the version of openCV # for 2.x it is (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # for 3.x it is (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # sort the counter. The reference is the countourArea. Since we are trying to get all the boxes in #the answer area we keep 1000 elements in the list so we don't miss any possible boxes. cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1000] # a new list to store all the rectangle counter cnts_rect = [] # initialize the screenCnt. 
screenCnt = None # loop over our contours for c in cnts: # approximate the contour peri = cv2.arcLength(c, True) # This function gives the number of vertices of the figure # For example, approx returns 4 if the shape is rectangle and 5 if the shape is pentagon # k is constant, it can be changing from 0.005 to 0.1 # k = 0.005 k = 0.005 approx = cv2.approxPolyDP(c, k * peri, True) # if our approximated contour has four points, then # we can assume that we have found our screen if len(approx) == 4 and cv2.contourArea(c) > 15000: screenCnt = approx cnts_rect.append(approx) # print "this is coutour area ", cv2.contourArea(c) # the print is for test # print screenCnt[0][0] # to draw the contours in the original image. # print len(cnts_rect) cv2.drawContours(image, cnts_rect, -1, (0, 255, 0), 3) # to find height and length of the rectangle height = cnts_rect[0][2][0][1] - cnts_rect[0][0][0][1] length = cnts_rect[0][2][0][0] - cnts_rect[0][0][0][0] # x_axis is a list, store all the x_axis data of one contour # y_axis is a list, store all the y_axis data of same contour # cnts[0] is a list of point, which is one rectangle centre_list = find_centre(cnts_rect) # print len(centre_list) # print "this length of centre_list is ", len(centre_list) centre_list_col = sorted(centre_list, key=lambda point: point[1]) # answer_list is a list. It contains x elements, x is rows of the answer sheet. It is also list # every row_list contains also list which are centre points of rectangle. answer_list = find_missing_rectangle(centre_list, centre_list_col, length // 2, height // 2) # ============================================================ # for test print point in centre list # ============================================================ # print len(answer_list) # for list1 in answer_list: # print("the length of list1 is ", len(list1)) # for element in list1: # print element # print len(answer_list) # ============================================================ # end test # ============================================================ number_of_choice = 4 answer = find_answer2(answer_list,number_of_choice,thresh1,pixel=40,number_of_question=40) print answer # i = 0 # print len(centre_list_col) # for i in range(150): # print centre_list_col[i] centre_list = sorted(centre_list, key=lambda point: point[0]) # print "The number of centre point " , len(centre_list) # # for test. # i = 0 # print len(centre_list) # for i in range(138): # print centre_list[i] # cv2.circle(image,centre_list[i],20,(0,0,0)) # row_list = process_centre_list(centre_list) # find_answer(centre_list, thresh1) # cv2.imshow("Game Boy Screen", image) # cv2.imshow("gray image", thresh1) cv2.imwrite('contours.png', image) cv2.imwrite('thresh1.png',thresh1) # cv2.waitKey(15000) # apply the four point transform to obtain a top-down # view of the original image warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio) warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY) ret, thresh1 = cv2.threshold(warped, 80, 85, cv2.THRESH_BINARY) # cv2.imshow("Binary",thresh1 ) warped = warped.astype("uint8") * 255 cv2.waitKey(10000) cv2.imwrite('messigray.png', image)
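# A minimal check sketch, not part of the original script: rectify() called with
# made-up corner coordinates. Points supplied in arbitrary order come back ordered
# as top-left, top-right, bottom-right, bottom-left, which is the order the
# perspective transform above expects.
def _rectify_usage_sketch():
    pts = np.array([[[10, 100]], [[100, 10]], [[10, 10]], [[100, 100]]])
    corners = rectify(pts)
    # expected order: (10, 10), (100, 10), (100, 100), (10, 100)
    return corners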
HuimingCheng/AutoGrading
learning/edge_detection.py
Python
mit
18,708
[ "Gaussian" ]
9a519b639cfccfc0ab50423eecf202cb4159e1db9b17c8fc7f86a470d71a537a
#!/usr/bin/env python """Run a Gene Ontology Enrichment Analysis (GOEA), plots, etc. Nature 2014_0126; Computational analysis of cell-to-cell heterogeneity in single-cell RNA-sequencing data reveals hidden subpopulations of cells http://www.nature.com/nbt/journal/v33/n2/full/nbt.3102.html#methods ... revealed a significant enrichment in the set of 401 genes that were differentially expressed between the identified clusters (P = 0.001 Hypergeometric Test). Further, Gene Ontology (GO) enrichment analysis showed that the differentially expressed genes contained statistically significant enrichments of genes involved in: * GO:0006096 "glycolysis" or "glycolytic process" * cellular response to IL-4 stimulation NOW: BP GO:0071353 1.668e-03 D06 cellular response to interleukin-4 (5 genes) * BP GO:0070670: response to interleukin-4 * BP GO:0071353: cellular response to interleukin-4 * positive regulation of B-cell proliferation NOW: BP GO:0030890 2.706e-04 D09 positive regulation of B cell proliferation (7 genes) * 401 genes: Supplementary table 4 note: Total gene count is 400 genes, not 401: Rpl41 is listed twice http://www.nature.com/nbt/journal/v33/n2/extref/nbt.3102-S4.xlsx * GO enrichment results are in: Supplementary table 6 http://www.nature.com/nbt/journal/v33/n2/extref/nbt.3102-S6.xlsx """ import os import sys import xlrd import wget import pytest from collections import Counter, defaultdict, OrderedDict from goatools.test_data.genes_NCBI_10090_ProteinCoding import GeneID2nt as GeneID2nt_mus from goatools.test_data.nature3102_goea import get_geneid2symbol, get_goeaobj from goatools.obo_parser import GODag from goatools.go_enrichment import GOEnrichmentStudy, get_study_items from goatools.associations import get_assoc_ncbi_taxids from goatools.godag_plot import plot_gos, plot_results, plot_goid2goobj __copyright__ = "Copyright (C) 2016-2017, DV Klopfenstein, H Tang, All rights reserved." 
__author__ = "DV Klopfenstein" @pytest.mark.skip(reason="requires pydot - works in py2.7 but not py3.4 and 3.5") def test_example(log=sys.stdout): """Run Gene Ontology Enrichment Analysis (GOEA) on Nature data.""" # -------------------------------------------------------------------- # -------------------------------------------------------------------- # Gene Ontology Enrichment Analysis (GOEA) # -------------------------------------------------------------------- # -------------------------------------------------------------------- taxid = 10090 # Mouse study # Load ontologies, associations, and population ids geneids_pop = GeneID2nt_mus.keys() geneids_study = get_geneid2symbol("nbt.3102-S4_GeneIDs.xlsx") goeaobj = get_goeaobj("fdr_bh", geneids_pop, taxid) # Run GOEA on study #keep_if = lambda nt: getattr(nt, "p_fdr_bh" ) < 0.05 # keep if results are significant goea_results_all = goeaobj.run_study(geneids_study) goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.05] compare_results(goea_results_all) geneids = get_study_items(goea_results_sig) # Print GOEA results to files goeaobj.wr_xlsx("nbt3102.xlsx", goea_results_sig) goeaobj.wr_txt("nbt3102_sig.txt", goea_results_sig) goeaobj.wr_txt("nbt3102_all.txt", goea_results_all) # Plot all significant GO terms w/annotated study info (large plots) #plot_results("nbt3102_{NS}.png", goea_results_sig) #plot_results("nbt3102_{NS}_sym.png", goea_results_sig, study_items=5, items_p_line=2, id2symbol=geneids_study) # -------------------------------------------------------------------- # -------------------------------------------------------------------- # Further examination of GOEA results... # -------------------------------------------------------------------- # -------------------------------------------------------------------- obo = goeaobj.obo_dag dpi = 150 # For review: Figures can be saved in .jpg, .gif, .tif or .eps, at 150 dpi # -------------------------------------------------------------------- # Item 1) Words in GO names associated with large numbers of study genes # -------------------------------------------------------------------- # What GO term words are associated with the largest number of study genes? prt_word2genecnt("nbt3102_genecnt_GOword.txt", goea_results_sig, log) # Curated selection of GO words associated with large numbers of study genes freq_seen = ['RNA', 'translation', 'mitochondr', 'ribosom', # 'ribosomal', 'ribosome', 'adhesion', 'endoplasmic', 'nucleotide', 'apoptotic', 'myelin'] # Collect the GOs which contains the chosen frequently seen words word2NS2gos = get_word2NS2gos(freq_seen, goea_results_sig) go2res = {nt.GO:nt for nt in goea_results_sig} # Print words of interest, the sig GO terms which contain that word, and study genes. 
prt_word_GO_genes("nbt3102_GO_word_genes.txt", word2NS2gos, go2res, geneids_study, log) # Plot each set of GOs along w/study gene info for word, NS2gos in word2NS2gos.items(): for NS in ['BP', 'MF', 'CC']: if NS in NS2gos: gos = NS2gos[NS] goid2goobj = {go:go2res[go].goterm for go in gos} # dpi: 150 for review, 1200 for publication #dpis = [150, 1200] if word == "RNA" else [150] dpis = [150] for dpi in dpis: fmts = ['png', 'tif', 'eps'] if word == "RNA" else ['png'] for fmt in fmts: plot_goid2goobj( "nbt3102_{WORD}_{NS}_dpi{DPI}.{FMT}".format(WORD=word, NS=NS, DPI=dpi, FMT=fmt), goid2goobj, # source GOs and their GOTerm object items_p_line=3, study_items=6, # Max number of gene symbols to print in each GO term id2symbol=geneids_study, # Contains GeneID-to-Symbol goea_results=goea_results_all, # pvals used for GO Term coloring dpi=dpi) # -------------------------------------------------------------------- # Item 2) Explore findings of Nature paper: # # Gene Ontology (GO) enrichment analysis showed that the # differentially expressed genes contained statistically # significant enrichments of genes involved in # glycolysis, # cellular response to IL-4 stimulation and # positive regulation of B-cell proliferation # -------------------------------------------------------------------- goid_subset = [ 'GO:0006096', # BP 4.24e-12 10 glycolytic process 'GO:0071353', # BP 7.45e-06 5 cellular response to interleukin-4 'GO:0030890', # BP 8.22e-07 7 positive regulation of B cell proliferation ] plot_gos("nbt3102_GOs.png", goid_subset, obo, dpi=dpi) plot_gos("nbt3102_GOs_genecnt.png", goid_subset, obo, goea_results=goea_results_all, dpi=dpi) plot_gos("nbt3102_GOs_genelst.png", goid_subset, obo, study_items=True, goea_results=goea_results_all, dpi=dpi) plot_gos("nbt3102_GOs_symlst.png", goid_subset, obo, study_items=True, id2symbol=geneids_study, goea_results=goea_results_all, dpi=dpi) plot_gos("nbt3102_GOs_symlst_trunc.png", goid_subset, obo, study_items=5, id2symbol=geneids_study, goea_results=goea_results_all, dpi=dpi) plot_gos("nbt3102_GOs_GO0005743.png", ["GO:0005743"], obo, items_p_line=2, study_items=6, id2symbol=geneids_study, goea_results=goea_results_all, dpi=dpi) # -------------------------------------------------------------------- # Item 3) Create one GO sub-plot per significant GO term from study # -------------------------------------------------------------------- for rec in goea_results_sig: png = "nbt3102_{NS}_{GO}.png".format(GO=rec.GO.replace(':', '_'), NS=rec.NS) goid2goobj = {rec.GO:rec.goterm} plot_goid2goobj(png, goid2goobj, # source GOs and their GOTerm object study_items=15, # Max number of gene symbols to print in each GO term id2symbol=geneids_study, # Contains GeneID-to-Symbol goea_results=goea_results_all, # pvals used for GO Term coloring dpi=dpi) # -------------------------------------------------------------------- # Item 4) Explore using manually curated lists of GO terms # -------------------------------------------------------------------- goid_subset = [ 'GO:0030529', # CC D03 intracellular ribonucleoprotein complex (42 genes) 'GO:0015934', # CC D05 large ribosomal subunit (4 genes) 'GO:0015935', # CC D05 small ribosomal subunit (13 genes) 'GO:0022625', # CC D06 cytosolic large ribosomal subunit (16 genes) 'GO:0022627', # CC D06 cytosolic small ribosomal subunit (19 genes) 'GO:0036464', # CC D06 cytoplasmic ribonucleoprotein granule (4 genes) 'GO:0005840', # CC D05 ribosome (35 genes) 'GO:0005844', # CC D04 polysome (6 genes) ] plot_gos("nbt3102_CC_ribosome.png", 
goid_subset, obo, study_items=6, id2symbol=geneids_study, items_p_line=3, goea_results=goea_results_sig, dpi=dpi) goid_subset = [ 'GO:0003723', # MF D04 RNA binding (32 genes) 'GO:0044822', # MF D05 poly(A) RNA binding (86 genes) 'GO:0003729', # MF D06 mRNA binding (11 genes) 'GO:0019843', # MF D05 rRNA binding (6 genes) 'GO:0003746', # MF D06 translation elongation factor activity (5 genes) ] plot_gos("nbt3102_MF_RNA_genecnt.png", goid_subset, obo, goea_results=goea_results_all, dpi=150) for dpi in [150, 1200]: # 150 for review, 1200 for publication plot_gos("nbt3102_MF_RNA_dpi{DPI}.png".format(DPI=dpi), goid_subset, obo, study_items=6, id2symbol=geneids_study, items_p_line=3, goea_results=goea_results_all, dpi=dpi) # -------------------------------------------------------------------- # Item 5) Are any significant geneids related to cell cycle? # -------------------------------------------------------------------- import test_genes_cell_cycle as CC genes_cell_cycle = CC.get_genes_cell_cycle(taxid, log=log) genes_cell_cycle_sig = genes_cell_cycle.intersection(geneids) CC.prt_genes("nbt3102_cell_cycle.txt", genes_cell_cycle_sig, taxid, log=None) def compare_results(goea_results_sig): """Compare GOATOOLS to results from Nature paper.""" act_goids = [rec.GO for rec in goea_results_sig] exp_goids = set(paper_top20()) overlap = set(act_goids).intersection(exp_goids) fout_txt = "nbt3102_compare.txt" with open(fout_txt, 'w') as prt: prt.write("{N} GO terms overlapped with {M} top20 GO terms in Nature paper\n".format( N = len(overlap), M = len(exp_goids))) idx = 1 gos = set() for rec in goea_results_sig: if rec.GO in exp_goids: gos.add(rec.GO) sig = '*' if rec.p_fdr_bh < 0.05 else ' ' prt.write("{I:>2} {NS} {SIG} {GO} D{D:>02} {ALPHA:5.2e} {NAME}({N} genes)\n".format( I=idx, NS=rec.NS, D=rec.goterm.depth, GO=rec.GO, NAME=rec.name, ALPHA=rec.p_fdr_bh, SIG=sig, N=rec.study_count)) idx += 1 nogo = exp_goids.difference(gos) prt.write("NOT LISTED: {GO}\n".format(GO=", ".join(nogo))) def prt_word2genecnt(fout, goea_results_sig, log): """Get words in GO term names and the number of study genes associated with GO words.""" word2genes = defaultdict(set) for rec in goea_results_sig: study_items = rec.study_items for word in rec.name.split(): word2genes[word] |= study_items word2genecnt = Counter({w:len(gs) for w, gs in word2genes.items()}) with open(fout, "w") as wordstrm: for word, cnt in word2genecnt.most_common(): wordstrm.write("{CNT:>3} {WORD}\n".format(CNT=cnt, WORD=word)) log.write(" WROTE: {F}\n".format(F=fout)) def get_word2NS2gos(words, goea_results_sig): """Get all GO terms which contain a word in 'words'.""" word2NS2gos = defaultdict(lambda: defaultdict(set)) sig_GOs = set([rec.GO for rec in goea_results_sig]) for word in words: for rec in goea_results_sig: NS = rec.NS if word in rec.name: word2NS2gos[word][NS].add(rec.GO) # Get significant children under term with word # (Try it, but don't include for paper for more concise plots.) #_get_word2NS2childgos(word2NS2gos[word][NS], rec, sig_GOs) return OrderedDict([(w, word2NS2gos[w]) for w in words]) def _get_word2NS2childgos(gos, rec, sig_GOs): """If a GO term contains a word of interest, also collect sig. 
child terms.""" children = rec.goterm.get_all_children() for goid_child in children.intersection(sig_GOs): gos.add(goid_child) def prt_word_GO_genes(fout, word2NS2gos, go2res, geneids_study, log): """Print words in GO names that have large numbers of study genes.""" with open(fout, "w") as prt: prt.write("""This file is generated by test_nbt3102.py and is intended to confirm this statement in the GOATOOLS manuscript: We observed: N genes associated with RNA, """) for word, NS2gos in word2NS2gos.items(): for NS in ['BP', 'MF', 'CC']: if NS in NS2gos: gos = sorted(NS2gos[NS]) # Sort first by BP, MF, CC. Sort second by GO id. #####gos = sorted(gos, key=lambda go: [go2res[go].NS, go]) genes = set() for go in gos: genes |= go2res[go].study_items genes = sorted([geneids_study[g] for g in genes]) prt.write("\n{WD}: {N} study genes, {M} GOs\n".format(WD=word, N=len(genes), M=len(gos))) prt.write("{WD} GOs: {GOs}\n".format(WD=word, GOs=", ".join(gos))) for i, go in enumerate(gos): res = go2res[go] prt.write("{I}) {NS} {GO} {NAME} ({N} genes)\n".format( I=i, NS=res.NS, GO=go, NAME=res.name, N=res.study_count)) prt.write("{N} study genes:\n".format(N=len(genes))) N = 10 # 10 genes per line mult = [genes[i:i+N] for i in range(0, len(genes), N)] prt.write(" {}\n".format("\n ".join([", ".join(str(g) for g in sl) for sl in mult]))) log.write(" WROTE: {F}\n".format(F=fout)) def paper_top20(): """Return top 20 GO terms in Nature paper found using R's topGO with alogrithm=elim. Supplemental Table 6 description from supplemental information: --------------------------------------------------------------- GO enrichment of differentially expressed genes between the sub-populations of cells revealed by non-linear PCA on the scLVM corrected expression levels. The R-package topGO was used with the "elim" algorithm and the top 20 terms are shown. http://www.nature.com/nbt/journal/v33/n2/extref/nbt.3102-S1.pdf GOEA: How topGO tests and GOATOOLS test methods differ: ------------------------------------------------------- The test statistics supported by topGO when using the topGO "elim" algorithm are [fisher, ks, t, globaltest, sum]. When using the "elim" algorithm, topGO does not automatically do multiple-test correction. The documentation for topGO says: https://www.bioconductor.org/packages/3.3/bioc/vignettes/topGO/inst/doc/topGO.pdf For the methods that account for the GO topology like elim and weight, the problem of multiple testing is even more complicated. Here one computes the p-value of a GO term conditioned on the neighbouring terms. The tests are therefore not independent and the multiple testing theory does not directly apply. We like to interpret the p-values returned by these methods as corrected or not affected by multiple testing. For the GOATOOLS publication, we use the following: 1. GOATOOLS version 0.6.4 2. "Fisher's exact test" statistical analysis method 2a. "Benjamini/Hochberg" for multiple-test correction 3a. Ontologies from go-basic.obo version 1.2 release 2016-04-16 3b. Annotations from ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz modified 4/17/16 4. The population is 28,212 protein-coding genes for mouse 4a. 18,396 of the 28,212 population genes contain GO annotations 5. The study size is the 400 genes in supplemental table 4 (Rpl41 is listed twice in the Nature paper). 5a. 
372 of the 400 study genes contain GO annotations """ return [ # GO ID idx Term Annotated Sig Exp result1 # --------- --- ----------------------------- --------- --- ---- ------- "GO:0006412", # 1 translation 403 45 7.88 9.5e-12 "GO:0006414", # 2 translational elongation 44 11 0.86 5.3e-10 "GO:0000028", # 3 ribosomal small subunit assembly 9 6 0.18 4.0e-09 "GO:0006096", # 4 glycolysis 55 11 1.08 6.8e-09 "GO:0071353", # 5 cellular response to interleukin-4 20 6 0.39 1.5e-06 "GO:0030890", # 6 positive regulation of B cell proliferat... 37 7 0.72 6.0e-06 "GO:0006172", # 7 ADP biosynthetic process 8 4 0.16 9.1e-06 "GO:0051099", # 8 positive regulation of binding 85 9 1.66 3.9e-05 "GO:0008637", # 9 apoptotic mitochondrial changes 66 8 1.29 3.9e-05 "GO:0051129", # 10 negative regulation of cellular componen... 308 21 6.02 0.00011 "GO:0002474", # 11 antigen processing and presentation of p... 19 6 0.37 0.00012 "GO:0046835", # 12 carbohydrate phosphorylation 14 4 0.27 0.00012 "GO:0042273", # 13 ribosomal large subunit biogenesis 14 4 0.27 0.00012 "GO:0043066", # 14 negative regulation of apoptotic process 584 31 11.42 0.00016 "GO:0043029", # 15 T cell homeostasis 28 5 0.55 0.00018 "GO:0015986", # 16 ATP synthesis coupled proton transport 16 4 0.31 0.00021 "GO:0042274", # 17 ribosomal small subunit biogenesis 20 10 0.39 0.00022 "GO:0030388", # 18 fructose 1,6-bisphosphate metabolic proc... 7 3 0.14 0.00024 "GO:1901385", # 19 regulation of voltage-gated calcium chan... 7 3 0.14 0.00024 "GO:0042102", # 20 positive regulation of T cell proliferat... 66 7 1.29 0.00028 ] if __name__ == '__main__': test_example() # Copyright (C) 2016-2017, DV Klopfenstein, H Tang, All rights reserved.
lileiting/goatools
tests/test_nbt3102.py
Python
bsd-2-clause
19,531
[ "Bioconductor" ]
08a258c01b82f264f487f1583f8d2e28bb9a74bf0ad7e97440708e6b0449b49c
#!/usr/bin/env python
# Copyright 2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#

'''
Mole class for MM particles
'''

import numpy
from pyscf import gto
from pyscf.data.elements import charge

class Mole(gto.Mole):
    '''Mole class for MM particles.

    Args:
        atoms : geometry of MM particles (unit Bohr).

            | [[atom1, (x, y, z)],
            |  [atom2, (x, y, z)],
            |  ...
            |  [atomN, (x, y, z)]]

    Kwargs:
        charges : fractional charges of MM particles
    '''
    def __init__(self, atoms, charges=None):
        gto.Mole.__init__(self)
        self.atom = self._atom = atoms
        self.unit = 'Bohr'

        # Initialize ._atm and ._env to save the coordinates and charges and
        # other info of MM particles
        natm = len(atoms)
        _atm = numpy.zeros((natm,6), dtype=numpy.int32)
        _atm[:,gto.CHARGE_OF] = [charge(a[0]) for a in atoms]
        coords = numpy.asarray([a[1] for a in atoms], dtype=numpy.double)
        if charges is None:
            _atm[:,gto.NUC_MOD_OF] = gto.NUC_POINT
            charges = _atm[:,gto.CHARGE_OF:gto.CHARGE_OF+1]
        else:
            _atm[:,gto.NUC_MOD_OF] = gto.NUC_FRAC_CHARGE
            charges = numpy.asarray(charges)[:,numpy.newaxis]
        self._env = numpy.append(numpy.zeros(gto.PTR_ENV_START),
                                 numpy.hstack((coords, charges)).ravel())
        _atm[:,gto.PTR_COORD] = gto.PTR_ENV_START + numpy.arange(natm) * 4
        _atm[:,gto.PTR_FRAC_CHARGE] = gto.PTR_ENV_START + numpy.arange(natm) * 4 + 3
        self._atm = _atm
        self._built = True

def create_mm_mol(atoms_or_coords, charges=None, unit='Angstrom'):
    '''Create an MM object based on the given coordinates and charges of MM
    particles.
    '''
    if isinstance(atoms_or_coords, numpy.ndarray):
        # atoms_or_coords == np.array([(xx, xx, xx)])
        # Patch ghost atoms
        atoms = [(0, c) for c in atoms_or_coords]
    elif (isinstance(atoms_or_coords, (list, tuple)) and
          atoms_or_coords and
          isinstance(atoms_or_coords[0][1], (int, float))):
        # atoms_or_coords == [(xx, xx, xx)]
        # Patch ghost atoms
        atoms = [(0, c) for c in atoms_or_coords]
    else:
        atoms = atoms_or_coords
    atoms = gto.format_atom(atoms, unit=unit)
    return Mole(atoms, charges)
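# A minimal usage sketch, not part of the original mm_mole.py: calling the
# create_mm_mol() helper above with invented coordinates and fractional charges.
# Passing a bare numpy array exercises the ghost-atom patching branch; the default
# unit is Angstrom and the stored coordinates come back in Bohr.
def _create_mm_mol_usage_sketch():
    coords = numpy.array([[0.0, 0.0, 0.0],
                          [1.5, 0.0, 0.0]])
    mm_mol = create_mm_mol(coords, charges=[-0.4, 0.4])
    return mm_mol.atom_coords()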
gkc1000/pyscf
pyscf/qmmm/mm_mole.py
Python
apache-2.0
2,953
[ "PySCF" ]
e9b98614bad5d95142477b98176343c694ae4a5a56966f505ba45d9d9c7d9fe6
import logging
import random
import numpy as np

from ray.rllib.agents import with_common_config
from ray.rllib.agents.dreamer.dreamer_torch_policy import DreamerTorchPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \
    LEARNER_INFO, _get_shared_metrics
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.agents.dreamer.dreamer_model import DreamerModel
from ray.rllib.execution.rollout_ops import ParallelRollouts
from ray.rllib.utils.typing import SampleBatchType

logger = logging.getLogger(__name__)

# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # PlaNET Model LR
    "td_model_lr": 6e-4,
    # Actor LR
    "actor_lr": 8e-5,
    # Critic LR
    "critic_lr": 8e-5,
    # Grad Clipping
    "grad_clip": 100.0,
    # Discount
    "discount": 0.99,
    # Lambda
    "lambda": 0.95,
    # Training iterations per data collection from real env
    "dreamer_train_iters": 100,
    # Horizon for Environment (1000 for Mujoco/DMC)
    "horizon": 1000,
    # Number of episodes to sample for Loss Calculation
    "batch_size": 50,
    # Length of each episode to sample for Loss Calculation
    "batch_length": 50,
    # Imagination Horizon for Training Actor and Critic
    "imagine_horizon": 15,
    # Free Nats
    "free_nats": 3.0,
    # KL Coeff for the Model Loss
    "kl_coeff": 1.0,
    # Distributed Dreamer not implemented yet
    "num_workers": 0,
    # Prefill Timesteps
    "prefill_timesteps": 5000,
    # This should be kept at 1 to preserve sample efficiency
    "num_envs_per_worker": 1,
    # Exploration Gaussian
    "explore_noise": 0.3,
    # Batch mode
    "batch_mode": "complete_episodes",
    # Custom Model
    "dreamer_model": {
        "custom_model": DreamerModel,
        # RSSM/PlaNET parameters
        "deter_size": 200,
        "stoch_size": 30,
        # CNN Decoder Encoder
        "depth_size": 32,
        # General Network Parameters
        "hidden_size": 400,
        # Action STD
        "action_init_std": 5.0,
    },

    "env_config": {
        # Repeats action sent by policy for frame_skip times in env
        "frame_skip": 2,
    }
})
# __sphinx_doc_end__
# yapf: enable


class EpisodicBuffer(object):
    def __init__(self, max_length: int = 1000, length: int = 50):
        """Data structure that stores episodes and samples chunks
        of size length from episodes

        Args:
            max_length: Maximum episodes it can store
            length: Episode chunking length in sample()
        """

        # Stores all episodes into a list: List[SampleBatchType]
        self.episodes = []
        self.max_length = max_length
        self.timesteps = 0
        self.length = length

    def add(self, batch: SampleBatchType):
        """Splits a SampleBatch into episodes and adds episodes
        to the episode buffer

        Args:
            batch: SampleBatch to be added
        """
        self.timesteps += batch.count
        episodes = batch.split_by_episode()

        for i, e in enumerate(episodes):
            episodes[i] = self.preprocess_episode(e)
        self.episodes.extend(episodes)

        if len(self.episodes) > self.max_length:
            delta = len(self.episodes) - self.max_length
            # Drop oldest episodes
            self.episodes = self.episodes[delta:]

    def preprocess_episode(self, episode: SampleBatchType):
        """Batch format should be in the form of (s_t, a_(t-1), r_(t-1))
        When t=0, the reset obs is paired with action and reward of 0.

        Args:
            episode: SampleBatch representing an episode
        """
        obs = episode["obs"]
        new_obs = episode["new_obs"]
        action = episode["actions"]
        reward = episode["rewards"]

        act_shape = action.shape
        act_reset = np.array([0.0] * act_shape[-1])[None]
        rew_reset = np.array(0.0)[None]
        obs_end = np.array(new_obs[act_shape[0] - 1])[None]

        batch_obs = np.concatenate([obs, obs_end], axis=0)
        batch_action = np.concatenate([act_reset, action], axis=0)
        batch_rew = np.concatenate([rew_reset, reward], axis=0)

        new_batch = {
            "obs": batch_obs,
            "rewards": batch_rew,
            "actions": batch_action
        }
        return SampleBatch(new_batch)

    def sample(self, batch_size: int):
        """Samples [batch_size, length] from the list of episodes

        Args:
            batch_size: batch_size to be sampled
        """
        episodes_buffer = []
        while len(episodes_buffer) < batch_size:
            rand_index = random.randint(0, len(self.episodes) - 1)
            episode = self.episodes[rand_index]
            if episode.count < self.length:
                continue
            available = episode.count - self.length
            index = int(random.randint(0, available))
            episodes_buffer.append(episode.slice(index, index + self.length))

        batch = {}
        for k in episodes_buffer[0].keys():
            batch[k] = np.stack([e[k] for e in episodes_buffer], axis=0)

        return SampleBatch(batch)


def total_sampled_timesteps(worker):
    return worker.policy_map[DEFAULT_POLICY_ID].global_timestep


class DreamerIteration:
    def __init__(self, worker, episode_buffer, dreamer_train_iters, batch_size,
                 act_repeat):
        self.worker = worker
        self.episode_buffer = episode_buffer
        self.dreamer_train_iters = dreamer_train_iters
        self.repeat = act_repeat
        self.batch_size = batch_size

    def __call__(self, samples):
        # Dreamer Training Loop
        for n in range(self.dreamer_train_iters):
            print(n)
            batch = self.episode_buffer.sample(self.batch_size)
            if n == self.dreamer_train_iters - 1:
                batch["log_gif"] = True
            fetches = self.worker.learn_on_batch(batch)

        # Custom Logging
        policy_fetches = self.policy_stats(fetches)
        if "log_gif" in policy_fetches:
            gif = policy_fetches["log_gif"]
            policy_fetches["log_gif"] = self.postprocess_gif(gif)

        # Metrics Calculation
        metrics = _get_shared_metrics()
        metrics.info[LEARNER_INFO] = fetches
        metrics.counters[STEPS_SAMPLED_COUNTER] = self.episode_buffer.timesteps
        metrics.counters[STEPS_SAMPLED_COUNTER] *= self.repeat
        res = collect_metrics(local_worker=self.worker)
        res["info"] = metrics.info
        res["info"].update(metrics.counters)
        res["timesteps_total"] = metrics.counters[STEPS_SAMPLED_COUNTER]

        self.episode_buffer.add(samples)
        return res

    def postprocess_gif(self, gif: np.ndarray):
        gif = np.clip(255 * gif, 0, 255).astype(np.uint8)
        B, T, C, H, W = gif.shape
        frames = gif.transpose((1, 2, 3, 0, 4)).reshape((1, T, C, H, B * W))
        return frames

    def policy_stats(self, fetches):
        return fetches["default_policy"]["learner_stats"]


def execution_plan(workers, config):
    # Special Replay Buffer for Dreamer agent
    episode_buffer = EpisodicBuffer(length=config["batch_length"])

    local_worker = workers.local_worker()

    # Prefill episode buffer with initial exploration (uniform sampling)
    while total_sampled_timesteps(local_worker) < config["prefill_timesteps"]:
        samples = local_worker.sample()
        episode_buffer.add(samples)

    batch_size = config["batch_size"]
    dreamer_train_iters = config["dreamer_train_iters"]
    act_repeat = config["action_repeat"]

    rollouts = ParallelRollouts(workers)
    rollouts = rollouts.for_each(
        DreamerIteration(local_worker, episode_buffer, dreamer_train_iters,
                         batch_size, act_repeat))
    return rollouts


def get_policy_class(config):
    return DreamerTorchPolicy


def validate_config(config):
    config["action_repeat"] = config["env_config"]["frame_skip"]
    if config["framework"] != "torch":
        raise ValueError("Dreamer not supported in Tensorflow yet!")
    if config["batch_mode"] != "complete_episodes":
        raise ValueError("truncate_episodes not supported")
    if config["num_workers"] != 0:
        raise ValueError("Distributed Dreamer not supported yet!")
    if config["clip_actions"]:
        raise ValueError("Clipping is done inherently via policy tanh!")
    if config["action_repeat"] > 1:
        config["horizon"] = config["horizon"] / config["action_repeat"]


DREAMERTrainer = build_trainer(
    name="Dreamer",
    default_config=DEFAULT_CONFIG,
    default_policy=DreamerTorchPolicy,
    get_policy_class=get_policy_class,
    execution_plan=execution_plan,
    validate_config=validate_config)
robertnishihara/ray
rllib/agents/dreamer/dreamer.py
Python
apache-2.0
8,884
[ "Gaussian" ]
c940d69f6a3fafb5d47189697ad18184a74a9e77c710232efae0cc4fb718c5d5
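EpisodicBuffer.preprocess_episode above shifts actions and rewards by one step so each stored observation s_t sits next to a_(t-1) and r_(t-1), padding t=0 with zeros. A standalone numpy sketch of that alignment, using tiny made-up arrays instead of RLlib SampleBatches:

    import numpy as np

    # A made-up 3-step episode: 1-dim observations, 2-dim actions.
    obs = np.array([[0.0], [1.0], [2.0]])       # s_0, s_1, s_2
    new_obs = np.array([[1.0], [2.0], [3.0]])   # s_1, s_2, s_3
    actions = np.array([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]])
    rewards = np.array([1.0, 2.0, 3.0])

    # Prepend a zero action/reward for t=0 and append the final observation,
    # mirroring the buffer's (s_t, a_(t-1), r_(t-1)) layout.
    act_reset = np.zeros((1, actions.shape[-1]))
    rew_reset = np.zeros(1)
    obs_end = new_obs[len(actions) - 1][None]

    batch_obs = np.concatenate([obs, obs_end], axis=0)
    batch_actions = np.concatenate([act_reset, actions], axis=0)
    batch_rewards = np.concatenate([rew_reset, rewards], axis=0)

    print(batch_obs.shape, batch_actions.shape, batch_rewards.shape)  # (4, 1) (4, 2) (4,)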
#!/usr/bin/python
# -*- coding: utf-8 -*-

#
# (c) 2015, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to jabber user or chat room
description:
   - Send a message to jabber
options:
  user:
    description:
      - User as which to connect
    required: true
  password:
    description:
      - password for user to connect
    required: true
  to:
    description:
      - user ID or name of the room, when using room use a slash to indicate your nick.
    required: true
  msg:
    description:
      - The message body.
    required: true
    default: null
  host:
    description:
      - host to connect, overrides user info
    required: false
  port:
    description:
      - port to connect to, overrides default
    required: false
    default: 5222
  encoding:
    description:
      - message encoding
    required: false

# informational: requirements for nodes
requirements:
    - python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''

EXAMPLES = '''
# send a message to a user
- jabber:
    user: mybot@example.net
    password: secret
    to: friend@example.net
    msg: Ansible task finished

# send a message to a room
- jabber:
    user: mybot@example.net
    password: secret
    to: mychaps@conference.example.net/ansiblebot
    msg: Ansible task finished

# send a message, specifying the host and port
- jabber:
    user: mybot@example.net
    host: talk.example.net
    port: 5223
    password: secret
    to: mychaps@example.net
    msg: Ansible task finished
'''

import time
import traceback

HAS_XMPP = True
try:
    import xmpp
except ImportError:
    HAS_XMPP = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native


def main():
    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True),
            password=dict(required=True, no_log=True),
            to=dict(required=True),
            msg=dict(required=True),
            host=dict(required=False),
            port=dict(required=False, default=5222),
            encoding=dict(required=False),
        ),
        supports_check_mode=True
    )

    if not HAS_XMPP:
        module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")

    jid = xmpp.JID(module.params['user'])
    user = jid.getNode()
    server = jid.getDomain()
    port = module.params['port']
    password = module.params['password']
    try:
        to, nick = module.params['to'].split('/', 1)
    except ValueError:
        to, nick = module.params['to'], None

    if module.params['host']:
        host = module.params['host']
    else:
        host = server
    if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']

    msg = xmpp.protocol.Message(body=module.params['msg'])

    try:
        conn = xmpp.Client(server, debug=[])
        if not conn.connect(server=(host, port)):
            module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
        if not conn.auth(user, password, 'Ansible'):
            module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
        # some old servers require this, also the sleep following send
        conn.sendInitPresence(requestRoster=0)

        if nick:  # sending to room instead of user, need to join
            msg.setType('groupchat')
            msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
            conn.send(xmpp.Presence(to=module.params['to']))
            time.sleep(1)
        else:
            msg.setType('chat')

        msg.setTo(to)
        if not module.check_mode:
            conn.send(msg)
            time.sleep(1)
        conn.disconnect()
    except Exception as e:
        module.fail_json(msg="unable to send msg: %s" % to_native(e),
                         exception=traceback.format_exc())

    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())


if __name__ == '__main__':
    main()
ravibhure/ansible
lib/ansible/modules/notification/jabber.py
Python
gpl-3.0
4,343
[ "Brian" ]
f6a92254c79ffbe39deb691e2372c37ebe15800d83924f3780b5dbcc9db4fb72
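The core send path in the module above reduces to a handful of xmpppy calls (connect, auth, initial presence, send). A minimal sketch of that flow outside Ansible, assuming xmpppy is installed; all credentials and addresses are placeholders:

    import time
    import xmpp

    def send_jabber_message(jid_str, password, to, body, port=5222):
        """Connect, authenticate, and send a single chat message (sketch)."""
        jid = xmpp.JID(jid_str)
        conn = xmpp.Client(jid.getDomain(), debug=[])
        if not conn.connect(server=(jid.getDomain(), port)):
            raise RuntimeError('could not connect to %s' % jid.getDomain())
        if not conn.auth(jid.getNode(), password, 'example'):
            raise RuntimeError('authentication failed for %s' % jid_str)
        conn.sendInitPresence(requestRoster=0)
        msg = xmpp.protocol.Message(body=body)
        msg.setType('chat')
        msg.setTo(to)
        conn.send(msg)
        time.sleep(1)   # give the server a moment before disconnecting
        conn.disconnect()

    # send_jabber_message('mybot@example.net', 'secret', 'friend@example.net', 'hello')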
# Embedded file name: /usr/lib/enigma2/python/Components/Converter/pliExpertInfo.py from enigma import iServiceInformation, eServiceCenter, iPlayableService, iPlayableServicePtr from Components.Converter.Converter import Converter from Components.Element import cached from Components.config import config from Tools.Transponder import ConvertToHumanReadable from Tools.GetEcmInfo import GetEcmInfo from Poll import Poll class pliExpertInfo(Poll, Converter, object): SMART_LABEL = 0 SMART_INFO_H = 1 SMART_INFO_V = 2 SERVICE_INFO = 3 CRYPTO_INFO = 4 FREQUENCY_INFO = 5 def __init__(self, type): Converter.__init__(self, type) Poll.__init__(self) self.type = {'ShowMe': self.SMART_LABEL, 'ExpertInfo': self.SMART_INFO_H, 'ExpertInfoVertical': self.SMART_INFO_V, 'ServiceInfo': self.SERVICE_INFO, 'CryptoInfo': self.CRYPTO_INFO, 'FrequencyInfo': self.FREQUENCY_INFO}[type] self.poll_interval = 1000 self.poll_enabled = True self.idnames = (('0x100', '0x1FF', 'Seca', 'S'), ('0x500', '0x5FF', 'Via', 'V'), ('0x600', '0x6FF', 'Irdeto', 'I'), ('0x900', '0x9FF', 'NDS', 'Nd'), ('0xB00', '0xBFF', 'Conax', 'Co'), ('0xD00', '0xDFF', 'CryptoW', 'Cw'), ('0x1700', '0x17FF', 'Beta', 'B'), ('0x1800', '0x18FF', 'Nagra', 'N')) self.ecmdata = GetEcmInfo() @cached def getText(self): service = self.source.service try: info = service and service.info() except: try: info = eServiceCenter.getInstance().info(service) except: pass if not info: return '' else: Ret_Text = '' Sec_Text = '' Res_Text = '' showCryptoInfo = False if self.type == self.SMART_INFO_H or self.type == self.SERVICE_INFO or self.type == self.CRYPTO_INFO or self.type == self.FREQUENCY_INFO: sep = ' ' sep2 = ' - ' elif self.type == self.SMART_INFO_V: sep = '\n' sep2 = '\n' else: return '' if self.type == self.FREQUENCY_INFO: try: feinfo = service and service.frontendInfo() prvd = info.getInfoString(iServiceInformation.sProvider) Ret_Text = self.short(prvd) frontendDataOrg = feinfo and feinfo.getAll(True) except: try: frontendDataOrg = info.getInfoObject(service, iServiceInformation.sTransponderData) prvd = info.getInfoString(service, iServiceInformation.sProvider) except: pass if frontendDataOrg is not None: frontendData = ConvertToHumanReadable(frontendDataOrg) if frontendDataOrg.get('tuner_type') == 'DVB-S' or frontendDataOrg.get('tuner_type') == 'DVB-C': frequency = str(frontendData.get('frequency') / 1000) + ' MHz' symbolrate = str(frontendData.get('symbol_rate') / 1000) fec_inner = frontendData.get('fec_inner') if frontendDataOrg.get('tuner_type') == 'DVB-S': Ret_Text += sep + frontendData.get('system') orbital_pos = int(frontendDataOrg['orbital_position']) if orbital_pos > 1800: if orbital_pos == 3590: orb_pos = 'Thor/Intelsat' elif orbital_pos == 3560: orb_pos = 'Amos (4' elif orbital_pos == 3550: orb_pos = 'Atlantic Bird' elif orbital_pos == 3530: orb_pos = 'Nilesat/Atlantic Bird' elif orbital_pos == 3520: orb_pos = 'Atlantic Bird' elif orbital_pos == 3475: orb_pos = 'Atlantic Bird' elif orbital_pos == 3460: orb_pos = 'Express' elif orbital_pos == 3450: orb_pos = 'Telstar' elif orbital_pos == 3420: orb_pos = 'Intelsat' elif orbital_pos == 3380: orb_pos = 'Nss' elif orbital_pos == 3355: orb_pos = 'Intelsat' elif orbital_pos == 3325: orb_pos = 'Intelsat' elif orbital_pos == 3300: orb_pos = 'Hispasat' elif orbital_pos == 3285: orb_pos = 'Intelsat' elif orbital_pos == 3170: orb_pos = 'Intelsat' elif orbital_pos == 3150: orb_pos = 'Intelsat' elif orbital_pos == 3070: orb_pos = 'Intelsat' elif orbital_pos == 3045: orb_pos = 'Intelsat' elif orbital_pos == 3020: 
orb_pos = 'Intelsat 9' elif orbital_pos == 2990: orb_pos = 'Amazonas' elif orbital_pos == 2900: orb_pos = 'Star One' elif orbital_pos == 2880: orb_pos = 'AMC 6 (72' elif orbital_pos == 2875: orb_pos = 'Echostar 6' elif orbital_pos == 2860: orb_pos = 'Horizons' elif orbital_pos == 2810: orb_pos = 'AMC5' elif orbital_pos == 2780: orb_pos = 'NIMIQ 4' elif orbital_pos == 2690: orb_pos = 'NIMIQ 1' elif orbital_pos == 3592: orb_pos = 'Thor/Intelsat' elif orbital_pos == 2985: orb_pos = 'Echostar 3,12' elif orbital_pos == 2830: orb_pos = 'Echostar 8' elif orbital_pos == 2630: orb_pos = 'Galaxy 19' elif orbital_pos == 2500: orb_pos = 'Echostar 10,11' elif orbital_pos == 2502: orb_pos = 'DirectTV 5' elif orbital_pos == 2410: orb_pos = 'Echostar 7 Anik F3' elif orbital_pos == 2391: orb_pos = 'Galaxy 23' elif orbital_pos == 2390: orb_pos = 'Echostar 9' elif orbital_pos == 2412: orb_pos = 'DirectTV 7S' elif orbital_pos == 2310: orb_pos = 'Galaxy 27' elif orbital_pos == 2311: orb_pos = 'Ciel 2' elif orbital_pos == 2120: orb_pos = 'Echostar 2' else: orb_pos = str(float(3600 - orbital_pos) / 10.0) + 'W' elif orbital_pos > 0: if orbital_pos == 192: orb_pos = 'Astra 1F' elif orbital_pos == 130: orb_pos = 'Hot Bird 6,7A,8' elif orbital_pos == 235: orb_pos = 'Astra 1E' elif orbital_pos == 1100: orb_pos = 'BSat 1A,2A' elif orbital_pos == 1101: orb_pos = 'N-Sat 110' elif orbital_pos == 1131: orb_pos = 'KoreaSat 5' elif orbital_pos == 1440: orb_pos = 'SuperBird 7,C2' elif orbital_pos == 1006: orb_pos = 'AsiaSat 2' elif orbital_pos == 1030: orb_pos = 'Express A2' elif orbital_pos == 1056: orb_pos = 'Asiasat 3S' elif orbital_pos == 1082: orb_pos = 'NSS 11' elif orbital_pos == 881: orb_pos = 'ST1' elif orbital_pos == 900: orb_pos = 'Yamal 201' elif orbital_pos == 917: orb_pos = 'Mesat' elif orbital_pos == 950: orb_pos = 'Insat 4B' elif orbital_pos == 951: orb_pos = 'NSS 6' elif orbital_pos == 765: orb_pos = 'Telestar' elif orbital_pos == 785: orb_pos = 'ThaiCom 5' elif orbital_pos == 800: orb_pos = 'Express' elif orbital_pos == 830: orb_pos = 'Insat 4A' elif orbital_pos == 850: orb_pos = 'Intelsat 709' elif orbital_pos == 750: orb_pos = 'Abs' elif orbital_pos == 720: orb_pos = 'Intelsat' elif orbital_pos == 705: orb_pos = 'Eutelsat W5' elif orbital_pos == 685: orb_pos = 'Intelsat' elif orbital_pos == 620: orb_pos = 'Intelsat 902' elif orbital_pos == 600: orb_pos = 'Intelsat 904' elif orbital_pos == 570: orb_pos = 'Nss' elif orbital_pos == 530: orb_pos = 'Express AM22' elif orbital_pos == 480: orb_pos = 'Eutelsat 2F2' elif orbital_pos == 450: orb_pos = 'Intelsat' elif orbital_pos == 420: orb_pos = 'Turksat 2A' elif orbital_pos == 400: orb_pos = 'Express AM1' elif orbital_pos == 390: orb_pos = 'Hellas Sat 2' elif orbital_pos == 380: orb_pos = 'Paksat 1' elif orbital_pos == 360: orb_pos = 'Eutelsat Sesat' elif orbital_pos == 335: orb_pos = 'Astra 1M' elif orbital_pos == 330: orb_pos = 'Eurobird 3' elif orbital_pos == 328: orb_pos = 'Galaxy 11' elif orbital_pos == 315: orb_pos = 'Astra 5A' elif orbital_pos == 310: orb_pos = 'Turksat' elif orbital_pos == 305: orb_pos = 'Arabsat' elif orbital_pos == 285: orb_pos = 'Eurobird 1' elif orbital_pos == 284: orb_pos = 'Eurobird/Astra' elif orbital_pos == 282: orb_pos = 'Eurobird/Astra' elif orbital_pos == 1220: orb_pos = 'AsiaSat' elif orbital_pos == 1380: orb_pos = 'Telstar 18' elif orbital_pos == 260: orb_pos = 'Badr 3/4' elif orbital_pos == 255: orb_pos = 'Eurobird 2' elif orbital_pos == 215: orb_pos = 'Eutelsat' elif orbital_pos == 216: orb_pos = 'Eutelsat W6' elif 
orbital_pos == 210: orb_pos = 'AfriStar 1' elif orbital_pos == 160: orb_pos = 'Eutelsat W2' elif orbital_pos == 100: orb_pos = 'Eutelsat W1' elif orbital_pos == 90: orb_pos = 'Eurobird 9' elif orbital_pos == 70: orb_pos = 'Eutelsat W3A' elif orbital_pos == 50: orb_pos = 'Sirius 4' elif orbital_pos == 48: orb_pos = 'Sirius 4' elif orbital_pos == 30: orb_pos = 'Telecom 2' else: orb_pos = str(float(orbital_pos) / 10.0) + 'E' Ret_Text += sep + orb_pos + '\n' Ret_Text += frequency + sep + frontendData.get('polarization_abbreviation') Ret_Text += sep + symbolrate Ret_Text += sep + frontendData.get('modulation') + '-' + fec_inner else: Ret_Text += sep + 'DVB-C ' + frequency + ' MHz' + sep + fec_inner + sep + symbolrate elif frontendData.get('tuner_type') == 'DVB-T': frequency = str(frontendData.get('frequency') / 1000) + ' MHz' Ret_Text = 'Frequency: ' + frequency if self.type == self.SMART_INFO_H or self.type == self.SMART_INFO_V or self.type == self.SERVICE_INFO: xresol = info.getInfo(iServiceInformation.sVideoWidth) yresol = info.getInfo(iServiceInformation.sVideoHeight) feinfo = service and service.frontendInfo() prvd = info.getInfoString(iServiceInformation.sProvider) Ret_Text = self.short(prvd) frontendDataOrg = feinfo and feinfo.getAll(True) if frontendDataOrg is not None: frontendData = ConvertToHumanReadable(frontendDataOrg) if frontendDataOrg.get('tuner_type') == 'DVB-S' or frontendDataOrg.get('tuner_type') == 'DVB-C': frequency = str(frontendData.get('frequency') / 1000) symbolrate = str(frontendData.get('symbol_rate') / 1000) fec_inner = frontendData.get('fec_inner') if frontendDataOrg.get('tuner_type') == 'DVB-S': Ret_Text += sep + frontendData.get('system') Ret_Text += sep + frequency + frontendData.get('polarization_abbreviation') Ret_Text += sep + symbolrate Ret_Text += sep + fec_inner + ' ' + frontendData.get('modulation') orbital_pos = int(frontendDataOrg['orbital_position']) if orbital_pos > 1800: orb_pos = str(float(3600 - orbital_pos) / 10.0) + 'W' elif orbital_pos > 0: orb_pos = str(float(orbital_pos) / 10.0) + 'E' Ret_Text += sep + orb_pos else: Ret_Text += sep + 'DVB-C ' + frequency + ' MHz' + sep + fec_inner + sep + symbolrate elif frontendDataOrg.get('tuner_type') == 'DVB-T': frequency = str(frontendData.get('frequency') / 1000) Ret_Text += sep + 'DVB-T' + sep + 'Frequency:' + sep + frequency + ' MHz' if feinfo is not None and xresol > 0: Res_Text += ('MPEG2 ', 'MPEG4 ', 'MPEG1 ', 'MPEG4-II ', 'VC1 ', 'VC1-SM ', '')[info.getInfo(iServiceInformation.sVideoType)] Res_Text += str(xresol) + 'x' + str(yresol) Res_Text += ('i', 'p', '')[info.getInfo(iServiceInformation.sProgressive)] Res_Text += str((info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000) if self.type == self.SMART_INFO_H or self.type == self.SMART_INFO_V or self.type == self.CRYPTO_INFO: decCI = '0' Sec_Text = '' if info.getInfo(iServiceInformation.sIsCrypted) == 1: data = self.ecmdata.getEcmData() if not config.usage.show_cryptoinfo.value: showCryptoInfo = True Sec_Text = data[0] + '\n' decCI = data[1] provid = data[2] pid = data[3] if decCI != '0': decCIfull = '%04x' % int(decCI, 16) for idline in self.idnames: if int(decCI, 16) >= int(idline[0], 16) and int(decCI, 16) <= int(idline[1], 16): decCIfull = idline[2] + ':' + decCIfull break Sec_Text += decCIfull if provid != '0': Sec_Text += ':%04x' % int(provid, 16) else: Sec_Text += ':' if pid != '0': Sec_Text += ':%04x:%04x' % (info.getInfo(iServiceInformation.sSID), int(pid, 16)) elif not config.usage.show_cryptoinfo.value: showCryptoInfo = True 
Sec_Text = 'FTA' res = '' searchIDs = info.getInfoObject(iServiceInformation.sCAIDs) for idline in self.idnames: if int(decCI, 16) >= int(idline[0], 16) and int(decCI, 16) <= int(idline[1], 16): color = '\\c0000??00' else: color = '\\c007?7?7?' try: for oneID in searchIDs: if oneID >= int(idline[0], 16) and oneID <= int(idline[1], 16): color = '\\c00????00' except: pass res += color + idline[3] + ' ' if self.type != self.CRYPTO_INFO: Ret_Text += '\n' Ret_Text += res + '\\c00?????? ' + Sec_Text if Res_Text != '': if showCryptoInfo: Ret_Text += sep + Res_Text else: Ret_Text += '\n' + Res_Text return Ret_Text text = property(getText) def changed(self, what): Converter.changed(self, what) def short(self, langTxt): if self.type == self.SMART_INFO_V and len(langTxt) > 23: retT = langTxt[:20] + '...' return retT else: return langTxt
kingvuplus/boom
lib/python/Components/Converter/pliExpertInfo.py
Python
gpl-2.0
21,895
[ "Galaxy" ]
d84c28fa116aac026b3c46944424f3c6dfc26faf08347c0410cab7c8a0157866
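The converter above encodes orbital positions in tenths of a degree (0-3600) and, when no named satellite matches, falls back to 3600 - pos for western longitudes. A small standalone sketch of that fallback conversion (the long named-satellite lookup is omitted):

    def orbital_position_to_string(orbital_pos):
        """Convert a 0-3600 tenth-of-degree orbital position to an E/W string."""
        if orbital_pos > 1800:
            return str(float(3600 - orbital_pos) / 10.0) + 'W'
        return str(float(orbital_pos) / 10.0) + 'E'

    print(orbital_position_to_string(192))    # 19.2E (Astra 1F in the table above)
    print(orbital_position_to_string(3530))   # 7.0W  (Nilesat/Atlantic Bird)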
""" ################################################################################ # # SOAPpy - Cayce Ullman (cayce@actzero.com) # Brian Matthews (blm@actzero.com) # Gregory Warnes (gregory_r_warnes@groton.pfizer.com) # Christopher Blunck (blunck@gst.com) # ################################################################################ # Copyright (c) 2003, Pfizer # Copyright (c) 2001, Cayce Ullman. # Copyright (c) 2001, Brian Matthews. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # Neither the name of actzero, inc. nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ################################################################################ """ ident = '$Id: Errors.py,v 1.4 2004/01/31 04:20:05 warnes Exp $' from version import __version__ import exceptions ################################################################################ # Exceptions ################################################################################ class Error(exceptions.Exception): def __init__(self, msg): self.msg = msg def __str__(self): return "<Error : %s>" % self.msg __repr__ = __str__ def __call__(self): return (msg,) class RecursionError(Error): pass class UnknownTypeError(Error): pass class HTTPError(Error): # indicates an HTTP protocol error def __init__(self, code, msg): self.code = code self.msg = msg def __str__(self): return "<HTTPError %s %s>" % (self.code, self.msg) __repr__ = __str__ def __call___(self): return (self.code, self.msg, ) class UnderflowError(exceptions.ArithmeticError): pass
intip/da-apps
plugins/da_centrallogin/modules/soappy/SOAPpy/Errors.py
Python
gpl-2.0
3,014
[ "Brian" ]
872ec02e6670b01b74172de0811e2727b0a1e40ff2e9338420babfb239a2459d
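A short sketch of how callers are meant to use the exception classes above; the fetch function is hypothetical and simply stands in for a transport layer that signals failures with HTTPError (assumed imported from the module above):

    def fetch(url):
        """Hypothetical transport call that raises HTTPError on failure."""
        raise HTTPError(404, 'Not Found')

    try:
        fetch('http://example.org/soap')
    except HTTPError as e:
        print(e)          # <HTTPError 404 Not Found>
        code, msg = e()   # __call__ returns (code, msg)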